1 // Copyright © Tavian Barnes <tavianator@tavianator.com>
2 // SPDX-License-Identifier: 0BSD
5 * The bftw() implementation consists of the following components:
7 * - struct bftw_file: A file that has been encountered during the traversal.
8 * They have reference-counted links to their parents in the directory tree.
10 * - struct bftw_list: A linked list of bftw_file's.
12 * - struct bftw_cache: An LRU list of bftw_file's with open file descriptors,
13 * used for openat() to minimize the amount of path re-traversals.
15 * - struct bftw_state: Represents the current state of the traversal, allowing
16 * various helper functions to take fewer parameters.
37 /** Caching bfs_stat(). */
// NOTE(review): lines are elided in this extract; the missing branch
// presumably returns cache->buf when already populated — confirm in full source.
38 static const struct bfs_stat *bftw_stat_impl(struct BFTW *ftwbuf, struct bftw_stat *cache, enum bfs_stat_flags flags) {
42 } else if (bfs_stat(ftwbuf->at_fd, ftwbuf->at_path, flags, &cache->storage) == 0) {
// stat() succeeded: point the cache at its own storage.
43 cache->buf = &cache->storage;
45 } else if (errno == ENOENT && ftwbuf->type == BFS_WHT) {
46 // This matches the behavior of FTS_WHITEOUT on BSD
// Whiteouts are invisible to stat(); synthesize an S_IFWHT buffer instead.
47 memset(&cache->storage, 0, sizeof(cache->storage));
48 cache->storage.mode = S_IFWHT;
49 cache->buf = &cache->storage;
59 const struct bfs_stat *bftw_stat(const struct BFTW *ftwbuf, enum bfs_stat_flags flags) {
// Cast away const: only the internal stat caches are mutated, not the
// caller-visible fields of the BFTW buffer.
60 struct BFTW *mutbuf = (struct BFTW *)ftwbuf;
61 const struct bfs_stat *ret;
63 if (flags & BFS_STAT_NOFOLLOW) {
64 ret = bftw_stat_impl(mutbuf, &mutbuf->lstat_cache, BFS_STAT_NOFOLLOW);
65 if (ret && !S_ISLNK(ret->mode) && !mutbuf->stat_cache.buf) {
66 // Non-link, so share stat info
67 mutbuf->stat_cache.buf = ret;
// Follow path: fall back to NOFOLLOW when the target doesn't exist and
// the caller asked for TRYFOLLOW semantics.
70 ret = bftw_stat_impl(mutbuf, &mutbuf->stat_cache, BFS_STAT_FOLLOW);
71 if (!ret && (flags & BFS_STAT_TRYFOLLOW) && is_nonexistence_error(errno)) {
72 ret = bftw_stat_impl(mutbuf, &mutbuf->lstat_cache, BFS_STAT_NOFOLLOW);
// Return already-cached stat info without issuing any new syscalls.
79 const struct bfs_stat *bftw_cached_stat(const struct BFTW *ftwbuf, enum bfs_stat_flags flags) {
80 if (flags & BFS_STAT_NOFOLLOW) {
81 return ftwbuf->lstat_cache.buf;
82 } else if (ftwbuf->stat_cache.buf) {
83 return ftwbuf->stat_cache.buf;
// TRYFOLLOW: if following previously failed with a nonexistence error,
// the lstat() cache stands in for the follow result.
84 } else if ((flags & BFS_STAT_TRYFOLLOW) && is_nonexistence_error(ftwbuf->stat_cache.error)) {
85 return ftwbuf->lstat_cache.buf;
// Determine the file's type under the requested follow semantics, using the
// cheap d_type/stat_flags information when possible and falling back to a
// full bftw_stat() only when necessary.
// NOTE(review): several branches/returns are elided in this extract.
91 enum bfs_type bftw_type(const struct BFTW *ftwbuf, enum bfs_stat_flags flags) {
92 if (flags & BFS_STAT_NOFOLLOW) {
93 if (ftwbuf->type == BFS_LNK || (ftwbuf->stat_flags & BFS_STAT_NOFOLLOW)) {
96 } else if (flags & BFS_STAT_TRYFOLLOW) {
97 if (ftwbuf->type != BFS_LNK || (ftwbuf->stat_flags & BFS_STAT_TRYFOLLOW)) {
101 if (ftwbuf->type != BFS_LNK) {
103 } else if (ftwbuf->stat_flags & BFS_STAT_TRYFOLLOW) {
// Slow path: do a real stat() and convert its mode to a bfs_type.
108 const struct bfs_stat *statbuf = bftw_stat(ftwbuf, flags);
110 return bfs_mode_to_type(statbuf->mode);
// Fields of struct bftw_file (declarations for several fields are elided in
// this extract; only the doc comments remain for some of them).
120 /** The parent directory, if any. */
121 struct bftw_file *parent;
122 /** The root under which this file was found. */
123 struct bftw_file *root;
125 /** The next file to open/close/visit. */
126 struct bftw_file *next;
127 /** The next directory to read. */
// Separate link so a file can sit on the to_read list independently of the
// main open/close/visit list.
128 struct { struct bftw_file *next; } to_read;
130 /** LRU list node. */
132 struct bftw_file *prev;
133 struct bftw_file *next;
136 /** This file's depth in the walk. */
138 /** Reference count (for ->parent). */
141 /** Pin count (for ->fd). */
143 /** An open descriptor to this file, or -1. */
145 /** Whether this file has a pending ioq request. */
147 /** An open directory for this file, if any. */
150 /** This file's type, if known. */
152 /** The device number, for cycle detection. */
154 /** The inode number, for cycle detection. */
157 /** The offset of this file in the full path. */
159 /** The length of the file's name. */
161 /** The file's name. */
166 * A linked list of bftw_file's.
// Singly-linked list with a tail pointer for O(1) append.
169 struct bftw_file *head;
170 struct bftw_file **tail;
174 * A cache of open directories.
177 /** The head of the LRU list. */
178 struct bftw_file *head;
179 /** The tail of the LRU list. */
180 struct bftw_file *tail;
181 /** The insertion target for the LRU list. */
182 struct bftw_file *target;
183 /** The remaining capacity of the LRU list. */
// NOTE(review): the capacity/dirlimit field declarations are elided here.
186 /** bftw_file arena. */
188 /** bfs_dir arena. */
190 /** Remaining bfs_dir capacity. */
194 /** Initialize a cache. */
195 static void bftw_cache_init(struct bftw_cache *cache, size_t capacity) {
197 cache->target = NULL;
198 cache->capacity = capacity;
200 VARENA_INIT(&cache->files, struct bftw_file, name);
201 bfs_dir_arena(&cache->dirs);
// Reserve one fd slot (capacity - 1) and cap open directories at 1024.
203 cache->dirlimit = capacity - 1;
204 if (cache->dirlimit > 1024) {
205 cache->dirlimit = 1024;
209 /** Allocate a directory. */
// NOTE(review): the dirlimit decrement between these lines is elided.
210 static struct bfs_dir *bftw_allocdir(struct bftw_cache *cache) {
211 if (cache->dirlimit == 0) {
217 return arena_alloc(&cache->dirs);
220 /** Free a directory. */
221 static void bftw_freedir(struct bftw_cache *cache, struct bfs_dir *dir) {
223 arena_free(&cache->dirs, dir);
226 /** Remove a bftw_file from the LRU list. */
227 static void bftw_lru_remove(struct bftw_cache *cache, struct bftw_file *file) {
// Keep the insertion target valid if the removed node was the target.
228 if (cache->target == file) {
229 cache->target = file->lru.prev;
232 LIST_REMOVE(cache, file, lru);
235 /** Remove a bftw_file from the cache. */
236 static void bftw_cache_remove(struct bftw_cache *cache, struct bftw_file *file) {
237 bftw_lru_remove(cache, file);
241 /** Close a bftw_file. */
// Precondition: the file has an open fd and is not pinned. If it wraps an
// open bfs_dir, the dir is closed and returned to the arena as well.
242 static void bftw_file_close(struct bftw_cache *cache, struct bftw_file *file) {
243 bfs_assert(file->fd >= 0);
244 bfs_assert(file->pincount == 0);
247 bfs_closedir(file->dir);
248 bftw_freedir(cache, file->dir);
255 bftw_cache_remove(cache, file);
258 /** Pop the least recently used directory from the cache. */
259 static int bftw_cache_pop(struct bftw_cache *cache) {
260 struct bftw_file *file = cache->tail;
265 bftw_file_close(cache, file);
269 /** Add a bftw_file to the LRU list. */
270 static void bftw_lru_add(struct bftw_cache *cache, struct bftw_file *file) {
271 bfs_assert(file->fd >= 0);
// Insert after the current target rather than at the head, so roots stay hot.
273 LIST_INSERT(cache, cache->target, file, lru);
275 // Prefer to keep the root paths open by keeping them at the head of the list
276 if (file->depth == 0) {
277 cache->target = file;
281 /** Add a bftw_file to the cache. */
282 static int bftw_cache_add(struct bftw_cache *cache, struct bftw_file *file) {
283 bfs_assert(file->fd >= 0);
// If the cache is full and we can't evict anything, close this file instead.
285 if (cache->capacity == 0 && bftw_cache_pop(cache) != 0) {
286 bftw_file_close(cache, file);
291 bfs_assert(cache->capacity > 0);
294 bftw_lru_add(cache, file);
298 /** Pin a cache entry so it won't be closed. */
// Pinned entries leave the LRU list entirely, so eviction never sees them.
299 static void bftw_cache_pin(struct bftw_cache *cache, struct bftw_file *file) {
300 bfs_assert(file->fd >= 0);
302 if (file->pincount++ == 0) {
303 bftw_lru_remove(cache, file);
307 /** Unpin a cache entry. */
308 static void bftw_cache_unpin(struct bftw_cache *cache, struct bftw_file *file) {
309 bfs_assert(file->fd >= 0);
310 bfs_assert(file->pincount > 0);
312 if (--file->pincount == 0) {
313 bftw_lru_add(cache, file);
317 /** Compute the name offset of a child path. */
318 static size_t bftw_child_nameoff(const struct bftw_file *parent) {
319 size_t ret = parent->nameoff + parent->namelen;
// Account for the separator unless the parent name already ends in '/'
// (e.g. the root "/"). The elided line presumably increments ret.
320 if (parent->name[parent->namelen - 1] != '/') {
326 /** Destroy a cache. */
// All files must have been freed (empty LRU list) before teardown.
327 static void bftw_cache_destroy(struct bftw_cache *cache) {
328 bfs_assert(LIST_EMPTY(cache));
329 bfs_assert(!cache->target);
331 varena_destroy(&cache->files);
332 arena_destroy(&cache->dirs);
335 /** Create a new bftw_file. */
// Allocates from the varena with room for the NUL-terminated name inline
// (flexible trailing storage). Returns NULL handling is elided here.
336 static struct bftw_file *bftw_file_new(struct bftw_cache *cache, struct bftw_file *parent, const char *name) {
337 size_t namelen = strlen(name);
338 struct bftw_file *file = varena_alloc(&cache->files, namelen + 1);
343 file->parent = parent;
// Children inherit the root and sit one level deeper than their parent.
346 file->root = parent->root;
347 file->depth = parent->depth + 1;
348 file->nameoff = bftw_child_nameoff(parent);
356 SLIST_ITEM_INIT(file);
357 SLIST_ITEM_INIT(file, to_read);
358 LIST_ITEM_INIT(file, lru);
363 file->ioqueued = false;
366 file->type = BFS_UNKNOWN;
// Copy the name including its NUL terminator into the inline storage.
370 file->namelen = namelen;
371 memcpy(file->name, name, namelen + 1);
376 /** Associate an open directory with a bftw_file. */
377 static void bftw_file_set_dir(struct bftw_cache *cache, struct bftw_file *file, struct bfs_dir *dir) {
378 bfs_assert(!file->dir);
382 bfs_assert(file->fd == bfs_dirfd(dir));
384 file->fd = bfs_dirfd(dir);
385 bftw_cache_add(cache, file);
// Keep the directory pinned while it still has a bfs_dir attached.
388 bftw_cache_pin(cache, file);
391 /** Free a bftw_file. */
392 static void bftw_file_free(struct bftw_cache *cache, struct bftw_file *file) {
393 bfs_assert(file->refcount == 0);
396 bftw_file_close(cache, file);
399 varena_free(&cache->files, file, file->namelen + 1);
403 * Holds the current state of the bftw() traversal.
// NOTE(review): several field declarations are elided in this extract; only
// their doc comments remain.
406 /** The path(s) to start from. */
408 /** The number of starting paths. */
410 /** bftw() callback. */
411 bftw_callback *callback;
412 /** bftw() callback data. */
415 enum bftw_flags flags;
416 /** Search strategy. */
417 enum bftw_strategy strategy;
418 /** The mount table. */
419 const struct bfs_mtab *mtab;
420 /** bfs_opendir() flags. */
421 enum bfs_dir_flags dir_flags;
423 /** The appropriate errno value, if any. */
426 /** The cache of open directories. */
427 struct bftw_cache cache;
429 /** The async I/O queue. */
431 /** The number of I/O threads. */
// Queues driving the traversal pipeline: open -> read -> close, plus
// batching lists used by depth-first/sorted modes.
434 /** A batch of directories to open. */
435 struct bftw_list dir_batch;
436 /** The queue of directories to open. */
437 struct bftw_list to_open;
438 /** The queue of directories to read. */
439 struct bftw_list to_read;
440 /** The queue of unpinned directories to unwrap. */
441 struct bftw_list to_close;
443 /** A batch of files to enqueue. */
444 struct bftw_list file_batch;
445 /** The queue of files to visit. */
446 struct bftw_list to_visit;
448 /** The current path. */
450 /** The current file. */
451 struct bftw_file *file;
452 /** The previous file. */
453 struct bftw_file *previous;
455 /** The currently open directory. */
457 /** The current directory entry. */
458 struct bfs_dirent *de;
459 /** Storage for the directory entry. */
460 struct bfs_dirent de_storage;
461 /** Any error encountered while reading the directory. */
464 /** Extra data about the current file. */
468 /** Check if we have to buffer files before visiting them. */
469 static bool bftw_must_buffer(const struct bftw_state *state) {
470 if (state->flags & BFTW_SORT) {
471 // Have to buffer the files to sort them
// Single-threaded DFS needs buffering to preserve true depth-first order.
475 if (state->strategy == BFTW_DFS && state->nthreads == 0) {
476 // Without buffering, we would get a not-quite-depth-first
486 // This is okay for iterative deepening, since the caller only
487 // sees files at the target depth. We also deem it okay for
488 // parallel searches, since the order is unpredictable anyway.
495 /** Initialize the bftw() state. */
496 static int bftw_state_init(struct bftw_state *state, const struct bftw_args *args) {
497 state->paths = args->paths;
498 state->npaths = args->npaths;
499 state->callback = args->callback;
500 state->ptr = args->ptr;
501 state->flags = args->flags;
502 state->strategy = args->strategy;
503 state->mtab = args->mtab;
504 state->dir_flags = 0;
// Need at least 2 usable fds; the elided branch presumably fails with EMFILE.
507 if (args->nopenfd < 2) {
512 size_t nopenfd = args->nopenfd;
513 size_t qdepth = 4096;
514 size_t nthreads = args->nthreads;
517 // io_uring uses one fd per ring, ioq uses one ring per thread
// Leave headroom so the rings themselves don't exhaust the fd budget.
518 if (nthreads >= nopenfd - 1) {
519 nthreads = nopenfd - 2;
524 bftw_cache_init(&state->cache, nopenfd);
527 state->ioq = ioq_create(qdepth, nthreads);
534 state->nthreads = nthreads;
536 if (bftw_must_buffer(state)) {
537 state->flags |= BFTW_BUFFER;
540 if (state->flags & BFTW_WHITEOUTS) {
541 state->dir_flags |= BFS_DIR_WHITEOUTS;
544 SLIST_INIT(&state->dir_batch);
545 SLIST_INIT(&state->to_open);
546 SLIST_INIT(&state->to_read);
547 SLIST_INIT(&state->to_close);
549 SLIST_INIT(&state->file_batch);
550 SLIST_INIT(&state->to_visit);
554 state->previous = NULL;
563 /** Unpin a directory, and possibly queue it for unwrapping. */
564 static void bftw_unpin_dir(struct bftw_state *state, struct bftw_file *file, bool force) {
565 bftw_cache_unpin(&state->cache, file);
// Queue at most once: SLIST_ATTACHED guards against double-append.
567 if (file->dir && (force || file->pincount == 0)) {
568 if (!SLIST_ATTACHED(&state->to_close, file)) {
569 SLIST_APPEND(&state->to_close, file);
574 /** Pop a response from the I/O queue. */
// NOTE(review): the switch over ent->op is elided; the visible lines are the
// closedir and opendir completion paths.
575 static int bftw_ioq_pop(struct bftw_state *state, bool block) {
576 struct ioq *ioq = state->ioq;
581 struct ioq_ent *ent = block ? ioq_pop(ioq) : ioq_trypop(ioq);
586 struct bftw_cache *cache = &state->cache;
587 struct bftw_file *file;
588 struct bftw_file *parent;
591 enum ioq_op op = ent->op;
// Completed closedir: just return the bfs_dir to the arena.
599 dir = ent->closedir.dir;
600 bftw_freedir(cache, dir);
605 file->ioqueued = false;
608 parent = file->parent;
610 bftw_unpin_dir(state, parent, false);
// Completed opendir: attach on success, recycle the dir on failure (the
// failure branch between these lines is elided).
613 dir = ent->opendir.dir;
615 bftw_file_set_dir(cache, file, dir);
617 bftw_freedir(cache, dir);
620 if (!(state->flags & BFTW_SORT)) {
621 SLIST_APPEND(&state->to_read, file, to_read);
630 /** Try to reserve space in the I/O queue. */
631 static int bftw_ioq_reserve(struct bftw_state *state) {
632 struct ioq *ioq = state->ioq;
637 if (ioq_capacity(ioq) > 0) {
641 // With more than two threads, it is faster to wait for an I/O operation
642 // to complete than it is to do it ourselves
643 bool block = state->nthreads > 2;
644 if (bftw_ioq_pop(state, block) < 0) {
651 /** Try to reserve space in the cache. */
652 static int bftw_cache_reserve(struct bftw_state *state) {
653 struct bftw_cache *cache = &state->cache;
654 if (cache->capacity > 0) {
// Drain completed I/O first, since completions may release fds back
// to the cache; fall back to evicting the LRU entry.
658 while (bftw_ioq_pop(state, true) >= 0) {
659 if (cache->capacity > 0) {
664 if (bftw_cache_pop(cache) != 0) {
669 bfs_assert(cache->capacity > 0);
673 /** Open a bftw_file relative to another one. */
674 static int bftw_file_openat(struct bftw_state *state, struct bftw_file *file, struct bftw_file *base, const char *at_path) {
675 bfs_assert(file->fd < 0);
677 struct bftw_cache *cache = &state->cache;
679 int at_fd = AT_FDCWD;
// Pin the base so reserving cache space below can't evict (close) it.
681 bftw_cache_pin(cache, base);
686 if (bftw_cache_reserve(state) != 0) {
690 int flags = O_RDONLY | O_CLOEXEC | O_DIRECTORY;
691 fd = openat(at_fd, at_path, flags);
// EMFILE: evict one cached fd and retry the open once.
693 if (fd < 0 && errno == EMFILE) {
694 if (bftw_cache_pop(cache) == 0) {
695 fd = openat(at_fd, at_path, flags);
702 bftw_cache_unpin(cache, base);
707 bftw_cache_add(cache, file);
713 /** Open a bftw_file. */
714 static int bftw_file_open(struct bftw_state *state, struct bftw_file *file, const char *path) {
715 // Find the nearest open ancestor
716 struct bftw_file *base = file;
719 } while (base && base->fd < 0);
721 const char *at_path = path;
723 at_path += bftw_child_nameoff(base);
726 int fd = bftw_file_openat(state, file, base, at_path);
727 if (fd >= 0 || errno != ENAMETOOLONG) {
731 // Handle ENAMETOOLONG by manually traversing the path component-by-component
732 struct bftw_list parents;
733 SLIST_INIT(&parents);
// Build the ancestor chain in root-to-leaf order, then open each level
// relative to its (now open) parent.
735 struct bftw_file *cur;
736 for (cur = file; cur != base; cur = cur->parent) {
737 SLIST_PREPEND(&parents, cur);
740 while ((cur = SLIST_POP(&parents))) {
741 if (!cur->parent || cur->parent->fd >= 0) {
742 bftw_file_openat(state, cur, cur->parent, cur->name);
749 /** Close a directory, asynchronously if possible. */
750 static int bftw_ioq_closedir(struct bftw_state *state, struct bfs_dir *dir) {
751 if (bftw_ioq_reserve(state) == 0) {
752 if (ioq_closedir(state->ioq, dir, NULL) == 0) {
// Synchronous fallback when the ioq has no room.
757 struct bftw_cache *cache = &state->cache;
758 int ret = bfs_closedir(dir);
759 bftw_freedir(cache, dir);
764 /** Close a file descriptor, asynchronously if possible. */
765 static int bftw_ioq_close(struct bftw_state *state, int fd) {
766 if (bftw_ioq_reserve(state) == 0) {
767 if (ioq_close(state->ioq, fd, NULL) == 0) {
772 struct bftw_cache *cache = &state->cache;
773 int ret = xclose(fd);
778 /** Close a file, asynchronously if possible. */
// Dispatches to the closedir or plain close path depending on whether a
// bfs_dir is attached; the file leaves the LRU list either way.
779 static int bftw_close(struct bftw_state *state, struct bftw_file *file) {
780 bfs_assert(file->fd >= 0);
781 bfs_assert(file->pincount == 0);
783 struct bfs_dir *dir = file->dir;
786 bftw_lru_remove(&state->cache, file);
791 return bftw_ioq_closedir(state, dir);
793 return bftw_ioq_close(state, fd);
797 /** Free an open directory. */
798 static int bftw_unwrapdir(struct bftw_state *state, struct bftw_file *file) {
799 struct bfs_dir *dir = file->dir;
804 struct bftw_cache *cache = &state->cache;
806 // Try to keep an open fd if any children exist
807 bool reffed = file->refcount > 1;
808 // Keep the fd the same if it's pinned
809 bool pinned = file->pincount > 0;
// With bfs_unwrapdir() support we can detach the fd from the bfs_dir
// directly; otherwise we must dup() the fd before closing the dir.
811 #if BFS_USE_UNWRAPDIR
812 if (reffed || pinned) {
814 bftw_freedir(cache, dir);
825 return bftw_close(state, file);
828 // Make room for dup()
829 bftw_cache_pin(cache, file);
830 int ret = bftw_cache_reserve(state);
831 bftw_cache_unpin(cache, file);
836 int fd = dup_cloexec(file->fd);
844 return bftw_ioq_closedir(state, dir);
847 /** Open a directory asynchronously. */
848 static int bftw_ioq_opendir(struct bftw_state *state, struct bftw_file *file) {
849 if (bftw_ioq_reserve(state) != 0) {
854 struct bftw_cache *cache = &state->cache;
855 struct bftw_file *parent = file->parent;
// The parent stays pinned until the async opendir completes (unpinned in
// bftw_ioq_pop()); the unwind labels between these lines are elided.
861 bftw_cache_pin(cache, parent);
864 if (bftw_cache_reserve(state) != 0) {
868 struct bfs_dir *dir = bftw_allocdir(cache);
873 if (ioq_opendir(state->ioq, dir, dfd, file->name, state->dir_flags, file) != 0) {
877 file->ioqueued = true;
// Error-unwind path: release the dir and parent pin.
882 bftw_freedir(cache, dir);
885 bftw_cache_unpin(cache, parent);
891 /** Open a batch of directories asynchronously. */
892 static void bftw_ioq_opendirs(struct bftw_state *state, struct bftw_list *queue) {
893 for_slist (struct bftw_file, dir, queue) {
894 if (bftw_ioq_opendir(state, dir) != 0) {
901 /** Push a directory onto the queue. */
902 static void bftw_push_dir(struct bftw_state *state, struct bftw_file *file) {
903 bfs_assert(file->type == BFS_DIR);
905 struct bftw_list *queue;
906 if (state->strategy == BFTW_BFS || (state->flags & BFTW_BUFFER)) {
907 // In breadth-first mode, or if we're already buffering files,
908 // we can push directly to the to_open queue
909 queue = &state->to_open;
911 // For a depth-first, unbuffered search, add directories to a
912 // batch, then push the patch to the front of the queue
913 queue = &state->dir_batch;
916 SLIST_APPEND(queue, file);
918 if (state->flags & BFTW_SORT) {
919 // When sorting, directories are kept in order on the to_read
920 // list; otherwise, they are only added once they are open
921 SLIST_APPEND(&state->to_read, file, to_read);
// Kick off async opendir requests for whatever is queued so far.
924 bftw_ioq_opendirs(state, queue);
927 /** Pop a directory to read from the queue. */
928 static bool bftw_pop_dir(struct bftw_state *state) {
929 bfs_assert(!state->file);
931 struct bftw_cache *cache = &state->cache;
932 bool have_files = !SLIST_EMPTY(&state->to_visit);
934 if (state->flags & BFTW_SORT) {
935 // Keep strict breadth-first order when sorting
936 if (state->strategy == BFTW_BFS && have_files) {
// Wait for async opendirs to land on to_read; only block when there is
// nothing else to do or no room to start more work.
940 while (SLIST_EMPTY(&state->to_read)) {
941 // Block if we have no other files/dirs to visit, or no room in the cache
942 bool have_dirs = !SLIST_EMPTY(&state->to_open);
943 bool have_room = cache->capacity > 0 && cache->dirlimit > 0;
944 bool block = !(have_dirs || have_files) || !have_room;
946 if (bftw_ioq_pop(state, block) < 0) {
952 struct bftw_file *file = SLIST_POP(&state->to_read, to_read);
953 if (!file || file == state->to_open.head) {
954 file = SLIST_POP(&state->to_open);
// If its opendir is still in flight, drain the ioq until it completes.
960 while (file->ioqueued) {
961 bftw_ioq_pop(state, true);
968 /** Pop a file to visit from the queue. */
969 static bool bftw_pop_file(struct bftw_state *state) {
970 bfs_assert(!state->file);
971 state->file = SLIST_POP(&state->to_visit);
975 /** Build the path to the current file. */
// Reuses the shared ancestry with the previously-built path so only the
// differing suffix is rewritten. `name` (possibly NULL) is appended last.
976 static int bftw_build_path(struct bftw_state *state, const char *name) {
977 const struct bftw_file *file = state->file;
979 size_t pathlen = file ? file->nameoff + file->namelen : 0;
980 if (dstresize(&state->path, pathlen) != 0) {
981 state->error = errno;
985 // Try to find a common ancestor with the existing path
986 const struct bftw_file *ancestor = state->previous;
987 while (ancestor && ancestor->depth > file->depth) {
988 ancestor = ancestor->parent;
991 // Build the path backwards
992 while (file && file != ancestor) {
993 if (file->nameoff > 0) {
994 state->path[file->nameoff - 1] = '/';
996 memcpy(state->path + file->nameoff, file->name, file->namelen);
// Keep the ancestor at the same depth as `file` as we walk up.
998 if (ancestor && ancestor->depth == file->depth) {
999 ancestor = ancestor->parent;
1001 file = file->parent;
1004 state->previous = state->file;
// Append "/name" unless the path already ends with a separator.
1007 if (pathlen > 0 && state->path[pathlen - 1] != '/') {
1008 if (dstrapp(&state->path, '/') != 0) {
1009 state->error = errno;
1013 if (dstrcat(&state->path, name) != 0) {
1014 state->error = errno;
1022 /** Open a bftw_file as a directory. */
1023 static struct bfs_dir *bftw_file_opendir(struct bftw_state *state, struct bftw_file *file, const char *path) {
1024 int fd = bftw_file_open(state, file, path);
1029 struct bftw_cache *cache = &state->cache;
1030 struct bfs_dir *dir = bftw_allocdir(cache);
// On bfs_opendir() failure return the dir to the arena; the fd remains
// cached with the file.
1035 if (bfs_opendir(dir, fd, NULL, state->dir_flags) != 0) {
1036 bftw_freedir(cache, dir);
1040 bftw_file_set_dir(cache, file, dir);
1044 /** Open the current directory. */
1045 static int bftw_opendir(struct bftw_state *state) {
1046 bfs_assert(!state->dir);
1047 bfs_assert(!state->de);
1049 state->direrror = 0;
// Use the already-open dir from an async opendir if present; otherwise
// build the path and open it synchronously.
1051 struct bftw_file *file = state->file;
1052 state->dir = file->dir;
1057 if (bftw_build_path(state, NULL) != 0) {
1061 state->dir = bftw_file_opendir(state, file, state->path);
1063 state->direrror = errno;
1069 /** Read an entry from the current directory. */
1070 static int bftw_readdir(struct bftw_state *state) {
1075 int ret = bfs_readdir(state->dir, &state->de_storage);
1077 state->de = &state->de_storage;
1078 } else if (ret == 0) {
// ret < 0: record the read error for later reporting via direrror.
1082 state->direrror = errno;
1088 /** Check if a stat() call is needed for this visit. */
1089 static bool bftw_need_stat(const struct bftw_state *state) {
1090 if (state->flags & BFTW_STAT) {
1094 const struct BFTW *ftwbuf = &state->ftwbuf;
1095 if (ftwbuf->type == BFS_UNKNOWN) {
// Links must be resolved when follow semantics are in effect.
1099 if (ftwbuf->type == BFS_LNK && !(ftwbuf->stat_flags & BFS_STAT_NOFOLLOW)) {
1103 if (ftwbuf->type == BFS_DIR) {
1104 if (state->flags & (BFTW_DETECT_CYCLES | BFTW_SKIP_MOUNTS | BFTW_PRUNE_MOUNTS)) {
1108 } else if (state->mtab) {
1109 // Linux fills in d_type from the underlying inode, even when
1110 // the directory entry is a bind mount point. In that case, we
1111 // need to stat() to get the correct type. We don't need to
1112 // check for directories because they can only be mounted over
1113 // by other directories.
1114 if (bfs_might_be_mount(state->mtab, ftwbuf->path)) {
1123 /** Initialize bftw_stat cache. */
1124 static void bftw_stat_init(struct bftw_stat *cache) {
1129 /** Open a file if necessary. */
// Truncates the full path to just this file's portion before opening, since
// `path` may extend past the file (e.g. include a child's name).
1130 static int bftw_ensure_open(struct bftw_state *state, struct bftw_file *file, const char *path) {
1134 char *copy = strndup(path, file->nameoff + file->namelen);
1139 ret = bftw_file_open(state, file, copy);
1146 /** Initialize the buffers with data about the current path. */
1147 static void bftw_init_ftwbuf(struct bftw_state *state, enum bftw_visit visit) {
1148 struct bftw_file *file = state->file;
1149 const struct bfs_dirent *de = state->de;
1151 struct BFTW *ftwbuf = &state->ftwbuf;
1152 ftwbuf->path = state->path;
1153 ftwbuf->root = file ? file->root->name : ftwbuf->path;
1155 ftwbuf->visit = visit;
1156 ftwbuf->type = BFS_UNKNOWN;
1157 ftwbuf->error = state->direrror;
1158 ftwbuf->at_fd = AT_FDCWD;
1159 ftwbuf->at_path = ftwbuf->path;
1160 ftwbuf->stat_flags = BFS_STAT_NOFOLLOW;
1161 bftw_stat_init(&ftwbuf->lstat_cache);
1162 bftw_stat_init(&ftwbuf->stat_cache);
// A dirent means we're visiting a child of `file`; otherwise `file` itself.
1164 struct bftw_file *parent = NULL;
1167 ftwbuf->depth = file->depth + 1;
1168 ftwbuf->type = de->type;
1169 ftwbuf->nameoff = bftw_child_nameoff(file);
1171 parent = file->parent;
1172 ftwbuf->depth = file->depth;
1173 ftwbuf->type = file->type;
1174 ftwbuf->nameoff = file->nameoff;
1178 // Try to ensure the immediate parent is open, to avoid ENAMETOOLONG
1179 if (bftw_ensure_open(state, parent, state->path) >= 0) {
1180 ftwbuf->at_fd = parent->fd;
1181 ftwbuf->at_path += ftwbuf->nameoff;
1183 ftwbuf->error = errno;
1187 if (ftwbuf->depth == 0) {
1188 // Compute the name offset for root paths like "foo/bar"
1189 ftwbuf->nameoff = xbaseoff(ftwbuf->path);
1192 if (ftwbuf->error != 0) {
1193 ftwbuf->type = BFS_ERROR;
// Follow semantics: BFTW_FOLLOW_ROOTS only applies at depth 0.
1197 int follow_flags = BFTW_FOLLOW_ALL;
1198 if (ftwbuf->depth == 0) {
1199 follow_flags |= BFTW_FOLLOW_ROOTS;
1201 bool follow = state->flags & follow_flags;
1203 ftwbuf->stat_flags = BFS_STAT_TRYFOLLOW;
1206 const struct bfs_stat *statbuf = NULL;
1207 if (bftw_need_stat(state)) {
1208 statbuf = bftw_stat(ftwbuf, ftwbuf->stat_flags);
1210 ftwbuf->type = bfs_mode_to_type(statbuf->mode);
1212 ftwbuf->type = BFS_ERROR;
1213 ftwbuf->error = errno;
// Cycle detection: a directory matching any ancestor's (dev, ino) is a loop.
1218 if (ftwbuf->type == BFS_DIR && (state->flags & BFTW_DETECT_CYCLES)) {
1219 for (const struct bftw_file *ancestor = parent; ancestor; ancestor = ancestor->parent) {
1220 if (ancestor->dev == statbuf->dev && ancestor->ino == statbuf->ino) {
1221 ftwbuf->type = BFS_ERROR;
1222 ftwbuf->error = ELOOP;
1229 /** Check if the current file is a mount point. */
1230 static bool bftw_is_mount(struct bftw_state *state, const char *name) {
1231 const struct bftw_file *file = state->file;
// When visiting a named child, `file` itself is the parent; otherwise use
// the file's recorded parent.
1236 const struct bftw_file *parent = name ? file : file->parent;
// A device number differing from the parent's indicates a mount boundary.
1241 const struct BFTW *ftwbuf = &state->ftwbuf;
1242 const struct bfs_stat *statbuf = bftw_stat(ftwbuf, ftwbuf->stat_flags);
1243 return statbuf && statbuf->dev != parent->dev;
1246 /** Invoke the callback. */
1247 static enum bftw_action bftw_call_back(struct bftw_state *state, const char *name, enum bftw_visit visit) {
// Post-order visits are skipped entirely unless BFTW_POST_ORDER was requested.
1248 if (visit == BFTW_POST && !(state->flags & BFTW_POST_ORDER)) {
1252 if (bftw_build_path(state, name) != 0) {
1256 const struct BFTW *ftwbuf = &state->ftwbuf;
1257 bftw_init_ftwbuf(state, visit);
1259 // Never give the callback BFS_ERROR unless BFTW_RECOVER is specified
1260 if (ftwbuf->type == BFS_ERROR && !(state->flags & BFTW_RECOVER)) {
1261 state->error = ftwbuf->error;
1265 if ((state->flags & BFTW_SKIP_MOUNTS) && bftw_is_mount(state, name)) {
1269 enum bftw_action ret = state->callback(ftwbuf, state->ptr);
// The elided switch presumably translates the callback's action; non-dirs
// and pruned mounts never descend.
1272 if (visit != BFTW_PRE) {
1275 if (ftwbuf->type != BFS_DIR) {
1278 if ((state->flags & BFTW_PRUNE_MOUNTS) && bftw_is_mount(state, name)) {
1287 state->error = EINVAL;
1293 * Flags controlling which files get visited when done with a directory.
// Bitmask; combine with | (see BFTW_VISIT_ALL).
1295 enum bftw_gc_flags {
1296 /** Don't visit anything. */
1297 BFTW_VISIT_NONE = 0,
1298 /** Report directory errors. */
1299 BFTW_VISIT_ERROR = 1 << 0,
1300 /** Visit the file itself. */
1301 BFTW_VISIT_FILE = 1 << 1,
1302 /** Visit the file's ancestors. */
1303 BFTW_VISIT_PARENTS = 1 << 2,
1304 /** Visit both the file and its ancestors. */
1305 BFTW_VISIT_ALL = BFTW_VISIT_ERROR | BFTW_VISIT_FILE | BFTW_VISIT_PARENTS,
1308 /** Garbage collect the current file and its parents. */
1309 static int bftw_gc(struct bftw_state *state, enum bftw_gc_flags flags) {
1312 struct bftw_file *file = state->file;
1313 if (file && file->dir) {
1314 bftw_unpin_dir(state, file, true);
// Report any pending directory error before clearing it.
1319 if (state->direrror != 0) {
1320 if (flags & BFTW_VISIT_ERROR) {
1321 if (bftw_call_back(state, NULL, BFTW_PRE) == BFTW_STOP) {
1326 state->error = state->direrror;
1329 state->direrror = 0;
1331 while ((file = SLIST_POP(&state->to_close))) {
1332 bftw_unwrapdir(state, file);
// Walk up the ancestry, dropping references; a file is freed (and gets its
// post-order visit) only when its refcount hits zero.
1335 enum bftw_gc_flags visit = BFTW_VISIT_FILE;
1336 while ((file = state->file)) {
1337 if (--file->refcount > 0) {
1342 if (flags & visit) {
1343 if (bftw_call_back(state, NULL, BFTW_POST) == BFTW_STOP) {
1348 visit = BFTW_VISIT_PARENTS;
1350 struct bftw_file *parent = file->parent;
1351 if (state->previous == file) {
1352 state->previous = parent;
1354 state->file = parent;
1356 if (file->fd >= 0) {
1357 bftw_close(state, file);
1359 bftw_file_free(&state->cache, file);
1365 /** Sort a bftw_list by filename. */
// Recursive merge sort: split with a tortoise/hare walk, sort halves, merge.
1366 static void bftw_list_sort(struct bftw_list *list) {
1367 if (!list->head || !list->head->next) {
1371 struct bftw_list left, right;
// Hare advances two nodes per tortoise pop, so `left` gets the first half.
1376 for (struct bftw_file *hare = list->head; hare && (hare = hare->next); hare = hare->next) {
1377 struct bftw_file *tortoise = SLIST_POP(list);
1378 SLIST_APPEND(&left, tortoise);
1380 SLIST_EXTEND(&right, list);
1383 bftw_list_sort(&left);
1384 bftw_list_sort(&right);
// Merge by locale-aware name comparison; <= keeps the sort stable.
1387 while (!SLIST_EMPTY(&left) && !SLIST_EMPTY(&right)) {
1388 struct bftw_file *lf = left.head;
1389 struct bftw_file *rf = right.head;
1391 if (strcoll(lf->name, rf->name) <= 0) {
1393 SLIST_APPEND(list, lf);
1396 SLIST_APPEND(list, rf);
1399 SLIST_EXTEND(list, &left);
1400 SLIST_EXTEND(list, &right);
1403 /** Finish adding a batch of files. */
1404 static void bftw_batch_finish(struct bftw_state *state) {
1405 if (state->flags & BFTW_SORT) {
1406 bftw_list_sort(&state->file_batch);
// Non-BFS strategies splice batches so this directory's children are
// processed before previously queued entries (depth-first order).
1409 if (state->strategy != BFTW_BFS) {
1410 SLIST_EXTEND(&state->dir_batch, &state->to_open);
1411 SLIST_EXTEND(&state->file_batch, &state->to_visit);
1414 SLIST_EXTEND(&state->to_open, &state->dir_batch);
1415 SLIST_EXTEND(&state->to_visit, &state->file_batch);
1417 bftw_ioq_opendirs(state, &state->to_open);
1420 /** Close the current directory. */
1421 static int bftw_closedir(struct bftw_state *state) {
1422 if (bftw_gc(state, BFTW_VISIT_ALL) != 0) {
1426 bftw_batch_finish(state);
1430 /** Fill file identity information from an ftwbuf. */
1431 static void bftw_save_ftwbuf(struct bftw_file *file, const struct BFTW *ftwbuf) {
1432 file->type = ftwbuf->type;
// Prefer the follow-stat buffer; fall back to lstat when no follow result
// exists or NOFOLLOW semantics were used.
1434 const struct bfs_stat *statbuf = ftwbuf->stat_cache.buf;
1435 if (!statbuf || (ftwbuf->stat_flags & BFS_STAT_NOFOLLOW)) {
1436 statbuf = ftwbuf->lstat_cache.buf;
// (dev, ino) is saved for later cycle detection against descendants.
1439 file->dev = statbuf->dev;
1440 file->ino = statbuf->ino;
1444 /** Visit and/or enqueue the current file. */
1445 static int bftw_visit(struct bftw_state *state, const char *name) {
1446 struct bftw_file *file = state->file;
// Buffered mode: create the child record now and defer the callback.
1448 if (name && (state->flags & BFTW_BUFFER)) {
1449 file = bftw_file_new(&state->cache, file, name);
1451 state->error = errno;
1456 file->type = state->de->type;
1459 SLIST_APPEND(&state->file_batch, file);
// Unbuffered mode: invoke the callback immediately and act on its result.
1463 switch (bftw_call_back(state, name, BFTW_PRE)) {
1466 file = bftw_file_new(&state->cache, state->file, name);
1471 state->error = errno;
1475 bftw_save_ftwbuf(file, &state->ftwbuf);
1476 bftw_push_dir(state, file);
// name == NULL means we just visited `file` itself; GC up the ancestry.
1480 if (file && !name) {
1481 return bftw_gc(state, BFTW_VISIT_PARENTS);
1492 * Dispose of the bftw() state.
1495 * The bftw() return value.
1497 static int bftw_state_destroy(struct bftw_state *state) {
1498 dstrfree(state->path);
// Drain all outstanding async I/O before tearing anything down.
1500 struct ioq *ioq = state->ioq;
1503 while (bftw_ioq_pop(state, true) >= 0);
1507 SLIST_EXTEND(&state->to_open, &state->dir_batch);
1508 SLIST_EXTEND(&state->to_visit, &state->file_batch);
// Release every remaining queued file/dir without invoking the callback.
1510 bftw_gc(state, BFTW_VISIT_NONE);
1511 } while (bftw_pop_dir(state) || bftw_pop_file(state));
1515 bftw_cache_destroy(&state->cache);
// Propagate the first recorded error via errno and the return value.
1517 errno = state->error;
1518 return state->error ? -1 : 0;
1522 * Shared implementation for all search strategies.
1524 static int bftw_impl(struct bftw_state *state) {
// Seed the traversal with every root path.
1525 for (size_t i = 0; i < state->npaths; ++i) {
1526 if (bftw_visit(state, state->paths[i]) != 0) {
1530 bftw_batch_finish(state);
// Main loop: open each queued directory, visit its entries, close it.
1533 while (bftw_pop_dir(state)) {
1534 if (bftw_opendir(state) != 0) {
1537 while (bftw_readdir(state) > 0) {
1538 if (bftw_visit(state, state->de->name) != 0) {
1542 if (bftw_closedir(state) != 0) {
// Drain any buffered files once no directories remain.
1547 if (!bftw_pop_file(state)) {
1550 if (bftw_visit(state, NULL) != 0) {
1559 * bftw() implementation for simple breadth-/depth-first search.
1561 static int bftw_walk(const struct bftw_args *args) {
1562 struct bftw_state state;
1563 if (bftw_state_init(&state, args) != 0) {
// Destroy unconditionally; it reports the walk's overall status.
1568 return bftw_state_destroy(&state);
1572 * Iterative deepening search state.
1574 struct bftw_ids_state {
1575 /** Nested walk state. */
1576 struct bftw_state nested;
1577 /** The wrapped callback. */
1578 bftw_callback *delegate;
1579 /** The wrapped callback arguments. */
1581 /** Which visit this search corresponds to. */
1582 enum bftw_visit visit;
1583 /** Whether to override the bftw_visit. */
// NOTE(review): the force_visit, min_depth, max_depth, pruned, and bottom
// field declarations are elided in this extract.
1585 /** The current minimum depth (inclusive). */
1587 /** The current maximum depth (exclusive). */
1589 /** The set of pruned paths. */
1591 /** Whether the bottom has been found. */
1595 /** Iterative deepening callback function. */
1596 static enum bftw_action bftw_ids_callback(const struct BFTW *ftwbuf, void *ptr) {
1597 struct bftw_ids_state *state = ptr;
// Optionally rewrite the visit kind (used for the synthetic post-order pass).
1599 if (state->force_visit) {
1600 struct BFTW *mutbuf = (struct BFTW *)ftwbuf;
1601 mutbuf->visit = state->visit;
1604 if (ftwbuf->type == BFS_ERROR) {
// Only surface errors at or below one level above the current window.
1605 if (ftwbuf->depth + 1 >= state->min_depth) {
1606 return state->delegate(ftwbuf, state->ptr);
// Below min_depth: already reported on an earlier pass; skip pruned subtrees.
1612 if (ftwbuf->depth < state->min_depth) {
1613 if (trie_find_str(&state->pruned, ftwbuf->path)) {
1616 return BFTW_CONTINUE;
1618 } else if (state->visit == BFTW_POST) {
1619 if (trie_find_str(&state->pruned, ftwbuf->path)) {
1624 enum bftw_action ret = BFTW_CONTINUE;
1625 if (ftwbuf->visit == state->visit) {
1626 ret = state->delegate(ftwbuf, state->ptr);
// A directory at the depth limit means a deeper pass is still needed.
1631 if (ftwbuf->type == BFS_DIR && ftwbuf->depth + 1 >= state->max_depth) {
1632 state->bottom = false;
// Remember pruned directories so later passes skip them too.
1638 if (ftwbuf->type == BFS_DIR) {
1639 if (!trie_insert_str(&state->pruned, ftwbuf->path)) {
1640 state->nested.error = errno;
1653 /** Initialize iterative deepening state. */
1654 static int bftw_ids_init(struct bftw_ids_state *state, const struct bftw_args *args) {
1655 state->delegate = args->callback;
1656 state->ptr = args->ptr;
1657 state->visit = BFTW_PRE;
1658 state->force_visit = false;
// First pass covers depths [0, 1).
1659 state->min_depth = 0;
1660 state->max_depth = 1;
1661 trie_init(&state->pruned);
1662 state->bottom = false;
// Interpose our callback; post-order is emulated separately, so strip it
// from the nested walk's flags.
1664 struct bftw_args ids_args = *args;
1665 ids_args.callback = bftw_ids_callback;
1666 ids_args.ptr = state;
1667 ids_args.flags &= ~BFTW_POST_ORDER;
1668 return bftw_state_init(&state->nested, &ids_args);
1671 /** Finish an iterative deepening search. */
1672 static int bftw_ids_destroy(struct bftw_ids_state *state) {
1673 trie_destroy(&state->pruned);
1674 return bftw_state_destroy(&state->nested);
1678 * Iterative deepening bftw() wrapper.
1680 static int bftw_ids(const struct bftw_args *args) {
1681 struct bftw_ids_state state;
1682 if (bftw_ids_init(&state, args) != 0) {
// Repeat full walks with an increasing depth window until no directory is
// found at the limit (state.bottom stays true).
1686 while (!state.bottom) {
1687 state.bottom = true;
1689 if (bftw_impl(&state.nested) != 0) {
// Emulated post-order: re-walk with decreasing depth, forcing BFTW_POST.
1697 if (args->flags & BFTW_POST_ORDER) {
1698 state.visit = BFTW_POST;
1699 state.force_visit = true;
1701 while (state.min_depth > 0) {
1705 if (bftw_impl(&state.nested) != 0) {
1712 return bftw_ids_destroy(&state);
1716 * Exponential deepening bftw() wrapper.
1718 static int bftw_eds(const struct bftw_args *args) {
1719 struct bftw_ids_state state;
1720 if (bftw_ids_init(&state, args) != 0) {
1724 while (!state.bottom) {
1725 state.bottom = true;
1727 if (bftw_impl(&state.nested) != 0) {
// Double the depth window each pass (hence "exponential" deepening).
1731 state.min_depth = state.max_depth;
1732 state.max_depth *= 2;
// Post-order here is a single real BFTW_POST_ORDER pass over all depths.
1735 if (args->flags & BFTW_POST_ORDER) {
1736 state.visit = BFTW_POST;
1737 state.min_depth = 0;
1738 state.nested.flags |= BFTW_POST_ORDER;
1740 bftw_impl(&state.nested);
1744 return bftw_ids_destroy(&state);
// Public entry point: dispatch to the walker for the requested strategy.
1747 int bftw(const struct bftw_args *args) {
1748 switch (args->strategy) {
1751 return bftw_walk(args);
1753 return bftw_ids(args);
1755 return bftw_eds(args);