  1. /*
  2. * dir.c - NILFS directory entry operations
  3. *
  4. * Copyright (C) 2005-2008 Nippon Telegraph and Telephone Corporation.
  5. *
  6. * This program is free software; you can redistribute it and/or modify
  7. * it under the terms of the GNU General Public License as published by
  8. * the Free Software Foundation; either version 2 of the License, or
  9. * (at your option) any later version.
  10. *
  11. * This program is distributed in the hope that it will be useful,
  12. * but WITHOUT ANY WARRANTY; without even the implied warranty of
  13. * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
  14. * GNU General Public License for more details.
  15. *
  16. * Modified for NILFS by Amagai Yoshiji.
  17. */
  18. /*
  19. * linux/fs/ext2/dir.c
  20. *
  21. * Copyright (C) 1992, 1993, 1994, 1995
  22. * Remy Card (card@masi.ibp.fr)
  23. * Laboratoire MASI - Institut Blaise Pascal
  24. * Universite Pierre et Marie Curie (Paris VI)
  25. *
  26. * from
  27. *
  28. * linux/fs/minix/dir.c
  29. *
  30. * Copyright (C) 1991, 1992 Linus Torvalds
  31. *
  32. * ext2 directory handling functions
  33. *
  34. * Big-endian to little-endian byte-swapping/bitmaps by
  35. * David S. Miller (davem@caip.rutgers.edu), 1995
  36. *
  37. * All code that works with directory layout had been switched to pagecache
  38. * and moved here. AV
  39. */
  40. #include <linux/pagemap.h>
  41. #include "nilfs.h"
  42. #include "page.h"
/*
 * nilfs uses block-sized chunks. Arguably, sector-sized ones would be
 * more robust, but we have what we have
 */

/* Size of one directory chunk: the filesystem block size. */
static inline unsigned int nilfs_chunk_size(struct inode *inode)
{
        return inode->i_sb->s_blocksize;
}
/* Undo nilfs_get_page(): drop the kmap and release the page reference. */
static inline void nilfs_put_page(struct page *page)
{
        kunmap(page);
        put_page(page);
}
  56. /*
  57. * Return the offset into page `page_nr' of the last valid
  58. * byte in that page, plus one.
  59. */
  60. static unsigned int nilfs_last_byte(struct inode *inode, unsigned long page_nr)
  61. {
  62. unsigned int last_byte = inode->i_size;
  63. last_byte -= page_nr << PAGE_SHIFT;
  64. if (last_byte > PAGE_SIZE)
  65. last_byte = PAGE_SIZE;
  66. return last_byte;
  67. }
/*
 * Begin a write to the byte range [from, to) of a directory page,
 * mapping/allocating blocks via nilfs_get_block.  Callers in this file
 * hold the page lock across prepare/commit.  Returns 0 or a negative
 * errno from __block_write_begin().
 */
static int nilfs_prepare_chunk(struct page *page, unsigned int from,
                               unsigned int to)
{
        loff_t pos = page_offset(page) + from;

        return __block_write_begin(page, pos, to - from, nilfs_get_block);
}
/*
 * Complete a write started by nilfs_prepare_chunk(): commit the buffers,
 * extend i_size if the write reached past it, request a synchronous
 * transaction for DIRSYNC directories, and unlock the page.
 */
static void nilfs_commit_chunk(struct page *page,
                               struct address_space *mapping,
                               unsigned int from, unsigned int to)
{
        struct inode *dir = mapping->host;
        loff_t pos = page_offset(page) + from;
        unsigned int len = to - from;
        unsigned int nr_dirty, copied;
        int err;

        /*
         * Count buffers that are still clean in the range before
         * block_write_end() commits them, for dirty-file accounting.
         */
        nr_dirty = nilfs_page_count_clean_buffers(page, from, to);
        copied = block_write_end(NULL, mapping, pos, len, len, page, NULL);
        if (pos + copied > dir->i_size)
                i_size_write(dir, pos + copied);
        if (IS_DIRSYNC(dir))
                nilfs_set_transaction_flag(NILFS_TI_SYNC);
        err = nilfs_set_file_dirty(dir, nr_dirty);
        WARN_ON(err); /* do not happen */
        unlock_page(page);
}
  93. static bool nilfs_check_page(struct page *page)
  94. {
  95. struct inode *dir = page->mapping->host;
  96. struct super_block *sb = dir->i_sb;
  97. unsigned int chunk_size = nilfs_chunk_size(dir);
  98. char *kaddr = page_address(page);
  99. unsigned int offs, rec_len;
  100. unsigned int limit = PAGE_SIZE;
  101. struct nilfs_dir_entry *p;
  102. char *error;
  103. if ((dir->i_size >> PAGE_SHIFT) == page->index) {
  104. limit = dir->i_size & ~PAGE_MASK;
  105. if (limit & (chunk_size - 1))
  106. goto Ebadsize;
  107. if (!limit)
  108. goto out;
  109. }
  110. for (offs = 0; offs <= limit - NILFS_DIR_REC_LEN(1); offs += rec_len) {
  111. p = (struct nilfs_dir_entry *)(kaddr + offs);
  112. rec_len = nilfs_rec_len_from_disk(p->rec_len);
  113. if (rec_len < NILFS_DIR_REC_LEN(1))
  114. goto Eshort;
  115. if (rec_len & 3)
  116. goto Ealign;
  117. if (rec_len < NILFS_DIR_REC_LEN(p->name_len))
  118. goto Enamelen;
  119. if (((offs + rec_len - 1) ^ offs) & ~(chunk_size-1))
  120. goto Espan;
  121. }
  122. if (offs != limit)
  123. goto Eend;
  124. out:
  125. SetPageChecked(page);
  126. return true;
  127. /* Too bad, we had an error */
  128. Ebadsize:
  129. nilfs_error(sb, "nilfs_check_page",
  130. "size of directory #%lu is not a multiple of chunk size",
  131. dir->i_ino
  132. );
  133. goto fail;
  134. Eshort:
  135. error = "rec_len is smaller than minimal";
  136. goto bad_entry;
  137. Ealign:
  138. error = "unaligned directory entry";
  139. goto bad_entry;
  140. Enamelen:
  141. error = "rec_len is too small for name_len";
  142. goto bad_entry;
  143. Espan:
  144. error = "directory entry across blocks";
  145. bad_entry:
  146. nilfs_error(sb, "nilfs_check_page", "bad entry in directory #%lu: %s - "
  147. "offset=%lu, inode=%lu, rec_len=%d, name_len=%d",
  148. dir->i_ino, error, (page->index<<PAGE_SHIFT)+offs,
  149. (unsigned long) le64_to_cpu(p->inode),
  150. rec_len, p->name_len);
  151. goto fail;
  152. Eend:
  153. p = (struct nilfs_dir_entry *)(kaddr + offs);
  154. nilfs_error(sb, "nilfs_check_page",
  155. "entry in directory #%lu spans the page boundary"
  156. "offset=%lu, inode=%lu",
  157. dir->i_ino, (page->index<<PAGE_SHIFT)+offs,
  158. (unsigned long) le64_to_cpu(p->inode));
  159. fail:
  160. SetPageError(page);
  161. return false;
  162. }
/*
 * Read directory page @n of @dir through the page cache, kmap it, and
 * validate its entries (validation runs once per page, cached via the
 * PageChecked flag).  Returns the mapped page, or ERR_PTR(-EIO) for an
 * unreadable or corrupt page.  Release with nilfs_put_page().
 */
static struct page *nilfs_get_page(struct inode *dir, unsigned long n)
{
        struct address_space *mapping = dir->i_mapping;
        struct page *page = read_mapping_page(mapping, n, NULL);

        if (!IS_ERR(page)) {
                kmap(page);
                if (unlikely(!PageChecked(page))) {
                        if (PageError(page) || !nilfs_check_page(page))
                                goto fail;
                }
        }
        return page;

fail:
        nilfs_put_page(page);
        return ERR_PTR(-EIO);
}
  179. /*
  180. * NOTE! unlike strncmp, nilfs_match returns 1 for success, 0 for failure.
  181. *
  182. * len <= NILFS_NAME_LEN and de != NULL are guaranteed by caller.
  183. */
  184. static int
  185. nilfs_match(int len, const unsigned char *name, struct nilfs_dir_entry *de)
  186. {
  187. if (len != de->name_len)
  188. return 0;
  189. if (!de->inode)
  190. return 0;
  191. return !memcmp(name, de->name, len);
  192. }
  193. /*
  194. * p is at least 6 bytes before the end of page
  195. */
  196. static struct nilfs_dir_entry *nilfs_next_entry(struct nilfs_dir_entry *p)
  197. {
  198. return (struct nilfs_dir_entry *)((char *)p +
  199. nilfs_rec_len_from_disk(p->rec_len));
  200. }
/* Map on-disk NILFS_FT_* file types to the DT_* codes reported by readdir. */
static unsigned char
nilfs_filetype_table[NILFS_FT_MAX] = {
        [NILFS_FT_UNKNOWN]      = DT_UNKNOWN,
        [NILFS_FT_REG_FILE]     = DT_REG,
        [NILFS_FT_DIR]          = DT_DIR,
        [NILFS_FT_CHRDEV]       = DT_CHR,
        [NILFS_FT_BLKDEV]       = DT_BLK,
        [NILFS_FT_FIFO]         = DT_FIFO,
        [NILFS_FT_SOCK]         = DT_SOCK,
        [NILFS_FT_SYMLINK]      = DT_LNK,
};
/* Shift that brings the S_IFMT bits of i_mode down to a small index. */
#define S_SHIFT 12

/* Map the S_IFMT portion of an inode mode to on-disk NILFS_FT_* types. */
static unsigned char
nilfs_type_by_mode[S_IFMT >> S_SHIFT] = {
        [S_IFREG >> S_SHIFT]    = NILFS_FT_REG_FILE,
        [S_IFDIR >> S_SHIFT]    = NILFS_FT_DIR,
        [S_IFCHR >> S_SHIFT]    = NILFS_FT_CHRDEV,
        [S_IFBLK >> S_SHIFT]    = NILFS_FT_BLKDEV,
        [S_IFIFO >> S_SHIFT]    = NILFS_FT_FIFO,
        [S_IFSOCK >> S_SHIFT]   = NILFS_FT_SOCK,
        [S_IFLNK >> S_SHIFT]    = NILFS_FT_SYMLINK,
};
/* Set the on-disk file-type byte of @de from @inode's mode. */
static void nilfs_set_de_type(struct nilfs_dir_entry *de, struct inode *inode)
{
        umode_t mode = inode->i_mode;

        de->file_type = nilfs_type_by_mode[(mode & S_IFMT) >> S_SHIFT];
}
/*
 * nilfs_readdir - iterate over the entries of a directory
 *
 * Walks the directory page by page starting at ctx->pos, emitting every
 * live entry (de->inode != 0) through dir_emit().  ctx->pos advances by
 * each visited entry's on-disk rec_len so a subsequent call resumes
 * where this one stopped.  Returns 0 at end (or when the caller's
 * buffer fills), -EIO on a corrupt page or entry.
 */
static int nilfs_readdir(struct file *file, struct dir_context *ctx)
{
        loff_t pos = ctx->pos;
        struct inode *inode = file_inode(file);
        struct super_block *sb = inode->i_sb;
        unsigned int offset = pos & ~PAGE_MASK;
        unsigned long n = pos >> PAGE_SHIFT;
        unsigned long npages = dir_pages(inode);

        /* done if not even a minimal entry can start at pos */
        if (pos > inode->i_size - NILFS_DIR_REC_LEN(1))
                return 0;

        for ( ; n < npages; n++, offset = 0) {
                char *kaddr, *limit;
                struct nilfs_dir_entry *de;
                struct page *page = nilfs_get_page(inode, n);

                if (IS_ERR(page)) {
                        nilfs_error(sb, __func__, "bad page in #%lu",
                                    inode->i_ino);
                        /* skip the remainder of this page on a retry */
                        ctx->pos += PAGE_SIZE - offset;
                        return -EIO;
                }
                kaddr = page_address(page);
                de = (struct nilfs_dir_entry *)(kaddr + offset);
                /* last address at which a minimal entry could still start */
                limit = kaddr + nilfs_last_byte(inode, n) -
                        NILFS_DIR_REC_LEN(1);
                for ( ; (char *)de <= limit; de = nilfs_next_entry(de)) {
                        if (de->rec_len == 0) {
                                nilfs_error(sb, __func__,
                                            "zero-length directory entry");
                                nilfs_put_page(page);
                                return -EIO;
                        }
                        if (de->inode) {
                                unsigned char t;

                                if (de->file_type < NILFS_FT_MAX)
                                        t = nilfs_filetype_table[de->file_type];
                                else
                                        t = DT_UNKNOWN;

                                if (!dir_emit(ctx, de->name, de->name_len,
                                              le64_to_cpu(de->inode), t)) {
                                        /* caller's buffer is full; stop */
                                        nilfs_put_page(page);
                                        return 0;
                                }
                        }
                        ctx->pos += nilfs_rec_len_from_disk(de->rec_len);
                }
                nilfs_put_page(page);
        }
        return 0;
}
/*
 * nilfs_find_entry()
 *
 * finds an entry in the specified directory with the wanted name. It
 * returns the page in which the entry was found, and the entry itself
 * (as a parameter - res_dir). Page is returned mapped and unlocked.
 * Entry is guaranteed to be valid.
 *
 * NOTE(review): when the directory has no pages, *res_page is never
 * written (the NULL store below is skipped on that path); callers
 * appear to test the returned entry pointer before touching *res_page,
 * but confirm before relying on it.
 */
struct nilfs_dir_entry *
nilfs_find_entry(struct inode *dir, const struct qstr *qstr,
                 struct page **res_page)
{
        const unsigned char *name = qstr->name;
        int namelen = qstr->len;
        unsigned int reclen = NILFS_DIR_REC_LEN(namelen);
        unsigned long start, n;
        unsigned long npages = dir_pages(dir);
        struct page *page = NULL;
        struct nilfs_inode_info *ei = NILFS_I(dir);
        struct nilfs_dir_entry *de;

        if (npages == 0)
                goto out;

        /* OFFSET_CACHE */
        *res_page = NULL;

        /* resume at the page where the previous lookup succeeded */
        start = ei->i_dir_start_lookup;
        if (start >= npages)
                start = 0;
        n = start;
        do {
                char *kaddr;

                page = nilfs_get_page(dir, n);
                if (!IS_ERR(page)) {
                        kaddr = page_address(page);
                        de = (struct nilfs_dir_entry *)kaddr;
                        /* last address a matching entry could start at */
                        kaddr += nilfs_last_byte(dir, n) - reclen;
                        while ((char *)de <= kaddr) {
                                if (de->rec_len == 0) {
                                        nilfs_error(dir->i_sb, __func__,
                                                    "zero-length directory entry");
                                        nilfs_put_page(page);
                                        goto out;
                                }
                                if (nilfs_match(namelen, name, de))
                                        goto found;
                                de = nilfs_next_entry(de);
                        }
                        nilfs_put_page(page);
                }
                if (++n >= npages)
                        n = 0;
                /* next page is past the blocks we've got */
                if (unlikely(n > (dir->i_blocks >> (PAGE_SHIFT - 9)))) {
                        nilfs_error(dir->i_sb, __func__,
                                    "dir %lu size %lld exceeds block count %llu",
                                    dir->i_ino, dir->i_size,
                                    (unsigned long long)dir->i_blocks);
                        goto out;
                }
        } while (n != start);
out:
        return NULL;

found:
        *res_page = page;
        /* cache the winning page for the next lookup in this directory */
        ei->i_dir_start_lookup = n;
        return de;
}
  343. struct nilfs_dir_entry *nilfs_dotdot(struct inode *dir, struct page **p)
  344. {
  345. struct page *page = nilfs_get_page(dir, 0);
  346. struct nilfs_dir_entry *de = NULL;
  347. if (!IS_ERR(page)) {
  348. de = nilfs_next_entry(
  349. (struct nilfs_dir_entry *)page_address(page));
  350. *p = page;
  351. }
  352. return de;
  353. }
  354. ino_t nilfs_inode_by_name(struct inode *dir, const struct qstr *qstr)
  355. {
  356. ino_t res = 0;
  357. struct nilfs_dir_entry *de;
  358. struct page *page;
  359. de = nilfs_find_entry(dir, qstr, &page);
  360. if (de) {
  361. res = le64_to_cpu(de->inode);
  362. kunmap(page);
  363. put_page(page);
  364. }
  365. return res;
  366. }
/* Releases the page */
/*
 * Repoint the existing directory entry @de (mapped in @page) at @inode,
 * refresh its type byte, and update the parent directory's timestamps.
 * The page reference obtained from nilfs_find_entry()/nilfs_dotdot()
 * is dropped here.
 */
void nilfs_set_link(struct inode *dir, struct nilfs_dir_entry *de,
                    struct page *page, struct inode *inode)
{
        unsigned int from = (char *)de - (char *)page_address(page);
        unsigned int to = from + nilfs_rec_len_from_disk(de->rec_len);
        struct address_space *mapping = page->mapping;
        int err;

        lock_page(page);
        err = nilfs_prepare_chunk(page, from, to);
        BUG_ON(err); /* preparing an in-core directory chunk is not expected to fail */
        de->inode = cpu_to_le64(inode->i_ino);
        nilfs_set_de_type(de, inode);
        nilfs_commit_chunk(page, mapping, from, to);
        nilfs_put_page(page);
        dir->i_mtime = dir->i_ctime = CURRENT_TIME;
}
/*
 * Parent is locked.
 */
/*
 * nilfs_add_link - add @dentry's name, pointing at @inode, to the
 * parent directory.
 *
 * Scans the directory for a usable slot: an unused (deleted) entry big
 * enough for the new record, slack space inside a live entry that can
 * be split off, or — when the scan reaches i_size — a freshly appended
 * chunk.  Returns 0 on success, -EEXIST if the name already exists,
 * -EIO on corruption, or a negative errno from page lookup / block
 * preparation.
 */
int nilfs_add_link(struct dentry *dentry, struct inode *inode)
{
        struct inode *dir = d_inode(dentry->d_parent);
        const unsigned char *name = dentry->d_name.name;
        int namelen = dentry->d_name.len;
        unsigned int chunk_size = nilfs_chunk_size(dir);
        unsigned int reclen = NILFS_DIR_REC_LEN(namelen);
        unsigned short rec_len, name_len;
        struct page *page = NULL;
        struct nilfs_dir_entry *de;
        unsigned long npages = dir_pages(dir);
        unsigned long n;
        char *kaddr;
        unsigned int from, to;
        int err;

        /*
         * We take care of directory expansion in the same loop.
         * This code plays outside i_size, so it locks the page
         * to protect that region.
         */
        for (n = 0; n <= npages; n++) {
                char *dir_end;

                page = nilfs_get_page(dir, n);
                err = PTR_ERR(page);
                if (IS_ERR(page))
                        goto out;
                lock_page(page);
                kaddr = page_address(page);
                dir_end = kaddr + nilfs_last_byte(dir, n);
                de = (struct nilfs_dir_entry *)kaddr;
                kaddr += PAGE_SIZE - reclen;
                while ((char *)de <= kaddr) {
                        if ((char *)de == dir_end) {
                                /* We hit i_size: append a new empty chunk */
                                name_len = 0;
                                rec_len = chunk_size;
                                de->rec_len = nilfs_rec_len_to_disk(chunk_size);
                                de->inode = 0;
                                goto got_it;
                        }
                        if (de->rec_len == 0) {
                                nilfs_error(dir->i_sb, __func__,
                                            "zero-length directory entry");
                                err = -EIO;
                                goto out_unlock;
                        }
                        err = -EEXIST;
                        if (nilfs_match(namelen, name, de))
                                goto out_unlock;
                        /* name_len: bytes this entry actually needs */
                        name_len = NILFS_DIR_REC_LEN(de->name_len);
                        rec_len = nilfs_rec_len_from_disk(de->rec_len);
                        /* reuse a deleted entry that is large enough ... */
                        if (!de->inode && rec_len >= reclen)
                                goto got_it;
                        /* ... or split the slack space of a live one */
                        if (rec_len >= name_len + reclen)
                                goto got_it;
                        de = (struct nilfs_dir_entry *)((char *)de + rec_len);
                }
                unlock_page(page);
                nilfs_put_page(page);
        }
        /* unreachable: the i_size branch above always fires by the last pass */
        BUG();
        return -EINVAL;

got_it:
        from = (char *)de - (char *)page_address(page);
        to = from + rec_len;
        err = nilfs_prepare_chunk(page, from, to);
        if (err)
                goto out_unlock;
        if (de->inode) {
                /* split: shrink the live entry, point de at its tail */
                struct nilfs_dir_entry *de1;

                de1 = (struct nilfs_dir_entry *)((char *)de + name_len);
                de1->rec_len = nilfs_rec_len_to_disk(rec_len - name_len);
                de->rec_len = nilfs_rec_len_to_disk(name_len);
                de = de1;
        }
        de->name_len = namelen;
        memcpy(de->name, name, namelen);
        de->inode = cpu_to_le64(inode->i_ino);
        nilfs_set_de_type(de, inode);
        nilfs_commit_chunk(page, page->mapping, from, to);
        dir->i_mtime = dir->i_ctime = CURRENT_TIME;
        nilfs_mark_inode_dirty(dir);
        /* OFFSET_CACHE */
out_put:
        nilfs_put_page(page);
out:
        return err;
out_unlock:
        unlock_page(page);
        goto out_put;
}
/*
 * nilfs_delete_entry deletes a directory entry by merging it with the
 * previous entry. Page is up-to-date. Releases the page.
 */
int nilfs_delete_entry(struct nilfs_dir_entry *dir, struct page *page)
{
        struct address_space *mapping = page->mapping;
        struct inode *inode = mapping->host;
        char *kaddr = page_address(page);
        unsigned int from, to;
        struct nilfs_dir_entry *de, *pde = NULL;
        int err;

        /* scan the chunk containing @dir to locate its predecessor */
        from = ((char *)dir - kaddr) & ~(nilfs_chunk_size(inode) - 1);
        to = ((char *)dir - kaddr) + nilfs_rec_len_from_disk(dir->rec_len);
        de = (struct nilfs_dir_entry *)(kaddr + from);
        while ((char *)de < (char *)dir) {
                if (de->rec_len == 0) {
                        nilfs_error(inode->i_sb, __func__,
                                    "zero-length directory entry");
                        err = -EIO;
                        goto out;
                }
                pde = de;
                de = nilfs_next_entry(de);
        }
        if (pde)
                from = (char *)pde - (char *)page_address(page);
        lock_page(page);
        err = nilfs_prepare_chunk(page, from, to);
        BUG_ON(err); /* preparing an in-core directory chunk is not expected to fail */
        if (pde)
                /* absorb the deleted record's space into its predecessor */
                pde->rec_len = nilfs_rec_len_to_disk(to - from);
        dir->inode = 0; /* mark the record itself unused */
        nilfs_commit_chunk(page, mapping, from, to);
        inode->i_ctime = inode->i_mtime = CURRENT_TIME;
out:
        nilfs_put_page(page);
        return err;
}
/*
 * Set the first fragment of directory.
 */
/*
 * Write the "." and ".." entries into the first chunk of the fresh
 * directory @inode, with ".." pointing at @parent.  Returns 0 on
 * success, -ENOMEM if the cache page cannot be grabbed, or the error
 * from chunk preparation.
 */
int nilfs_make_empty(struct inode *inode, struct inode *parent)
{
        struct address_space *mapping = inode->i_mapping;
        struct page *page = grab_cache_page(mapping, 0);
        unsigned int chunk_size = nilfs_chunk_size(inode);
        struct nilfs_dir_entry *de;
        int err;
        void *kaddr;

        if (!page)
                return -ENOMEM;

        err = nilfs_prepare_chunk(page, 0, chunk_size);
        if (unlikely(err)) {
                unlock_page(page);
                goto fail;
        }
        kaddr = kmap_atomic(page);
        memset(kaddr, 0, chunk_size);

        /* "." occupies a minimal record at the start of the chunk ... */
        de = (struct nilfs_dir_entry *)kaddr;
        de->name_len = 1;
        de->rec_len = nilfs_rec_len_to_disk(NILFS_DIR_REC_LEN(1));
        memcpy(de->name, ".\0\0", 4);
        de->inode = cpu_to_le64(inode->i_ino);
        nilfs_set_de_type(de, inode);

        /* ... and ".." takes all the remaining space of the chunk */
        de = (struct nilfs_dir_entry *)(kaddr + NILFS_DIR_REC_LEN(1));
        de->name_len = 2;
        de->rec_len = nilfs_rec_len_to_disk(chunk_size - NILFS_DIR_REC_LEN(1));
        de->inode = cpu_to_le64(parent->i_ino);
        memcpy(de->name, "..\0", 4);
        nilfs_set_de_type(de, inode);
        kunmap_atomic(kaddr);
        nilfs_commit_chunk(page, mapping, 0, chunk_size);
fail:
        put_page(page);
        return err;
}
/*
 * routine to check that the specified directory is empty (for rmdir)
 *
 * Returns 1 when every live entry is "." or "..", 0 otherwise (or on
 * any corruption found while scanning).
 */
int nilfs_empty_dir(struct inode *inode)
{
        struct page *page = NULL;
        unsigned long i, npages = dir_pages(inode);

        for (i = 0; i < npages; i++) {
                char *kaddr;
                struct nilfs_dir_entry *de;

                page = nilfs_get_page(inode, i);
                /*
                 * NOTE(review): an unreadable page is skipped, so live
                 * entries residing on such a page would not prevent an
                 * "empty" verdict — confirm this best-effort behavior
                 * is intended.
                 */
                if (IS_ERR(page))
                        continue;

                kaddr = page_address(page);
                de = (struct nilfs_dir_entry *)kaddr;
                /* last address at which a minimal entry could start */
                kaddr += nilfs_last_byte(inode, i) - NILFS_DIR_REC_LEN(1);

                while ((char *)de <= kaddr) {
                        if (de->rec_len == 0) {
                                nilfs_error(inode->i_sb, __func__,
                                            "zero-length directory entry (kaddr=%p, de=%p)",
                                            kaddr, de);
                                goto not_empty;
                        }
                        if (de->inode != 0) {
                                /* check for . and .. */
                                if (de->name[0] != '.')
                                        goto not_empty;
                                if (de->name_len > 2)
                                        goto not_empty;
                                if (de->name_len < 2) {
                                        /* "." must point at this directory */
                                        if (de->inode !=
                                            cpu_to_le64(inode->i_ino))
                                                goto not_empty;
                                } else if (de->name[1] != '.')
                                        goto not_empty;
                        }
                        de = nilfs_next_entry(de);
                }
                nilfs_put_page(page);
        }
        return 1;

not_empty:
        nilfs_put_page(page);
        return 0;
}
/* File operations for NILFS directories. */
const struct file_operations nilfs_dir_operations = {
        .llseek         = generic_file_llseek,
        .read           = generic_read_dir,
        .iterate_shared = nilfs_readdir,
        .unlocked_ioctl = nilfs_ioctl,
#ifdef CONFIG_COMPAT
        .compat_ioctl   = nilfs_compat_ioctl,
#endif  /* CONFIG_COMPAT */
        .fsync          = nilfs_sync_file,
};