/*
 * linux/fs/nfs/direct.c
 *
 * Copyright (C) 2003 by Chuck Lever <cel@netapp.com>
 *
 * High-performance uncached I/O for the Linux NFS client
 *
 * There are important applications whose performance or correctness
 * depends on uncached access to file data.  Database clusters
 * (multiple copies of the same instance running on separate hosts)
 * implement their own cache coherency protocol that subsumes file
 * system cache protocols.  Applications that process datasets
 * considerably larger than the client's memory do not always benefit
 * from a local cache.  A streaming video server, for instance, has no
 * need to cache the contents of a file.
 *
 * When an application requests uncached I/O, all read and write requests
 * are made directly to the server; data stored or fetched via these
 * requests is not cached in the Linux page cache.  The client does not
 * correct unaligned requests from applications.  All requested bytes are
 * held on permanent storage before a direct write system call returns to
 * an application.
 *
 * Solaris implements an uncached I/O facility called directio() that
 * is used for backups and sequential I/O to very large files.  Solaris
 * also supports uncaching whole NFS partitions with "-o forcedirectio,"
 * an undocumented mount option.
 *
 * Designed by Jeff Kimmel, Chuck Lever, and Trond Myklebust, with
 * help from Andrew Morton.
 *
 * 18 Dec 2001	Initial implementation for 2.4  --cel
 * 08 Jul 2002	Version for 2.4.19, with bug fixes --trondmy
 * 08 Jun 2003	Port to 2.5 APIs  --cel
 * 31 Mar 2004	Handle direct I/O without VFS support  --cel
 * 15 Sep 2004	Parallel async reads  --cel
 * 04 May 2005	support O_DIRECT with aio  --cel
 *
 */
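/*
 * For reference, an application requests uncached I/O simply by opening
 * the file with O_DIRECT.  A minimal userspace sketch (hypothetical
 * example code, not part of this file):
 *
 *	#define _GNU_SOURCE
 *	#include <fcntl.h>
 *	#include <unistd.h>
 *
 *	int fd = open("/mnt/nfs/file", O_RDWR | O_DIRECT);
 *	char buf[4096];
 *	ssize_t n = pread(fd, buf, sizeof(buf), 0);
 *
 * Reads and writes on such a descriptor reach nfs_file_direct_read()
 * and nfs_file_direct_write() below.  As noted above, the client sends
 * unaligned requests to the server as-is rather than correcting them.
 */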
#include <linux/errno.h>
#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/file.h>
#include <linux/pagemap.h>
#include <linux/kref.h>
#include <linux/slab.h>
#include <linux/task_io_accounting_ops.h>
#include <linux/module.h>

#include <linux/nfs_fs.h>
#include <linux/nfs_page.h>
#include <linux/sunrpc/clnt.h>

#include <asm/uaccess.h>
#include <linux/atomic.h>

#include "internal.h"
#include "iostat.h"
#include "pnfs.h"

#define NFSDBG_FACILITY		NFSDBG_VFS

static struct kmem_cache *nfs_direct_cachep;

/*
 * This represents a set of asynchronous requests that we're waiting on
 */
struct nfs_direct_mirror {
        ssize_t count;
};

struct nfs_direct_req {
        struct kref             kref;           /* release manager */

        /* I/O parameters */
        struct nfs_open_context *ctx;           /* file open context info */
        struct nfs_lock_context *l_ctx;         /* Lock context info */
        struct kiocb *          iocb;           /* controlling i/o request */
        struct inode *          inode;          /* target file of i/o */

        /* completion state */
        atomic_t                io_count;       /* i/os we're waiting for */
        spinlock_t              lock;           /* protect completion state */

        struct nfs_direct_mirror mirrors[NFS_PAGEIO_DESCRIPTOR_MIRROR_MAX];
        int                     mirror_count;

        ssize_t                 count,          /* bytes actually processed */
                                max_count,      /* max expected count */
                                bytes_left,     /* bytes left to be sent */
                                io_start,       /* start of IO */
                                error;          /* any reported error */
        struct completion       completion;     /* wait for i/o completion */

        /* commit state */
        struct nfs_mds_commit_info mds_cinfo;   /* Storage for cinfo */
        struct pnfs_ds_commit_info ds_cinfo;    /* Storage for cinfo */
        struct work_struct      work;
        int                     flags;
#define NFS_ODIRECT_DO_COMMIT           (1)     /* an unstable reply was received */
#define NFS_ODIRECT_RESCHED_WRITES      (2)     /* write verification failed */
        struct nfs_writeverf    verf;           /* unstable write verifier */
};

static const struct nfs_pgio_completion_ops nfs_direct_write_completion_ops;
static const struct nfs_commit_completion_ops nfs_direct_commit_completion_ops;
static void nfs_direct_write_complete(struct nfs_direct_req *dreq, struct inode *inode);
static void nfs_direct_write_schedule_work(struct work_struct *work);
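/*
 * get_dreq()/put_dreq() track the number of in-flight I/O requests that
 * reference a dreq.  put_dreq() returns non-zero when the last reference
 * is dropped, at which point the caller is responsible for completing
 * the direct request.
 */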
static inline void get_dreq(struct nfs_direct_req *dreq)
{
        atomic_inc(&dreq->io_count);
}

static inline int put_dreq(struct nfs_direct_req *dreq)
{
        return atomic_dec_and_test(&dreq->io_count);
}
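/*
 * nfs_direct_good_bytes - account for bytes that completed successfully
 * @dreq - direct request being accounted
 * @hdr - completed pageio header
 *
 * For mirrored writes, dreq->count is the minimum byte count agreed
 * upon by all mirrors, so a short transfer on any one mirror caps the
 * overall result.
 */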
static void
nfs_direct_good_bytes(struct nfs_direct_req *dreq, struct nfs_pgio_header *hdr)
{
        int i;
        ssize_t count;

        WARN_ON_ONCE(dreq->count >= dreq->max_count);

        if (dreq->mirror_count == 1) {
                dreq->mirrors[hdr->pgio_mirror_idx].count += hdr->good_bytes;
                dreq->count += hdr->good_bytes;
        } else {
                /* mirrored writes */
                count = dreq->mirrors[hdr->pgio_mirror_idx].count;
                if (count + dreq->io_start < hdr->io_start + hdr->good_bytes) {
                        count = hdr->io_start + hdr->good_bytes - dreq->io_start;
                        dreq->mirrors[hdr->pgio_mirror_idx].count = count;
                }

                /* update the dreq->count by finding the minimum agreed count
                 * from all mirrors */
                count = dreq->mirrors[0].count;
                for (i = 1; i < dreq->mirror_count; i++)
                        count = min(count, dreq->mirrors[i].count);

                dreq->count = count;
        }
}
/*
 * nfs_direct_select_verf - select the right verifier
 * @dreq - direct request possibly spanning multiple servers
 * @ds_clp - nfs_client of data server or NULL if MDS / non-pnfs
 * @commit_idx - commit bucket index for the DS
 *
 * returns the correct verifier to use given the role of the server
 */
static struct nfs_writeverf *
nfs_direct_select_verf(struct nfs_direct_req *dreq,
                       struct nfs_client *ds_clp,
                       int commit_idx)
{
        struct nfs_writeverf *verfp = &dreq->verf;

#ifdef CONFIG_NFS_V4_1
        /*
         * pNFS is in use: use the DS verf, except when commit_through_mds
         * is set, in which case the layout segment has zero buckets.
         */
        if (ds_clp && dreq->ds_cinfo.nbuckets > 0) {
                if (commit_idx >= 0 && commit_idx < dreq->ds_cinfo.nbuckets)
                        verfp = &dreq->ds_cinfo.buckets[commit_idx].direct_verf;
                else
                        WARN_ON_ONCE(1);
        }
#endif
        return verfp;
}
/*
 * nfs_direct_set_hdr_verf - set the write/commit verifier
 * @dreq - direct request possibly spanning multiple servers
 * @hdr - pageio header to validate against previously seen verfs
 *
 * Set the server's (MDS or DS) "seen" verifier
 */
static void nfs_direct_set_hdr_verf(struct nfs_direct_req *dreq,
                                    struct nfs_pgio_header *hdr)
{
        struct nfs_writeverf *verfp;

        verfp = nfs_direct_select_verf(dreq, hdr->ds_clp, hdr->ds_commit_idx);
        WARN_ON_ONCE(verfp->committed >= 0);
        memcpy(verfp, &hdr->verf, sizeof(struct nfs_writeverf));
        WARN_ON_ONCE(verfp->committed < 0);
}

static int nfs_direct_cmp_verf(const struct nfs_writeverf *v1,
                               const struct nfs_writeverf *v2)
{
        return nfs_write_verifier_cmp(&v1->verifier, &v2->verifier);
}

/*
 * nfs_direct_set_or_cmp_hdr_verf - compare verifier for pgio header
 * @dreq - direct request possibly spanning multiple servers
 * @hdr - pageio header to validate against previously seen verf
 *
 * Set the server's "seen" verf if not initialized.
 * Returns the result of comparing @hdr->verf with the "seen" verf of
 * the server used by @hdr (DS or MDS).
 */
static int nfs_direct_set_or_cmp_hdr_verf(struct nfs_direct_req *dreq,
                                          struct nfs_pgio_header *hdr)
{
        struct nfs_writeverf *verfp;

        verfp = nfs_direct_select_verf(dreq, hdr->ds_clp, hdr->ds_commit_idx);
        if (verfp->committed < 0) {
                nfs_direct_set_hdr_verf(dreq, hdr);
                return 0;
        }
        return nfs_direct_cmp_verf(verfp, &hdr->verf);
}
/*
 * nfs_direct_cmp_commit_data_verf - compare verifier for commit data
 * @dreq - direct request possibly spanning multiple servers
 * @data - commit data to validate against previously seen verf
 *
 * returns result of comparison between @data->verf and the verf of
 * the server used by @data (DS or MDS)
 */
static int nfs_direct_cmp_commit_data_verf(struct nfs_direct_req *dreq,
                                           struct nfs_commit_data *data)
{
        struct nfs_writeverf *verfp;

        verfp = nfs_direct_select_verf(dreq, data->ds_clp,
                                       data->ds_commit_index);
        /* verifier not set so always fail */
        if (verfp->committed < 0)
                return 1;

        return nfs_direct_cmp_verf(verfp, &data->verf);
}

/**
 * nfs_direct_IO - NFS address space operation for direct I/O
 * @iocb: target I/O control block
 * @iter: I/O buffer
 *
 * The presence of this routine in the address space ops vector means
 * the NFS client supports direct I/O.  However, for most direct IO, we
 * shunt off direct read and write requests before the VFS gets them,
 * so this method is only ever called for swap.
 */
ssize_t nfs_direct_IO(struct kiocb *iocb, struct iov_iter *iter)
{
        struct inode *inode = iocb->ki_filp->f_mapping->host;

        /* we only support swap file calling nfs_direct_IO */
        if (!IS_SWAPFILE(inode))
                return 0;

        VM_BUG_ON(iov_iter_count(iter) != PAGE_SIZE);

        if (iov_iter_rw(iter) == READ)
                return nfs_file_direct_read(iocb, iter);
        return nfs_file_direct_write(iocb, iter);
}
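/* Drop the page references taken by iov_iter_get_pages_alloc() */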
static void nfs_direct_release_pages(struct page **pages, unsigned int npages)
{
        unsigned int i;
        for (i = 0; i < npages; i++)
                put_page(pages[i]);
}

void nfs_init_cinfo_from_dreq(struct nfs_commit_info *cinfo,
                              struct nfs_direct_req *dreq)
{
        cinfo->inode = dreq->inode;
        cinfo->mds = &dreq->mds_cinfo;
        cinfo->ds = &dreq->ds_cinfo;
        cinfo->dreq = dreq;
        cinfo->completion_ops = &nfs_direct_commit_completion_ops;
}

static inline void nfs_direct_setup_mirroring(struct nfs_direct_req *dreq,
                                              struct nfs_pageio_descriptor *pgio,
                                              struct nfs_page *req)
{
        int mirror_count = 1;

        if (pgio->pg_ops->pg_get_mirror_count)
                mirror_count = pgio->pg_ops->pg_get_mirror_count(pgio, req);
        dreq->mirror_count = mirror_count;
}
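/*
 * A new dreq carries two references: the kref_init() reference, dropped
 * by the I/O completion path via nfs_direct_complete(), and the extra
 * kref_get() reference, which belongs to the caller and is released
 * with nfs_direct_req_release().
 */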
static inline struct nfs_direct_req *nfs_direct_req_alloc(void)
{
        struct nfs_direct_req *dreq;

        dreq = kmem_cache_zalloc(nfs_direct_cachep, GFP_KERNEL);
        if (!dreq)
                return NULL;

        kref_init(&dreq->kref);
        kref_get(&dreq->kref);
        init_completion(&dreq->completion);
        INIT_LIST_HEAD(&dreq->mds_cinfo.list);
        dreq->verf.committed = NFS_INVALID_STABLE_HOW;	/* not set yet */
        INIT_WORK(&dreq->work, nfs_direct_write_schedule_work);
        dreq->mirror_count = 1;
        spin_lock_init(&dreq->lock);

        return dreq;
}

static void nfs_direct_req_free(struct kref *kref)
{
        struct nfs_direct_req *dreq = container_of(kref, struct nfs_direct_req, kref);

        nfs_free_pnfs_ds_cinfo(&dreq->ds_cinfo);
        if (dreq->l_ctx != NULL)
                nfs_put_lock_context(dreq->l_ctx);
        if (dreq->ctx != NULL)
                put_nfs_open_context(dreq->ctx);
        kmem_cache_free(nfs_direct_cachep, dreq);
}

static void nfs_direct_req_release(struct nfs_direct_req *dreq)
{
        kref_put(&dreq->kref, nfs_direct_req_free);
}

ssize_t nfs_dreq_bytes_left(struct nfs_direct_req *dreq)
{
        return dreq->bytes_left;
}
EXPORT_SYMBOL_GPL(nfs_dreq_bytes_left);
/*
 * Collects and returns the final error value/byte-count.
 */
static ssize_t nfs_direct_wait(struct nfs_direct_req *dreq)
{
        ssize_t result = -EIOCBQUEUED;

        /* Async requests don't wait here */
        if (dreq->iocb)
                goto out;

        result = wait_for_completion_killable(&dreq->completion);

        if (!result) {
                result = dreq->count;
                WARN_ON_ONCE(dreq->count < 0);
        }
        if (!result)
                result = dreq->error;

out:
        return (ssize_t) result;
}

/*
 * Synchronous I/O uses a stack-allocated iocb.  Thus we can't trust
 * the iocb is still valid here if this is a synchronous request.
 */
static void nfs_direct_complete(struct nfs_direct_req *dreq)
{
        struct inode *inode = dreq->inode;

        inode_dio_end(inode);

        if (dreq->iocb) {
                long res = (long) dreq->error;
                if (dreq->count != 0) {
                        res = (long) dreq->count;
                        WARN_ON_ONCE(dreq->count < 0);
                }
                dreq->iocb->ki_complete(dreq->iocb, res, 0);
        }

        complete(&dreq->completion);

        nfs_direct_req_release(dreq);
}

static void nfs_direct_readpage_release(struct nfs_page *req)
{
        dprintk("NFS: direct read done (%s/%llu %d@%lld)\n",
                req->wb_context->dentry->d_sb->s_id,
                (unsigned long long)NFS_FILEID(d_inode(req->wb_context->dentry)),
                req->wb_bytes,
                (long long)req_offset(req));
        nfs_release_request(req);
}
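/*
 * A READ has completed: account for the good bytes, mark the target
 * pages dirty so the VM knows they now hold valid data, and release the
 * per-page requests.  The dreq completes once the last outstanding read
 * finishes.
 */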
static void nfs_direct_read_completion(struct nfs_pgio_header *hdr)
{
        unsigned long bytes = 0;
        struct nfs_direct_req *dreq = hdr->dreq;

        if (test_bit(NFS_IOHDR_REDO, &hdr->flags))
                goto out_put;

        spin_lock(&dreq->lock);
        if (test_bit(NFS_IOHDR_ERROR, &hdr->flags) && (hdr->good_bytes == 0))
                dreq->error = hdr->error;
        else
                nfs_direct_good_bytes(dreq, hdr);

        spin_unlock(&dreq->lock);

        while (!list_empty(&hdr->pages)) {
                struct nfs_page *req = nfs_list_entry(hdr->pages.next);
                struct page *page = req->wb_page;

                if (!PageCompound(page) && bytes < hdr->good_bytes)
                        set_page_dirty(page);
                bytes += req->wb_bytes;
                nfs_list_remove_request(req);
                nfs_direct_readpage_release(req);
        }
out_put:
        if (put_dreq(dreq))
                nfs_direct_complete(dreq);
        hdr->release(hdr);
}

static void nfs_read_sync_pgio_error(struct list_head *head)
{
        struct nfs_page *req;

        while (!list_empty(head)) {
                req = nfs_list_entry(head->next);
                nfs_list_remove_request(req);
                nfs_release_request(req);
        }
}

static void nfs_direct_pgio_init(struct nfs_pgio_header *hdr)
{
        get_dreq(hdr->dreq);
}

static const struct nfs_pgio_completion_ops nfs_direct_read_completion_ops = {
        .error_cleanup = nfs_read_sync_pgio_error,
        .init_hdr = nfs_direct_pgio_init,
        .completion = nfs_direct_read_completion,
};
/*
 * For each rsize'd chunk of the user's buffer, dispatch an NFS READ
 * operation.  If iov_iter_get_pages_alloc() or nfs_create_request()
 * fails, bail and stop sending more reads.  Read length accounting is
 * handled automatically by nfs_direct_read_completion().  Otherwise,
 * if no requests have been sent, just return an error.
 */
static ssize_t nfs_direct_read_schedule_iovec(struct nfs_direct_req *dreq,
                                              struct iov_iter *iter,
                                              loff_t pos)
{
        struct nfs_pageio_descriptor desc;
        struct inode *inode = dreq->inode;
        ssize_t result = -EINVAL;
        size_t requested_bytes = 0;
        size_t rsize = max_t(size_t, NFS_SERVER(inode)->rsize, PAGE_SIZE);

        nfs_pageio_init_read(&desc, dreq->inode, false,
                             &nfs_direct_read_completion_ops);
        get_dreq(dreq);
        desc.pg_dreq = dreq;
        inode_dio_begin(inode);

        while (iov_iter_count(iter)) {
                struct page **pagevec;
                size_t bytes;
                size_t pgbase;
                unsigned npages, i;

                result = iov_iter_get_pages_alloc(iter, &pagevec,
                                                  rsize, &pgbase);
                if (result < 0)
                        break;

                bytes = result;
                iov_iter_advance(iter, bytes);
                npages = (result + pgbase + PAGE_SIZE - 1) / PAGE_SIZE;
                for (i = 0; i < npages; i++) {
                        struct nfs_page *req;
                        unsigned int req_len = min_t(size_t, bytes, PAGE_SIZE - pgbase);
                        /* XXX do we need to do the eof zeroing found in async_filler? */
                        req = nfs_create_request(dreq->ctx, pagevec[i], NULL,
                                                 pgbase, req_len);
                        if (IS_ERR(req)) {
                                result = PTR_ERR(req);
                                break;
                        }
                        req->wb_index = pos >> PAGE_SHIFT;
                        req->wb_offset = pos & ~PAGE_MASK;
                        if (!nfs_pageio_add_request(&desc, req)) {
                                result = desc.pg_error;
                                nfs_release_request(req);
                                break;
                        }
                        pgbase = 0;
                        bytes -= req_len;
                        requested_bytes += req_len;
                        pos += req_len;
                        dreq->bytes_left -= req_len;
                }
                nfs_direct_release_pages(pagevec, npages);
                kvfree(pagevec);
                if (result < 0)
                        break;
        }

        nfs_pageio_complete(&desc);

        /*
         * If no bytes were started, return the error, and let the
         * generic layer handle the completion.
         */
        if (requested_bytes == 0) {
                inode_dio_end(inode);
                nfs_direct_req_release(dreq);
                return result < 0 ? result : -EIO;
        }

        if (put_dreq(dreq))
                nfs_direct_complete(dreq);
        return 0;
}
/**
 * nfs_file_direct_read - file direct read operation for NFS files
 * @iocb: target I/O control block
 * @iter: vector of user buffers into which to read data
 *
 * We use this function for direct reads instead of calling
 * generic_file_aio_read() in order to avoid gfar's check to see if
 * the request starts before the end of the file.  For that check
 * to work, we must generate a GETATTR before each direct read, and
 * even then there is a window between the GETATTR and the subsequent
 * READ where the file size could change.  Our preference is simply
 * to do all reads the application wants, and the server will take
 * care of managing the end of file boundary.
 *
 * This function also eliminates unnecessarily updating the file's
 * atime locally, as the NFS server sets the file's atime, and this
 * client must read the updated atime from the server back into its
 * cache.
 */
ssize_t nfs_file_direct_read(struct kiocb *iocb, struct iov_iter *iter)
{
        struct file *file = iocb->ki_filp;
        struct address_space *mapping = file->f_mapping;
        struct inode *inode = mapping->host;
        struct nfs_direct_req *dreq;
        struct nfs_lock_context *l_ctx;
        ssize_t result = -EINVAL;
        size_t count = iov_iter_count(iter);

        nfs_add_stats(mapping->host, NFSIOS_DIRECTREADBYTES, count);

        dfprintk(FILE, "NFS: direct read(%pD2, %zd@%Ld)\n",
                file, count, (long long) iocb->ki_pos);

        result = 0;
        if (!count)
                goto out;

        task_io_account_read(count);

        result = -ENOMEM;
        dreq = nfs_direct_req_alloc();
        if (dreq == NULL)
                goto out;

        dreq->inode = inode;
        dreq->bytes_left = dreq->max_count = count;
        dreq->io_start = iocb->ki_pos;
        dreq->ctx = get_nfs_open_context(nfs_file_open_context(iocb->ki_filp));
        l_ctx = nfs_get_lock_context(dreq->ctx);
        if (IS_ERR(l_ctx)) {
                result = PTR_ERR(l_ctx);
                goto out_release;
        }
        dreq->l_ctx = l_ctx;
        if (!is_sync_kiocb(iocb))
                dreq->iocb = iocb;

        nfs_start_io_direct(inode);

        NFS_I(inode)->read_io += count;
        result = nfs_direct_read_schedule_iovec(dreq, iter, iocb->ki_pos);

        nfs_end_io_direct(inode);

        if (!result) {
                result = nfs_direct_wait(dreq);
                if (result > 0)
                        iocb->ki_pos += result;
        }

out_release:
        nfs_direct_req_release(dreq);
out:
        return result;
}
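/*
 * Gather all commit-pending requests for this direct write onto @list
 * so they can be rescheduled, including any requests still held by a
 * pNFS layout driver.
 */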
static void
nfs_direct_write_scan_commit_list(struct inode *inode,
                                  struct list_head *list,
                                  struct nfs_commit_info *cinfo)
{
        spin_lock(&cinfo->inode->i_lock);
#ifdef CONFIG_NFS_V4_1
        if (cinfo->ds != NULL && cinfo->ds->nwritten != 0)
                NFS_SERVER(inode)->pnfs_curr_ld->recover_commit_reqs(list, cinfo);
#endif
        nfs_scan_commit_list(&cinfo->mds->list, list, cinfo, 0);
        spin_unlock(&cinfo->inode->i_lock);
}
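/*
 * A commit failed or its verifier did not match: reset the dreq
 * accounting and resend every affected WRITE as a stable write.
 */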
static void nfs_direct_write_reschedule(struct nfs_direct_req *dreq)
{
        struct nfs_pageio_descriptor desc;
        struct nfs_page *req, *tmp;
        LIST_HEAD(reqs);
        struct nfs_commit_info cinfo;
        LIST_HEAD(failed);
        int i;

        nfs_init_cinfo_from_dreq(&cinfo, dreq);
        nfs_direct_write_scan_commit_list(dreq->inode, &reqs, &cinfo);

        dreq->count = 0;
        dreq->verf.committed = NFS_INVALID_STABLE_HOW;
        nfs_clear_pnfs_ds_commit_verifiers(&dreq->ds_cinfo);
        for (i = 0; i < dreq->mirror_count; i++)
                dreq->mirrors[i].count = 0;
        get_dreq(dreq);

        nfs_pageio_init_write(&desc, dreq->inode, FLUSH_STABLE, false,
                              &nfs_direct_write_completion_ops);
        desc.pg_dreq = dreq;

        req = nfs_list_entry(reqs.next);
        nfs_direct_setup_mirroring(dreq, &desc, req);
        if (desc.pg_error < 0) {
                list_splice_init(&reqs, &failed);
                goto out_failed;
        }

        list_for_each_entry_safe(req, tmp, &reqs, wb_list) {
                if (!nfs_pageio_add_request(&desc, req)) {
                        nfs_list_remove_request(req);
                        nfs_list_add_request(req, &failed);
                        spin_lock(&cinfo.inode->i_lock);
                        dreq->flags = 0;
                        if (desc.pg_error < 0)
                                dreq->error = desc.pg_error;
                        else
                                dreq->error = -EIO;
                        spin_unlock(&cinfo.inode->i_lock);
                }
                nfs_release_request(req);
        }
        nfs_pageio_complete(&desc);

out_failed:
        while (!list_empty(&failed)) {
                req = nfs_list_entry(failed.next);
                nfs_list_remove_request(req);
                nfs_unlock_and_release_request(req);
        }

        if (put_dreq(dreq))
                nfs_direct_write_complete(dreq, dreq->inode);
}
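/*
 * A COMMIT has finished.  If the server failed the commit, or returned
 * a write verifier different from the one seen at WRITE time, the data
 * may have been lost on the server, so the writes are rescheduled.
 */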
static void nfs_direct_commit_complete(struct nfs_commit_data *data)
{
        struct nfs_direct_req *dreq = data->dreq;
        struct nfs_commit_info cinfo;
        struct nfs_page *req;
        int status = data->task.tk_status;

        nfs_init_cinfo_from_dreq(&cinfo, dreq);
        if (status < 0) {
                dprintk("NFS: %5u commit failed with error %d.\n",
                        data->task.tk_pid, status);
                dreq->flags = NFS_ODIRECT_RESCHED_WRITES;
        } else if (nfs_direct_cmp_commit_data_verf(dreq, data)) {
                dprintk("NFS: %5u commit verify failed\n", data->task.tk_pid);
                dreq->flags = NFS_ODIRECT_RESCHED_WRITES;
        }

        dprintk("NFS: %5u commit returned %d\n", data->task.tk_pid, status);
        while (!list_empty(&data->pages)) {
                req = nfs_list_entry(data->pages.next);
                nfs_list_remove_request(req);
                if (dreq->flags == NFS_ODIRECT_RESCHED_WRITES) {
                        /* Note the rewrite will go through mds */
                        nfs_mark_request_commit(req, NULL, &cinfo, 0);
                } else
                        nfs_release_request(req);
                nfs_unlock_and_release_request(req);
        }

        if (atomic_dec_and_test(&cinfo.mds->rpcs_out))
                nfs_direct_write_complete(dreq, data->inode);
}
static void nfs_direct_resched_write(struct nfs_commit_info *cinfo,
                                     struct nfs_page *req)
{
        struct nfs_direct_req *dreq = cinfo->dreq;

        spin_lock(&dreq->lock);
        dreq->flags = NFS_ODIRECT_RESCHED_WRITES;
        spin_unlock(&dreq->lock);
        nfs_mark_request_commit(req, NULL, cinfo, 0);
}

static const struct nfs_commit_completion_ops nfs_direct_commit_completion_ops = {
        .completion = nfs_direct_commit_complete,
        .resched_write = nfs_direct_resched_write,
};
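/*
 * Send a COMMIT for everything on the dreq's commit list.  If the list
 * could not be committed (-ENOMEM), fall back to resending the writes
 * as stable writes.
 */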
static void nfs_direct_commit_schedule(struct nfs_direct_req *dreq)
{
        int res;
        struct nfs_commit_info cinfo;
        LIST_HEAD(mds_list);

        nfs_init_cinfo_from_dreq(&cinfo, dreq);
        nfs_scan_commit(dreq->inode, &mds_list, &cinfo);
        res = nfs_generic_commit_list(dreq->inode, &mds_list, 0, &cinfo);
        if (res < 0) /* res == -ENOMEM */
                nfs_direct_write_reschedule(dreq);
}

static void nfs_direct_write_schedule_work(struct work_struct *work)
{
        struct nfs_direct_req *dreq = container_of(work, struct nfs_direct_req, work);
        int flags = dreq->flags;

        dreq->flags = 0;
        switch (flags) {
        case NFS_ODIRECT_DO_COMMIT:
                nfs_direct_commit_schedule(dreq);
                break;
        case NFS_ODIRECT_RESCHED_WRITES:
                nfs_direct_write_reschedule(dreq);
                break;
        default:
                nfs_zap_mapping(dreq->inode, dreq->inode->i_mapping);
                nfs_direct_complete(dreq);
        }
}

static void nfs_direct_write_complete(struct nfs_direct_req *dreq, struct inode *inode)
{
        schedule_work(&dreq->work); /* Calls nfs_direct_write_schedule_work */
}
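/*
 * A WRITE has completed.  Record the good bytes and, if the server
 * replied with an unstable write, remember that a COMMIT is needed;
 * if verifiers from different WRITEs disagree, reschedule the writes
 * instead, since a server reboot may have lost the data.
 */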
static void nfs_direct_write_completion(struct nfs_pgio_header *hdr)
{
        struct nfs_direct_req *dreq = hdr->dreq;
        struct nfs_commit_info cinfo;
        bool request_commit = false;
        struct nfs_page *req = nfs_list_entry(hdr->pages.next);

        if (test_bit(NFS_IOHDR_REDO, &hdr->flags))
                goto out_put;

        nfs_init_cinfo_from_dreq(&cinfo, dreq);

        spin_lock(&dreq->lock);

        if (test_bit(NFS_IOHDR_ERROR, &hdr->flags)) {
                dreq->flags = 0;
                dreq->error = hdr->error;
        }
        if (dreq->error == 0) {
                nfs_direct_good_bytes(dreq, hdr);
                if (nfs_write_need_commit(hdr)) {
                        if (dreq->flags == NFS_ODIRECT_RESCHED_WRITES)
                                request_commit = true;
                        else if (dreq->flags == 0) {
                                nfs_direct_set_hdr_verf(dreq, hdr);
                                request_commit = true;
                                dreq->flags = NFS_ODIRECT_DO_COMMIT;
                        } else if (dreq->flags == NFS_ODIRECT_DO_COMMIT) {
                                request_commit = true;
                                if (nfs_direct_set_or_cmp_hdr_verf(dreq, hdr))
                                        dreq->flags =
                                                NFS_ODIRECT_RESCHED_WRITES;
                        }
                }
        }
        spin_unlock(&dreq->lock);

        while (!list_empty(&hdr->pages)) {

                req = nfs_list_entry(hdr->pages.next);
                nfs_list_remove_request(req);
                if (request_commit) {
                        kref_get(&req->wb_kref);
                        nfs_mark_request_commit(req, hdr->lseg, &cinfo,
                                hdr->ds_commit_idx);
                }
                nfs_unlock_and_release_request(req);
        }

out_put:
        if (put_dreq(dreq))
                nfs_direct_write_complete(dreq, hdr->inode);
        hdr->release(hdr);
}

static void nfs_write_sync_pgio_error(struct list_head *head)
{
        struct nfs_page *req;

        while (!list_empty(head)) {
                req = nfs_list_entry(head->next);
                nfs_list_remove_request(req);
                nfs_unlock_and_release_request(req);
        }
}
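/*
 * The pgio layer could not complete this WRITE and wants it resent.
 * Flag the dreq to reschedule its writes, and fake a successful
 * unstable reply so the common NFS code requeues all the pages.
 */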
static void nfs_direct_write_reschedule_io(struct nfs_pgio_header *hdr)
{
        struct nfs_direct_req *dreq = hdr->dreq;

        spin_lock(&dreq->lock);
        if (dreq->error == 0) {
                dreq->flags = NFS_ODIRECT_RESCHED_WRITES;
                /* fake unstable write to let common nfs resend pages */
                hdr->verf.committed = NFS_UNSTABLE;
                hdr->good_bytes = hdr->args.count;
        }
        spin_unlock(&dreq->lock);
}

static const struct nfs_pgio_completion_ops nfs_direct_write_completion_ops = {
        .error_cleanup = nfs_write_sync_pgio_error,
        .init_hdr = nfs_direct_pgio_init,
        .completion = nfs_direct_write_completion,
        .reschedule_io = nfs_direct_write_reschedule_io,
};
/*
 * NB: Return the value of the first error return code.  Subsequent
 * errors after the first one are ignored.
 *
 * For each wsize'd chunk of the user's buffer, dispatch an NFS WRITE
 * operation.  If iov_iter_get_pages_alloc() or nfs_create_request()
 * fails, bail and stop sending more writes.  Write length accounting
 * is handled automatically by nfs_direct_write_completion().
 * Otherwise, if no requests have been sent, just return an error.
 */
static ssize_t nfs_direct_write_schedule_iovec(struct nfs_direct_req *dreq,
                                               struct iov_iter *iter,
                                               loff_t pos)
{
        struct nfs_pageio_descriptor desc;
        struct inode *inode = dreq->inode;
        ssize_t result = 0;
        size_t requested_bytes = 0;
        size_t wsize = max_t(size_t, NFS_SERVER(inode)->wsize, PAGE_SIZE);

        nfs_pageio_init_write(&desc, inode, FLUSH_COND_STABLE, false,
                              &nfs_direct_write_completion_ops);
        desc.pg_dreq = dreq;
        get_dreq(dreq);
        inode_dio_begin(inode);

        NFS_I(inode)->write_io += iov_iter_count(iter);
        while (iov_iter_count(iter)) {
                struct page **pagevec;
                size_t bytes;
                size_t pgbase;
                unsigned npages, i;

                result = iov_iter_get_pages_alloc(iter, &pagevec,
                                                  wsize, &pgbase);
                if (result < 0)
                        break;

                bytes = result;
                iov_iter_advance(iter, bytes);
                npages = (result + pgbase + PAGE_SIZE - 1) / PAGE_SIZE;
                for (i = 0; i < npages; i++) {
                        struct nfs_page *req;
                        unsigned int req_len = min_t(size_t, bytes, PAGE_SIZE - pgbase);

                        req = nfs_create_request(dreq->ctx, pagevec[i], NULL,
                                                 pgbase, req_len);
                        if (IS_ERR(req)) {
                                result = PTR_ERR(req);
                                break;
                        }

                        nfs_direct_setup_mirroring(dreq, &desc, req);
                        if (desc.pg_error < 0) {
                                nfs_free_request(req);
                                result = desc.pg_error;
                                break;
                        }

                        nfs_lock_request(req);
                        req->wb_index = pos >> PAGE_SHIFT;
                        req->wb_offset = pos & ~PAGE_MASK;
                        if (!nfs_pageio_add_request(&desc, req)) {
                                result = desc.pg_error;
                                nfs_unlock_and_release_request(req);
                                break;
                        }
                        pgbase = 0;
                        bytes -= req_len;
                        requested_bytes += req_len;
                        pos += req_len;
                        dreq->bytes_left -= req_len;
                }
                nfs_direct_release_pages(pagevec, npages);
                kvfree(pagevec);
                if (result < 0)
                        break;
        }
        nfs_pageio_complete(&desc);

        /*
         * If no bytes were started, return the error, and let the
         * generic layer handle the completion.
         */
        if (requested_bytes == 0) {
                inode_dio_end(inode);
                nfs_direct_req_release(dreq);
                return result < 0 ? result : -EIO;
        }

        if (put_dreq(dreq))
                nfs_direct_write_complete(dreq, dreq->inode);
        return 0;
}
/**
 * nfs_file_direct_write - file direct write operation for NFS files
 * @iocb: target I/O control block
 * @iter: vector of user buffers from which to write data
 *
 * We use this function for direct writes instead of calling
 * generic_file_aio_write() in order to avoid taking the inode
 * semaphore and updating the i_size.  The NFS server will set
 * the new i_size and this client must read the updated size
 * back into its cache.  We let the server do generic write
 * parameter checking and report problems.
 *
 * We eliminate local atime updates, see direct read above.
 *
 * We avoid unnecessary page cache invalidations for normal cached
 * readers of this file.
 *
 * Note that O_APPEND is not supported for NFS direct writes, as there
 * is no atomic O_APPEND write facility in the NFS protocol.
 */
ssize_t nfs_file_direct_write(struct kiocb *iocb, struct iov_iter *iter)
{
        ssize_t result = -EINVAL;
        size_t count;
        struct file *file = iocb->ki_filp;
        struct address_space *mapping = file->f_mapping;
        struct inode *inode = mapping->host;
        struct nfs_direct_req *dreq;
        struct nfs_lock_context *l_ctx;
        loff_t pos, end;

        dfprintk(FILE, "NFS: direct write(%pD2, %zd@%Ld)\n",
                file, iov_iter_count(iter), (long long) iocb->ki_pos);

        result = generic_write_checks(iocb, iter);
        if (result <= 0)
                return result;
        count = result;
        nfs_add_stats(mapping->host, NFSIOS_DIRECTWRITTENBYTES, count);

        pos = iocb->ki_pos;
        end = (pos + iov_iter_count(iter) - 1) >> PAGE_SHIFT;

        task_io_account_write(count);

        result = -ENOMEM;
        dreq = nfs_direct_req_alloc();
        if (!dreq)
                goto out;

        dreq->inode = inode;
        dreq->bytes_left = dreq->max_count = count;
        dreq->io_start = pos;
        dreq->ctx = get_nfs_open_context(nfs_file_open_context(iocb->ki_filp));
        l_ctx = nfs_get_lock_context(dreq->ctx);
        if (IS_ERR(l_ctx)) {
                result = PTR_ERR(l_ctx);
                goto out_release;
        }
        dreq->l_ctx = l_ctx;
        if (!is_sync_kiocb(iocb))
                dreq->iocb = iocb;

        nfs_start_io_direct(inode);

        result = nfs_direct_write_schedule_iovec(dreq, iter, pos);

        if (mapping->nrpages) {
                invalidate_inode_pages2_range(mapping,
                                              pos >> PAGE_SHIFT, end);
        }

        nfs_end_io_direct(inode);

        if (!result) {
                result = nfs_direct_wait(dreq);
                if (result > 0) {
                        iocb->ki_pos = pos + result;
                        /* XXX: should check the generic_write_sync retval */
                        generic_write_sync(iocb, result);
                }
        }
out_release:
        nfs_direct_req_release(dreq);
out:
        return result;
}
/**
 * nfs_init_directcache - create a slab cache for nfs_direct_req structures
 *
 */
int __init nfs_init_directcache(void)
{
        nfs_direct_cachep = kmem_cache_create("nfs_direct_cache",
                                              sizeof(struct nfs_direct_req),
                                              0, (SLAB_RECLAIM_ACCOUNT|
                                                  SLAB_MEM_SPREAD),
                                              NULL);
        if (nfs_direct_cachep == NULL)
                return -ENOMEM;

        return 0;
}

/**
 * nfs_destroy_directcache - destroy the slab cache for nfs_direct_req structures
 *
 */
void nfs_destroy_directcache(void)
{
        kmem_cache_destroy(nfs_direct_cachep);
}