libiscsi_tcp.c

/*
 * iSCSI over TCP/IP Data-Path lib
 *
 * Copyright (C) 2004 Dmitry Yusupov
 * Copyright (C) 2004 Alex Aizman
 * Copyright (C) 2005 - 2006 Mike Christie
 * Copyright (C) 2006 Red Hat, Inc. All rights reserved.
 * maintained by open-iscsi@googlegroups.com
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published
 * by the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 *
 * See the file COPYING included with this distribution for more details.
 *
 * Credits:
 *	Christoph Hellwig
 *	FUJITA Tomonori
 *	Arne Redlich
 *	Zhenyu Wang
 */

#include <crypto/hash.h>
#include <linux/types.h>
#include <linux/list.h>
#include <linux/inet.h>
#include <linux/slab.h>
#include <linux/file.h>
#include <linux/blkdev.h>
#include <linux/delay.h>
#include <linux/kfifo.h>
#include <linux/scatterlist.h>
#include <linux/module.h>
#include <net/tcp.h>
#include <scsi/scsi_cmnd.h>
#include <scsi/scsi_device.h>
#include <scsi/scsi_host.h>
#include <scsi/scsi.h>
#include <scsi/scsi_transport_iscsi.h>

#include "iscsi_tcp.h"

MODULE_AUTHOR("Mike Christie <michaelc@cs.wisc.edu>, "
	      "Dmitry Yusupov <dmitry_yus@yahoo.com>, "
	      "Alex Aizman <itn780@yahoo.com>");
MODULE_DESCRIPTION("iSCSI/TCP data-path");
MODULE_LICENSE("GPL");

static int iscsi_dbg_libtcp;
module_param_named(debug_libiscsi_tcp, iscsi_dbg_libtcp, int,
		   S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(debug_libiscsi_tcp, "Turn on debugging for libiscsi_tcp "
		 "module. Set to 1 to turn on, and zero to turn off. Default "
		 "is off.");

#define ISCSI_DBG_TCP(_conn, dbg_fmt, arg...)			\
	do {							\
		if (iscsi_dbg_libtcp)				\
			iscsi_conn_printk(KERN_INFO, _conn,	\
					  "%s " dbg_fmt,	\
					  __func__, ##arg);	\
	} while (0);
static int iscsi_tcp_hdr_recv_done(struct iscsi_tcp_conn *tcp_conn,
				   struct iscsi_segment *segment);

/*
 * Scatterlist handling: inside the iscsi_segment, we
 * remember an index into the scatterlist, and set data/size
 * to the current scatterlist entry. For highmem pages, we
 * kmap as needed.
 *
 * Note that the page is unmapped when we return from
 * TCP's data_ready handler, so we may end up mapping and
 * unmapping the same page repeatedly. The whole reason
 * for this is that we shouldn't keep the page mapped
 * outside the softirq.
 */

/**
 * iscsi_tcp_segment_init_sg - init indicated scatterlist entry
 * @segment: the buffer object
 * @sg: scatterlist
 * @offset: byte offset into that sg entry
 *
 * This function sets up the segment so that subsequent
 * data is copied to the indicated sg entry, at the given
 * offset.
 */
static inline void
iscsi_tcp_segment_init_sg(struct iscsi_segment *segment,
			  struct scatterlist *sg, unsigned int offset)
{
	segment->sg = sg;
	segment->sg_offset = offset;
	segment->size = min(sg->length - offset,
			    segment->total_size - segment->total_copied);
	segment->data = NULL;
}

/**
 * iscsi_tcp_segment_map - map the current S/G page
 * @segment: iscsi_segment
 * @recv: 1 if called from recv path
 *
 * We only need to possibly kmap data if scatter lists are being used,
 * because the iscsi passthrough and internal IO paths will never use high
 * mem pages.
 */
static void iscsi_tcp_segment_map(struct iscsi_segment *segment, int recv)
{
	struct scatterlist *sg;

	if (segment->data != NULL || !segment->sg)
		return;

	sg = segment->sg;
	BUG_ON(segment->sg_mapped);
	BUG_ON(sg->length == 0);

	/*
	 * If the page count is greater than one it is ok to send
	 * to the network layer's zero copy send path. If not we
	 * have to go the slow sendmsg path. We always map for the
	 * recv path.
	 */
	if (page_count(sg_page(sg)) >= 1 && !recv)
		return;

	if (recv) {
		segment->atomic_mapped = true;
		segment->sg_mapped = kmap_atomic(sg_page(sg));
	} else {
		segment->atomic_mapped = false;
		/* the xmit path can sleep with the page mapped so use kmap */
		segment->sg_mapped = kmap(sg_page(sg));
	}

	segment->data = segment->sg_mapped + sg->offset + segment->sg_offset;
}

void iscsi_tcp_segment_unmap(struct iscsi_segment *segment)
{
	if (segment->sg_mapped) {
		if (segment->atomic_mapped)
			kunmap_atomic(segment->sg_mapped);
		else
			kunmap(sg_page(segment->sg));
		segment->sg_mapped = NULL;
		segment->data = NULL;
	}
}
EXPORT_SYMBOL_GPL(iscsi_tcp_segment_unmap);
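
/*
 * Illustrative sketch of the per-page lifecycle described in the comment
 * block above (not new API, just how the helpers in this file are meant to
 * be combined on the recv path; "net_data" and "nbytes" are hypothetical):
 *
 *	iscsi_tcp_segment_init_sg(segment, sg, 0);	// point at an sg entry
 *	iscsi_tcp_segment_map(segment, 1);		// kmap_atomic() on recv
 *	memcpy(segment->data + segment->copied, net_data, nbytes);
 *	iscsi_tcp_segment_done(tcp_conn, segment, 1, nbytes);
 *						// unmaps and advances to the
 *						// next sg entry when full
 *
 * The mapping must be gone by the time we return to the network layer,
 * which is why it is never cached across data_ready calls.
 */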

/*
 * Splice the digest buffer into the buffer
 */
static inline void
iscsi_tcp_segment_splice_digest(struct iscsi_segment *segment, void *digest)
{
	segment->data = digest;
	segment->digest_len = ISCSI_DIGEST_SIZE;
	segment->total_size += ISCSI_DIGEST_SIZE;
	segment->size = ISCSI_DIGEST_SIZE;
	segment->copied = 0;
	segment->sg = NULL;
	segment->hash = NULL;
}

/**
 * iscsi_tcp_segment_done - check whether the segment is complete
 * @tcp_conn: iscsi tcp connection
 * @segment: iscsi segment to check
 * @recv: set to one if this is called from the recv path
 * @copied: number of bytes copied
 *
 * Check if we're done receiving this segment. If the receive
 * buffer is full but we expect more data, move on to the
 * next entry in the scatterlist.
 *
 * If the amount of data we received isn't a multiple of 4,
 * we will transparently receive the pad bytes, too.
 *
 * This function must be re-entrant.
 */
int iscsi_tcp_segment_done(struct iscsi_tcp_conn *tcp_conn,
			   struct iscsi_segment *segment, int recv,
			   unsigned copied)
{
	struct scatterlist sg;
	unsigned int pad;

	ISCSI_DBG_TCP(tcp_conn->iscsi_conn, "copied %u %u size %u %s\n",
		      segment->copied, copied, segment->size,
		      recv ? "recv" : "xmit");

	if (segment->hash && copied) {
		/*
		 * If a segment is kmapped we must unmap it before sending
		 * to the crypto layer since that will try to kmap it again.
		 */
		iscsi_tcp_segment_unmap(segment);

		if (!segment->data) {
			sg_init_table(&sg, 1);
			sg_set_page(&sg, sg_page(segment->sg), copied,
				    segment->copied + segment->sg_offset +
				    segment->sg->offset);
		} else
			sg_init_one(&sg, segment->data + segment->copied,
				    copied);
		ahash_request_set_crypt(segment->hash, &sg, NULL, copied);
		crypto_ahash_update(segment->hash);
	}

	segment->copied += copied;
	if (segment->copied < segment->size) {
		iscsi_tcp_segment_map(segment, recv);
		return 0;
	}

	segment->total_copied += segment->copied;
	segment->copied = 0;
	segment->size = 0;

	/* Unmap the current scatterlist page, if there is one. */
	iscsi_tcp_segment_unmap(segment);

	/* Do we have more scatterlist entries? */
	ISCSI_DBG_TCP(tcp_conn->iscsi_conn, "total copied %u total size %u\n",
		      segment->total_copied, segment->total_size);
	if (segment->total_copied < segment->total_size) {
		/* Proceed to the next entry in the scatterlist. */
		iscsi_tcp_segment_init_sg(segment, sg_next(segment->sg),
					  0);
		iscsi_tcp_segment_map(segment, recv);
		BUG_ON(segment->size == 0);
		return 0;
	}

	/* Do we need to handle padding? */
	if (!(tcp_conn->iscsi_conn->session->tt->caps & CAP_PADDING_OFFLOAD)) {
		pad = iscsi_padding(segment->total_copied);
		if (pad != 0) {
			ISCSI_DBG_TCP(tcp_conn->iscsi_conn,
				      "consume %d pad bytes\n", pad);
			segment->total_size += pad;
			segment->size = pad;
			segment->data = segment->padbuf;
			return 0;
		}
	}

	/*
	 * Set us up for transferring the data digest. hdr digest
	 * is completely handled in hdr done function.
	 */
	if (segment->hash) {
		ahash_request_set_crypt(segment->hash, NULL,
					segment->digest, 0);
		crypto_ahash_final(segment->hash);
		iscsi_tcp_segment_splice_digest(segment,
				recv ? segment->recv_digest : segment->digest);
		return 0;
	}

	return 1;
}
EXPORT_SYMBOL_GPL(iscsi_tcp_segment_done);
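
/*
 * Worked example of the pad/digest handling above (numbers are illustrative):
 * for a data segment with total_size 1001 and a negotiated DataDigest,
 * iscsi_padding(1001) = 3, so the segment first grows by 3 pad bytes
 * (received into segment->padbuf), and is then extended again by the 4-byte
 * digest spliced in via iscsi_tcp_segment_splice_digest().  Only after those
 * digest bytes have been consumed does iscsi_tcp_segment_done() return 1 to
 * signal that the segment is complete.
 */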

/**
 * iscsi_tcp_segment_recv - copy data to segment
 * @tcp_conn: the iSCSI TCP connection
 * @segment: the buffer to copy to
 * @ptr: data pointer
 * @len: amount of data available
 *
 * This function copies up to @len bytes to the
 * given buffer, and returns the number of bytes
 * consumed, which can actually be less than @len.
 *
 * If hash digest is enabled, the function will update the
 * hash while copying.
 * Combining these two operations doesn't buy us a lot (yet),
 * but in the future we could implement combined copy+crc,
 * just the way we do for network layer checksums.
 */
static int
iscsi_tcp_segment_recv(struct iscsi_tcp_conn *tcp_conn,
		       struct iscsi_segment *segment, const void *ptr,
		       unsigned int len)
{
	unsigned int copy = 0, copied = 0;

	while (!iscsi_tcp_segment_done(tcp_conn, segment, 1, copy)) {
		if (copied == len) {
			ISCSI_DBG_TCP(tcp_conn->iscsi_conn,
				      "copied %d bytes\n", len);
			break;
		}

		copy = min(len - copied, segment->size - segment->copied);
		ISCSI_DBG_TCP(tcp_conn->iscsi_conn, "copying %d\n", copy);
		memcpy(segment->data + segment->copied, ptr + copied, copy);
		copied += copy;
	}
	return copied;
}

inline void
iscsi_tcp_dgst_header(struct ahash_request *hash, const void *hdr,
		      size_t hdrlen, unsigned char digest[ISCSI_DIGEST_SIZE])
{
	struct scatterlist sg;

	sg_init_one(&sg, hdr, hdrlen);
	ahash_request_set_crypt(hash, &sg, digest, hdrlen);
	crypto_ahash_digest(hash);
}
EXPORT_SYMBOL_GPL(iscsi_tcp_dgst_header);
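
/*
 * Example (sketch, assuming an LLD that builds its PDUs in software and
 * keeps a tx_hash ahash request alongside the rx_hash used elsewhere in
 * this file): a HeaderDigest is computed over the BHS plus any AHS and
 * transmitted immediately after the header bytes.
 *
 *	unsigned char hdrdgst[ISCSI_DIGEST_SIZE];
 *	size_t hdrlen = sizeof(struct iscsi_hdr) + ahslen;
 *
 *	iscsi_tcp_dgst_header(tcp_conn->tx_hash, hdr, hdrlen, hdrdgst);
 *	// send hdrlen header bytes followed by the 4 digest bytes
 */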

static inline int
iscsi_tcp_dgst_verify(struct iscsi_tcp_conn *tcp_conn,
		      struct iscsi_segment *segment)
{
	if (!segment->digest_len)
		return 1;

	if (memcmp(segment->recv_digest, segment->digest,
		   segment->digest_len)) {
		ISCSI_DBG_TCP(tcp_conn->iscsi_conn, "digest mismatch\n");
		return 0;
	}

	return 1;
}

/*
 * Helper function to set up segment buffer
 */
static inline void
__iscsi_segment_init(struct iscsi_segment *segment, size_t size,
		     iscsi_segment_done_fn_t *done, struct ahash_request *hash)
{
	memset(segment, 0, sizeof(*segment));
	segment->total_size = size;
	segment->done = done;

	if (hash) {
		segment->hash = hash;
		crypto_ahash_init(hash);
	}
}

inline void
iscsi_segment_init_linear(struct iscsi_segment *segment, void *data,
			  size_t size, iscsi_segment_done_fn_t *done,
			  struct ahash_request *hash)
{
	__iscsi_segment_init(segment, size, done, hash);
	segment->data = data;
	segment->size = size;
}
EXPORT_SYMBOL_GPL(iscsi_segment_init_linear);

inline int
iscsi_segment_seek_sg(struct iscsi_segment *segment,
		      struct scatterlist *sg_list, unsigned int sg_count,
		      unsigned int offset, size_t size,
		      iscsi_segment_done_fn_t *done,
		      struct ahash_request *hash)
{
	struct scatterlist *sg;
	unsigned int i;

	__iscsi_segment_init(segment, size, done, hash);
	for_each_sg(sg_list, sg, sg_count, i) {
		if (offset < sg->length) {
			iscsi_tcp_segment_init_sg(segment, sg, offset);
			return 0;
		}
		offset -= sg->length;
	}

	return ISCSI_ERR_DATA_OFFSET;
}
EXPORT_SYMBOL_GPL(iscsi_segment_seek_sg);
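
/*
 * Example (sketch, modeled on how a software transport might seek into the
 * write-direction scatterlist to prepare a solicited Data-Out; the done
 * callback and tx_hash names here are hypothetical, the recv-side analogue
 * is the call in iscsi_tcp_hdr_dissect() below):
 *
 *	struct scsi_data_buffer *sdb = scsi_out(task->sc);
 *	int err;
 *
 *	err = iscsi_segment_seek_sg(segment, sdb->table.sgl, sdb->table.nents,
 *				    r2t->data_offset + r2t->sent,
 *				    r2t->data_count,
 *				    my_xmit_segment_done, tx_hash);
 *	if (err)
 *		return err;	// ISCSI_ERR_DATA_OFFSET: offset past the sgl
 */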

/**
 * iscsi_tcp_hdr_recv_prep - prep segment for hdr reception
 * @tcp_conn: iscsi connection to prep for
 *
 * This function always passes NULL for the hash argument, because when this
 * function is called we do not yet know the final size of the header and want
 * to delay the digest processing until we know that.
 */
void iscsi_tcp_hdr_recv_prep(struct iscsi_tcp_conn *tcp_conn)
{
	ISCSI_DBG_TCP(tcp_conn->iscsi_conn,
		      "(%s)\n", tcp_conn->iscsi_conn->hdrdgst_en ?
		      "digest enabled" : "digest disabled");
	iscsi_segment_init_linear(&tcp_conn->in.segment,
				  tcp_conn->in.hdr_buf, sizeof(struct iscsi_hdr),
				  iscsi_tcp_hdr_recv_done, NULL);
}
EXPORT_SYMBOL_GPL(iscsi_tcp_hdr_recv_prep);

/*
 * Handle incoming reply to any other type of command
 */
static int
iscsi_tcp_data_recv_done(struct iscsi_tcp_conn *tcp_conn,
			 struct iscsi_segment *segment)
{
	struct iscsi_conn *conn = tcp_conn->iscsi_conn;
	int rc = 0;

	if (!iscsi_tcp_dgst_verify(tcp_conn, segment))
		return ISCSI_ERR_DATA_DGST;

	rc = iscsi_complete_pdu(conn, tcp_conn->in.hdr,
				conn->data, tcp_conn->in.datalen);
	if (rc)
		return rc;

	iscsi_tcp_hdr_recv_prep(tcp_conn);
	return 0;
}

static void
iscsi_tcp_data_recv_prep(struct iscsi_tcp_conn *tcp_conn)
{
	struct iscsi_conn *conn = tcp_conn->iscsi_conn;
	struct ahash_request *rx_hash = NULL;

	if (conn->datadgst_en &&
	    !(conn->session->tt->caps & CAP_DIGEST_OFFLOAD))
		rx_hash = tcp_conn->rx_hash;

	iscsi_segment_init_linear(&tcp_conn->in.segment,
				  conn->data, tcp_conn->in.datalen,
				  iscsi_tcp_data_recv_done, rx_hash);
}

/**
 * iscsi_tcp_cleanup_task - free tcp_task resources
 * @task: iscsi task
 *
 * must be called with session back_lock
 */
void iscsi_tcp_cleanup_task(struct iscsi_task *task)
{
	struct iscsi_tcp_task *tcp_task = task->dd_data;
	struct iscsi_r2t_info *r2t;

	/* nothing to do for mgmt */
	if (!task->sc)
		return;

	spin_lock_bh(&tcp_task->queue2pool);
	/* flush task's r2t queues */
	while (kfifo_out(&tcp_task->r2tqueue, (void*)&r2t, sizeof(void*))) {
		kfifo_in(&tcp_task->r2tpool.queue, (void*)&r2t,
			 sizeof(void*));
		ISCSI_DBG_TCP(task->conn, "pending r2t dropped\n");
	}

	r2t = tcp_task->r2t;
	if (r2t != NULL) {
		kfifo_in(&tcp_task->r2tpool.queue, (void*)&r2t,
			 sizeof(void*));
		tcp_task->r2t = NULL;
	}
	spin_unlock_bh(&tcp_task->queue2pool);
}
EXPORT_SYMBOL_GPL(iscsi_tcp_cleanup_task);
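
/*
 * Example of the locking contract noted above (sketch of how a caller is
 * expected to hold the session back_lock while the task's R2Ts are returned
 * to the pool):
 *
 *	spin_lock_bh(&session->back_lock);
 *	iscsi_tcp_cleanup_task(task);
 *	spin_unlock_bh(&session->back_lock);
 */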

/**
 * iscsi_tcp_data_in - SCSI Data-In Response processing
 * @conn: iscsi connection
 * @task: scsi command task
 */
static int iscsi_tcp_data_in(struct iscsi_conn *conn, struct iscsi_task *task)
{
	struct iscsi_tcp_conn *tcp_conn = conn->dd_data;
	struct iscsi_tcp_task *tcp_task = task->dd_data;
	struct iscsi_data_rsp *rhdr = (struct iscsi_data_rsp *)tcp_conn->in.hdr;
	int datasn = be32_to_cpu(rhdr->datasn);
	unsigned total_in_length = scsi_in(task->sc)->length;

	/*
	 * lib iscsi will update this in the completion handling if there
	 * is status.
	 */
	if (!(rhdr->flags & ISCSI_FLAG_DATA_STATUS))
		iscsi_update_cmdsn(conn->session, (struct iscsi_nopin*)rhdr);

	if (tcp_conn->in.datalen == 0)
		return 0;

	if (tcp_task->exp_datasn != datasn) {
		ISCSI_DBG_TCP(conn, "task->exp_datasn(%d) != rhdr->datasn(%d)"
			      "\n", tcp_task->exp_datasn, datasn);
		return ISCSI_ERR_DATASN;
	}

	tcp_task->exp_datasn++;

	tcp_task->data_offset = be32_to_cpu(rhdr->offset);
	if (tcp_task->data_offset + tcp_conn->in.datalen > total_in_length) {
		ISCSI_DBG_TCP(conn, "data_offset(%d) + data_len(%d) > "
			      "total_length_in(%d)\n", tcp_task->data_offset,
			      tcp_conn->in.datalen, total_in_length);
		return ISCSI_ERR_DATA_OFFSET;
	}

	conn->datain_pdus_cnt++;
	return 0;
}

/**
 * iscsi_tcp_r2t_rsp - iSCSI R2T Response processing
 * @conn: iscsi connection
 * @task: scsi command task
 */
static int iscsi_tcp_r2t_rsp(struct iscsi_conn *conn, struct iscsi_task *task)
{
	struct iscsi_session *session = conn->session;
	struct iscsi_tcp_task *tcp_task = task->dd_data;
	struct iscsi_tcp_conn *tcp_conn = conn->dd_data;
	struct iscsi_r2t_rsp *rhdr = (struct iscsi_r2t_rsp *)tcp_conn->in.hdr;
	struct iscsi_r2t_info *r2t;
	int r2tsn = be32_to_cpu(rhdr->r2tsn);
	u32 data_length;
	u32 data_offset;
	int rc;

	if (tcp_conn->in.datalen) {
		iscsi_conn_printk(KERN_ERR, conn,
				  "invalid R2T with datalen %d\n",
				  tcp_conn->in.datalen);
		return ISCSI_ERR_DATALEN;
	}

	if (tcp_task->exp_datasn != r2tsn) {
		ISCSI_DBG_TCP(conn, "task->exp_datasn(%d) != rhdr->r2tsn(%d)\n",
			      tcp_task->exp_datasn, r2tsn);
		return ISCSI_ERR_R2TSN;
	}

	/* fill-in new R2T associated with the task */
	iscsi_update_cmdsn(session, (struct iscsi_nopin*)rhdr);

	if (!task->sc || session->state != ISCSI_STATE_LOGGED_IN) {
		iscsi_conn_printk(KERN_INFO, conn,
				  "dropping R2T itt %d in recovery.\n",
				  task->itt);
		return 0;
	}

	data_length = be32_to_cpu(rhdr->data_length);
	if (data_length == 0) {
		iscsi_conn_printk(KERN_ERR, conn,
				  "invalid R2T with zero data len\n");
		return ISCSI_ERR_DATALEN;
	}

	if (data_length > session->max_burst)
		ISCSI_DBG_TCP(conn, "invalid R2T with data len %u and max "
			      "burst %u. Attempting to execute request.\n",
			      data_length, session->max_burst);

	data_offset = be32_to_cpu(rhdr->data_offset);
	if (data_offset + data_length > scsi_out(task->sc)->length) {
		iscsi_conn_printk(KERN_ERR, conn,
				  "invalid R2T with data len %u at offset %u "
				  "and total length %d\n", data_length,
				  data_offset, scsi_out(task->sc)->length);
		return ISCSI_ERR_DATALEN;
	}

	spin_lock(&tcp_task->pool2queue);
	rc = kfifo_out(&tcp_task->r2tpool.queue, (void *)&r2t, sizeof(void *));
	if (!rc) {
		iscsi_conn_printk(KERN_ERR, conn, "Could not allocate R2T. "
				  "Target has sent more R2Ts than it "
				  "negotiated for or driver has leaked.\n");
		spin_unlock(&tcp_task->pool2queue);
		return ISCSI_ERR_PROTO;
	}

	r2t->exp_statsn = rhdr->statsn;
	r2t->data_length = data_length;
	r2t->data_offset = data_offset;

	r2t->ttt = rhdr->ttt; /* no flip */
	r2t->datasn = 0;
	r2t->sent = 0;

	tcp_task->exp_datasn = r2tsn + 1;
	kfifo_in(&tcp_task->r2tqueue, (void*)&r2t, sizeof(void*));
	conn->r2t_pdus_cnt++;
	spin_unlock(&tcp_task->pool2queue);

	iscsi_requeue_task(task);
	return 0;
}

/*
 * Handle incoming reply to DataIn command
 */
static int
iscsi_tcp_process_data_in(struct iscsi_tcp_conn *tcp_conn,
			  struct iscsi_segment *segment)
{
	struct iscsi_conn *conn = tcp_conn->iscsi_conn;
	struct iscsi_hdr *hdr = tcp_conn->in.hdr;
	int rc;

	if (!iscsi_tcp_dgst_verify(tcp_conn, segment))
		return ISCSI_ERR_DATA_DGST;

	/* check for non-exceptional status */
	if (hdr->flags & ISCSI_FLAG_DATA_STATUS) {
		rc = iscsi_complete_pdu(conn, tcp_conn->in.hdr, NULL, 0);
		if (rc)
			return rc;
	}

	iscsi_tcp_hdr_recv_prep(tcp_conn);
	return 0;
}

/**
 * iscsi_tcp_hdr_dissect - process PDU header
 * @conn: iSCSI connection
 * @hdr: PDU header
 *
 * This function analyzes the header of the PDU received,
 * and performs several sanity checks. If the PDU is accompanied
 * by data, the receive buffer is set up to copy the incoming data
 * to the correct location.
 */
static int
iscsi_tcp_hdr_dissect(struct iscsi_conn *conn, struct iscsi_hdr *hdr)
{
	int rc = 0, opcode, ahslen;
	struct iscsi_tcp_conn *tcp_conn = conn->dd_data;
	struct iscsi_task *task;

	/* verify PDU length */
	tcp_conn->in.datalen = ntoh24(hdr->dlength);
	if (tcp_conn->in.datalen > conn->max_recv_dlength) {
		iscsi_conn_printk(KERN_ERR, conn,
				  "iscsi_tcp: datalen %d > %d\n",
				  tcp_conn->in.datalen, conn->max_recv_dlength);
		return ISCSI_ERR_DATALEN;
	}

	/* Additional header segments. So far, we don't
	 * process additional headers.
	 */
	ahslen = hdr->hlength << 2;

	opcode = hdr->opcode & ISCSI_OPCODE_MASK;
	/* verify itt (itt encoding: age+cid+itt) */
	rc = iscsi_verify_itt(conn, hdr->itt);
	if (rc)
		return rc;

	ISCSI_DBG_TCP(conn, "opcode 0x%x ahslen %d datalen %d\n",
		      opcode, ahslen, tcp_conn->in.datalen);

	switch (opcode) {
	case ISCSI_OP_SCSI_DATA_IN:
		spin_lock(&conn->session->back_lock);
		task = iscsi_itt_to_ctask(conn, hdr->itt);
		if (!task)
			rc = ISCSI_ERR_BAD_ITT;
		else
			rc = iscsi_tcp_data_in(conn, task);
		if (rc) {
			spin_unlock(&conn->session->back_lock);
			break;
		}

		if (tcp_conn->in.datalen) {
			struct iscsi_tcp_task *tcp_task = task->dd_data;
			struct ahash_request *rx_hash = NULL;
			struct scsi_data_buffer *sdb = scsi_in(task->sc);

			/*
			 * Setup copy of Data-In into the Scsi_Cmnd
			 * Scatterlist case:
			 * We set up the iscsi_segment to point to the next
			 * scatterlist entry to copy to. As we go along,
			 * we move on to the next scatterlist entry and
			 * update the digest per-entry.
			 */
			if (conn->datadgst_en &&
			    !(conn->session->tt->caps & CAP_DIGEST_OFFLOAD))
				rx_hash = tcp_conn->rx_hash;

			ISCSI_DBG_TCP(conn, "iscsi_tcp_begin_data_in( "
				      "offset=%d, datalen=%d)\n",
				      tcp_task->data_offset,
				      tcp_conn->in.datalen);

			task->last_xfer = jiffies;
			rc = iscsi_segment_seek_sg(&tcp_conn->in.segment,
						   sdb->table.sgl,
						   sdb->table.nents,
						   tcp_task->data_offset,
						   tcp_conn->in.datalen,
						   iscsi_tcp_process_data_in,
						   rx_hash);
			spin_unlock(&conn->session->back_lock);
			return rc;
		}
		rc = __iscsi_complete_pdu(conn, hdr, NULL, 0);
		spin_unlock(&conn->session->back_lock);
		break;
	case ISCSI_OP_SCSI_CMD_RSP:
		if (tcp_conn->in.datalen) {
			iscsi_tcp_data_recv_prep(tcp_conn);
			return 0;
		}
		rc = iscsi_complete_pdu(conn, hdr, NULL, 0);
		break;
	case ISCSI_OP_R2T:
		spin_lock(&conn->session->back_lock);
		task = iscsi_itt_to_ctask(conn, hdr->itt);
		spin_unlock(&conn->session->back_lock);
		if (!task)
			rc = ISCSI_ERR_BAD_ITT;
		else if (ahslen)
			rc = ISCSI_ERR_AHSLEN;
		else if (task->sc->sc_data_direction == DMA_TO_DEVICE) {
			task->last_xfer = jiffies;
			spin_lock(&conn->session->frwd_lock);
			rc = iscsi_tcp_r2t_rsp(conn, task);
			spin_unlock(&conn->session->frwd_lock);
		} else
			rc = ISCSI_ERR_PROTO;
		break;
	case ISCSI_OP_LOGIN_RSP:
	case ISCSI_OP_TEXT_RSP:
	case ISCSI_OP_REJECT:
	case ISCSI_OP_ASYNC_EVENT:
		/*
		 * It is possible that we could get a PDU with a buffer larger
		 * than 8K, but there are no targets that currently do this.
		 * For now we fail until we find a vendor that needs it
		 */
		if (ISCSI_DEF_MAX_RECV_SEG_LEN < tcp_conn->in.datalen) {
			iscsi_conn_printk(KERN_ERR, conn,
					  "iscsi_tcp: received buffer of "
					  "len %u but conn buffer is only %u "
					  "(opcode %0x)\n",
					  tcp_conn->in.datalen,
					  ISCSI_DEF_MAX_RECV_SEG_LEN, opcode);
			rc = ISCSI_ERR_PROTO;
			break;
		}

		/* If there's data coming in with the response,
		 * receive it to the connection's buffer.
		 */
		if (tcp_conn->in.datalen) {
			iscsi_tcp_data_recv_prep(tcp_conn);
			return 0;
		}
		/* fall through */
	case ISCSI_OP_LOGOUT_RSP:
	case ISCSI_OP_NOOP_IN:
	case ISCSI_OP_SCSI_TMFUNC_RSP:
		rc = iscsi_complete_pdu(conn, hdr, NULL, 0);
		break;
	default:
		rc = ISCSI_ERR_BAD_OPCODE;
		break;
	}

	if (rc == 0) {
		/* Anything that comes with data should have
		 * been handled above. */
		if (tcp_conn->in.datalen)
			return ISCSI_ERR_PROTO;
		iscsi_tcp_hdr_recv_prep(tcp_conn);
	}

	return rc;
}

/**
 * iscsi_tcp_hdr_recv_done - process PDU header
 * @tcp_conn: iSCSI TCP connection
 * @segment: the header segment that just completed
 *
 * This is the callback invoked when the PDU header has
 * been received. If the header is followed by additional
 * header segments, we go back for more data.
 */
static int
iscsi_tcp_hdr_recv_done(struct iscsi_tcp_conn *tcp_conn,
			struct iscsi_segment *segment)
{
	struct iscsi_conn *conn = tcp_conn->iscsi_conn;
	struct iscsi_hdr *hdr;

	/* Check if there are additional header segments
	 * *prior* to computing the digest, because we
	 * may need to go back to the caller for more.
	 */
	hdr = (struct iscsi_hdr *) tcp_conn->in.hdr_buf;
	if (segment->copied == sizeof(struct iscsi_hdr) && hdr->hlength) {
		/* Bump the header length - the caller will
		 * just loop around and get the AHS for us, and
		 * call again. */
		unsigned int ahslen = hdr->hlength << 2;

		/* Make sure we don't overflow */
		if (sizeof(*hdr) + ahslen > sizeof(tcp_conn->in.hdr_buf))
			return ISCSI_ERR_AHSLEN;

		segment->total_size += ahslen;
		segment->size += ahslen;
		return 0;
	}

	/* We're done processing the header. See if we're doing
	 * header digests; if so, set up the recv_digest buffer
	 * and go back for more. */
	if (conn->hdrdgst_en &&
	    !(conn->session->tt->caps & CAP_DIGEST_OFFLOAD)) {
		if (segment->digest_len == 0) {
			/*
			 * Even if we offload the digest processing we
			 * splice it in so we can increment the skb/segment
			 * counters in preparation for the data segment.
			 */
			iscsi_tcp_segment_splice_digest(segment,
							segment->recv_digest);
			return 0;
		}

		iscsi_tcp_dgst_header(tcp_conn->rx_hash, hdr,
				      segment->total_copied - ISCSI_DIGEST_SIZE,
				      segment->digest);

		if (!iscsi_tcp_dgst_verify(tcp_conn, segment))
			return ISCSI_ERR_HDR_DGST;
	}

	tcp_conn->in.hdr = hdr;
	return iscsi_tcp_hdr_dissect(conn, hdr);
}
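
/*
 * AHS sizing, worked example: hdr->hlength counts 4-byte words, so a PDU
 * with hlength == 3 carries 12 bytes of AHS.  iscsi_tcp_hdr_recv_done()
 * therefore grows the header segment from sizeof(struct iscsi_hdr)
 * (the 48-byte BHS) to 48 + 12 = 60 bytes, loops back to the caller for
 * the extra bytes, and only then (if negotiated) splices in the 4-byte
 * HeaderDigest.
 */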

/**
 * iscsi_tcp_recv_segment_is_hdr - tests if we are reading in a header
 * @tcp_conn: iscsi tcp conn
 *
 * returns non zero if we are currently processing or setup to process
 * a header.
 */
inline int iscsi_tcp_recv_segment_is_hdr(struct iscsi_tcp_conn *tcp_conn)
{
	return tcp_conn->in.segment.done == iscsi_tcp_hdr_recv_done;
}
EXPORT_SYMBOL_GPL(iscsi_tcp_recv_segment_is_hdr);

/**
 * iscsi_tcp_recv_skb - Process skb
 * @conn: iscsi connection
 * @skb: network buffer with header and/or data segment
 * @offset: offset in skb
 * @offloaded: bool indicating if transfer was offloaded
 * @status: iscsi TCP transfer status, set by this function
 *
 * Returns the number of bytes consumed from the skb and reports the
 * state of the transfer through @status.
 */
int iscsi_tcp_recv_skb(struct iscsi_conn *conn, struct sk_buff *skb,
		       unsigned int offset, bool offloaded, int *status)
{
	struct iscsi_tcp_conn *tcp_conn = conn->dd_data;
	struct iscsi_segment *segment = &tcp_conn->in.segment;
	struct skb_seq_state seq;
	unsigned int consumed = 0;
	int rc = 0;

	ISCSI_DBG_TCP(conn, "in %d bytes\n", skb->len - offset);
	/*
	 * Update for each skb instead of pdu, because over slow networks a
	 * data_in's data could take a while to read in. We also want to
	 * account for r2ts.
	 */
	conn->last_recv = jiffies;

	if (unlikely(conn->suspend_rx)) {
		ISCSI_DBG_TCP(conn, "Rx suspended!\n");
		*status = ISCSI_TCP_SUSPENDED;
		return 0;
	}

	if (offloaded) {
		segment->total_copied = segment->total_size;
		goto segment_done;
	}

	skb_prepare_seq_read(skb, offset, skb->len, &seq);
	while (1) {
		unsigned int avail;
		const u8 *ptr;

		avail = skb_seq_read(consumed, &ptr, &seq);
		if (avail == 0) {
			ISCSI_DBG_TCP(conn, "no more data avail. Consumed %d\n",
				      consumed);
			*status = ISCSI_TCP_SKB_DONE;
			goto skb_done;
		}
		BUG_ON(segment->copied >= segment->size);

		ISCSI_DBG_TCP(conn, "skb %p ptr=%p avail=%u\n", skb, ptr,
			      avail);
		rc = iscsi_tcp_segment_recv(tcp_conn, segment, ptr, avail);
		BUG_ON(rc == 0);
		consumed += rc;

		if (segment->total_copied >= segment->total_size) {
			skb_abort_seq_read(&seq);
			goto segment_done;
		}
	}

segment_done:
	*status = ISCSI_TCP_SEGMENT_DONE;
	ISCSI_DBG_TCP(conn, "segment done\n");
	rc = segment->done(tcp_conn, segment);
	if (rc != 0) {
		*status = ISCSI_TCP_CONN_ERR;
		ISCSI_DBG_TCP(conn, "Error receiving PDU, errno=%d\n", rc);
		iscsi_conn_failure(conn, rc);
		return 0;
	}
	/* The done() function sets up the next segment. */

skb_done:
	conn->rxdata_octets += consumed;
	return consumed;
}
EXPORT_SYMBOL_GPL(iscsi_tcp_recv_skb);
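
/*
 * Example (sketch of how a software LLD might feed skbs to this function
 * from a tcp_read_sock() actor; the function and variable names below are
 * hypothetical, but the loop mirrors the status codes documented above):
 *
 *	static int my_sw_tcp_recv(read_descriptor_t *rd_desc,
 *				  struct sk_buff *skb,
 *				  unsigned int offset, size_t len)
 *	{
 *		struct iscsi_conn *conn = rd_desc->arg.data;
 *		unsigned int consumed, total_consumed = 0;
 *		int status;
 *
 *		do {
 *			consumed = iscsi_tcp_recv_skb(conn, skb, offset,
 *						      false, &status);
 *			offset += consumed;
 *			total_consumed += consumed;
 *		} while (consumed != 0 && status != ISCSI_TCP_SKB_DONE);
 *
 *		// ISCSI_TCP_CONN_ERR and ISCSI_TCP_SUSPENDED both return 0
 *		// consumed bytes, which also terminates the loop.
 *		return total_consumed;
 *	}
 */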

/**
 * iscsi_tcp_task_init - Initialize iSCSI SCSI_READ or SCSI_WRITE commands
 * @task: scsi command task
 */
int iscsi_tcp_task_init(struct iscsi_task *task)
{
	struct iscsi_tcp_task *tcp_task = task->dd_data;
	struct iscsi_conn *conn = task->conn;
	struct scsi_cmnd *sc = task->sc;
	int err;

	if (!sc) {
		/*
		 * mgmt tasks do not have a scatterlist since they come
		 * in from the iscsi interface.
		 */
		ISCSI_DBG_TCP(conn, "mtask deq [itt 0x%x]\n", task->itt);

		return conn->session->tt->init_pdu(task, 0, task->data_count);
	}

	BUG_ON(kfifo_len(&tcp_task->r2tqueue));
	tcp_task->exp_datasn = 0;

	/* Prepare PDU, optionally w/ immediate data */
	ISCSI_DBG_TCP(conn, "task deq [itt 0x%x imm %d unsol %d]\n",
		      task->itt, task->imm_count, task->unsol_r2t.data_length);

	err = conn->session->tt->init_pdu(task, 0, task->imm_count);
	if (err)
		return err;
	task->imm_count = 0;
	return 0;
}
EXPORT_SYMBOL_GPL(iscsi_tcp_task_init);

static struct iscsi_r2t_info *iscsi_tcp_get_curr_r2t(struct iscsi_task *task)
{
	struct iscsi_tcp_task *tcp_task = task->dd_data;
	struct iscsi_r2t_info *r2t = NULL;

	if (iscsi_task_has_unsol_data(task))
		r2t = &task->unsol_r2t;
	else {
		spin_lock_bh(&tcp_task->queue2pool);
		if (tcp_task->r2t) {
			r2t = tcp_task->r2t;
			/* Continue with this R2T? */
			if (r2t->data_length <= r2t->sent) {
				ISCSI_DBG_TCP(task->conn,
					      " done with r2t %p\n", r2t);
				kfifo_in(&tcp_task->r2tpool.queue,
					 (void *)&tcp_task->r2t,
					 sizeof(void *));
				tcp_task->r2t = r2t = NULL;
			}
		}

		if (r2t == NULL) {
			if (kfifo_out(&tcp_task->r2tqueue,
				      (void *)&tcp_task->r2t, sizeof(void *)) !=
			    sizeof(void *))
				r2t = NULL;
			else
				r2t = tcp_task->r2t;
		}
		spin_unlock_bh(&tcp_task->queue2pool);
	}

	return r2t;
}

/**
 * iscsi_tcp_task_xmit - xmit normal PDU task
 * @task: iscsi command task
 *
 * We're expected to return 0 when everything was transmitted successfully,
 * -EAGAIN if there's still data in the queue, or != 0 for any other kind
 * of error.
 */
int iscsi_tcp_task_xmit(struct iscsi_task *task)
{
	struct iscsi_conn *conn = task->conn;
	struct iscsi_session *session = conn->session;
	struct iscsi_r2t_info *r2t;
	int rc = 0;

flush:
	/* Flush any pending data first. */
	rc = session->tt->xmit_pdu(task);
	if (rc < 0)
		return rc;

	/* mgmt command */
	if (!task->sc) {
		if (task->hdr->itt == RESERVED_ITT)
			iscsi_put_task(task);
		return 0;
	}

	/* Are we done already? */
	if (task->sc->sc_data_direction != DMA_TO_DEVICE)
		return 0;

	r2t = iscsi_tcp_get_curr_r2t(task);
	if (r2t == NULL) {
		/* Waiting for more R2Ts to arrive. */
		ISCSI_DBG_TCP(conn, "no R2Ts yet\n");
		return 0;
	}

	rc = conn->session->tt->alloc_pdu(task, ISCSI_OP_SCSI_DATA_OUT);
	if (rc)
		return rc;
	iscsi_prep_data_out_pdu(task, r2t, (struct iscsi_data *) task->hdr);

	ISCSI_DBG_TCP(conn, "sol dout %p [dsn %d itt 0x%x doff %d dlen %d]\n",
		      r2t, r2t->datasn - 1, task->hdr->itt,
		      r2t->data_offset + r2t->sent, r2t->data_count);

	rc = conn->session->tt->init_pdu(task, r2t->data_offset + r2t->sent,
					 r2t->data_count);
	if (rc) {
		iscsi_conn_failure(conn, ISCSI_ERR_XMIT_FAILED);
		return rc;
	}

	r2t->sent += r2t->data_count;
	goto flush;
}
EXPORT_SYMBOL_GPL(iscsi_tcp_task_xmit);
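
/*
 * Example of the return convention documented above (sketch; how a
 * transport's xmit handler might react to iscsi_tcp_task_xmit()):
 *
 *	rc = iscsi_tcp_task_xmit(task);
 *	if (rc == -EAGAIN)
 *		return rc;	// socket full: libiscsi requeues the task
 *	if (rc)
 *		return rc;	// hard error: connection failure follows
 *	return 0;		// everything currently queued was sent
 */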

struct iscsi_cls_conn *
iscsi_tcp_conn_setup(struct iscsi_cls_session *cls_session, int dd_data_size,
		     uint32_t conn_idx)
{
	struct iscsi_conn *conn;
	struct iscsi_cls_conn *cls_conn;
	struct iscsi_tcp_conn *tcp_conn;

	cls_conn = iscsi_conn_setup(cls_session,
				    sizeof(*tcp_conn) + dd_data_size, conn_idx);
	if (!cls_conn)
		return NULL;
	conn = cls_conn->dd_data;
	/*
	 * due to strange issues with iser these are not set
	 * in iscsi_conn_setup
	 */
	conn->max_recv_dlength = ISCSI_DEF_MAX_RECV_SEG_LEN;

	tcp_conn = conn->dd_data;
	tcp_conn->iscsi_conn = conn;
	tcp_conn->dd_data = conn->dd_data + sizeof(*tcp_conn);
	return cls_conn;
}
EXPORT_SYMBOL_GPL(iscsi_tcp_conn_setup);
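
/*
 * Example of the dd_data layout set up above (sketch; "struct my_tcp_conn"
 * is a hypothetical LLD-private structure):
 *
 *	cls_conn = iscsi_tcp_conn_setup(cls_session,
 *					sizeof(struct my_tcp_conn), conn_idx);
 *	if (!cls_conn)
 *		return NULL;
 *	conn = cls_conn->dd_data;	// struct iscsi_conn
 *	tcp_conn = conn->dd_data;	// struct iscsi_tcp_conn
 *	my_conn = tcp_conn->dd_data;	// the dd_data_size bytes requested
 */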

void iscsi_tcp_conn_teardown(struct iscsi_cls_conn *cls_conn)
{
	iscsi_conn_teardown(cls_conn);
}
EXPORT_SYMBOL_GPL(iscsi_tcp_conn_teardown);

int iscsi_tcp_r2tpool_alloc(struct iscsi_session *session)
{
	int i;
	int cmd_i;

	/*
	 * initialize per-task: R2T pool and xmit queue
	 */
	for (cmd_i = 0; cmd_i < session->cmds_max; cmd_i++) {
		struct iscsi_task *task = session->cmds[cmd_i];
		struct iscsi_tcp_task *tcp_task = task->dd_data;

		/*
		 * pre-allocate twice as many R2Ts to handle the race where
		 * the target acks DataOut faster than data_xmit() can
		 * replenish the r2tqueue.
		 */

		/* R2T pool */
		if (iscsi_pool_init(&tcp_task->r2tpool,
				    session->max_r2t * 2, NULL,
				    sizeof(struct iscsi_r2t_info))) {
			goto r2t_alloc_fail;
		}

		/* R2T xmit queue */
		if (kfifo_alloc(&tcp_task->r2tqueue,
				session->max_r2t * 4 * sizeof(void*), GFP_KERNEL)) {
			iscsi_pool_free(&tcp_task->r2tpool);
			goto r2t_alloc_fail;
		}
		spin_lock_init(&tcp_task->pool2queue);
		spin_lock_init(&tcp_task->queue2pool);
	}

	return 0;

r2t_alloc_fail:
	for (i = 0; i < cmd_i; i++) {
		struct iscsi_task *task = session->cmds[i];
		struct iscsi_tcp_task *tcp_task = task->dd_data;

		kfifo_free(&tcp_task->r2tqueue);
		iscsi_pool_free(&tcp_task->r2tpool);
	}
	return -ENOMEM;
}
EXPORT_SYMBOL_GPL(iscsi_tcp_r2tpool_alloc);
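
/*
 * Sizing, worked example: with max_r2t == 8, each task gets a pool of
 * 8 * 2 = 16 struct iscsi_r2t_info and an r2tqueue kfifo of
 * 8 * 4 * sizeof(void *) bytes (256 bytes on 64-bit, i.e. 32 pointer slots),
 * so the queue cannot overflow the pool even in the DataOut-ack race
 * described above.
 */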

void iscsi_tcp_r2tpool_free(struct iscsi_session *session)
{
	int i;

	for (i = 0; i < session->cmds_max; i++) {
		struct iscsi_task *task = session->cmds[i];
		struct iscsi_tcp_task *tcp_task = task->dd_data;

		kfifo_free(&tcp_task->r2tqueue);
		iscsi_pool_free(&tcp_task->r2tpool);
	}
}
EXPORT_SYMBOL_GPL(iscsi_tcp_r2tpool_free);

int iscsi_tcp_set_max_r2t(struct iscsi_conn *conn, char *buf)
{
	struct iscsi_session *session = conn->session;
	unsigned short r2ts = 0;

	sscanf(buf, "%hu", &r2ts);
	if (session->max_r2t == r2ts)
		return 0;

	if (!r2ts || !is_power_of_2(r2ts))
		return -EINVAL;

	session->max_r2t = r2ts;
	iscsi_tcp_r2tpool_free(session);
	return iscsi_tcp_r2tpool_alloc(session);
}
EXPORT_SYMBOL_GPL(iscsi_tcp_set_max_r2t);

void iscsi_tcp_conn_get_stats(struct iscsi_cls_conn *cls_conn,
			      struct iscsi_stats *stats)
{
	struct iscsi_conn *conn = cls_conn->dd_data;

	stats->txdata_octets = conn->txdata_octets;
	stats->rxdata_octets = conn->rxdata_octets;
	stats->scsicmd_pdus = conn->scsicmd_pdus_cnt;
	stats->dataout_pdus = conn->dataout_pdus_cnt;
	stats->scsirsp_pdus = conn->scsirsp_pdus_cnt;
	stats->datain_pdus = conn->datain_pdus_cnt;
	stats->r2t_pdus = conn->r2t_pdus_cnt;
	stats->tmfcmd_pdus = conn->tmfcmd_pdus_cnt;
	stats->tmfrsp_pdus = conn->tmfrsp_pdus_cnt;
}
EXPORT_SYMBOL_GPL(iscsi_tcp_conn_get_stats);