rc.c

/*
 * Copyright(c) 2015, 2016 Intel Corporation.
 *
 * This file is provided under a dual BSD/GPLv2 license.  When using or
 * redistributing this file, you may do so under either license.
 *
 * GPL LICENSE SUMMARY
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of version 2 of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * BSD LICENSE
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 *  - Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 *  - Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 *  - Neither the name of Intel Corporation nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 */
#include <linux/io.h>
#include <rdma/rdma_vt.h>
#include <rdma/rdmavt_qp.h>

#include "hfi.h"
#include "qp.h"
#include "verbs_txreq.h"
#include "trace.h"

/* cut down ridiculously long IB macro names */
#define OP(x) RC_OP(x)
/**
 * hfi1_add_retry_timer - add/start a retry timer
 * @qp - the QP
 *
 * add a retry timer on the QP
 */
static inline void hfi1_add_retry_timer(struct rvt_qp *qp)
{
	struct ib_qp *ibqp = &qp->ibqp;
	struct rvt_dev_info *rdi = ib_to_rvt(ibqp->device);

	lockdep_assert_held(&qp->s_lock);
	qp->s_flags |= RVT_S_TIMER;
	/* 4.096 usec. * (1 << qp->timeout) */
	qp->s_timer.expires = jiffies + qp->timeout_jiffies +
			      rdi->busy_jiffies;
	add_timer(&qp->s_timer);
}
/**
 * hfi1_add_rnr_timer - add/start an rnr timer
 * @qp - the QP
 * @to - timeout in usecs
 *
 * add an rnr timer on the QP
 */
void hfi1_add_rnr_timer(struct rvt_qp *qp, u32 to)
{
	struct hfi1_qp_priv *priv = qp->priv;

	lockdep_assert_held(&qp->s_lock);
	qp->s_flags |= RVT_S_WAIT_RNR;
	priv->s_rnr_timer.expires = jiffies + usecs_to_jiffies(to);
	add_timer(&priv->s_rnr_timer);
}
/**
 * hfi1_mod_retry_timer - mod a retry timer
 * @qp - the QP
 *
 * Modify a potentially already running retry
 * timer
 */
static inline void hfi1_mod_retry_timer(struct rvt_qp *qp)
{
	struct ib_qp *ibqp = &qp->ibqp;
	struct rvt_dev_info *rdi = ib_to_rvt(ibqp->device);

	lockdep_assert_held(&qp->s_lock);
	qp->s_flags |= RVT_S_TIMER;
	/* 4.096 usec. * (1 << qp->timeout) */
	mod_timer(&qp->s_timer, jiffies + qp->timeout_jiffies +
		  rdi->busy_jiffies);
}
/**
 * hfi1_stop_retry_timer - stop a retry timer
 * @qp - the QP
 *
 * stop a retry timer and return if the timer
 * had been pending.
 */
static inline int hfi1_stop_retry_timer(struct rvt_qp *qp)
{
	int rval = 0;

	lockdep_assert_held(&qp->s_lock);
	/* Remove QP from retry */
	if (qp->s_flags & RVT_S_TIMER) {
		qp->s_flags &= ~RVT_S_TIMER;
		rval = del_timer(&qp->s_timer);
	}
	return rval;
}
/**
 * hfi1_stop_rc_timers - stop all timers
 * @qp - the QP
 *
 * stop any pending timers
 */
void hfi1_stop_rc_timers(struct rvt_qp *qp)
{
	struct hfi1_qp_priv *priv = qp->priv;

	lockdep_assert_held(&qp->s_lock);
	/* Remove QP from all timers */
	if (qp->s_flags & (RVT_S_TIMER | RVT_S_WAIT_RNR)) {
		qp->s_flags &= ~(RVT_S_TIMER | RVT_S_WAIT_RNR);
		del_timer(&qp->s_timer);
		del_timer(&priv->s_rnr_timer);
	}
}
/**
 * hfi1_stop_rnr_timer - stop an rnr timer
 * @qp - the QP
 *
 * stop an rnr timer and return if the timer
 * had been pending.
 */
static inline int hfi1_stop_rnr_timer(struct rvt_qp *qp)
{
	int rval = 0;
	struct hfi1_qp_priv *priv = qp->priv;

	lockdep_assert_held(&qp->s_lock);
	/* Remove QP from rnr timer */
	if (qp->s_flags & RVT_S_WAIT_RNR) {
		qp->s_flags &= ~RVT_S_WAIT_RNR;
		rval = del_timer(&priv->s_rnr_timer);
	}
	return rval;
}
/**
 * hfi1_del_timers_sync - wait for any timeout routines to exit
 * @qp - the QP
 */
void hfi1_del_timers_sync(struct rvt_qp *qp)
{
	struct hfi1_qp_priv *priv = qp->priv;

	del_timer_sync(&qp->s_timer);
	del_timer_sync(&priv->s_rnr_timer);
}
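
/*
 * restart_sge - reposition the send SGE state to resend from @psn
 *
 * The byte offset into the WQE is the number of packets already covered
 * by PSNs before @psn (delta_psn(psn, wqe->psn)) times the path MTU.
 * The SGE state is rebuilt from the WQE's sg_list and advanced past that
 * offset; the return value is the number of bytes still to be sent.
 */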
static u32 restart_sge(struct rvt_sge_state *ss, struct rvt_swqe *wqe,
		       u32 psn, u32 pmtu)
{
	u32 len;

	len = delta_psn(psn, wqe->psn) * pmtu;
	ss->sge = wqe->sg_list[0];
	ss->sg_list = wqe->sg_list + 1;
	ss->num_sge = wqe->wr.num_sge;
	ss->total_len = wqe->length;
	hfi1_skip_sge(ss, len, 0);
	return wqe->length - len;
}
/**
 * make_rc_ack - construct a response packet (ACK, NAK, or RDMA read)
 * @dev: the device for this QP
 * @qp: a pointer to the QP
 * @ohdr: a pointer to the IB header being constructed
 * @ps: the xmit packet state
 *
 * Return 1 if constructed; otherwise, return 0.
 * Note that we are in the responder's side of the QP context.
 * Note the QP s_lock must be held.
 */
static int make_rc_ack(struct hfi1_ibdev *dev, struct rvt_qp *qp,
		       struct ib_other_headers *ohdr,
		       struct hfi1_pkt_state *ps)
{
	struct rvt_ack_entry *e;
	u32 hwords;
	u32 len;
	u32 bth0;
	u32 bth2;
	int middle = 0;
	u32 pmtu = qp->pmtu;
	struct hfi1_qp_priv *priv = qp->priv;

	lockdep_assert_held(&qp->s_lock);
	/* Don't send an ACK if we aren't supposed to. */
	if (!(ib_rvt_state_ops[qp->state] & RVT_PROCESS_RECV_OK))
		goto bail;

	/* header size in 32-bit words LRH+BTH = (8+12)/4. */
	hwords = 5;

	switch (qp->s_ack_state) {
	case OP(RDMA_READ_RESPONSE_LAST):
	case OP(RDMA_READ_RESPONSE_ONLY):
		e = &qp->s_ack_queue[qp->s_tail_ack_queue];
		if (e->rdma_sge.mr) {
			rvt_put_mr(e->rdma_sge.mr);
			e->rdma_sge.mr = NULL;
		}
		/* FALLTHROUGH */
	case OP(ATOMIC_ACKNOWLEDGE):
		/*
		 * We can increment the tail pointer now that the last
		 * response has been sent instead of only being
		 * constructed.
		 */
		if (++qp->s_tail_ack_queue > HFI1_MAX_RDMA_ATOMIC)
			qp->s_tail_ack_queue = 0;
		/* FALLTHROUGH */
	case OP(SEND_ONLY):
	case OP(ACKNOWLEDGE):
		/* Check for no next entry in the queue. */
		if (qp->r_head_ack_queue == qp->s_tail_ack_queue) {
			if (qp->s_flags & RVT_S_ACK_PENDING)
				goto normal;
			goto bail;
		}

		e = &qp->s_ack_queue[qp->s_tail_ack_queue];
		if (e->opcode == OP(RDMA_READ_REQUEST)) {
			/*
			 * If a RDMA read response is being resent and
			 * we haven't seen the duplicate request yet,
			 * then stop sending the remaining responses the
			 * responder has seen until the requester re-sends it.
			 */
			len = e->rdma_sge.sge_length;
			if (len && !e->rdma_sge.mr) {
				qp->s_tail_ack_queue = qp->r_head_ack_queue;
				goto bail;
			}
			/* Copy SGE state in case we need to resend */
			ps->s_txreq->mr = e->rdma_sge.mr;
			if (ps->s_txreq->mr)
				rvt_get_mr(ps->s_txreq->mr);
			qp->s_ack_rdma_sge.sge = e->rdma_sge;
			qp->s_ack_rdma_sge.num_sge = 1;
			qp->s_cur_sge = &qp->s_ack_rdma_sge;
			if (len > pmtu) {
				len = pmtu;
				qp->s_ack_state = OP(RDMA_READ_RESPONSE_FIRST);
			} else {
				qp->s_ack_state = OP(RDMA_READ_RESPONSE_ONLY);
				e->sent = 1;
			}
			ohdr->u.aeth = hfi1_compute_aeth(qp);
			hwords++;
			qp->s_ack_rdma_psn = e->psn;
			bth2 = mask_psn(qp->s_ack_rdma_psn++);
		} else {
			/* COMPARE_SWAP or FETCH_ADD */
			qp->s_cur_sge = NULL;
			len = 0;
			qp->s_ack_state = OP(ATOMIC_ACKNOWLEDGE);
			ohdr->u.at.aeth = hfi1_compute_aeth(qp);
			ib_u64_put(e->atomic_data, &ohdr->u.at.atomic_ack_eth);
			hwords += sizeof(ohdr->u.at) / sizeof(u32);
			bth2 = mask_psn(e->psn);
			e->sent = 1;
		}
		bth0 = qp->s_ack_state << 24;
		break;

	case OP(RDMA_READ_RESPONSE_FIRST):
		qp->s_ack_state = OP(RDMA_READ_RESPONSE_MIDDLE);
		/* FALLTHROUGH */
	case OP(RDMA_READ_RESPONSE_MIDDLE):
		qp->s_cur_sge = &qp->s_ack_rdma_sge;
		ps->s_txreq->mr = qp->s_ack_rdma_sge.sge.mr;
		if (ps->s_txreq->mr)
			rvt_get_mr(ps->s_txreq->mr);
		len = qp->s_ack_rdma_sge.sge.sge_length;
		if (len > pmtu) {
			len = pmtu;
			middle = HFI1_CAP_IS_KSET(SDMA_AHG);
		} else {
			ohdr->u.aeth = hfi1_compute_aeth(qp);
			hwords++;
			qp->s_ack_state = OP(RDMA_READ_RESPONSE_LAST);
			e = &qp->s_ack_queue[qp->s_tail_ack_queue];
			e->sent = 1;
		}
		bth0 = qp->s_ack_state << 24;
		bth2 = mask_psn(qp->s_ack_rdma_psn++);
		break;

	default:
normal:
		/*
		 * Send a regular ACK.
		 * Set the s_ack_state so we wait until after sending
		 * the ACK before setting s_ack_state to ACKNOWLEDGE
		 * (see above).
		 */
		qp->s_ack_state = OP(SEND_ONLY);
		qp->s_flags &= ~RVT_S_ACK_PENDING;
		qp->s_cur_sge = NULL;
		if (qp->s_nak_state)
			ohdr->u.aeth =
				cpu_to_be32((qp->r_msn & HFI1_MSN_MASK) |
					    (qp->s_nak_state <<
					     HFI1_AETH_CREDIT_SHIFT));
		else
			ohdr->u.aeth = hfi1_compute_aeth(qp);
		hwords++;
		len = 0;
		bth0 = OP(ACKNOWLEDGE) << 24;
		bth2 = mask_psn(qp->s_ack_psn);
	}
	qp->s_rdma_ack_cnt++;
	qp->s_hdrwords = hwords;
	ps->s_txreq->sde = priv->s_sde;
	qp->s_cur_size = len;
	hfi1_make_ruc_header(qp, ohdr, bth0, bth2, middle, ps);
	/* pbc */
	ps->s_txreq->hdr_dwords = qp->s_hdrwords + 2;
	return 1;

bail:
	qp->s_ack_state = OP(ACKNOWLEDGE);
	/*
	 * Ensure s_rdma_ack_cnt changes are committed prior to resetting
	 * RVT_S_RESP_PENDING
	 */
	smp_wmb();
	qp->s_flags &= ~(RVT_S_RESP_PENDING
				| RVT_S_ACK_PENDING
				| RVT_S_AHG_VALID);
	return 0;
}
/**
 * hfi1_make_rc_req - construct a request packet (SEND, RDMA r/w, ATOMIC)
 * @qp: a pointer to the QP
 *
 * Assumes s_lock is held.
 *
 * Return 1 if constructed; otherwise, return 0.
 */
int hfi1_make_rc_req(struct rvt_qp *qp, struct hfi1_pkt_state *ps)
{
	struct hfi1_qp_priv *priv = qp->priv;
	struct hfi1_ibdev *dev = to_idev(qp->ibqp.device);
	struct ib_other_headers *ohdr;
	struct rvt_sge_state *ss;
	struct rvt_swqe *wqe;
	/* header size in 32-bit words LRH+BTH = (8+12)/4. */
	u32 hwords = 5;
	u32 len;
	u32 bth0 = 0;
	u32 bth2;
	u32 pmtu = qp->pmtu;
	char newreq;
	int middle = 0;
	int delta;

	lockdep_assert_held(&qp->s_lock);
	ps->s_txreq = get_txreq(ps->dev, qp);
	if (IS_ERR(ps->s_txreq))
		goto bail_no_tx;

	ohdr = &ps->s_txreq->phdr.hdr.u.oth;
	if (qp->remote_ah_attr.ah_flags & IB_AH_GRH)
		ohdr = &ps->s_txreq->phdr.hdr.u.l.oth;

	/* Sending responses has higher priority over sending requests. */
	if ((qp->s_flags & RVT_S_RESP_PENDING) &&
	    make_rc_ack(dev, qp, ohdr, ps))
		return 1;

	if (!(ib_rvt_state_ops[qp->state] & RVT_PROCESS_SEND_OK)) {
		if (!(ib_rvt_state_ops[qp->state] & RVT_FLUSH_SEND))
			goto bail;
		/* We are in the error state, flush the work request. */
		smp_read_barrier_depends(); /* see post_one_send() */
		if (qp->s_last == ACCESS_ONCE(qp->s_head))
			goto bail;
		/* If DMAs are in progress, we can't flush immediately. */
		if (iowait_sdma_pending(&priv->s_iowait)) {
			qp->s_flags |= RVT_S_WAIT_DMA;
			goto bail;
		}
		clear_ahg(qp);
		wqe = rvt_get_swqe_ptr(qp, qp->s_last);
		hfi1_send_complete(qp, wqe, qp->s_last != qp->s_acked ?
				   IB_WC_SUCCESS : IB_WC_WR_FLUSH_ERR);
		/* will get called again */
		goto done_free_tx;
	}

	if (qp->s_flags & (RVT_S_WAIT_RNR | RVT_S_WAIT_ACK))
		goto bail;

	if (cmp_psn(qp->s_psn, qp->s_sending_hpsn) <= 0) {
		if (cmp_psn(qp->s_sending_psn, qp->s_sending_hpsn) <= 0) {
			qp->s_flags |= RVT_S_WAIT_PSN;
			goto bail;
		}
		qp->s_sending_psn = qp->s_psn;
		qp->s_sending_hpsn = qp->s_psn - 1;
	}

	/* Send a request. */
	wqe = rvt_get_swqe_ptr(qp, qp->s_cur);
	switch (qp->s_state) {
	default:
		if (!(ib_rvt_state_ops[qp->state] & RVT_PROCESS_NEXT_SEND_OK))
			goto bail;
		/*
		 * Resend an old request or start a new one.
		 *
		 * We keep track of the current SWQE so that
		 * we don't reset the "furthest progress" state
		 * if we need to back up.
		 */
		newreq = 0;
		if (qp->s_cur == qp->s_tail) {
			/* Check if send work queue is empty. */
			if (qp->s_tail == qp->s_head) {
				clear_ahg(qp);
				goto bail;
			}
			/*
			 * If a fence is requested, wait for previous
			 * RDMA read and atomic operations to finish.
			 */
			if ((wqe->wr.send_flags & IB_SEND_FENCE) &&
			    qp->s_num_rd_atomic) {
				qp->s_flags |= RVT_S_WAIT_FENCE;
				goto bail;
			}
			/*
			 * Local operations are processed immediately
			 * after all prior requests have completed
			 */
			if (wqe->wr.opcode == IB_WR_REG_MR ||
			    wqe->wr.opcode == IB_WR_LOCAL_INV) {
				int local_ops = 0;
				int err = 0;

				if (qp->s_last != qp->s_cur)
					goto bail;
				if (++qp->s_cur == qp->s_size)
					qp->s_cur = 0;
				if (++qp->s_tail == qp->s_size)
					qp->s_tail = 0;
				if (!(wqe->wr.send_flags &
				      RVT_SEND_COMPLETION_ONLY)) {
					err = rvt_invalidate_rkey(
						qp,
						wqe->wr.ex.invalidate_rkey);
					local_ops = 1;
				}
				hfi1_send_complete(qp, wqe,
						   err ? IB_WC_LOC_PROT_ERR
							: IB_WC_SUCCESS);
				if (local_ops)
					atomic_dec(&qp->local_ops_pending);
				qp->s_hdrwords = 0;
				goto done_free_tx;
			}

			newreq = 1;
			qp->s_psn = wqe->psn;
		}
		/*
		 * Note that we have to be careful not to modify the
		 * original work request since we may need to resend
		 * it.
		 */
		len = wqe->length;
		ss = &qp->s_sge;
		bth2 = mask_psn(qp->s_psn);
		switch (wqe->wr.opcode) {
		case IB_WR_SEND:
		case IB_WR_SEND_WITH_IMM:
		case IB_WR_SEND_WITH_INV:
			/* If no credit, return. */
			if (!(qp->s_flags & RVT_S_UNLIMITED_CREDIT) &&
			    cmp_msn(wqe->ssn, qp->s_lsn + 1) > 0) {
				qp->s_flags |= RVT_S_WAIT_SSN_CREDIT;
				goto bail;
			}
			if (len > pmtu) {
				qp->s_state = OP(SEND_FIRST);
				len = pmtu;
				break;
			}
			if (wqe->wr.opcode == IB_WR_SEND) {
				qp->s_state = OP(SEND_ONLY);
			} else if (wqe->wr.opcode == IB_WR_SEND_WITH_IMM) {
				qp->s_state = OP(SEND_ONLY_WITH_IMMEDIATE);
				/* Immediate data comes after the BTH */
				ohdr->u.imm_data = wqe->wr.ex.imm_data;
				hwords += 1;
			} else {
				qp->s_state = OP(SEND_ONLY_WITH_INVALIDATE);
				/* Invalidate rkey comes after the BTH */
				ohdr->u.ieth = cpu_to_be32(
						wqe->wr.ex.invalidate_rkey);
				hwords += 1;
			}
			if (wqe->wr.send_flags & IB_SEND_SOLICITED)
				bth0 |= IB_BTH_SOLICITED;
			bth2 |= IB_BTH_REQ_ACK;
			if (++qp->s_cur == qp->s_size)
				qp->s_cur = 0;
			break;
		case IB_WR_RDMA_WRITE:
			if (newreq && !(qp->s_flags & RVT_S_UNLIMITED_CREDIT))
				qp->s_lsn++;
			goto no_flow_control;
		case IB_WR_RDMA_WRITE_WITH_IMM:
			/* If no credit, return. */
			if (!(qp->s_flags & RVT_S_UNLIMITED_CREDIT) &&
			    cmp_msn(wqe->ssn, qp->s_lsn + 1) > 0) {
				qp->s_flags |= RVT_S_WAIT_SSN_CREDIT;
				goto bail;
			}
no_flow_control:
			put_ib_reth_vaddr(
				wqe->rdma_wr.remote_addr,
				&ohdr->u.rc.reth);
			ohdr->u.rc.reth.rkey =
				cpu_to_be32(wqe->rdma_wr.rkey);
			ohdr->u.rc.reth.length = cpu_to_be32(len);
			hwords += sizeof(struct ib_reth) / sizeof(u32);
			if (len > pmtu) {
				qp->s_state = OP(RDMA_WRITE_FIRST);
				len = pmtu;
				break;
			}
			if (wqe->wr.opcode == IB_WR_RDMA_WRITE) {
				qp->s_state = OP(RDMA_WRITE_ONLY);
			} else {
				qp->s_state =
					OP(RDMA_WRITE_ONLY_WITH_IMMEDIATE);
				/* Immediate data comes after RETH */
				ohdr->u.rc.imm_data = wqe->wr.ex.imm_data;
				hwords += 1;
				if (wqe->wr.send_flags & IB_SEND_SOLICITED)
					bth0 |= IB_BTH_SOLICITED;
			}
			bth2 |= IB_BTH_REQ_ACK;
			if (++qp->s_cur == qp->s_size)
				qp->s_cur = 0;
			break;
		case IB_WR_RDMA_READ:
			/*
			 * Don't allow more operations to be started
			 * than the QP limits allow.
			 */
			if (newreq) {
				if (qp->s_num_rd_atomic >=
				    qp->s_max_rd_atomic) {
					qp->s_flags |= RVT_S_WAIT_RDMAR;
					goto bail;
				}
				qp->s_num_rd_atomic++;
				if (!(qp->s_flags & RVT_S_UNLIMITED_CREDIT))
					qp->s_lsn++;
			}
			put_ib_reth_vaddr(
				wqe->rdma_wr.remote_addr,
				&ohdr->u.rc.reth);
			ohdr->u.rc.reth.rkey =
				cpu_to_be32(wqe->rdma_wr.rkey);
			ohdr->u.rc.reth.length = cpu_to_be32(len);
			qp->s_state = OP(RDMA_READ_REQUEST);
			hwords += sizeof(ohdr->u.rc.reth) / sizeof(u32);
			ss = NULL;
			len = 0;
			bth2 |= IB_BTH_REQ_ACK;
			if (++qp->s_cur == qp->s_size)
				qp->s_cur = 0;
			break;
		case IB_WR_ATOMIC_CMP_AND_SWP:
		case IB_WR_ATOMIC_FETCH_AND_ADD:
			/*
			 * Don't allow more operations to be started
			 * than the QP limits allow.
			 */
			if (newreq) {
				if (qp->s_num_rd_atomic >=
				    qp->s_max_rd_atomic) {
					qp->s_flags |= RVT_S_WAIT_RDMAR;
					goto bail;
				}
				qp->s_num_rd_atomic++;
				if (!(qp->s_flags & RVT_S_UNLIMITED_CREDIT))
					qp->s_lsn++;
			}
			if (wqe->wr.opcode == IB_WR_ATOMIC_CMP_AND_SWP) {
				qp->s_state = OP(COMPARE_SWAP);
				put_ib_ateth_swap(wqe->atomic_wr.swap,
						  &ohdr->u.atomic_eth);
				put_ib_ateth_compare(wqe->atomic_wr.compare_add,
						     &ohdr->u.atomic_eth);
			} else {
				qp->s_state = OP(FETCH_ADD);
				put_ib_ateth_swap(wqe->atomic_wr.compare_add,
						  &ohdr->u.atomic_eth);
				put_ib_ateth_compare(0, &ohdr->u.atomic_eth);
			}
			put_ib_ateth_vaddr(wqe->atomic_wr.remote_addr,
					   &ohdr->u.atomic_eth);
			ohdr->u.atomic_eth.rkey = cpu_to_be32(
				wqe->atomic_wr.rkey);
			hwords += sizeof(struct ib_atomic_eth) / sizeof(u32);
			ss = NULL;
			len = 0;
			bth2 |= IB_BTH_REQ_ACK;
			if (++qp->s_cur == qp->s_size)
				qp->s_cur = 0;
			break;
		default:
			goto bail;
		}
		qp->s_sge.sge = wqe->sg_list[0];
		qp->s_sge.sg_list = wqe->sg_list + 1;
		qp->s_sge.num_sge = wqe->wr.num_sge;
		qp->s_sge.total_len = wqe->length;
		qp->s_len = wqe->length;
		if (newreq) {
			qp->s_tail++;
			if (qp->s_tail >= qp->s_size)
				qp->s_tail = 0;
		}
		if (wqe->wr.opcode == IB_WR_RDMA_READ)
			qp->s_psn = wqe->lpsn + 1;
		else
			qp->s_psn++;
		break;

	case OP(RDMA_READ_RESPONSE_FIRST):
		/*
		 * qp->s_state is normally set to the opcode of the
		 * last packet constructed for new requests and therefore
		 * is never set to RDMA read response.
		 * RDMA_READ_RESPONSE_FIRST is used by the ACK processing
		 * thread to indicate a SEND needs to be restarted from an
		 * earlier PSN without interfering with the sending thread.
		 * See restart_rc().
		 */
		qp->s_len = restart_sge(&qp->s_sge, wqe, qp->s_psn, pmtu);
		/* FALLTHROUGH */
	case OP(SEND_FIRST):
		qp->s_state = OP(SEND_MIDDLE);
		/* FALLTHROUGH */
	case OP(SEND_MIDDLE):
		bth2 = mask_psn(qp->s_psn++);
		ss = &qp->s_sge;
		len = qp->s_len;
		if (len > pmtu) {
			len = pmtu;
			middle = HFI1_CAP_IS_KSET(SDMA_AHG);
			break;
		}
		if (wqe->wr.opcode == IB_WR_SEND) {
			qp->s_state = OP(SEND_LAST);
		} else if (wqe->wr.opcode == IB_WR_SEND_WITH_IMM) {
			qp->s_state = OP(SEND_LAST_WITH_IMMEDIATE);
			/* Immediate data comes after the BTH */
			ohdr->u.imm_data = wqe->wr.ex.imm_data;
			hwords += 1;
		} else {
			qp->s_state = OP(SEND_LAST_WITH_INVALIDATE);
			/* invalidate data comes after the BTH */
			ohdr->u.ieth = cpu_to_be32(wqe->wr.ex.invalidate_rkey);
			hwords += 1;
		}
		if (wqe->wr.send_flags & IB_SEND_SOLICITED)
			bth0 |= IB_BTH_SOLICITED;
		bth2 |= IB_BTH_REQ_ACK;
		qp->s_cur++;
		if (qp->s_cur >= qp->s_size)
			qp->s_cur = 0;
		break;

	case OP(RDMA_READ_RESPONSE_LAST):
		/*
		 * qp->s_state is normally set to the opcode of the
		 * last packet constructed for new requests and therefore
		 * is never set to RDMA read response.
		 * RDMA_READ_RESPONSE_LAST is used by the ACK processing
		 * thread to indicate a RDMA write needs to be restarted from
		 * an earlier PSN without interfering with the sending thread.
		 * See restart_rc().
		 */
		qp->s_len = restart_sge(&qp->s_sge, wqe, qp->s_psn, pmtu);
		/* FALLTHROUGH */
	case OP(RDMA_WRITE_FIRST):
		qp->s_state = OP(RDMA_WRITE_MIDDLE);
		/* FALLTHROUGH */
	case OP(RDMA_WRITE_MIDDLE):
		bth2 = mask_psn(qp->s_psn++);
		ss = &qp->s_sge;
		len = qp->s_len;
		if (len > pmtu) {
			len = pmtu;
			middle = HFI1_CAP_IS_KSET(SDMA_AHG);
			break;
		}
		if (wqe->wr.opcode == IB_WR_RDMA_WRITE) {
			qp->s_state = OP(RDMA_WRITE_LAST);
		} else {
			qp->s_state = OP(RDMA_WRITE_LAST_WITH_IMMEDIATE);
			/* Immediate data comes after the BTH */
			ohdr->u.imm_data = wqe->wr.ex.imm_data;
			hwords += 1;
			if (wqe->wr.send_flags & IB_SEND_SOLICITED)
				bth0 |= IB_BTH_SOLICITED;
		}
		bth2 |= IB_BTH_REQ_ACK;
		qp->s_cur++;
		if (qp->s_cur >= qp->s_size)
			qp->s_cur = 0;
		break;

	case OP(RDMA_READ_RESPONSE_MIDDLE):
		/*
		 * qp->s_state is normally set to the opcode of the
		 * last packet constructed for new requests and therefore
		 * is never set to RDMA read response.
		 * RDMA_READ_RESPONSE_MIDDLE is used by the ACK processing
		 * thread to indicate a RDMA read needs to be restarted from
		 * an earlier PSN without interfering with the sending thread.
		 * See restart_rc().
		 */
		len = (delta_psn(qp->s_psn, wqe->psn)) * pmtu;
		put_ib_reth_vaddr(
			wqe->rdma_wr.remote_addr + len,
			&ohdr->u.rc.reth);
		ohdr->u.rc.reth.rkey =
			cpu_to_be32(wqe->rdma_wr.rkey);
		ohdr->u.rc.reth.length = cpu_to_be32(wqe->length - len);
		qp->s_state = OP(RDMA_READ_REQUEST);
		hwords += sizeof(ohdr->u.rc.reth) / sizeof(u32);
		bth2 = mask_psn(qp->s_psn) | IB_BTH_REQ_ACK;
		qp->s_psn = wqe->lpsn + 1;
		ss = NULL;
		len = 0;
		qp->s_cur++;
		if (qp->s_cur == qp->s_size)
			qp->s_cur = 0;
		break;
	}
	qp->s_sending_hpsn = bth2;
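	/*
	 * Request an explicit ACK periodically (every HFI1_PSN_CREDIT
	 * packets into a request) so that a long message does not have to
	 * wait for its final packet before any acknowledgements can flow
	 * back to the requester.
	 */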
	delta = delta_psn(bth2, wqe->psn);
	if (delta && delta % HFI1_PSN_CREDIT == 0)
		bth2 |= IB_BTH_REQ_ACK;
	if (qp->s_flags & RVT_S_SEND_ONE) {
		qp->s_flags &= ~RVT_S_SEND_ONE;
		qp->s_flags |= RVT_S_WAIT_ACK;
		bth2 |= IB_BTH_REQ_ACK;
	}
	qp->s_len -= len;
	qp->s_hdrwords = hwords;
	ps->s_txreq->sde = priv->s_sde;
	qp->s_cur_sge = ss;
	qp->s_cur_size = len;
	hfi1_make_ruc_header(
		qp,
		ohdr,
		bth0 | (qp->s_state << 24),
		bth2,
		middle,
		ps);
	/* pbc */
	ps->s_txreq->hdr_dwords = qp->s_hdrwords + 2;
	return 1;

done_free_tx:
	hfi1_put_txreq(ps->s_txreq);
	ps->s_txreq = NULL;
	return 1;

bail:
	hfi1_put_txreq(ps->s_txreq);

bail_no_tx:
	ps->s_txreq = NULL;
	qp->s_flags &= ~RVT_S_BUSY;
	qp->s_hdrwords = 0;
	return 0;
}
/**
 * hfi1_send_rc_ack - Construct an ACK packet and send it
 * @qp: a pointer to the QP
 *
 * This is called from hfi1_rc_rcv() and handle_receive_interrupt().
 * Note that RDMA reads and atomics are handled in the
 * send side QP state and send engine.
 */
void hfi1_send_rc_ack(struct hfi1_ctxtdata *rcd, struct rvt_qp *qp,
		      int is_fecn)
{
	struct hfi1_ibport *ibp = to_iport(qp->ibqp.device, qp->port_num);
	struct hfi1_pportdata *ppd = ppd_from_ibp(ibp);
	u64 pbc, pbc_flags = 0;
	u16 lrh0;
	u16 sc5;
	u32 bth0;
	u32 hwords;
	u32 vl, plen;
	struct send_context *sc;
	struct pio_buf *pbuf;
	struct ib_header hdr;
	struct ib_other_headers *ohdr;
	unsigned long flags;

	/* Don't send ACK or NAK if a RDMA read or atomic is pending. */
	if (qp->s_flags & RVT_S_RESP_PENDING)
		goto queue_ack;

	/* Ensure s_rdma_ack_cnt changes are committed */
	smp_read_barrier_depends();
	if (qp->s_rdma_ack_cnt)
		goto queue_ack;

	/* Construct the header */
	/* header size in 32-bit words LRH+BTH+AETH = (8+12+4)/4 */
	hwords = 6;
	if (unlikely(qp->remote_ah_attr.ah_flags & IB_AH_GRH)) {
		hwords += hfi1_make_grh(ibp, &hdr.u.l.grh,
					&qp->remote_ah_attr.grh, hwords, 0);
		ohdr = &hdr.u.l.oth;
		lrh0 = HFI1_LRH_GRH;
	} else {
		ohdr = &hdr.u.oth;
		lrh0 = HFI1_LRH_BTH;
	}
	/* read pkey_index w/o lock (its atomic) */
	bth0 = hfi1_get_pkey(ibp, qp->s_pkey_index) | (OP(ACKNOWLEDGE) << 24);
	if (qp->s_mig_state == IB_MIG_MIGRATED)
		bth0 |= IB_BTH_MIG_REQ;
	if (qp->r_nak_state)
		ohdr->u.aeth = cpu_to_be32((qp->r_msn & HFI1_MSN_MASK) |
					   (qp->r_nak_state <<
					    HFI1_AETH_CREDIT_SHIFT));
	else
		ohdr->u.aeth = hfi1_compute_aeth(qp);
	sc5 = ibp->sl_to_sc[qp->remote_ah_attr.sl];
	/* set PBC_DC_INFO bit (aka SC[4]) in pbc_flags */
	pbc_flags |= ((!!(sc5 & 0x10)) << PBC_DC_INFO_SHIFT);
	lrh0 |= (sc5 & 0xf) << 12 | (qp->remote_ah_attr.sl & 0xf) << 4;
	hdr.lrh[0] = cpu_to_be16(lrh0);
	hdr.lrh[1] = cpu_to_be16(qp->remote_ah_attr.dlid);
	hdr.lrh[2] = cpu_to_be16(hwords + SIZE_OF_CRC);
	hdr.lrh[3] = cpu_to_be16(ppd->lid | qp->remote_ah_attr.src_path_bits);
	ohdr->bth[0] = cpu_to_be32(bth0);
	ohdr->bth[1] = cpu_to_be32(qp->remote_qpn);
	ohdr->bth[1] |= cpu_to_be32((!!is_fecn) << HFI1_BECN_SHIFT);
	ohdr->bth[2] = cpu_to_be32(mask_psn(qp->r_ack_psn));

	/* Don't try to send ACKs if the link isn't ACTIVE */
	if (driver_lstate(ppd) != IB_PORT_ACTIVE)
		return;

	sc = rcd->sc;
	plen = 2 /* PBC */ + hwords;
	vl = sc_to_vlt(ppd->dd, sc5);
	pbc = create_pbc(ppd, pbc_flags, qp->srate_mbps, vl, plen);

	pbuf = sc_buffer_alloc(sc, plen, NULL, NULL);
	if (!pbuf) {
		/*
		 * We have no room to send at the moment.  Pass
		 * responsibility for sending the ACK to the send engine
		 * so that when enough buffer space becomes available,
		 * the ACK is sent ahead of other outgoing packets.
		 */
		goto queue_ack;
	}

	trace_ack_output_ibhdr(dd_from_ibdev(qp->ibqp.device), &hdr);

	/* write the pbc and data */
	ppd->dd->pio_inline_send(ppd->dd, pbuf, pbc, &hdr, hwords);

	return;

queue_ack:
	spin_lock_irqsave(&qp->s_lock, flags);
	if (!(ib_rvt_state_ops[qp->state] & RVT_PROCESS_RECV_OK))
		goto unlock;
	this_cpu_inc(*ibp->rvp.rc_qacks);
	qp->s_flags |= RVT_S_ACK_PENDING | RVT_S_RESP_PENDING;
	qp->s_nak_state = qp->r_nak_state;
	qp->s_ack_psn = qp->r_ack_psn;
	if (is_fecn)
		qp->s_flags |= RVT_S_ECN;

	/* Schedule the send engine. */
	hfi1_schedule_send(qp);
unlock:
	spin_unlock_irqrestore(&qp->s_lock, flags);
}
/**
 * reset_psn - reset the QP state to send starting from PSN
 * @qp: the QP
 * @psn: the packet sequence number to restart at
 *
 * This is called from hfi1_rc_rcv() to process an incoming RC ACK
 * for the given QP.
 * Called at interrupt level with the QP s_lock held.
 */
static void reset_psn(struct rvt_qp *qp, u32 psn)
{
	u32 n = qp->s_acked;
	struct rvt_swqe *wqe = rvt_get_swqe_ptr(qp, n);
	u32 opcode;

	lockdep_assert_held(&qp->s_lock);
	qp->s_cur = n;

	/*
	 * If we are starting the request from the beginning,
	 * let the normal send code handle initialization.
	 */
	if (cmp_psn(psn, wqe->psn) <= 0) {
		qp->s_state = OP(SEND_LAST);
		goto done;
	}

	/* Find the work request opcode corresponding to the given PSN. */
	opcode = wqe->wr.opcode;
	for (;;) {
		int diff;

		if (++n == qp->s_size)
			n = 0;
		if (n == qp->s_tail)
			break;
		wqe = rvt_get_swqe_ptr(qp, n);
		diff = cmp_psn(psn, wqe->psn);
		if (diff < 0)
			break;
		qp->s_cur = n;
		/*
		 * If we are starting the request from the beginning,
		 * let the normal send code handle initialization.
		 */
		if (diff == 0) {
			qp->s_state = OP(SEND_LAST);
			goto done;
		}
		opcode = wqe->wr.opcode;
	}

	/*
	 * Set the state to restart in the middle of a request.
	 * Don't change the s_sge, s_cur_sge, or s_cur_size.
	 * See hfi1_make_rc_req().
	 */
	switch (opcode) {
	case IB_WR_SEND:
	case IB_WR_SEND_WITH_IMM:
		qp->s_state = OP(RDMA_READ_RESPONSE_FIRST);
		break;

	case IB_WR_RDMA_WRITE:
	case IB_WR_RDMA_WRITE_WITH_IMM:
		qp->s_state = OP(RDMA_READ_RESPONSE_LAST);
		break;

	case IB_WR_RDMA_READ:
		qp->s_state = OP(RDMA_READ_RESPONSE_MIDDLE);
		break;

	default:
		/*
		 * This case shouldn't happen since its only
		 * one PSN per req.
		 */
		qp->s_state = OP(SEND_LAST);
	}
done:
	qp->s_psn = psn;
	/*
	 * Set RVT_S_WAIT_PSN as rc_complete() may start the timer
	 * asynchronously before the send engine can get scheduled.
	 * Doing it in hfi1_make_rc_req() is too late.
	 */
	if ((cmp_psn(qp->s_psn, qp->s_sending_hpsn) <= 0) &&
	    (cmp_psn(qp->s_sending_psn, qp->s_sending_hpsn) <= 0))
		qp->s_flags |= RVT_S_WAIT_PSN;
	qp->s_flags &= ~RVT_S_AHG_VALID;
}
/*
 * Back up requester to resend the last un-ACKed request.
 * The QP r_lock and s_lock should be held and interrupts disabled.
 */
static void restart_rc(struct rvt_qp *qp, u32 psn, int wait)
{
	struct rvt_swqe *wqe = rvt_get_swqe_ptr(qp, qp->s_acked);
	struct hfi1_ibport *ibp;

	lockdep_assert_held(&qp->r_lock);
	lockdep_assert_held(&qp->s_lock);
	if (qp->s_retry == 0) {
		if (qp->s_mig_state == IB_MIG_ARMED) {
			hfi1_migrate_qp(qp);
			qp->s_retry = qp->s_retry_cnt;
		} else if (qp->s_last == qp->s_acked) {
			hfi1_send_complete(qp, wqe, IB_WC_RETRY_EXC_ERR);
			rvt_error_qp(qp, IB_WC_WR_FLUSH_ERR);
			return;
		} else { /* need to handle delayed completion */
			return;
		}
	} else {
		qp->s_retry--;
	}

	ibp = to_iport(qp->ibqp.device, qp->port_num);
	if (wqe->wr.opcode == IB_WR_RDMA_READ)
		ibp->rvp.n_rc_resends++;
	else
		ibp->rvp.n_rc_resends += delta_psn(qp->s_psn, psn);

	qp->s_flags &= ~(RVT_S_WAIT_FENCE | RVT_S_WAIT_RDMAR |
			 RVT_S_WAIT_SSN_CREDIT | RVT_S_WAIT_PSN |
			 RVT_S_WAIT_ACK);
	if (wait)
		qp->s_flags |= RVT_S_SEND_ONE;
	reset_psn(qp, psn);
}
/*
 * This is called from s_timer for missing responses.
 */
void hfi1_rc_timeout(unsigned long arg)
{
	struct rvt_qp *qp = (struct rvt_qp *)arg;
	struct hfi1_ibport *ibp;
	unsigned long flags;

	spin_lock_irqsave(&qp->r_lock, flags);
	spin_lock(&qp->s_lock);
	if (qp->s_flags & RVT_S_TIMER) {
		ibp = to_iport(qp->ibqp.device, qp->port_num);
		ibp->rvp.n_rc_timeouts++;
		qp->s_flags &= ~RVT_S_TIMER;
		del_timer(&qp->s_timer);
		trace_hfi1_timeout(qp, qp->s_last_psn + 1);
		restart_rc(qp, qp->s_last_psn + 1, 1);
		hfi1_schedule_send(qp);
	}
	spin_unlock(&qp->s_lock);
	spin_unlock_irqrestore(&qp->r_lock, flags);
}
/*
 * This is called from s_timer for RNR timeouts.
 */
void hfi1_rc_rnr_retry(unsigned long arg)
{
	struct rvt_qp *qp = (struct rvt_qp *)arg;
	unsigned long flags;

	spin_lock_irqsave(&qp->s_lock, flags);
	hfi1_stop_rnr_timer(qp);
	hfi1_schedule_send(qp);
	spin_unlock_irqrestore(&qp->s_lock, flags);
}
/*
 * Set qp->s_sending_psn to the next PSN after the given one.
 * This would be psn+1 except when RDMA reads are present.
 */
static void reset_sending_psn(struct rvt_qp *qp, u32 psn)
{
	struct rvt_swqe *wqe;
	u32 n = qp->s_last;

	lockdep_assert_held(&qp->s_lock);
	/* Find the work request corresponding to the given PSN. */
	for (;;) {
		wqe = rvt_get_swqe_ptr(qp, n);
		if (cmp_psn(psn, wqe->lpsn) <= 0) {
			if (wqe->wr.opcode == IB_WR_RDMA_READ)
				qp->s_sending_psn = wqe->lpsn + 1;
			else
				qp->s_sending_psn = psn + 1;
			break;
		}
		if (++n == qp->s_size)
			n = 0;
		if (n == qp->s_tail)
			break;
	}
}
/*
 * This should be called with the QP s_lock held and interrupts disabled.
 */
void hfi1_rc_send_complete(struct rvt_qp *qp, struct ib_header *hdr)
{
	struct ib_other_headers *ohdr;
	struct rvt_swqe *wqe;
	struct ib_wc wc;
	unsigned i;
	u32 opcode;
	u32 psn;

	lockdep_assert_held(&qp->s_lock);
	if (!(ib_rvt_state_ops[qp->state] & RVT_PROCESS_OR_FLUSH_SEND))
		return;

	/* Find out where the BTH is */
	if ((be16_to_cpu(hdr->lrh[0]) & 3) == HFI1_LRH_BTH)
		ohdr = &hdr->u.oth;
	else
		ohdr = &hdr->u.l.oth;

	opcode = be32_to_cpu(ohdr->bth[0]) >> 24;
	if (opcode >= OP(RDMA_READ_RESPONSE_FIRST) &&
	    opcode <= OP(ATOMIC_ACKNOWLEDGE)) {
		WARN_ON(!qp->s_rdma_ack_cnt);
		qp->s_rdma_ack_cnt--;
		return;
	}

	psn = be32_to_cpu(ohdr->bth[2]);
	reset_sending_psn(qp, psn);

	/*
	 * Start timer after a packet requesting an ACK has been sent and
	 * there are still requests that haven't been acked.
	 */
	if ((psn & IB_BTH_REQ_ACK) && qp->s_acked != qp->s_tail &&
	    !(qp->s_flags &
		(RVT_S_TIMER | RVT_S_WAIT_RNR | RVT_S_WAIT_PSN)) &&
		(ib_rvt_state_ops[qp->state] & RVT_PROCESS_RECV_OK))
		hfi1_add_retry_timer(qp);

	while (qp->s_last != qp->s_acked) {
		u32 s_last;

		wqe = rvt_get_swqe_ptr(qp, qp->s_last);
		if (cmp_psn(wqe->lpsn, qp->s_sending_psn) >= 0 &&
		    cmp_psn(qp->s_sending_psn, qp->s_sending_hpsn) <= 0)
			break;
		s_last = qp->s_last;
		if (++s_last >= qp->s_size)
			s_last = 0;
		qp->s_last = s_last;
		/* see post_send() */
		barrier();
		for (i = 0; i < wqe->wr.num_sge; i++) {
			struct rvt_sge *sge = &wqe->sg_list[i];

			rvt_put_mr(sge->mr);
		}
		/* Post a send completion queue entry if requested. */
		if (!(qp->s_flags & RVT_S_SIGNAL_REQ_WR) ||
		    (wqe->wr.send_flags & IB_SEND_SIGNALED)) {
			memset(&wc, 0, sizeof(wc));
			wc.wr_id = wqe->wr.wr_id;
			wc.status = IB_WC_SUCCESS;
			wc.opcode = ib_hfi1_wc_opcode[wqe->wr.opcode];
			wc.byte_len = wqe->length;
			wc.qp = &qp->ibqp;
			rvt_cq_enter(ibcq_to_rvtcq(qp->ibqp.send_cq), &wc, 0);
		}
	}
	/*
	 * If we were waiting for sends to complete before re-sending,
	 * and they are now complete, restart sending.
	 */
	trace_hfi1_sendcomplete(qp, psn);
	if (qp->s_flags & RVT_S_WAIT_PSN &&
	    cmp_psn(qp->s_sending_psn, qp->s_sending_hpsn) > 0) {
		qp->s_flags &= ~RVT_S_WAIT_PSN;
		qp->s_sending_psn = qp->s_psn;
		qp->s_sending_hpsn = qp->s_psn - 1;
		hfi1_schedule_send(qp);
	}
}
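
/*
 * Track the highest PSN known to have been acknowledged by the
 * responder; retries restart from s_last_psn + 1.
 */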
static inline void update_last_psn(struct rvt_qp *qp, u32 psn)
{
	qp->s_last_psn = psn;
}
/*
 * Generate a SWQE completion.
 * This is similar to hfi1_send_complete but has to check to be sure
 * that the SGEs are not being referenced if the SWQE is being resent.
 */
static struct rvt_swqe *do_rc_completion(struct rvt_qp *qp,
					 struct rvt_swqe *wqe,
					 struct hfi1_ibport *ibp)
{
	struct ib_wc wc;
	unsigned i;

	lockdep_assert_held(&qp->s_lock);
	/*
	 * Don't decrement refcount and don't generate a
	 * completion if the SWQE is being resent until the send
	 * is finished.
	 */
	if (cmp_psn(wqe->lpsn, qp->s_sending_psn) < 0 ||
	    cmp_psn(qp->s_sending_psn, qp->s_sending_hpsn) > 0) {
		u32 s_last;

		for (i = 0; i < wqe->wr.num_sge; i++) {
			struct rvt_sge *sge = &wqe->sg_list[i];

			rvt_put_mr(sge->mr);
		}
		s_last = qp->s_last;
		if (++s_last >= qp->s_size)
			s_last = 0;
		qp->s_last = s_last;
		/* see post_send() */
		barrier();
		/* Post a send completion queue entry if requested. */
		if (!(qp->s_flags & RVT_S_SIGNAL_REQ_WR) ||
		    (wqe->wr.send_flags & IB_SEND_SIGNALED)) {
			memset(&wc, 0, sizeof(wc));
			wc.wr_id = wqe->wr.wr_id;
			wc.status = IB_WC_SUCCESS;
			wc.opcode = ib_hfi1_wc_opcode[wqe->wr.opcode];
			wc.byte_len = wqe->length;
			wc.qp = &qp->ibqp;
			rvt_cq_enter(ibcq_to_rvtcq(qp->ibqp.send_cq), &wc, 0);
		}
	} else {
		struct hfi1_pportdata *ppd = ppd_from_ibp(ibp);

		this_cpu_inc(*ibp->rvp.rc_delayed_comp);
		/*
		 * If send progress not running attempt to progress
		 * SDMA queue.
		 */
		if (ppd->dd->flags & HFI1_HAS_SEND_DMA) {
			struct sdma_engine *engine;
			u8 sc5;

			/* For now use sc to find engine */
			sc5 = ibp->sl_to_sc[qp->remote_ah_attr.sl];
			engine = qp_to_sdma_engine(qp, sc5);
			sdma_engine_progress_schedule(engine);
		}
	}

	qp->s_retry = qp->s_retry_cnt;
	update_last_psn(qp, wqe->lpsn);

	/*
	 * If we are completing a request which is in the process of
	 * being resent, we can stop re-sending it since we know the
	 * responder has already seen it.
	 */
	if (qp->s_acked == qp->s_cur) {
		if (++qp->s_cur >= qp->s_size)
			qp->s_cur = 0;
		qp->s_acked = qp->s_cur;
		wqe = rvt_get_swqe_ptr(qp, qp->s_cur);
		if (qp->s_acked != qp->s_tail) {
			qp->s_state = OP(SEND_LAST);
			qp->s_psn = wqe->psn;
		}
	} else {
		if (++qp->s_acked >= qp->s_size)
			qp->s_acked = 0;
		if (qp->state == IB_QPS_SQD && qp->s_acked == qp->s_cur)
			qp->s_draining = 0;
		wqe = rvt_get_swqe_ptr(qp, qp->s_acked);
	}
	return wqe;
}
/**
 * do_rc_ack - process an incoming RC ACK
 * @qp: the QP the ACK came in on
 * @psn: the packet sequence number of the ACK
 * @opcode: the opcode of the request that resulted in the ACK
 *
 * This is called from rc_rcv_resp() to process an incoming RC ACK
 * for the given QP.
 * May be called at interrupt level, with the QP s_lock held.
 * Returns 1 if OK, 0 if current operation should be aborted (NAK).
 */
static int do_rc_ack(struct rvt_qp *qp, u32 aeth, u32 psn, int opcode,
		     u64 val, struct hfi1_ctxtdata *rcd)
{
	struct hfi1_ibport *ibp;
	enum ib_wc_status status;
	struct rvt_swqe *wqe;
	int ret = 0;
	u32 ack_psn;
	int diff;
	unsigned long to;

	lockdep_assert_held(&qp->s_lock);
	/*
	 * Note that NAKs implicitly ACK outstanding SEND and RDMA write
	 * requests and implicitly NAK RDMA read and atomic requests issued
	 * before the NAK'ed request.  The MSN won't include the NAK'ed
	 * request but will include an ACK'ed request(s).
	 */
	ack_psn = psn;
	if (aeth >> 29)
		ack_psn--;
	wqe = rvt_get_swqe_ptr(qp, qp->s_acked);
	ibp = to_iport(qp->ibqp.device, qp->port_num);

	/*
	 * The MSN might be for a later WQE than the PSN indicates so
	 * only complete WQEs that the PSN finishes.
	 */
	while ((diff = delta_psn(ack_psn, wqe->lpsn)) >= 0) {
		/*
		 * RDMA_READ_RESPONSE_ONLY is a special case since
		 * we want to generate completion events for everything
		 * before the RDMA read, copy the data, then generate
		 * the completion for the read.
		 */
		if (wqe->wr.opcode == IB_WR_RDMA_READ &&
		    opcode == OP(RDMA_READ_RESPONSE_ONLY) &&
		    diff == 0) {
			ret = 1;
			goto bail_stop;
		}
		/*
		 * If this request is a RDMA read or atomic, and the ACK is
		 * for a later operation, this ACK NAKs the RDMA read or
		 * atomic.  In other words, only a RDMA_READ_LAST or ONLY
		 * can ACK a RDMA read and likewise for atomic ops.  Note
		 * that the NAK case can only happen if relaxed ordering is
		 * used and requests are sent after an RDMA read or atomic
		 * is sent but before the response is received.
		 */
		if ((wqe->wr.opcode == IB_WR_RDMA_READ &&
		     (opcode != OP(RDMA_READ_RESPONSE_LAST) || diff != 0)) ||
		    ((wqe->wr.opcode == IB_WR_ATOMIC_CMP_AND_SWP ||
		      wqe->wr.opcode == IB_WR_ATOMIC_FETCH_AND_ADD) &&
		     (opcode != OP(ATOMIC_ACKNOWLEDGE) || diff != 0))) {
			/* Retry this request. */
			if (!(qp->r_flags & RVT_R_RDMAR_SEQ)) {
				qp->r_flags |= RVT_R_RDMAR_SEQ;
				restart_rc(qp, qp->s_last_psn + 1, 0);
				if (list_empty(&qp->rspwait)) {
					qp->r_flags |= RVT_R_RSP_SEND;
					rvt_get_qp(qp);
					list_add_tail(&qp->rspwait,
						      &rcd->qp_wait_list);
				}
			}
			/*
			 * No need to process the ACK/NAK since we are
			 * restarting an earlier request.
			 */
			goto bail_stop;
		}
		if (wqe->wr.opcode == IB_WR_ATOMIC_CMP_AND_SWP ||
		    wqe->wr.opcode == IB_WR_ATOMIC_FETCH_AND_ADD) {
			u64 *vaddr = wqe->sg_list[0].vaddr;
			*vaddr = val;
		}
		if (qp->s_num_rd_atomic &&
		    (wqe->wr.opcode == IB_WR_RDMA_READ ||
		     wqe->wr.opcode == IB_WR_ATOMIC_CMP_AND_SWP ||
		     wqe->wr.opcode == IB_WR_ATOMIC_FETCH_AND_ADD)) {
			qp->s_num_rd_atomic--;
			/* Restart sending task if fence is complete */
			if ((qp->s_flags & RVT_S_WAIT_FENCE) &&
			    !qp->s_num_rd_atomic) {
				qp->s_flags &= ~(RVT_S_WAIT_FENCE |
						 RVT_S_WAIT_ACK);
				hfi1_schedule_send(qp);
			} else if (qp->s_flags & RVT_S_WAIT_RDMAR) {
				qp->s_flags &= ~(RVT_S_WAIT_RDMAR |
						 RVT_S_WAIT_ACK);
				hfi1_schedule_send(qp);
			}
		}
		wqe = do_rc_completion(qp, wqe, ibp);
		if (qp->s_acked == qp->s_tail)
			break;
	}
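	/*
	 * The three high-order AETH bits select the acknowledge type:
	 * 0 = ACK, 1 = RNR NAK, 3 = NAK (2 is reserved).  The 5-bit field
	 * at HFI1_AETH_CREDIT_SHIFT carries, respectively, the credit
	 * count, the RNR timer index, or the NAK code.
	 */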
	switch (aeth >> 29) {
	case 0:         /* ACK */
		this_cpu_inc(*ibp->rvp.rc_acks);
		if (qp->s_acked != qp->s_tail) {
			/*
			 * We are expecting more ACKs so
			 * mod the retry timer.
			 */
			hfi1_mod_retry_timer(qp);
			/*
			 * We can stop re-sending the earlier packets and
			 * continue with the next packet the receiver wants.
			 */
			if (cmp_psn(qp->s_psn, psn) <= 0)
				reset_psn(qp, psn + 1);
		} else {
			/* No more acks - kill all timers */
			hfi1_stop_rc_timers(qp);
			if (cmp_psn(qp->s_psn, psn) <= 0) {
				qp->s_state = OP(SEND_LAST);
				qp->s_psn = psn + 1;
			}
		}
		if (qp->s_flags & RVT_S_WAIT_ACK) {
			qp->s_flags &= ~RVT_S_WAIT_ACK;
			hfi1_schedule_send(qp);
		}
		hfi1_get_credit(qp, aeth);
		qp->s_rnr_retry = qp->s_rnr_retry_cnt;
		qp->s_retry = qp->s_retry_cnt;
		update_last_psn(qp, psn);
		return 1;

	case 1:         /* RNR NAK */
		ibp->rvp.n_rnr_naks++;
		if (qp->s_acked == qp->s_tail)
			goto bail_stop;
		if (qp->s_flags & RVT_S_WAIT_RNR)
			goto bail_stop;
		if (qp->s_rnr_retry == 0) {
			status = IB_WC_RNR_RETRY_EXC_ERR;
			goto class_b;
		}
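		/* An RNR retry count of 7 means "retry forever". */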
  1377. if (qp->s_rnr_retry_cnt < 7)
  1378. qp->s_rnr_retry--;
  1379. /* The last valid PSN is the previous PSN. */
  1380. update_last_psn(qp, psn - 1);
  1381. ibp->rvp.n_rc_resends += delta_psn(qp->s_psn, psn);
  1382. reset_psn(qp, psn);
  1383. qp->s_flags &= ~(RVT_S_WAIT_SSN_CREDIT | RVT_S_WAIT_ACK);
  1384. hfi1_stop_rc_timers(qp);
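		/*
		 * The credit field of an RNR NAK AETH carries the encoded
		 * RNR timeout; ib_hfi1_rnr_table is assumed to translate
		 * that 5-bit code into the actual delay to program before
		 * the request is retried.
		 */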
		to =
			ib_hfi1_rnr_table[(aeth >> HFI1_AETH_CREDIT_SHIFT) &
					  HFI1_AETH_CREDIT_MASK];
		hfi1_add_rnr_timer(qp, to);
		return 0;

	case 3:         /* NAK */
		if (qp->s_acked == qp->s_tail)
			goto bail_stop;
		/* The last valid PSN is the previous PSN. */
		update_last_psn(qp, psn - 1);
		switch ((aeth >> HFI1_AETH_CREDIT_SHIFT) &
			HFI1_AETH_CREDIT_MASK) {
		case 0: /* PSN sequence error */
			ibp->rvp.n_seq_naks++;
			/*
			 * Back up to the responder's expected PSN.
			 * Note that we might get a NAK in the middle of an
			 * RDMA READ response which terminates the RDMA
			 * READ.
			 */
			restart_rc(qp, psn, 0);
			hfi1_schedule_send(qp);
			break;

		case 1: /* Invalid Request */
			status = IB_WC_REM_INV_REQ_ERR;
			ibp->rvp.n_other_naks++;
			goto class_b;

		case 2: /* Remote Access Error */
			status = IB_WC_REM_ACCESS_ERR;
			ibp->rvp.n_other_naks++;
			goto class_b;

		case 3: /* Remote Operation Error */
			status = IB_WC_REM_OP_ERR;
			ibp->rvp.n_other_naks++;
class_b:
			if (qp->s_last == qp->s_acked) {
				hfi1_send_complete(qp, wqe, status);
				rvt_error_qp(qp, IB_WC_WR_FLUSH_ERR);
			}
			break;

		default:
			/* Ignore other reserved NAK error codes */
			goto reserved;
		}
		qp->s_retry = qp->s_retry_cnt;
		qp->s_rnr_retry = qp->s_rnr_retry_cnt;
		goto bail_stop;

	default:                /* 2: reserved */
reserved:
		/* Ignore reserved NAK codes. */
		goto bail_stop;
	}
	/* cannot be reached */
bail_stop:
	hfi1_stop_rc_timers(qp);
	return ret;
}

/*
 * We have seen an out of sequence RDMA read middle or last packet.
 * This ACKs SENDs and RDMA writes up to the first RDMA read or atomic SWQE.
 */
static void rdma_seq_err(struct rvt_qp *qp, struct hfi1_ibport *ibp, u32 psn,
			 struct hfi1_ctxtdata *rcd)
{
	struct rvt_swqe *wqe;

	lockdep_assert_held(&qp->s_lock);
	/* Remove QP from retry timer */
	hfi1_stop_rc_timers(qp);

	wqe = rvt_get_swqe_ptr(qp, qp->s_acked);

	while (cmp_psn(psn, wqe->lpsn) > 0) {
		if (wqe->wr.opcode == IB_WR_RDMA_READ ||
		    wqe->wr.opcode == IB_WR_ATOMIC_CMP_AND_SWP ||
		    wqe->wr.opcode == IB_WR_ATOMIC_FETCH_AND_ADD)
			break;
		wqe = do_rc_completion(qp, wqe, ibp);
	}

	ibp->rvp.n_rdma_seq++;
	qp->r_flags |= RVT_R_RDMAR_SEQ;
	restart_rc(qp, qp->s_last_psn + 1, 0);
	if (list_empty(&qp->rspwait)) {
		qp->r_flags |= RVT_R_RSP_SEND;
		rvt_get_qp(qp);
		list_add_tail(&qp->rspwait, &rcd->qp_wait_list);
	}
}

/**
 * rc_rcv_resp - process an incoming RC response packet
 * @ibp: the port this packet came in on
 * @ohdr: the other headers for this packet
 * @data: the packet data
 * @tlen: the packet length
 * @qp: the QP for this packet
 * @opcode: the opcode for this packet
 * @psn: the packet sequence number for this packet
 * @hdrsize: the header length
 * @pmtu: the path MTU
 * @rcd: the receive context for this packet
 *
 * This is called from hfi1_rc_rcv() to process an incoming RC response
 * packet for the given QP.
 * Called at interrupt level.
 */
static void rc_rcv_resp(struct hfi1_ibport *ibp,
			struct ib_other_headers *ohdr,
			void *data, u32 tlen, struct rvt_qp *qp,
			u32 opcode, u32 psn, u32 hdrsize, u32 pmtu,
			struct hfi1_ctxtdata *rcd)
{
	struct rvt_swqe *wqe;
	enum ib_wc_status status;
	unsigned long flags;
	int diff;
	u32 pad;
	u32 aeth;
	u64 val;

	spin_lock_irqsave(&qp->s_lock, flags);
	trace_hfi1_ack(qp, psn);

	/* Ignore invalid responses. */
	smp_read_barrier_depends(); /* see post_one_send */
	if (cmp_psn(psn, ACCESS_ONCE(qp->s_next_psn)) >= 0)
		goto ack_done;

	/* Ignore duplicate responses. */
	diff = cmp_psn(psn, qp->s_last_psn);
	if (unlikely(diff <= 0)) {
		/* Update credits for "ghost" ACKs */
		if (diff == 0 && opcode == OP(ACKNOWLEDGE)) {
			aeth = be32_to_cpu(ohdr->u.aeth);
			if ((aeth >> 29) == 0)
				hfi1_get_credit(qp, aeth);
		}
		goto ack_done;
	}

	/*
	 * Skip everything other than the PSN we expect, if we are waiting
	 * for a reply to a restarted RDMA read or atomic op.
	 */
	if (qp->r_flags & RVT_R_RDMAR_SEQ) {
		if (cmp_psn(psn, qp->s_last_psn + 1) != 0)
			goto ack_done;
		qp->r_flags &= ~RVT_R_RDMAR_SEQ;
	}

	if (unlikely(qp->s_acked == qp->s_tail))
		goto ack_done;
	wqe = rvt_get_swqe_ptr(qp, qp->s_acked);
	status = IB_WC_SUCCESS;

	switch (opcode) {
	case OP(ACKNOWLEDGE):
	case OP(ATOMIC_ACKNOWLEDGE):
	case OP(RDMA_READ_RESPONSE_FIRST):
		aeth = be32_to_cpu(ohdr->u.aeth);
		if (opcode == OP(ATOMIC_ACKNOWLEDGE))
			val = ib_u64_get(&ohdr->u.at.atomic_ack_eth);
		else
			val = 0;
		if (!do_rc_ack(qp, aeth, psn, opcode, val, rcd) ||
		    opcode != OP(RDMA_READ_RESPONSE_FIRST))
			goto ack_done;
		wqe = rvt_get_swqe_ptr(qp, qp->s_acked);
		if (unlikely(wqe->wr.opcode != IB_WR_RDMA_READ))
			goto ack_op_err;
		/*
		 * If this is a response to a resent RDMA read, we
		 * have to be careful to copy the data to the right
		 * location.
		 */
		qp->s_rdma_read_len = restart_sge(&qp->s_rdma_read_sge,
						  wqe, psn, pmtu);
		goto read_middle;

	case OP(RDMA_READ_RESPONSE_MIDDLE):
		/* no AETH, no ACK */
		if (unlikely(cmp_psn(psn, qp->s_last_psn + 1)))
			goto ack_seq_err;
		if (unlikely(wqe->wr.opcode != IB_WR_RDMA_READ))
			goto ack_op_err;
read_middle:
		if (unlikely(tlen != (hdrsize + pmtu + 4)))
			goto ack_len_err;
		if (unlikely(pmtu >= qp->s_rdma_read_len))
			goto ack_len_err;

		/*
		 * We got a response so update the timeout.
		 * 4.096 usec. * (1 << qp->timeout)
		 */
		qp->s_flags |= RVT_S_TIMER;
		mod_timer(&qp->s_timer, jiffies + qp->timeout_jiffies);
		if (qp->s_flags & RVT_S_WAIT_ACK) {
			qp->s_flags &= ~RVT_S_WAIT_ACK;
			hfi1_schedule_send(qp);
		}

		if (opcode == OP(RDMA_READ_RESPONSE_MIDDLE))
			qp->s_retry = qp->s_retry_cnt;

		/*
		 * Update the RDMA receive state but do the copy w/o
		 * holding the locks and blocking interrupts.
		 */
		qp->s_rdma_read_len -= pmtu;
		update_last_psn(qp, psn);
		spin_unlock_irqrestore(&qp->s_lock, flags);
		hfi1_copy_sge(&qp->s_rdma_read_sge, data, pmtu, 0, 0);
		goto bail;

	case OP(RDMA_READ_RESPONSE_ONLY):
		aeth = be32_to_cpu(ohdr->u.aeth);
		if (!do_rc_ack(qp, aeth, psn, opcode, 0, rcd))
			goto ack_done;
		/* Get the number of bytes the message was padded by. */
		pad = (be32_to_cpu(ohdr->bth[0]) >> 20) & 3;
		/*
		 * Check that the data size is >= 0 && <= pmtu.
		 * Remember to account for ICRC (4).
		 */
		if (unlikely(tlen < (hdrsize + pad + 4)))
			goto ack_len_err;
		/*
		 * If this is a response to a resent RDMA read, we
		 * have to be careful to copy the data to the right
		 * location.
		 */
		wqe = rvt_get_swqe_ptr(qp, qp->s_acked);
		qp->s_rdma_read_len = restart_sge(&qp->s_rdma_read_sge,
						  wqe, psn, pmtu);
		goto read_last;

	case OP(RDMA_READ_RESPONSE_LAST):
		/* ACKs READ req. */
		if (unlikely(cmp_psn(psn, qp->s_last_psn + 1)))
			goto ack_seq_err;
		if (unlikely(wqe->wr.opcode != IB_WR_RDMA_READ))
			goto ack_op_err;
		/* Get the number of bytes the message was padded by. */
		pad = (be32_to_cpu(ohdr->bth[0]) >> 20) & 3;
		/*
		 * Check that the data size is >= 1 && <= pmtu.
		 * Remember to account for ICRC (4).
		 */
		if (unlikely(tlen <= (hdrsize + pad + 4)))
			goto ack_len_err;
read_last:
		tlen -= hdrsize + pad + 4;
		if (unlikely(tlen != qp->s_rdma_read_len))
			goto ack_len_err;
		aeth = be32_to_cpu(ohdr->u.aeth);
		hfi1_copy_sge(&qp->s_rdma_read_sge, data, tlen, 0, 0);
		WARN_ON(qp->s_rdma_read_sge.num_sge);
		(void)do_rc_ack(qp, aeth, psn,
				OP(RDMA_READ_RESPONSE_LAST), 0, rcd);
		goto ack_done;
	}

ack_op_err:
	status = IB_WC_LOC_QP_OP_ERR;
	goto ack_err;

ack_seq_err:
	rdma_seq_err(qp, ibp, psn, rcd);
	goto ack_done;

ack_len_err:
	status = IB_WC_LOC_LEN_ERR;
ack_err:
	if (qp->s_last == qp->s_acked) {
		hfi1_send_complete(qp, wqe, status);
		rvt_error_qp(qp, IB_WC_WR_FLUSH_ERR);
	}
ack_done:
	spin_unlock_irqrestore(&qp->s_lock, flags);
bail:
	return;
}

static inline void rc_defered_ack(struct hfi1_ctxtdata *rcd,
				  struct rvt_qp *qp)
{
	if (list_empty(&qp->rspwait)) {
		qp->r_flags |= RVT_R_RSP_NAK;
		rvt_get_qp(qp);
		list_add_tail(&qp->rspwait, &rcd->qp_wait_list);
	}
}

static inline void rc_cancel_ack(struct rvt_qp *qp)
{
	struct hfi1_qp_priv *priv = qp->priv;

	priv->r_adefered = 0;
	if (list_empty(&qp->rspwait))
		return;
	list_del_init(&qp->rspwait);
	qp->r_flags &= ~RVT_R_RSP_NAK;
	rvt_put_qp(qp);
}
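
/*
 * rc_defered_ack() parks the QP on the receive context's wait list so the
 * pending ACK/NAK goes out once the receive queue has drained;
 * rc_cancel_ack() undoes that, clearing the deferred-ack count and
 * dropping the wait-list reference, when an explicit response is going to
 * be sent instead.
 */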

/**
 * rc_rcv_error - process an incoming duplicate or error RC packet
 * @ohdr: the other headers for this packet
 * @data: the packet data
 * @qp: the QP for this packet
 * @opcode: the opcode for this packet
 * @psn: the packet sequence number for this packet
 * @diff: the difference between the PSN and the expected PSN
 * @rcd: the receive context for this packet
 *
 * This is called from hfi1_rc_rcv() to process an unexpected
 * incoming RC packet for the given QP.
 * Called at interrupt level.
 * Return 1 if no more processing is needed; otherwise return 0 to
 * schedule a response to be sent.
 */
static noinline int rc_rcv_error(struct ib_other_headers *ohdr, void *data,
				 struct rvt_qp *qp, u32 opcode, u32 psn,
				 int diff, struct hfi1_ctxtdata *rcd)
{
	struct hfi1_ibport *ibp = to_iport(qp->ibqp.device, qp->port_num);
	struct rvt_ack_entry *e;
	unsigned long flags;
	u8 i, prev;
	int old_req;

	trace_hfi1_rcv_error(qp, psn);
	if (diff > 0) {
		/*
		 * Packet sequence error.
		 * A NAK will ACK earlier sends and RDMA writes.
		 * Don't queue the NAK if we already sent one.
		 */
		if (!qp->r_nak_state) {
			ibp->rvp.n_rc_seqnak++;
			qp->r_nak_state = IB_NAK_PSN_ERROR;
			/* Use the expected PSN. */
			qp->r_ack_psn = qp->r_psn;
			/*
			 * Wait to send the sequence NAK until all packets
			 * in the receive queue have been processed.
			 * Otherwise, we end up propagating congestion.
			 */
			rc_defered_ack(rcd, qp);
		}
		goto done;
	}

	/*
	 * Handle a duplicate request. Don't re-execute SEND, RDMA
	 * write or atomic op. Don't NAK errors, just silently drop
	 * the duplicate request. Note that r_sge, r_len, and
	 * r_rcv_len may be in use so don't modify them.
	 *
	 * We are supposed to ACK the earliest duplicate PSN but we
	 * can coalesce an outstanding duplicate ACK. We have to
	 * send the earliest so that RDMA reads can be restarted at
	 * the requester's expected PSN.
	 *
	 * First, find where this duplicate PSN falls within the
	 * ACKs previously sent.
	 * old_req is true if there is an older response that is scheduled
	 * to be sent before sending this one.
	 */
	e = NULL;
	old_req = 1;
	ibp->rvp.n_rc_dupreq++;

	spin_lock_irqsave(&qp->s_lock, flags);

	for (i = qp->r_head_ack_queue; ; i = prev) {
		if (i == qp->s_tail_ack_queue)
			old_req = 0;
		if (i)
			prev = i - 1;
		else
			prev = HFI1_MAX_RDMA_ATOMIC;
		if (prev == qp->r_head_ack_queue) {
			e = NULL;
			break;
		}
		e = &qp->s_ack_queue[prev];
		if (!e->opcode) {
			e = NULL;
			break;
		}
		if (cmp_psn(psn, e->psn) >= 0) {
			if (prev == qp->s_tail_ack_queue &&
			    cmp_psn(psn, e->lpsn) <= 0)
				old_req = 0;
			break;
		}
	}
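	/*
	 * The loop above walks the ack queue backward from the most
	 * recently queued response (r_head_ack_queue) toward older
	 * entries, stopping at the first entry whose starting PSN is
	 * <= the duplicate PSN; e then names the response that covers
	 * (or precedes) the duplicate, or is NULL if none does.
	 */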
	switch (opcode) {
	case OP(RDMA_READ_REQUEST): {
		struct ib_reth *reth;
		u32 offset;
		u32 len;

		/*
		 * If we didn't find the RDMA read request in the ack queue,
		 * we can ignore this request.
		 */
		if (!e || e->opcode != OP(RDMA_READ_REQUEST))
			goto unlock_done;
		/* RETH comes after BTH */
		reth = &ohdr->u.rc.reth;
		/*
		 * Address range must be a subset of the original
		 * request and start on pmtu boundaries.
		 * We reuse the old ack_queue slot since the requester
		 * should not back up and request an earlier PSN for the
		 * same request.
		 */
		offset = delta_psn(psn, e->psn) * qp->pmtu;
		len = be32_to_cpu(reth->length);
		if (unlikely(offset + len != e->rdma_sge.sge_length))
			goto unlock_done;
		if (e->rdma_sge.mr) {
			rvt_put_mr(e->rdma_sge.mr);
			e->rdma_sge.mr = NULL;
		}
		if (len != 0) {
			u32 rkey = be32_to_cpu(reth->rkey);
			u64 vaddr = get_ib_reth_vaddr(reth);
			int ok;

			ok = rvt_rkey_ok(qp, &e->rdma_sge, len, vaddr, rkey,
					 IB_ACCESS_REMOTE_READ);
			if (unlikely(!ok))
				goto unlock_done;
		} else {
			e->rdma_sge.vaddr = NULL;
			e->rdma_sge.length = 0;
			e->rdma_sge.sge_length = 0;
		}
		e->psn = psn;
		if (old_req)
			goto unlock_done;
		qp->s_tail_ack_queue = prev;
		break;
	}

	case OP(COMPARE_SWAP):
	case OP(FETCH_ADD): {
		/*
		 * If we didn't find the atomic request in the ack queue
		 * or the send engine is already backed up to send an
		 * earlier entry, we can ignore this request.
		 */
		if (!e || e->opcode != (u8)opcode || old_req)
			goto unlock_done;
		qp->s_tail_ack_queue = prev;
		break;
	}

	default:
		/*
		 * Ignore this operation if it doesn't request an ACK
		 * or an earlier RDMA read or atomic is going to be resent.
		 */
		if (!(psn & IB_BTH_REQ_ACK) || old_req)
			goto unlock_done;
		/*
		 * Resend the most recent ACK if this request is
		 * after all the previous RDMA reads and atomics.
		 */
		if (i == qp->r_head_ack_queue) {
			spin_unlock_irqrestore(&qp->s_lock, flags);
			qp->r_nak_state = 0;
			qp->r_ack_psn = qp->r_psn - 1;
			goto send_ack;
		}

		/*
		 * Resend the RDMA read or atomic op which
		 * ACKs this duplicate request.
		 */
		qp->s_tail_ack_queue = i;
		break;
	}
	qp->s_ack_state = OP(ACKNOWLEDGE);
	qp->s_flags |= RVT_S_RESP_PENDING;
	qp->r_nak_state = 0;
	hfi1_schedule_send(qp);

unlock_done:
	spin_unlock_irqrestore(&qp->s_lock, flags);
done:
	return 1;

send_ack:
	return 0;
}

void hfi1_rc_error(struct rvt_qp *qp, enum ib_wc_status err)
{
	unsigned long flags;
	int lastwqe;

	spin_lock_irqsave(&qp->s_lock, flags);
	lastwqe = rvt_error_qp(qp, err);
	spin_unlock_irqrestore(&qp->s_lock, flags);

	if (lastwqe) {
		struct ib_event ev;

		ev.device = qp->ibqp.device;
		ev.element.qp = &qp->ibqp;
		ev.event = IB_EVENT_QP_LAST_WQE_REACHED;
		qp->ibqp.event_handler(&ev, qp->ibqp.qp_context);
	}
}

static inline void update_ack_queue(struct rvt_qp *qp, unsigned n)
{
	unsigned next;

	next = n + 1;
	if (next > HFI1_MAX_RDMA_ATOMIC)
		next = 0;
	qp->s_tail_ack_queue = next;
	qp->s_ack_state = OP(ACKNOWLEDGE);
}

static void log_cca_event(struct hfi1_pportdata *ppd, u8 sl, u32 rlid,
			  u32 lqpn, u32 rqpn, u8 svc_type)
{
	struct opa_hfi1_cong_log_event_internal *cc_event;
	unsigned long flags;

	if (sl >= OPA_MAX_SLS)
		return;

	spin_lock_irqsave(&ppd->cc_log_lock, flags);

	ppd->threshold_cong_event_map[sl / 8] |= 1 << (sl % 8);
	ppd->threshold_event_counter++;

	cc_event = &ppd->cc_events[ppd->cc_log_idx++];
	if (ppd->cc_log_idx == OPA_CONG_LOG_ELEMS)
		ppd->cc_log_idx = 0;
	cc_event->lqpn = lqpn & RVT_QPN_MASK;
	cc_event->rqpn = rqpn & RVT_QPN_MASK;
	cc_event->sl = sl;
	cc_event->svc_type = svc_type;
	cc_event->rlid = rlid;
	/* keep timestamp in units of 1.024 usec */
	cc_event->timestamp = ktime_to_ns(ktime_get()) / 1024;

	spin_unlock_irqrestore(&ppd->cc_log_lock, flags);
}

void process_becn(struct hfi1_pportdata *ppd, u8 sl, u16 rlid, u32 lqpn,
		  u32 rqpn, u8 svc_type)
{
	struct cca_timer *cca_timer;
	u16 ccti, ccti_incr, ccti_timer, ccti_limit;
	u8 trigger_threshold;
	struct cc_state *cc_state;
	unsigned long flags;

	if (sl >= OPA_MAX_SLS)
		return;

	cc_state = get_cc_state(ppd);

	if (!cc_state)
		return;

	/*
	 * 1) increase CCTI (for this SL)
	 * 2) select IPG (i.e., call set_link_ipg())
	 * 3) start timer
	 */
	ccti_limit = cc_state->cct.ccti_limit;
	ccti_incr = cc_state->cong_setting.entries[sl].ccti_increase;
	ccti_timer = cc_state->cong_setting.entries[sl].ccti_timer;
	trigger_threshold =
		cc_state->cong_setting.entries[sl].trigger_threshold;

	spin_lock_irqsave(&ppd->cca_timer_lock, flags);

	cca_timer = &ppd->cca_timer[sl];
	if (cca_timer->ccti < ccti_limit) {
		if (cca_timer->ccti + ccti_incr <= ccti_limit)
			cca_timer->ccti += ccti_incr;
		else
			cca_timer->ccti = ccti_limit;
		set_link_ipg(ppd);
	}

	ccti = cca_timer->ccti;
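
	/*
	 * The per-SL hrtimer is only armed if it is not already running;
	 * its callback (elsewhere in this driver) is expected to back the
	 * CCTI off again once per ccti_timer interval, so the congestion
	 * throttling decays after BECNs stop arriving.
	 */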
	if (!hrtimer_active(&cca_timer->hrtimer)) {
		/* ccti_timer is in units of 1.024 usec */
		unsigned long nsec = 1024 * ccti_timer;

		hrtimer_start(&cca_timer->hrtimer, ns_to_ktime(nsec),
			      HRTIMER_MODE_REL);
	}

	spin_unlock_irqrestore(&ppd->cca_timer_lock, flags);

	if ((trigger_threshold != 0) && (ccti >= trigger_threshold))
		log_cca_event(ppd, sl, rlid, lqpn, rqpn, svc_type);
}

/**
 * hfi1_rc_rcv - process an incoming RC packet
 * @packet: data packet information, including the header, payload,
 *	    receive flags, length, receive context, and QP
 *
 * This is called from qp_rcv() to process an incoming RC packet
 * for the given QP.
 * May be called at interrupt level.
 */
void hfi1_rc_rcv(struct hfi1_packet *packet)
{
	struct hfi1_ctxtdata *rcd = packet->rcd;
	struct ib_header *hdr = packet->hdr;
	u32 rcv_flags = packet->rcv_flags;
	void *data = packet->ebuf;
	u32 tlen = packet->tlen;
	struct rvt_qp *qp = packet->qp;
	struct hfi1_ibport *ibp = to_iport(qp->ibqp.device, qp->port_num);
	struct ib_other_headers *ohdr = packet->ohdr;
	u32 bth0, opcode;
	u32 hdrsize = packet->hlen;
	u32 psn;
	u32 pad;
	struct ib_wc wc;
	u32 pmtu = qp->pmtu;
	int diff;
	struct ib_reth *reth;
	unsigned long flags;
	int ret, is_fecn = 0;
	int copy_last = 0;
	u32 rkey;

	lockdep_assert_held(&qp->r_lock);
	bth0 = be32_to_cpu(ohdr->bth[0]);
	if (hfi1_ruc_check_hdr(ibp, hdr, rcv_flags & HFI1_HAS_GRH, qp, bth0))
		return;

	is_fecn = process_ecn(qp, packet, false);

	psn = be32_to_cpu(ohdr->bth[2]);
	opcode = (bth0 >> 24) & 0xff;

	/*
	 * Process responses (ACKs) before anything else. Note that the
	 * packet sequence number will be for something in the send work
	 * queue rather than the expected receive packet sequence number.
	 * In other words, this QP is the requester.
	 */
	if (opcode >= OP(RDMA_READ_RESPONSE_FIRST) &&
	    opcode <= OP(ATOMIC_ACKNOWLEDGE)) {
		rc_rcv_resp(ibp, ohdr, data, tlen, qp, opcode, psn,
			    hdrsize, pmtu, rcd);
		if (is_fecn)
			goto send_ack;
		return;
	}
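
	/*
	 * The RC response opcodes (RDMA_READ_RESPONSE_FIRST through
	 * ATOMIC_ACKNOWLEDGE) form a contiguous range in the IB opcode
	 * space, which is what lets the single range check above route
	 * every ACK and read-response packet to rc_rcv_resp().
	 */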
	/* Compute 24 bits worth of difference. */
	diff = delta_psn(psn, qp->r_psn);
	if (unlikely(diff)) {
		if (rc_rcv_error(ohdr, data, qp, opcode, psn, diff, rcd))
			return;
		goto send_ack;
	}

	/* Check for opcode sequence errors. */
	switch (qp->r_state) {
	case OP(SEND_FIRST):
	case OP(SEND_MIDDLE):
		if (opcode == OP(SEND_MIDDLE) ||
		    opcode == OP(SEND_LAST) ||
		    opcode == OP(SEND_LAST_WITH_IMMEDIATE) ||
		    opcode == OP(SEND_LAST_WITH_INVALIDATE))
			break;
		goto nack_inv;

	case OP(RDMA_WRITE_FIRST):
	case OP(RDMA_WRITE_MIDDLE):
		if (opcode == OP(RDMA_WRITE_MIDDLE) ||
		    opcode == OP(RDMA_WRITE_LAST) ||
		    opcode == OP(RDMA_WRITE_LAST_WITH_IMMEDIATE))
			break;
		goto nack_inv;

	default:
		if (opcode == OP(SEND_MIDDLE) ||
		    opcode == OP(SEND_LAST) ||
		    opcode == OP(SEND_LAST_WITH_IMMEDIATE) ||
		    opcode == OP(SEND_LAST_WITH_INVALIDATE) ||
		    opcode == OP(RDMA_WRITE_MIDDLE) ||
		    opcode == OP(RDMA_WRITE_LAST) ||
		    opcode == OP(RDMA_WRITE_LAST_WITH_IMMEDIATE))
			goto nack_inv;
		/*
		 * Note that it is up to the requester to not send a new
		 * RDMA read or atomic operation before receiving an ACK
		 * for the previous operation.
		 */
		break;
	}

	if (qp->state == IB_QPS_RTR && !(qp->r_flags & RVT_R_COMM_EST))
		qp_comm_est(qp);

	/* OK, process the packet. */
	switch (opcode) {
	case OP(SEND_FIRST):
		ret = hfi1_rvt_get_rwqe(qp, 0);
		if (ret < 0)
			goto nack_op_err;
		if (!ret)
			goto rnr_nak;
		qp->r_rcv_len = 0;
		/* FALLTHROUGH */
	case OP(SEND_MIDDLE):
	case OP(RDMA_WRITE_MIDDLE):
send_middle:
		/* Check for invalid length PMTU or posted rwqe len. */
		if (unlikely(tlen != (hdrsize + pmtu + 4)))
			goto nack_inv;
		qp->r_rcv_len += pmtu;
		if (unlikely(qp->r_rcv_len > qp->r_len))
			goto nack_inv;
		hfi1_copy_sge(&qp->r_sge, data, pmtu, 1, 0);
		break;

	case OP(RDMA_WRITE_LAST_WITH_IMMEDIATE):
		/* consume RWQE */
		ret = hfi1_rvt_get_rwqe(qp, 1);
		if (ret < 0)
			goto nack_op_err;
		if (!ret)
			goto rnr_nak;
		goto send_last_imm;

	case OP(SEND_ONLY):
	case OP(SEND_ONLY_WITH_IMMEDIATE):
	case OP(SEND_ONLY_WITH_INVALIDATE):
		ret = hfi1_rvt_get_rwqe(qp, 0);
		if (ret < 0)
			goto nack_op_err;
		if (!ret)
			goto rnr_nak;
		qp->r_rcv_len = 0;
		if (opcode == OP(SEND_ONLY))
			goto no_immediate_data;
		if (opcode == OP(SEND_ONLY_WITH_INVALIDATE))
			goto send_last_inv;
		/* FALLTHROUGH for SEND_ONLY_WITH_IMMEDIATE */
	case OP(SEND_LAST_WITH_IMMEDIATE):
send_last_imm:
		wc.ex.imm_data = ohdr->u.imm_data;
		wc.wc_flags = IB_WC_WITH_IMM;
		goto send_last;
	case OP(SEND_LAST_WITH_INVALIDATE):
send_last_inv:
		rkey = be32_to_cpu(ohdr->u.ieth);
		if (rvt_invalidate_rkey(qp, rkey))
			goto no_immediate_data;
		wc.ex.invalidate_rkey = rkey;
		wc.wc_flags = IB_WC_WITH_INVALIDATE;
		goto send_last;
	case OP(RDMA_WRITE_LAST):
		copy_last = ibpd_to_rvtpd(qp->ibqp.pd)->user;
		/* fall through */
	case OP(SEND_LAST):
no_immediate_data:
		wc.wc_flags = 0;
		wc.ex.imm_data = 0;
send_last:
		/* Get the number of bytes the message was padded by. */
		pad = (bth0 >> 20) & 3;
		/* Check for invalid length. */
		/* LAST len should be >= 1 */
		if (unlikely(tlen < (hdrsize + pad + 4)))
			goto nack_inv;
		/* Don't count the CRC. */
		tlen -= (hdrsize + pad + 4);
		wc.byte_len = tlen + qp->r_rcv_len;
		if (unlikely(wc.byte_len > qp->r_len))
			goto nack_inv;
		hfi1_copy_sge(&qp->r_sge, data, tlen, 1, copy_last);
		rvt_put_ss(&qp->r_sge);
		qp->r_msn++;
		if (!test_and_clear_bit(RVT_R_WRID_VALID, &qp->r_aflags))
			break;
		wc.wr_id = qp->r_wr_id;
		wc.status = IB_WC_SUCCESS;
		if (opcode == OP(RDMA_WRITE_LAST_WITH_IMMEDIATE) ||
		    opcode == OP(RDMA_WRITE_ONLY_WITH_IMMEDIATE))
			wc.opcode = IB_WC_RECV_RDMA_WITH_IMM;
		else
			wc.opcode = IB_WC_RECV;
		wc.qp = &qp->ibqp;
		wc.src_qp = qp->remote_qpn;
		wc.slid = qp->remote_ah_attr.dlid;
		/*
		 * It seems that IB mandates the presence of an SL in a
		 * work completion only for the UD transport (see section
		 * 11.4.2 of IBTA Vol. 1).
		 *
		 * However, the way the SL is chosen below is consistent
		 * with the way that IB/qib works and is trying to avoid
		 * introducing incompatibilities.
		 *
		 * See also OPA Vol. 1, section 9.7.6, and table 9-17.
		 */
		wc.sl = qp->remote_ah_attr.sl;
		/* zero fields that are N/A */
		wc.vendor_err = 0;
		wc.pkey_index = 0;
		wc.dlid_path_bits = 0;
		wc.port_num = 0;
		/* Signal completion event if the solicited bit is set. */
		rvt_cq_enter(ibcq_to_rvtcq(qp->ibqp.recv_cq), &wc,
			     (bth0 & IB_BTH_SOLICITED) != 0);
		break;

	case OP(RDMA_WRITE_ONLY):
		copy_last = 1;
		/* fall through */
	case OP(RDMA_WRITE_FIRST):
	case OP(RDMA_WRITE_ONLY_WITH_IMMEDIATE):
		if (unlikely(!(qp->qp_access_flags & IB_ACCESS_REMOTE_WRITE)))
			goto nack_inv;
		/* consume RWQE */
		reth = &ohdr->u.rc.reth;
		qp->r_len = be32_to_cpu(reth->length);
		qp->r_rcv_len = 0;
		qp->r_sge.sg_list = NULL;
		if (qp->r_len != 0) {
			u32 rkey = be32_to_cpu(reth->rkey);
			u64 vaddr = get_ib_reth_vaddr(reth);
			int ok;

			/* Check rkey & NAK */
			ok = rvt_rkey_ok(qp, &qp->r_sge.sge, qp->r_len, vaddr,
					 rkey, IB_ACCESS_REMOTE_WRITE);
			if (unlikely(!ok))
				goto nack_acc;
			qp->r_sge.num_sge = 1;
		} else {
			qp->r_sge.num_sge = 0;
			qp->r_sge.sge.mr = NULL;
			qp->r_sge.sge.vaddr = NULL;
			qp->r_sge.sge.length = 0;
			qp->r_sge.sge.sge_length = 0;
		}
		if (opcode == OP(RDMA_WRITE_FIRST))
			goto send_middle;
		else if (opcode == OP(RDMA_WRITE_ONLY))
			goto no_immediate_data;
		ret = hfi1_rvt_get_rwqe(qp, 1);
		if (ret < 0)
			goto nack_op_err;
		if (!ret) {
			/* peer will send again */
			rvt_put_ss(&qp->r_sge);
			goto rnr_nak;
		}
		wc.ex.imm_data = ohdr->u.rc.imm_data;
		wc.wc_flags = IB_WC_WITH_IMM;
		goto send_last;

	case OP(RDMA_READ_REQUEST): {
		struct rvt_ack_entry *e;
		u32 len;
		u8 next;

		if (unlikely(!(qp->qp_access_flags & IB_ACCESS_REMOTE_READ)))
			goto nack_inv;
		next = qp->r_head_ack_queue + 1;
		/* s_ack_queue is size HFI1_MAX_RDMA_ATOMIC+1 so use > not >= */
		if (next > HFI1_MAX_RDMA_ATOMIC)
			next = 0;
		spin_lock_irqsave(&qp->s_lock, flags);
		if (unlikely(next == qp->s_tail_ack_queue)) {
			if (!qp->s_ack_queue[next].sent)
				goto nack_inv_unlck;
			update_ack_queue(qp, next);
		}
		e = &qp->s_ack_queue[qp->r_head_ack_queue];
		if (e->opcode == OP(RDMA_READ_REQUEST) && e->rdma_sge.mr) {
			rvt_put_mr(e->rdma_sge.mr);
			e->rdma_sge.mr = NULL;
		}
		reth = &ohdr->u.rc.reth;
		len = be32_to_cpu(reth->length);
		if (len) {
			u32 rkey = be32_to_cpu(reth->rkey);
			u64 vaddr = get_ib_reth_vaddr(reth);
			int ok;

			/* Check rkey & NAK */
			ok = rvt_rkey_ok(qp, &e->rdma_sge, len, vaddr,
					 rkey, IB_ACCESS_REMOTE_READ);
			if (unlikely(!ok))
				goto nack_acc_unlck;
			/*
			 * Update the next expected PSN. We add 1 later
			 * below, so only add the remainder here.
			 */
			if (len > pmtu)
				qp->r_psn += (len - 1) / pmtu;
		} else {
			e->rdma_sge.mr = NULL;
			e->rdma_sge.vaddr = NULL;
			e->rdma_sge.length = 0;
			e->rdma_sge.sge_length = 0;
		}
		e->opcode = opcode;
		e->sent = 0;
		e->psn = psn;
		e->lpsn = qp->r_psn;
		/*
		 * We need to increment the MSN here instead of when we
		 * finish sending the result since a duplicate request would
		 * increment it more than once.
		 */
		qp->r_msn++;
		qp->r_psn++;
		qp->r_state = opcode;
		qp->r_nak_state = 0;
		qp->r_head_ack_queue = next;

		/* Schedule the send engine. */
		qp->s_flags |= RVT_S_RESP_PENDING;
		hfi1_schedule_send(qp);

		spin_unlock_irqrestore(&qp->s_lock, flags);
		if (is_fecn)
			goto send_ack;
		return;
	}

	case OP(COMPARE_SWAP):
	case OP(FETCH_ADD): {
		struct ib_atomic_eth *ateth;
		struct rvt_ack_entry *e;
		u64 vaddr;
		atomic64_t *maddr;
		u64 sdata;
		u32 rkey;
		u8 next;

		if (unlikely(!(qp->qp_access_flags & IB_ACCESS_REMOTE_ATOMIC)))
			goto nack_inv;
		next = qp->r_head_ack_queue + 1;
		if (next > HFI1_MAX_RDMA_ATOMIC)
			next = 0;
		spin_lock_irqsave(&qp->s_lock, flags);
		if (unlikely(next == qp->s_tail_ack_queue)) {
			if (!qp->s_ack_queue[next].sent)
				goto nack_inv_unlck;
			update_ack_queue(qp, next);
		}
		e = &qp->s_ack_queue[qp->r_head_ack_queue];
		if (e->opcode == OP(RDMA_READ_REQUEST) && e->rdma_sge.mr) {
			rvt_put_mr(e->rdma_sge.mr);
			e->rdma_sge.mr = NULL;
		}
		ateth = &ohdr->u.atomic_eth;
		vaddr = get_ib_ateth_vaddr(ateth);
		if (unlikely(vaddr & (sizeof(u64) - 1)))
			goto nack_inv_unlck;
		rkey = be32_to_cpu(ateth->rkey);
		/* Check rkey & NAK */
		if (unlikely(!rvt_rkey_ok(qp, &qp->r_sge.sge, sizeof(u64),
					  vaddr, rkey,
					  IB_ACCESS_REMOTE_ATOMIC)))
			goto nack_acc_unlck;
		/* Perform atomic OP and save result. */
		maddr = (atomic64_t *)qp->r_sge.sge.vaddr;
		sdata = get_ib_ateth_swap(ateth);
		e->atomic_data = (opcode == OP(FETCH_ADD)) ?
			(u64)atomic64_add_return(sdata, maddr) - sdata :
			(u64)cmpxchg((u64 *)qp->r_sge.sge.vaddr,
				     get_ib_ateth_compare(ateth),
				     sdata);
		rvt_put_mr(qp->r_sge.sge.mr);
		qp->r_sge.num_sge = 0;
		e->opcode = opcode;
		e->sent = 0;
		e->psn = psn;
		e->lpsn = psn;
		qp->r_msn++;
		qp->r_psn++;
		qp->r_state = opcode;
		qp->r_nak_state = 0;
		qp->r_head_ack_queue = next;

		/* Schedule the send engine. */
		qp->s_flags |= RVT_S_RESP_PENDING;
		hfi1_schedule_send(qp);

		spin_unlock_irqrestore(&qp->s_lock, flags);
		if (is_fecn)
			goto send_ack;
		return;
	}

	default:
		/* NAK unknown opcodes. */
		goto nack_inv;
	}
	qp->r_psn++;
	qp->r_state = opcode;
	qp->r_ack_psn = psn;
	qp->r_nak_state = 0;
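
	/*
	 * ACKs are normally coalesced: each packet that requests an ACK
	 * bumps r_adefered and the response is deferred via
	 * rc_defered_ack() until the receive queue drains.  Once
	 * HFI1_PSN_CREDIT deferrals accumulate, or a FECN needs to be
	 * reflected, or no further packets appear to be pending in this
	 * receive burst, the deferral is cancelled and an explicit ACK is
	 * sent right away instead.
	 */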
	/* Send an ACK if requested or required. */
	if (psn & IB_BTH_REQ_ACK) {
		struct hfi1_qp_priv *priv = qp->priv;

		if (packet->numpkt == 0) {
			rc_cancel_ack(qp);
			goto send_ack;
		}
		if (priv->r_adefered >= HFI1_PSN_CREDIT) {
			rc_cancel_ack(qp);
			goto send_ack;
		}
		if (unlikely(is_fecn)) {
			rc_cancel_ack(qp);
			goto send_ack;
		}
		priv->r_adefered++;
		rc_defered_ack(rcd, qp);
	}
	return;

rnr_nak:
	qp->r_nak_state = qp->r_min_rnr_timer | IB_RNR_NAK;
	qp->r_ack_psn = qp->r_psn;
	/* Queue RNR NAK for later */
	rc_defered_ack(rcd, qp);
	return;

nack_op_err:
	hfi1_rc_error(qp, IB_WC_LOC_QP_OP_ERR);
	qp->r_nak_state = IB_NAK_REMOTE_OPERATIONAL_ERROR;
	qp->r_ack_psn = qp->r_psn;
	/* Queue NAK for later */
	rc_defered_ack(rcd, qp);
	return;

nack_inv_unlck:
	spin_unlock_irqrestore(&qp->s_lock, flags);
nack_inv:
	hfi1_rc_error(qp, IB_WC_LOC_QP_OP_ERR);
	qp->r_nak_state = IB_NAK_INVALID_REQUEST;
	qp->r_ack_psn = qp->r_psn;
	/* Queue NAK for later */
	rc_defered_ack(rcd, qp);
	return;

nack_acc_unlck:
	spin_unlock_irqrestore(&qp->s_lock, flags);
nack_acc:
	hfi1_rc_error(qp, IB_WC_LOC_PROT_ERR);
	qp->r_nak_state = IB_NAK_REMOTE_ACCESS_ERROR;
	qp->r_ack_psn = qp->r_psn;
send_ack:
	hfi1_send_rc_ack(rcd, qp, is_fecn);
}

void hfi1_rc_hdrerr(
	struct hfi1_ctxtdata *rcd,
	struct ib_header *hdr,
	u32 rcv_flags,
	struct rvt_qp *qp)
{
	int has_grh = rcv_flags & HFI1_HAS_GRH;
	struct ib_other_headers *ohdr;
	struct hfi1_ibport *ibp = to_iport(qp->ibqp.device, qp->port_num);
	int diff;
	u32 opcode;
	u32 psn, bth0;

	/* Check for GRH */
	ohdr = &hdr->u.oth;
	if (has_grh)
		ohdr = &hdr->u.l.oth;

	bth0 = be32_to_cpu(ohdr->bth[0]);
	if (hfi1_ruc_check_hdr(ibp, hdr, has_grh, qp, bth0))
		return;

	psn = be32_to_cpu(ohdr->bth[2]);
	opcode = (bth0 >> 24) & 0xff;

	/* Only deal with RDMA Writes for now */
	if (opcode < IB_OPCODE_RC_RDMA_READ_RESPONSE_FIRST) {
		diff = delta_psn(psn, qp->r_psn);
		if (!qp->r_nak_state && diff >= 0) {
			ibp->rvp.n_rc_seqnak++;
			qp->r_nak_state = IB_NAK_PSN_ERROR;
			/* Use the expected PSN. */
			qp->r_ack_psn = qp->r_psn;
			/*
			 * Wait to send the sequence NAK until all
			 * packets in the receive queue have been
			 * processed.  Otherwise, we end up propagating
			 * congestion.
			 */
			rc_defered_ack(rcd, qp);
		} /* Out of sequence NAK */
	} /* QP Request NAKs */
}