xhci-mem.c

/*
 * USB HOST XHCI Controller stack
 *
 * Based on xHCI host controller driver in linux-kernel
 * by Sarah Sharp.
 *
 * Copyright (C) 2008 Intel Corp.
 * Author: Sarah Sharp
 *
 * Copyright (C) 2013 Samsung Electronics Co.Ltd
 * Authors: Vivek Gautam <gautam.vivek@samsung.com>
 *	    Vikas Sajjan <vikas.sajjan@samsung.com>
 *
 * SPDX-License-Identifier:	GPL-2.0+
 */

#include <common.h>
#include <dm.h>
#include <asm/byteorder.h>
#include <usb.h>
#include <malloc.h>
#include <asm/cache.h>
#include <linux/errno.h>

#include "xhci.h"

#define CACHELINE_SIZE		CONFIG_SYS_CACHELINE_SIZE
/**
 * Flushes the dcache for the memory region starting at the given address
 * and spanning the given length.
 *
 * @param addr	start address of the memory region to be flushed
 * @param len	length of the memory region to be flushed
 * @return none
 */
void xhci_flush_cache(uintptr_t addr, u32 len)
{
	BUG_ON((void *)addr == NULL || len == 0);

	flush_dcache_range(addr & ~(CACHELINE_SIZE - 1),
			   ALIGN(addr + len, CACHELINE_SIZE));
}
/**
 * Invalidates the dcache for the memory region starting at the given address
 * and spanning the given length.
 *
 * @param addr	start address of the memory region to be invalidated
 * @param len	length of the memory region to be invalidated
 * @return none
 */
void xhci_inval_cache(uintptr_t addr, u32 len)
{
	BUG_ON((void *)addr == NULL || len == 0);

	invalidate_dcache_range(addr & ~(CACHELINE_SIZE - 1),
				ALIGN(addr + len, CACHELINE_SIZE));
}
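/*
 * Rounding example for the two helpers above (illustrative only, assuming a
 * 64-byte cache line): a call with addr = 0x1234 and len = 8 operates on the
 * range 0x1200 (addr rounded down) to 0x1240 (addr + len rounded up), so both
 * partially covered cache lines are maintained.
 */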
/**
 * Frees the "segment" passed
 *
 * @param seg	pointer to the "segment" to be freed
 * @return none
 */
static void xhci_segment_free(struct xhci_segment *seg)
{
	free(seg->trbs);
	seg->trbs = NULL;

	free(seg);
}
/**
 * Frees the "ring" passed
 *
 * @param ring	pointer to the "ring" to be freed
 * @return none
 */
static void xhci_ring_free(struct xhci_ring *ring)
{
	struct xhci_segment *seg;
	struct xhci_segment *first_seg;

	BUG_ON(!ring);

	first_seg = ring->first_seg;
	seg = first_seg->next;
	while (seg != first_seg) {
		struct xhci_segment *next = seg->next;
		xhci_segment_free(seg);
		seg = next;
	}
	xhci_segment_free(first_seg);

	free(ring);
}
/**
 * Frees the "xhci_container_ctx" passed
 *
 * @param ctx	pointer to the "xhci_container_ctx" to be freed
 * @return none
 */
static void xhci_free_container_ctx(struct xhci_container_ctx *ctx)
{
	free(ctx->bytes);
	free(ctx);
}
/**
 * Frees the virtual devices of the "xhci_ctrl" passed
 *
 * @param ctrl	pointer to the "xhci_ctrl" whose virtual devices are to be freed
 * @return none
 */
static void xhci_free_virt_devices(struct xhci_ctrl *ctrl)
{
	int i;
	int slot_id;
	struct xhci_virt_device *virt_dev;

	/*
	 * refactored here to loop through all virt_dev
	 * Slot ID 0 is reserved
	 */
	for (slot_id = 0; slot_id < MAX_HC_SLOTS; slot_id++) {
		virt_dev = ctrl->devs[slot_id];
		if (!virt_dev)
			continue;

		ctrl->dcbaa->dev_context_ptrs[slot_id] = 0;

		for (i = 0; i < 31; ++i)
			if (virt_dev->eps[i].ring)
				xhci_ring_free(virt_dev->eps[i].ring);

		if (virt_dev->in_ctx)
			xhci_free_container_ctx(virt_dev->in_ctx);
		if (virt_dev->out_ctx)
			xhci_free_container_ctx(virt_dev->out_ctx);

		free(virt_dev);
		/* make sure we are pointing to NULL */
		ctrl->devs[slot_id] = NULL;
	}
}
/**
 * Frees all the memory allocated for the given host controller
 *
 * @param ctrl	pointer to the "xhci_ctrl" to be cleaned up
 * @return none
 */
void xhci_cleanup(struct xhci_ctrl *ctrl)
{
	xhci_ring_free(ctrl->event_ring);
	xhci_ring_free(ctrl->cmd_ring);
	xhci_free_virt_devices(ctrl);
	free(ctrl->erst.entries);
	free(ctrl->dcbaa);
	memset(ctrl, '\0', sizeof(struct xhci_ctrl));
}
/**
 * Allocates cache-line-aligned, zeroed memory
 *
 * @param size	size of the memory to be allocated
 * @return pointer to the allocated, aligned memory
 */
static void *xhci_malloc(unsigned int size)
{
	void *ptr;
	size_t cacheline_size = max(XHCI_ALIGNMENT, CACHELINE_SIZE);

	ptr = memalign(cacheline_size, ALIGN(size, cacheline_size));
	BUG_ON(!ptr);
	memset(ptr, '\0', size);

	xhci_flush_cache((uintptr_t)ptr, size);

	return ptr;
}
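/*
 * Sizing sketch (illustrative only, assuming XHCI_ALIGNMENT and
 * CACHELINE_SIZE are both 64): xhci_malloc(1000) requests
 * memalign(64, 1024), then zeroes and flushes the requested 1000 bytes,
 * so the controller reads back zeroes rather than stale cache contents.
 */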
/**
 * Make the prev segment point to the next segment.
 * Change the last TRB in the prev segment to be a Link TRB which points to the
 * address of the next segment. The caller needs to set any Link TRB
 * related flags, such as End TRB, Toggle Cycle, and no snoop.
 *
 * @param prev		pointer to the previous segment
 * @param next		pointer to the next segment
 * @param link_trbs	flag to indicate whether to link the trbs or NOT
 * @return none
 */
static void xhci_link_segments(struct xhci_segment *prev,
			       struct xhci_segment *next, bool link_trbs)
{
	u32 val;
	u64 val_64 = 0;

	if (!prev || !next)
		return;
	prev->next = next;
	if (link_trbs) {
		val_64 = (uintptr_t)next->trbs;
		prev->trbs[TRBS_PER_SEGMENT-1].link.segment_ptr = val_64;

		/*
		 * Set the last TRB in the segment to
		 * have a TRB type ID of Link TRB
		 */
		val = le32_to_cpu(prev->trbs[TRBS_PER_SEGMENT-1].link.control);
		val &= ~TRB_TYPE_BITMASK;
		val |= (TRB_LINK << TRB_TYPE_SHIFT);

		prev->trbs[TRBS_PER_SEGMENT-1].link.control = cpu_to_le32(val);
	}
}
/**
 * Initialises the ring's enqueue, dequeue, enq_seg and deq_seg pointers
 *
 * @param ring	pointer to the RING to be initialised
 * @return none
 */
static void xhci_initialize_ring_info(struct xhci_ring *ring)
{
	/*
	 * The ring is empty, so the enqueue pointer == dequeue pointer
	 */
	ring->enqueue = ring->first_seg->trbs;
	ring->enq_seg = ring->first_seg;
	ring->dequeue = ring->enqueue;
	ring->deq_seg = ring->first_seg;

	/*
	 * The ring is initialized to 0. The producer must write 1 to the
	 * cycle bit to handover ownership of the TRB, so PCS = 1.
	 * The consumer must compare CCS to the cycle bit to
	 * check ownership, so CCS = 1.
	 */
	ring->cycle_state = 1;
}
/**
 * Allocates a generic ring segment from the ring pool, sets the dma address,
 * initializes the segment to zero, and sets the private next pointer to NULL.
 * Section 4.11.1.1:
 * "All components of all Command and Transfer TRBs shall be initialized to '0'"
 *
 * @param	none
 * @return pointer to the newly allocated SEGMENT
 */
static struct xhci_segment *xhci_segment_alloc(void)
{
	struct xhci_segment *seg;

	seg = (struct xhci_segment *)malloc(sizeof(struct xhci_segment));
	BUG_ON(!seg);

	seg->trbs = (union xhci_trb *)xhci_malloc(SEGMENT_SIZE);

	seg->next = NULL;

	return seg;
}
/**
 * Create a new ring with zero or more segments.
 * TODO: current code only uses one-time-allocated single-segment rings
 * of 1KB anyway, so we might as well get rid of all the segment and
 * linking code (and maybe increase the size a bit, e.g. 4KB).
 *
 * Link each segment together into a ring.
 * Set the end flag and the cycle toggle bit on the last segment.
 * See section 4.9.2 and figures 15 and 16 of XHCI spec rev1.0.
 *
 * @param num_segs	number of segments in the ring
 * @param link_trbs	flag to indicate whether to link the trbs or NOT
 * @return pointer to the newly created RING
 */
struct xhci_ring *xhci_ring_alloc(unsigned int num_segs, bool link_trbs)
{
	struct xhci_ring *ring;
	struct xhci_segment *prev;

	ring = (struct xhci_ring *)malloc(sizeof(struct xhci_ring));
	BUG_ON(!ring);

	if (num_segs == 0)
		return ring;

	ring->first_seg = xhci_segment_alloc();
	BUG_ON(!ring->first_seg);

	num_segs--;

	prev = ring->first_seg;
	while (num_segs > 0) {
		struct xhci_segment *next;

		next = xhci_segment_alloc();
		BUG_ON(!next);

		xhci_link_segments(prev, next, link_trbs);

		prev = next;
		num_segs--;
	}
	xhci_link_segments(prev, ring->first_seg, link_trbs);
	if (link_trbs) {
		/* See section 4.9.2.1 and 6.4.4.1 */
		prev->trbs[TRBS_PER_SEGMENT-1].link.control |=
					cpu_to_le32(LINK_TOGGLE);
	}
	xhci_initialize_ring_info(ring);

	return ring;
}
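/*
 * Usage sketch (illustrative; see xhci_alloc_virt_device() below for a real
 * call site): a single-segment ring whose link TRB points back to its own
 * first segment is created, and later released, with
 *
 *	struct xhci_ring *ring = xhci_ring_alloc(1, true);
 *	...
 *	xhci_ring_free(ring);
 */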
/**
 * Allocates the Container context
 *
 * @param ctrl	Host controller data structure
 * @param type	type of XHCI Container Context
 * @return NULL if failed else pointer to the context on success
 */
static struct xhci_container_ctx
		*xhci_alloc_container_ctx(struct xhci_ctrl *ctrl, int type)
{
	struct xhci_container_ctx *ctx;

	ctx = (struct xhci_container_ctx *)
		malloc(sizeof(struct xhci_container_ctx));
	BUG_ON(!ctx);

	BUG_ON((type != XHCI_CTX_TYPE_DEVICE) && (type != XHCI_CTX_TYPE_INPUT));
	ctx->type = type;
	ctx->size = (MAX_EP_CTX_NUM + 1) *
			CTX_SIZE(readl(&ctrl->hccr->cr_hccparams));
	if (type == XHCI_CTX_TYPE_INPUT)
		ctx->size += CTX_SIZE(readl(&ctrl->hccr->cr_hccparams));

	ctx->bytes = (u8 *)xhci_malloc(ctx->size);

	return ctx;
}
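/*
 * Size example (illustrative only, assuming 32-byte contexts, i.e. the CSZ
 * bit in HCCPARAMS is clear, and MAX_EP_CTX_NUM is 31): a device context is
 * (31 + 1) * 32 = 1024 bytes, and an input context adds one more entry for
 * the input control context, giving 1056 bytes before xhci_malloc() rounds
 * up to a cache-line multiple.
 */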
/**
 * Allocates the virtual device for the given slot
 *
 * @param ctrl		host controller data structure
 * @param slot_id	slot ID assigned to the device by the controller
 * @return 0 on success else a negative error code on failure
 */
int xhci_alloc_virt_device(struct xhci_ctrl *ctrl, unsigned int slot_id)
{
	u64 byte_64 = 0;
	struct xhci_virt_device *virt_dev;

	/* Slot ID 0 is reserved */
	if (ctrl->devs[slot_id]) {
		printf("Virt dev for slot[%d] already allocated\n", slot_id);
		return -EEXIST;
	}

	ctrl->devs[slot_id] = (struct xhci_virt_device *)
					malloc(sizeof(struct xhci_virt_device));

	if (!ctrl->devs[slot_id]) {
		puts("Failed to allocate virtual device\n");
		return -ENOMEM;
	}

	memset(ctrl->devs[slot_id], 0, sizeof(struct xhci_virt_device));
	virt_dev = ctrl->devs[slot_id];

	/* Allocate the (output) device context that will be used in the HC. */
	virt_dev->out_ctx = xhci_alloc_container_ctx(ctrl,
					XHCI_CTX_TYPE_DEVICE);
	if (!virt_dev->out_ctx) {
		puts("Failed to allocate out context for virt dev\n");
		return -ENOMEM;
	}

	/* Allocate the (input) device context for address device command */
	virt_dev->in_ctx = xhci_alloc_container_ctx(ctrl,
					XHCI_CTX_TYPE_INPUT);
	if (!virt_dev->in_ctx) {
		puts("Failed to allocate in context for virt dev\n");
		return -ENOMEM;
	}

	/* Allocate endpoint 0 ring */
	virt_dev->eps[0].ring = xhci_ring_alloc(1, true);

	byte_64 = (uintptr_t)(virt_dev->out_ctx->bytes);

	/* Point to output device context in dcbaa. */
	ctrl->dcbaa->dev_context_ptrs[slot_id] = byte_64;

	xhci_flush_cache((uintptr_t)&ctrl->dcbaa->dev_context_ptrs[slot_id],
			 sizeof(__le64));
	return 0;
}
/**
 * Allocates the necessary data structures
 * for XHCI host controller
 *
 * @param ctrl	Host controller data structure
 * @param hccr	pointer to HOST Controller Control Registers
 * @param hcor	pointer to HOST Controller Operational Registers
 * @return 0 if successful else a negative error code on failure
 */
int xhci_mem_init(struct xhci_ctrl *ctrl, struct xhci_hccr *hccr,
		  struct xhci_hcor *hcor)
{
	uint64_t val_64;
	uint64_t trb_64;
	uint32_t val;
	unsigned long deq;
	int i;
	struct xhci_segment *seg;

	/* DCBAA initialization */
	ctrl->dcbaa = (struct xhci_device_context_array *)
			xhci_malloc(sizeof(struct xhci_device_context_array));
	if (ctrl->dcbaa == NULL) {
		puts("unable to allocate DCBA\n");
		return -ENOMEM;
	}

	val_64 = (uintptr_t)ctrl->dcbaa;
	/* Set the pointer in DCBAA register */
	xhci_writeq(&hcor->or_dcbaap, val_64);

	/* Command ring control pointer register initialization */
	ctrl->cmd_ring = xhci_ring_alloc(1, true);

	/* Set the address in the Command Ring Control register */
	trb_64 = (uintptr_t)ctrl->cmd_ring->first_seg->trbs;
	val_64 = xhci_readq(&hcor->or_crcr);
	val_64 = (val_64 & (u64) CMD_RING_RSVD_BITS) |
		(trb_64 & (u64) ~CMD_RING_RSVD_BITS) |
		ctrl->cmd_ring->cycle_state;
	xhci_writeq(&hcor->or_crcr, val_64);

	/* write the address of db register */
	val = xhci_readl(&hccr->cr_dboff);
	val &= DBOFF_MASK;
	ctrl->dba = (struct xhci_doorbell_array *)((char *)hccr + val);

	/* write the address of runtime register */
	val = xhci_readl(&hccr->cr_rtsoff);
	val &= RTSOFF_MASK;
	ctrl->run_regs = (struct xhci_run_regs *)((char *)hccr + val);

	/* writing the address of ir_set structure */
	ctrl->ir_set = &ctrl->run_regs->ir_set[0];

	/* Event ring does not maintain link TRB */
	ctrl->event_ring = xhci_ring_alloc(ERST_NUM_SEGS, false);
	ctrl->erst.entries = (struct xhci_erst_entry *)
		xhci_malloc(sizeof(struct xhci_erst_entry) * ERST_NUM_SEGS);

	ctrl->erst.num_entries = ERST_NUM_SEGS;

	for (val = 0, seg = ctrl->event_ring->first_seg;
			val < ERST_NUM_SEGS;
			val++) {
		struct xhci_erst_entry *entry = &ctrl->erst.entries[val];

		trb_64 = (uintptr_t)seg->trbs;
		xhci_writeq(&entry->seg_addr, trb_64);
		entry->seg_size = cpu_to_le32(TRBS_PER_SEGMENT);
		entry->rsvd = 0;
		seg = seg->next;
	}
	xhci_flush_cache((uintptr_t)ctrl->erst.entries,
			 ERST_NUM_SEGS * sizeof(struct xhci_erst_entry));

	deq = (unsigned long)ctrl->event_ring->dequeue;

	/* Update HC event ring dequeue pointer */
	xhci_writeq(&ctrl->ir_set->erst_dequeue,
		    (u64)deq & (u64)~ERST_PTR_MASK);

	/* set ERST count with the number of entries in the segment table */
	val = xhci_readl(&ctrl->ir_set->erst_size);
	val &= ERST_SIZE_MASK;
	val |= ERST_NUM_SEGS;
	xhci_writel(&ctrl->ir_set->erst_size, val);

	/* this is the event ring segment table pointer */
	val_64 = xhci_readq(&ctrl->ir_set->erst_base);
	val_64 &= ERST_PTR_MASK;
	val_64 |= ((uintptr_t)(ctrl->erst.entries) & ~ERST_PTR_MASK);

	xhci_writeq(&ctrl->ir_set->erst_base, val_64);

	/* initializing the virtual devices to NULL */
	for (i = 0; i < MAX_HC_SLOTS; ++i)
		ctrl->devs[i] = NULL;

	/*
	 * Just Zero'ing this register completely,
	 * or some spurious Device Notification Events
	 * might screw things here.
	 */
	xhci_writel(&hcor->or_dnctrl, 0x0);

	return 0;
}
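/*
 * Initialisation sketch (illustrative only; the real call sites live in the
 * generic xHCI stack, not in this file): after the controller has been halted
 * and reset, the stack calls
 *
 *	ret = xhci_mem_init(ctrl, hccr, hcor);
 *
 * once to set up the DCBAA, command ring, event ring and ERST, and calls
 * xhci_cleanup(ctrl) to release everything when the host is removed.
 */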
/**
 * Give the input control context for the passed container context
 *
 * @param ctx	pointer to the context
 * @return pointer to the Input control context data
 */
struct xhci_input_control_ctx
		*xhci_get_input_control_ctx(struct xhci_container_ctx *ctx)
{
	BUG_ON(ctx->type != XHCI_CTX_TYPE_INPUT);

	return (struct xhci_input_control_ctx *)ctx->bytes;
}
/**
 * Give the slot context for the passed container context
 *
 * @param ctrl	Host controller data structure
 * @param ctx	pointer to the context
 * @return pointer to the slot control context data
 */
struct xhci_slot_ctx *xhci_get_slot_ctx(struct xhci_ctrl *ctrl,
					struct xhci_container_ctx *ctx)
{
	if (ctx->type == XHCI_CTX_TYPE_DEVICE)
		return (struct xhci_slot_ctx *)ctx->bytes;

	return (struct xhci_slot_ctx *)
		(ctx->bytes + CTX_SIZE(readl(&ctrl->hccr->cr_hccparams)));
}
/**
 * Gets the EP context based on the ep_index
 *
 * @param ctrl		Host controller data structure
 * @param ctx		context container
 * @param ep_index	index of the endpoint
 * @return pointer to the End point context
 */
struct xhci_ep_ctx *xhci_get_ep_ctx(struct xhci_ctrl *ctrl,
				    struct xhci_container_ctx *ctx,
				    unsigned int ep_index)
{
	/* increment ep index by offset of start of ep ctx array */
	ep_index++;
	if (ctx->type == XHCI_CTX_TYPE_INPUT)
		ep_index++;

	return (struct xhci_ep_ctx *)
		(ctx->bytes +
		(ep_index * CTX_SIZE(readl(&ctrl->hccr->cr_hccparams))));
}
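/*
 * Offset example (illustrative only, assuming 32-byte contexts): for the
 * default control endpoint (ep_index 0), the EP context lives at
 * ctx->bytes + 1 * 32 in a device context, but at ctx->bytes + 2 * 32 in an
 * input context, because the input control context occupies the first entry
 * there.
 */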
/**
 * Copy output xhci_ep_ctx to the input xhci_ep_ctx copy.
 * Useful when you want to change one particular aspect of the endpoint
 * and then issue a configure endpoint command.
 *
 * @param ctrl		Host controller data structure
 * @param in_ctx	contains the input context
 * @param out_ctx	contains the output context
 * @param ep_index	index of the end point
 * @return none
 */
void xhci_endpoint_copy(struct xhci_ctrl *ctrl,
			struct xhci_container_ctx *in_ctx,
			struct xhci_container_ctx *out_ctx,
			unsigned int ep_index)
{
	struct xhci_ep_ctx *out_ep_ctx;
	struct xhci_ep_ctx *in_ep_ctx;

	out_ep_ctx = xhci_get_ep_ctx(ctrl, out_ctx, ep_index);
	in_ep_ctx = xhci_get_ep_ctx(ctrl, in_ctx, ep_index);

	in_ep_ctx->ep_info = out_ep_ctx->ep_info;
	in_ep_ctx->ep_info2 = out_ep_ctx->ep_info2;
	in_ep_ctx->deq = out_ep_ctx->deq;
	in_ep_ctx->tx_info = out_ep_ctx->tx_info;
}
/**
 * Copy output xhci_slot_ctx to the input xhci_slot_ctx.
 * Useful when you want to change one particular aspect of the endpoint
 * and then issue a configure endpoint command.
 * Only the context entries field matters, but
 * we'll copy the whole thing anyway.
 *
 * @param ctrl		Host controller data structure
 * @param in_ctx	contains the input context
 * @param out_ctx	contains the output context
 * @return none
 */
void xhci_slot_copy(struct xhci_ctrl *ctrl, struct xhci_container_ctx *in_ctx,
		    struct xhci_container_ctx *out_ctx)
{
	struct xhci_slot_ctx *in_slot_ctx;
	struct xhci_slot_ctx *out_slot_ctx;

	in_slot_ctx = xhci_get_slot_ctx(ctrl, in_ctx);
	out_slot_ctx = xhci_get_slot_ctx(ctrl, out_ctx);

	in_slot_ctx->dev_info = out_slot_ctx->dev_info;
	in_slot_ctx->dev_info2 = out_slot_ctx->dev_info2;
	in_slot_ctx->tt_info = out_slot_ctx->tt_info;
	in_slot_ctx->dev_state = out_slot_ctx->dev_state;
}
/**
 * Setup an xHCI virtual device for a Set Address command
 *
 * @param ctrl		host controller data structure
 * @param slot_id	slot ID of the device
 * @param speed		speed of the device
 * @param hop_portnr	root hub port number the device is attached to
 * @return none
 */
void xhci_setup_addressable_virt_dev(struct xhci_ctrl *ctrl, int slot_id,
				     int speed, int hop_portnr)
{
	struct xhci_virt_device *virt_dev;
	struct xhci_ep_ctx *ep0_ctx;
	struct xhci_slot_ctx *slot_ctx;
	u32 port_num = 0;
	u64 trb_64 = 0;

	virt_dev = ctrl->devs[slot_id];

	BUG_ON(!virt_dev);

	/* Extract the EP0 and Slot Ctrl */
	ep0_ctx = xhci_get_ep_ctx(ctrl, virt_dev->in_ctx, 0);
	slot_ctx = xhci_get_slot_ctx(ctrl, virt_dev->in_ctx);

	/* Only the control endpoint is valid - one endpoint context */
	slot_ctx->dev_info |= cpu_to_le32(LAST_CTX(1) | 0);

	switch (speed) {
	case USB_SPEED_SUPER:
		slot_ctx->dev_info |= cpu_to_le32(SLOT_SPEED_SS);
		break;
	case USB_SPEED_HIGH:
		slot_ctx->dev_info |= cpu_to_le32(SLOT_SPEED_HS);
		break;
	case USB_SPEED_FULL:
		slot_ctx->dev_info |= cpu_to_le32(SLOT_SPEED_FS);
		break;
	case USB_SPEED_LOW:
		slot_ctx->dev_info |= cpu_to_le32(SLOT_SPEED_LS);
		break;
	default:
		/* Speed was set earlier, this shouldn't happen. */
		BUG();
	}

	port_num = hop_portnr;
	debug("port_num = %d\n", port_num);

	slot_ctx->dev_info2 |=
			cpu_to_le32(((port_num & ROOT_HUB_PORT_MASK) <<
				ROOT_HUB_PORT_SHIFT));

	/* Step 4 - ring already allocated */
	/* Step 5 */
	ep0_ctx->ep_info2 = cpu_to_le32(CTRL_EP << EP_TYPE_SHIFT);
	debug("SPEED = %d\n", speed);

	switch (speed) {
	case USB_SPEED_SUPER:
		ep0_ctx->ep_info2 |= cpu_to_le32(((512 & MAX_PACKET_MASK) <<
					MAX_PACKET_SHIFT));
		debug("Setting Packet size = 512bytes\n");
		break;
	case USB_SPEED_HIGH:
	/* USB core guesses at a 64-byte max packet first for FS devices */
	case USB_SPEED_FULL:
		ep0_ctx->ep_info2 |= cpu_to_le32(((64 & MAX_PACKET_MASK) <<
					MAX_PACKET_SHIFT));
		debug("Setting Packet size = 64bytes\n");
		break;
	case USB_SPEED_LOW:
		ep0_ctx->ep_info2 |= cpu_to_le32(((8 & MAX_PACKET_MASK) <<
					MAX_PACKET_SHIFT));
		debug("Setting Packet size = 8bytes\n");
		break;
	default:
		/* New speed? */
		BUG();
	}

	/* EP 0 can handle "burst" sizes of 1, so Max Burst Size field is 0 */
	ep0_ctx->ep_info2 |=
			cpu_to_le32(((0 & MAX_BURST_MASK) << MAX_BURST_SHIFT) |
			((3 & ERROR_COUNT_MASK) << ERROR_COUNT_SHIFT));

	trb_64 = (uintptr_t)virt_dev->eps[0].ring->first_seg->trbs;
	ep0_ctx->deq = cpu_to_le64(trb_64 | virt_dev->eps[0].ring->cycle_state);

	/* Steps 7 and 8 were done in xhci_alloc_virt_device() */

	xhci_flush_cache((uintptr_t)ep0_ctx, sizeof(struct xhci_ep_ctx));
	xhci_flush_cache((uintptr_t)slot_ctx, sizeof(struct xhci_slot_ctx));
}
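/*
 * Flow sketch (illustrative only; the exact caller is in the generic xHCI
 * stack rather than this file): once the input slot and EP0 contexts have
 * been filled in above, the caller issues an Address Device command whose
 * input context pointer is virt_dev->in_ctx->bytes, which is how these
 * values reach the controller.
 */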