xHCI: store ring's type
When allocating a ring, store its type — one of the four transfer types for endpoint rings, TYPE_STREAM for stream transfers, and TYPE_COMMAND/TYPE_EVENT for the xHCI host's own rings. Storing the type lets us get rid of three bool function parameters: link_trbs, isoc, and consumer. Signed-off-by: Andiry Xu <andiry.xu@amd.com> Signed-off-by: Sarah Sharp <sarah.a.sharp@linux.intel.com> Tested-by: Paul Zimmerman <Paul.Zimmerman@synopsys.com>
This commit is contained in:
@@ -73,14 +73,14 @@ static void xhci_segment_free(struct xhci_hcd *xhci, struct xhci_segment *seg)
|
||||
* related flags, such as End TRB, Toggle Cycle, and no snoop.
|
||||
*/
|
||||
static void xhci_link_segments(struct xhci_hcd *xhci, struct xhci_segment *prev,
|
||||
struct xhci_segment *next, bool link_trbs, bool isoc)
|
||||
struct xhci_segment *next, enum xhci_ring_type type)
|
||||
{
|
||||
u32 val;
|
||||
|
||||
if (!prev || !next)
|
||||
return;
|
||||
prev->next = next;
|
||||
if (link_trbs) {
|
||||
if (type != TYPE_EVENT) {
|
||||
prev->trbs[TRBS_PER_SEGMENT-1].link.segment_ptr =
|
||||
cpu_to_le64(next->dma);
|
||||
|
||||
@@ -91,7 +91,8 @@ static void xhci_link_segments(struct xhci_hcd *xhci, struct xhci_segment *prev,
|
||||
/* Always set the chain bit with 0.95 hardware */
|
||||
/* Set chain bit for isoc rings on AMD 0.96 host */
|
||||
if (xhci_link_trb_quirk(xhci) ||
|
||||
(isoc && (xhci->quirks & XHCI_AMD_0x96_HOST)))
|
||||
(type == TYPE_ISOC &&
|
||||
(xhci->quirks & XHCI_AMD_0x96_HOST)))
|
||||
val |= TRB_CHAIN;
|
||||
prev->trbs[TRBS_PER_SEGMENT-1].link.control = cpu_to_le32(val);
|
||||
}
|
||||
@@ -144,7 +145,7 @@ static void xhci_initialize_ring_info(struct xhci_ring *ring)
|
||||
* See section 4.9.1 and figures 15 and 16.
|
||||
*/
|
||||
static struct xhci_ring *xhci_ring_alloc(struct xhci_hcd *xhci,
|
||||
unsigned int num_segs, bool link_trbs, bool isoc, gfp_t flags)
|
||||
unsigned int num_segs, enum xhci_ring_type type, gfp_t flags)
|
||||
{
|
||||
struct xhci_ring *ring;
|
||||
struct xhci_segment *prev;
|
||||
@@ -154,6 +155,7 @@ static struct xhci_ring *xhci_ring_alloc(struct xhci_hcd *xhci,
|
||||
return NULL;
|
||||
|
||||
INIT_LIST_HEAD(&ring->td_list);
|
||||
ring->type = type;
|
||||
if (num_segs == 0)
|
||||
return ring;
|
||||
|
||||
@@ -169,14 +171,15 @@ static struct xhci_ring *xhci_ring_alloc(struct xhci_hcd *xhci,
|
||||
next = xhci_segment_alloc(xhci, flags);
|
||||
if (!next)
|
||||
goto fail;
|
||||
xhci_link_segments(xhci, prev, next, link_trbs, isoc);
|
||||
xhci_link_segments(xhci, prev, next, type);
|
||||
|
||||
prev = next;
|
||||
num_segs--;
|
||||
}
|
||||
xhci_link_segments(xhci, prev, ring->first_seg, link_trbs, isoc);
|
||||
xhci_link_segments(xhci, prev, ring->first_seg, type);
|
||||
|
||||
if (link_trbs) {
|
||||
/* Only event ring does not use link TRB */
|
||||
if (type != TYPE_EVENT) {
|
||||
/* See section 4.9.2.1 and 6.4.4.1 */
|
||||
prev->trbs[TRBS_PER_SEGMENT-1].link.control |=
|
||||
cpu_to_le32(LINK_TOGGLE);
|
||||
@@ -217,16 +220,17 @@ void xhci_free_or_cache_endpoint_ring(struct xhci_hcd *xhci,
|
||||
* pointers to the beginning of the ring.
|
||||
*/
|
||||
static void xhci_reinit_cached_ring(struct xhci_hcd *xhci,
|
||||
struct xhci_ring *ring, bool isoc)
|
||||
struct xhci_ring *ring, enum xhci_ring_type type)
|
||||
{
|
||||
struct xhci_segment *seg = ring->first_seg;
|
||||
do {
|
||||
memset(seg->trbs, 0,
|
||||
sizeof(union xhci_trb)*TRBS_PER_SEGMENT);
|
||||
/* All endpoint rings have link TRBs */
|
||||
xhci_link_segments(xhci, seg, seg->next, 1, isoc);
|
||||
xhci_link_segments(xhci, seg, seg->next, type);
|
||||
seg = seg->next;
|
||||
} while (seg != ring->first_seg);
|
||||
ring->type = type;
|
||||
xhci_initialize_ring_info(ring);
|
||||
/* td list should be empty since all URBs have been cancelled,
|
||||
* but just in case...
|
||||
@@ -528,7 +532,7 @@ struct xhci_stream_info *xhci_alloc_stream_info(struct xhci_hcd *xhci,
|
||||
*/
|
||||
for (cur_stream = 1; cur_stream < num_streams; cur_stream++) {
|
||||
stream_info->stream_rings[cur_stream] =
|
||||
xhci_ring_alloc(xhci, 1, true, false, mem_flags);
|
||||
xhci_ring_alloc(xhci, 1, TYPE_STREAM, mem_flags);
|
||||
cur_ring = stream_info->stream_rings[cur_stream];
|
||||
if (!cur_ring)
|
||||
goto cleanup_rings;
|
||||
@@ -862,7 +866,7 @@ int xhci_alloc_virt_device(struct xhci_hcd *xhci, int slot_id,
|
||||
}
|
||||
|
||||
/* Allocate endpoint 0 ring */
|
||||
dev->eps[0].ring = xhci_ring_alloc(xhci, 1, true, false, flags);
|
||||
dev->eps[0].ring = xhci_ring_alloc(xhci, 1, TYPE_CTRL, flags);
|
||||
if (!dev->eps[0].ring)
|
||||
goto fail;
|
||||
|
||||
@@ -1300,11 +1304,13 @@ int xhci_endpoint_init(struct xhci_hcd *xhci,
|
||||
struct xhci_ring *ep_ring;
|
||||
unsigned int max_packet;
|
||||
unsigned int max_burst;
|
||||
enum xhci_ring_type type;
|
||||
u32 max_esit_payload;
|
||||
|
||||
ep_index = xhci_get_endpoint_index(&ep->desc);
|
||||
ep_ctx = xhci_get_ep_ctx(xhci, virt_dev->in_ctx, ep_index);
|
||||
|
||||
type = usb_endpoint_type(&ep->desc);
|
||||
/* Set up the endpoint ring */
|
||||
/*
|
||||
* Isochronous endpoint ring needs bigger size because one isoc URB
|
||||
@@ -1314,10 +1320,10 @@ int xhci_endpoint_init(struct xhci_hcd *xhci,
|
||||
*/
|
||||
if (usb_endpoint_xfer_isoc(&ep->desc))
|
||||
virt_dev->eps[ep_index].new_ring =
|
||||
xhci_ring_alloc(xhci, 8, true, true, mem_flags);
|
||||
xhci_ring_alloc(xhci, 8, type, mem_flags);
|
||||
else
|
||||
virt_dev->eps[ep_index].new_ring =
|
||||
xhci_ring_alloc(xhci, 1, true, false, mem_flags);
|
||||
xhci_ring_alloc(xhci, 1, type, mem_flags);
|
||||
if (!virt_dev->eps[ep_index].new_ring) {
|
||||
/* Attempt to use the ring cache */
|
||||
if (virt_dev->num_rings_cached == 0)
|
||||
@@ -1327,7 +1333,7 @@ int xhci_endpoint_init(struct xhci_hcd *xhci,
|
||||
virt_dev->ring_cache[virt_dev->num_rings_cached] = NULL;
|
||||
virt_dev->num_rings_cached--;
|
||||
xhci_reinit_cached_ring(xhci, virt_dev->eps[ep_index].new_ring,
|
||||
usb_endpoint_xfer_isoc(&ep->desc) ? true : false);
|
||||
type);
|
||||
}
|
||||
virt_dev->eps[ep_index].skip = false;
|
||||
ep_ring = virt_dev->eps[ep_index].new_ring;
|
||||
@@ -2235,7 +2241,7 @@ int xhci_mem_init(struct xhci_hcd *xhci, gfp_t flags)
|
||||
goto fail;
|
||||
|
||||
/* Set up the command ring to have one segments for now. */
|
||||
xhci->cmd_ring = xhci_ring_alloc(xhci, 1, true, false, flags);
|
||||
xhci->cmd_ring = xhci_ring_alloc(xhci, 1, TYPE_COMMAND, flags);
|
||||
if (!xhci->cmd_ring)
|
||||
goto fail;
|
||||
xhci_dbg(xhci, "Allocated command ring at %p\n", xhci->cmd_ring);
|
||||
@@ -2266,7 +2272,7 @@ int xhci_mem_init(struct xhci_hcd *xhci, gfp_t flags)
|
||||
* the event ring segment table (ERST). Section 4.9.3.
|
||||
*/
|
||||
xhci_dbg(xhci, "// Allocating event ring\n");
|
||||
xhci->event_ring = xhci_ring_alloc(xhci, ERST_NUM_SEGS, false, false,
|
||||
xhci->event_ring = xhci_ring_alloc(xhci, ERST_NUM_SEGS, TYPE_EVENT,
|
||||
flags);
|
||||
if (!xhci->event_ring)
|
||||
goto fail;
|
||||
|
Reference in New Issue
Block a user