Patch 2.6.32.33 to 2.6.32.35
/drivers/usb/host/xhci-ring.c
blob:38fb6820489d351664bb8fa6b7cfc8aa769217c3 -> blob:317e8dc7c4f30513d61e511e8e486fe239928e29
--- drivers/usb/host/xhci-ring.c
+++ drivers/usb/host/xhci-ring.c
@@ -124,7 +124,7 @@ static void next_trb(struct xhci_hcd *xh
if (last_trb(xhci, ring, *seg, *trb)) {
*seg = (*seg)->next;
*trb = ((*seg)->trbs);
} else {
- (*trb)++;
+ *trb = (*trb)++;
}
}
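Note on this hunk: the new body, *trb = (*trb)++;, modifies *trb twice with no intervening sequence point (once via the postfix increment, once via the assignment), which is undefined behavior in C; gcc's -Wsequence-point diagnoses it as "operation on '*trb' may be undefined". The removed (*trb)++; is the well-defined way to advance to the next TRB. A minimal userspace sketch, with a plain int ring standing in for the kernel's union xhci_trb:

    /* Sketch: an int ring instead of union xhci_trb. "p = p++" writes
     * p twice with no sequence point between the side effects, so the
     * final value of p is undefined. */
    #include <stdio.h>

    int main(void)
    {
        int ring[4] = { 10, 20, 30, 40 };
        int *trb = ring;

        trb++;                  /* well-defined: now points at ring[1] */
        /* trb = trb++;   <-- undefined; gcc -Wsequence-point warns */
        printf("%d\n", *trb);   /* prints 20 */
        return 0;
    }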
@@ -391,11 +391,8 @@ void xhci_find_new_dequeue_state(struct
state->new_deq_seg = find_trb_seg(cur_td->start_seg,
dev->eps[ep_index].stopped_trb,
&state->new_cycle_state);
- if (!state->new_deq_seg) {
- WARN_ON(1);
- return;
- }
-
+ if (!state->new_deq_seg)
+ BUG();
/* Dig out the cycle state saved by the xHC during the stop ep cmd */
xhci_dbg(xhci, "Finding endpoint context\n");
ep_ctx = xhci_get_ep_ctx(xhci, dev->out_ctx, ep_index);
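The removed block warned and returned; the replacement calls BUG(), which oopses and kills the current context. xhci_find_new_dequeue_state() sits on the stalled-endpoint cleanup path, which can run in interrupt context, where an oops is fatal to the whole system rather than to one request. A userspace sketch of the two failure styles, using hypothetical helper names (abort() standing in for BUG(), a logged early return standing in for WARN_ON(1) plus return):

    #include <stdio.h>
    #include <stdlib.h>

    struct deq_state { void *new_deq_seg; };

    /* warn-and-return: log loudly, leave the state untouched, keep going */
    static void find_state_recoverable(struct deq_state *s)
    {
        if (!s->new_deq_seg) {
            fprintf(stderr, "WARN: no new dequeue segment\n");
            return;
        }
        /* ...normal lookup continues here... */
    }

    /* fatal: no way for the caller to continue */
    static void find_state_fatal(struct deq_state *s)
    {
        if (!s->new_deq_seg)
            abort();            /* analogous to BUG() */
        /* ...normal lookup continues here... */
    }

    int main(void)
    {
        struct deq_state s = { NULL };
        find_state_recoverable(&s);  /* prints a warning and returns */
        find_state_fatal(&s);        /* terminates the process */
        return 0;
    }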
@@ -406,10 +403,8 @@ void xhci_find_new_dequeue_state(struct
state->new_deq_seg = find_trb_seg(state->new_deq_seg,
state->new_deq_ptr,
&state->new_cycle_state);
- if (!state->new_deq_seg) {
- WARN_ON(1);
- return;
- }
+ if (!state->new_deq_seg)
+ BUG();

trb = &state->new_deq_ptr->generic;
if ((trb->field[3] & TRB_TYPE_BITMASK) == TRB_TYPE(TRB_LINK) &&
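The same substitution is applied to the second find_trb_seg() lookup: the dequeue pointer just past the stalled TD may live in another segment, so the search runs again, and a NULL result here now also hits BUG() instead of the warn-and-return path above.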
@@ -417,20 +412,6 @@ void xhci_find_new_dequeue_state(struct
state->new_cycle_state = ~(state->new_cycle_state) & 0x1;
next_trb(xhci, ep_ring, &state->new_deq_seg, &state->new_deq_ptr);

- /*
- * If there is only one segment in a ring, find_trb_seg()'s while loop
- * will not run, and it will return before it has a chance to see if it
- * needs to toggle the cycle bit. It can't tell if the stalled transfer
- * ended just before the link TRB on a one-segment ring, or if the TD
- * wrapped around the top of the ring, because it doesn't have the TD in
- * question. Look for the one-segment case where stalled TRB's address
- * is greater than the new dequeue pointer address.
- */
- if (ep_ring->first_seg == ep_ring->first_seg->next &&
- state->new_deq_ptr < dev->eps[ep_index].stopped_trb)
- state->new_cycle_state ^= 0x1;
- xhci_dbg(xhci, "Cycle state = 0x%x\n", state->new_cycle_state);
-
/* Don't update the ring cycle state for the producer (us). */
xhci_dbg(xhci, "New dequeue segment = %p (virtual)\n",
state->new_deq_seg);
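The block removed here compensated for a blind spot in find_trb_seg(), as the deleted comment explains: on a one-segment ring its while loop never executes, so it cannot tell whether the stalled TD wrapped past the link TRB (which is what toggles the cycle bit). The deleted check inferred the wrap from addresses alone: a new dequeue pointer numerically below the stopped TRB on a one-segment ring can only mean the TD wrapped, so the cycle state was toggled once more. A standalone sketch of that address comparison, with made-up segment size and positions:

    #include <stdio.h>

    #define SEG_TRBS 16

    int main(void)
    {
        int ring[SEG_TRBS];             /* one-segment ring */
        int *stopped_trb = &ring[14];   /* where the xHC stopped */
        int *new_deq_ptr = &ring[2];    /* next TRB after the stalled TD */
        int cycle_state = 1;

        /* dequeue pointer behind the stopped TRB: the TD wrapped past
         * the link TRB, so toggle the consumer cycle state once */
        if (new_deq_ptr < stopped_trb)
            cycle_state ^= 0x1;

        printf("Cycle state = 0x%x\n", cycle_state);  /* prints 0x0 */
        return 0;
    }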
@@ -1504,13 +1485,12 @@ static unsigned int count_sg_trbs_needed

/* Scatter gather list entries may cross 64KB boundaries */
running_total = TRB_MAX_BUFF_SIZE -
- (sg_dma_address(sg) & (TRB_MAX_BUFF_SIZE - 1));
- running_total &= TRB_MAX_BUFF_SIZE - 1;
+ (sg_dma_address(sg) & ((1 << TRB_MAX_BUFF_SHIFT) - 1));
if (running_total != 0)
num_trbs++;

/* How many more 64KB chunks to transfer, how many more TRBs? */
- while (running_total < sg_dma_len(sg) && running_total < temp) {
+ while (running_total < sg_dma_len(sg)) {
num_trbs++;
running_total += TRB_MAX_BUFF_SIZE;
}
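Two behavioral changes in this hunk. First, the dropped "running_total &= TRB_MAX_BUFF_SIZE - 1;" folded the 64KB-aligned case back to zero; without it, an sg entry that starts exactly on a 64KB boundary leaves running_total at TRB_MAX_BUFF_SIZE, so the "running_total != 0" test sees a phantom partial first chunk. Second, the dropped "&& running_total < temp" bound (temp is loaded from urb->transfer_buffer_length earlier in count_sg_trbs_needed()) stopped the TRB count from running past the length actually being transferred when the scatter list is longer than the request. The aligned-address arithmetic, assuming the 2.6.32-era xhci.h constants:

    #include <stdio.h>

    #define TRB_MAX_BUFF_SHIFT  16
    #define TRB_MAX_BUFF_SIZE   (1 << TRB_MAX_BUFF_SHIFT)

    int main(void)
    {
        unsigned long long addr = 0x20000;  /* 64KB-aligned sg address */
        unsigned int running_total;

        running_total = TRB_MAX_BUFF_SIZE -
            (unsigned int) (addr & (TRB_MAX_BUFF_SIZE - 1));
        printf("without mask: 0x%x\n", running_total);  /* 0x10000 */

        running_total &= TRB_MAX_BUFF_SIZE - 1;
        printf("with mask:    0x%x\n", running_total);  /* 0x0 */
        return 0;
    }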
@@ -1535,11 +1515,11 @@ static unsigned int count_sg_trbs_needed
static void check_trb_math(struct urb *urb, int num_trbs, int running_total)
{
if (num_trbs != 0)
- dev_err(&urb->dev->dev, "%s - ep %#x - Miscalculated number of "
+ dev_dbg(&urb->dev->dev, "%s - ep %#x - Miscalculated number of "
"TRBs, %d left\n", __func__,
urb->ep->desc.bEndpointAddress, num_trbs);
if (running_total != urb->transfer_buffer_length)
- dev_err(&urb->dev->dev, "%s - ep %#x - Miscalculated tx length, "
+ dev_dbg(&urb->dev->dev, "%s - ep %#x - Miscalculated tx length, "
"queued %#x (%d), asked for %#x (%d)\n",
__func__,
urb->ep->desc.bEndpointAddress,
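Log-level note: dev_dbg() compiles to nothing unless DEBUG (or dynamic debug) is enabled for this file, while dev_err() always reaches the kernel log at KERN_ERR. After this hunk, both TRB-math sanity checks are silent on ordinary production builds.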
@@ -1646,7 +1626,8 @@ static int queue_bulk_sg_tx(struct xhci_
sg = urb->sg->sg;
addr = (u64) sg_dma_address(sg);
this_sg_len = sg_dma_len(sg);
- trb_buff_len = TRB_MAX_BUFF_SIZE - (addr & (TRB_MAX_BUFF_SIZE - 1));
+ trb_buff_len = TRB_MAX_BUFF_SIZE -
+ (addr & ((1 << TRB_MAX_BUFF_SHIFT) - 1));
trb_buff_len = min_t(int, trb_buff_len, this_sg_len);
if (trb_buff_len > urb->transfer_buffer_length)
trb_buff_len = urb->transfer_buffer_length;
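The mask rewrite in this and the following hunks is value-preserving: in the 2.6.32-era xhci.h, TRB_MAX_BUFF_SIZE is defined as (1 << TRB_MAX_BUFF_SHIFT) and TRB_MAX_BUFF_SHIFT is 16, so both spellings mask off the low 16 bits. A quick self-check under that assumption:

    #include <assert.h>

    #define TRB_MAX_BUFF_SHIFT  16
    #define TRB_MAX_BUFF_SIZE   (1 << TRB_MAX_BUFF_SHIFT)

    int main(void)
    {
        unsigned long long addr = 0x12345678abcdULL;

        /* both forms mask with 0xffff, so they always agree */
        assert((addr & (TRB_MAX_BUFF_SIZE - 1)) ==
               (addr & ((1 << TRB_MAX_BUFF_SHIFT) - 1)));
        return 0;
    }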
@@ -1681,7 +1662,7 @@ static int queue_bulk_sg_tx(struct xhci_
(unsigned int) (addr + TRB_MAX_BUFF_SIZE) & ~(TRB_MAX_BUFF_SIZE - 1),
(unsigned int) addr + trb_buff_len);
if (TRB_MAX_BUFF_SIZE -
- (addr & (TRB_MAX_BUFF_SIZE - 1)) < trb_buff_len) {
+ (addr & ((1 << TRB_MAX_BUFF_SHIFT) - 1)) < trb_buff_len) {
xhci_warn(xhci, "WARN: sg dma xfer crosses 64KB boundaries!\n");
xhci_dbg(xhci, "Next boundary at %#x, end dma = %#x\n",
(unsigned int) (addr + TRB_MAX_BUFF_SIZE) & ~(TRB_MAX_BUFF_SIZE - 1),
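Same equivalent-mask rewrite, here in the check that warns when a single TRB would cross a 64KB boundary; the xhci_warn()/xhci_dbg() output is unchanged.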
@@ -1719,7 +1700,7 @@ static int queue_bulk_sg_tx(struct xhci_
}

trb_buff_len = TRB_MAX_BUFF_SIZE -
- (addr & (TRB_MAX_BUFF_SIZE - 1));
+ (addr & ((1 << TRB_MAX_BUFF_SHIFT) - 1));
trb_buff_len = min_t(int, trb_buff_len, this_sg_len);
if (running_total + trb_buff_len > urb->transfer_buffer_length)
trb_buff_len =
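And once more for the per-iteration trb_buff_len recalculation at the bottom of the queueing loop; only the spelling of the mask changes.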
@@ -1754,8 +1735,7 @@ int xhci_queue_bulk_tx(struct xhci_hcd *
num_trbs = 0;
/* How much data is (potentially) left before the 64KB boundary? */
running_total = TRB_MAX_BUFF_SIZE -
- (urb->transfer_dma & (TRB_MAX_BUFF_SIZE - 1));
- running_total &= TRB_MAX_BUFF_SIZE - 1;
+ (urb->transfer_dma & ((1 << TRB_MAX_BUFF_SHIFT) - 1));

/* If there's some data on this 64KB chunk, or we have to send a
* zero-length transfer, we need at least one TRB
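As in count_sg_trbs_needed() above, dropping "running_total &= TRB_MAX_BUFF_SIZE - 1;" leaves running_total at TRB_MAX_BUFF_SIZE rather than 0 when urb->transfer_dma is 64KB-aligned, so the at-least-one-TRB logic below this comment sees a phantom partial chunk; the worked example after the count_sg_trbs_needed() hunk applies here unchanged.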
@@ -1794,8 +1774,8 @@ int xhci_queue_bulk_tx(struct xhci_hcd *
/* How much data is in the first TRB? */
addr = (u64) urb->transfer_dma;
trb_buff_len = TRB_MAX_BUFF_SIZE -
- (urb->transfer_dma & (TRB_MAX_BUFF_SIZE - 1));
- if (trb_buff_len > urb->transfer_buffer_length)
+ (urb->transfer_dma & ((1 << TRB_MAX_BUFF_SHIFT) - 1));
+ if (urb->transfer_buffer_length < trb_buff_len)
trb_buff_len = urb->transfer_buffer_length;

first_trb = true;
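The final hunk pairs the same mask rewrite with a purely stylistic flip of the clamp: "urb->transfer_buffer_length < trb_buff_len" and "trb_buff_len > urb->transfer_buffer_length" test the same condition, so only the mask spelling actually changes here.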