Changeset 14409


Timestamp: 2009-02-04T19:00:13+01:00
Author: claudio
Message: [etrax] Update usb host driver as in Axis SDK2.20

Location: trunk/target/linux/etrax
Files: 1 edited, 2 moved

  • trunk/target/linux/etrax/files/drivers/usb/host/hc-crisv10.c

    r14402 r14409  
    11/* 
    2  * usb-host.c: ETRAX 100LX USB Host Controller Driver (HCD) 
    32 * 
    4  * Copyright (c) 2002, 2003 Axis Communications AB. 
     3 * ETRAX 100LX USB Host Controller Driver 
     4 * 
     5 * Copyright (C) 2005 - 2008  Axis Communications AB 
     6 * 
     7 * Author: Konrad Eriksson <konrad.eriksson@axis.se> 
     8 * 
    59 */ 
    610 
     11#include <linux/module.h> 
    712#include <linux/kernel.h> 
    8 #include <linux/delay.h> 
    9 #include <linux/ioport.h> 
    10 #include <linux/slab.h> 
    11 #include <linux/errno.h> 
    12 #include <linux/unistd.h> 
    13 #include <linux/interrupt.h> 
    1413#include <linux/init.h> 
    15 #include <linux/list.h> 
     14#include <linux/moduleparam.h> 
    1615#include <linux/spinlock.h> 
    17  
    18 #include <asm/uaccess.h> 
     16#include <linux/usb.h> 
     17#include <linux/platform_device.h> 
     18 
    1919#include <asm/io.h> 
    2020#include <asm/irq.h> 
    21 #include <asm/dma.h> 
    22 #include <asm/system.h> 
    23 #include <asm/arch/svinto.h> 
    24  
    25 #include <linux/usb.h> 
    26 /* Ugly include because we don't live with the other host drivers. */ 
    27 #include <../drivers/usb/core/hcd.h> 
    28 #include <../drivers/usb/core/usb.h> 
    29  
    30 #include "hc_crisv10.h" 
     21#include <asm/arch/dma.h> 
     22#include <asm/arch/io_interface_mux.h> 
     23 
     24#include "../core/hcd.h" 
     25#include "../core/hub.h" 
     26#include "hc-crisv10.h" 
     27#include "hc-cris-dbg.h" 
     28 
     29 
     30/***************************************************************************/ 
     31/***************************************************************************/ 
     32/* Host Controller settings                                                */ 
     33/***************************************************************************/ 
     34/***************************************************************************/ 
     35 
     36#define VERSION                 "1.00-openwrt_diff" 
     37#define COPYRIGHT               "(c) 2005, 2006 Axis Communications AB" 
     38#define DESCRIPTION             "ETRAX 100LX USB Host Controller" 
    3139 
    3240#define ETRAX_USB_HC_IRQ USB_HC_IRQ_NBR 
     
    3442#define ETRAX_USB_TX_IRQ USB_DMA_TX_IRQ_NBR 
    3543 
    36 static const char *usb_hcd_version = "$Revision: 1.2 $"; 
    37  
    38 #undef KERN_DEBUG 
    39 #define KERN_DEBUG "" 
    40  
    41  
    42 #undef USB_DEBUG_RH 
    43 #undef USB_DEBUG_EPID 
    44 #undef USB_DEBUG_SB 
    45 #undef USB_DEBUG_DESC 
    46 #undef USB_DEBUG_URB 
    47 #undef USB_DEBUG_TRACE 
    48 #undef USB_DEBUG_BULK 
    49 #undef USB_DEBUG_CTRL 
    50 #undef USB_DEBUG_INTR 
    51 #undef USB_DEBUG_ISOC 
    52  
    53 #ifdef USB_DEBUG_RH 
    54 #define dbg_rh(format, arg...) printk(KERN_DEBUG __FILE__ ": (RH) " format "\n" , ## arg) 
     44/* Number of physical ports in Etrax 100LX */ 
     45#define USB_ROOT_HUB_PORTS 2 
     46 
     47const char hc_name[] = "hc-crisv10"; 
     48const char product_desc[] = DESCRIPTION; 
     49 
     50/* The number of epids is, among other things, used for pre-allocating 
     51   ctrl, bulk and isoc EP descriptors (one for each epid). 
     52   Assumed to be > 1 when initiating the DMA lists. */ 
     53#define NBR_OF_EPIDS       32 
     54 
     55/* Support interrupt traffic intervals up to 128 ms. */ 
     56#define MAX_INTR_INTERVAL  128 
     57 
     58/* If periodic traffic (intr or isoc) is to be used, then one entry in the EP 
     59   table must be "invalid". By this we mean that we shouldn't care about epid 
     60   attentions for this epid, or at least handle them differently from epid 
     61   attentions for "valid" epids. This define determines which one to use 
     62   (don't change it). */ 
     63#define INVALID_EPID       31 
     65/* A special epid for the bulk dummies. */ 
     65#define DUMMY_EPID         30 
     66 
     67/* Module settings */ 
     68 
     69MODULE_DESCRIPTION(DESCRIPTION); 
     70MODULE_LICENSE("GPL"); 
     71MODULE_AUTHOR("Konrad Eriksson <konrad.eriksson@axis.se>"); 
     72 
     73 
     74/* Module parameters */ 
     75 
     76/* 0 = No ports enabled 
     77   1 = Only port 1 enabled (on board ethernet on devboard) 
     78   2 = Only port 2 enabled (external connector on devboard) 
     79   3 = Both ports enabled 
     80*/ 
     81static unsigned int ports = 3; 
     82module_param(ports, uint, S_IRUGO); 
     83MODULE_PARM_DESC(ports, "Bitmask indicating USB ports to use"); 
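      /* Usage sketch (editorial illustration; the module file name is an
         assumption, not taken from this changeset): the bitmask can be set at
         load time, e.g.
           insmod hc-crisv10.ko ports=1   (enable only port 1)
           insmod hc-crisv10.ko ports=3   (enable both ports, the default)
         Since the parameter is declared with S_IRUGO, the chosen value is also
         readable under /sys/module/<module>/parameters/ports. */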
     84 
     85 
     86/***************************************************************************/ 
     87/***************************************************************************/ 
     88/* Shared global variables for this module                                 */ 
     89/***************************************************************************/ 
     90/***************************************************************************/ 
     91 
      92/* EP descriptor lists for non-periodic transfers. Must be 32-bit aligned. */ 
     93static volatile struct USB_EP_Desc TxBulkEPList[NBR_OF_EPIDS] __attribute__ ((aligned (4))); 
     94 
     95static volatile struct USB_EP_Desc TxCtrlEPList[NBR_OF_EPIDS] __attribute__ ((aligned (4))); 
     96 
      97/* EP descriptor lists for periodic transfers. Must be 32-bit aligned. */ 
     98static volatile struct USB_EP_Desc TxIntrEPList[MAX_INTR_INTERVAL] __attribute__ ((aligned (4))); 
     99static volatile struct USB_SB_Desc TxIntrSB_zout __attribute__ ((aligned (4))); 
     100 
     101static volatile struct USB_EP_Desc TxIsocEPList[NBR_OF_EPIDS] __attribute__ ((aligned (4))); 
     102static volatile struct USB_SB_Desc TxIsocSB_zout __attribute__ ((aligned (4))); 
     103 
     104static volatile struct USB_SB_Desc TxIsocSBList[NBR_OF_EPIDS] __attribute__ ((aligned (4)));  
     105 
     106/* After each enabled bulk EP IN we put two disabled EP descriptors with the eol flag set, 
      107   causing the DMA to stop the channel. The first of these two has the intr flag set, which 
     108   gives us a dma8_sub0_descr interrupt. When we receive this, we advance the DMA one step in the 
     109   EP list and then restart the bulk channel, thus forcing a switch between bulk EP descriptors 
     110   in each frame. */ 
     111static volatile struct USB_EP_Desc TxBulkDummyEPList[NBR_OF_EPIDS][2] __attribute__ ((aligned (4))); 
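      /* Layout sketch (editorial note, derived from the comment above): for each
         epid i, TxBulkDummyEPList[i][0] is disabled with both the intr and eol
         flags set, and TxBulkDummyEPList[i][1] is disabled with only eol set, so
         the DMA raises one dma8_sub0_descr interrupt and then stops until the
         handler advances the EP list and restarts the bulk channel. */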
     112 
      113/* List of URB pointers, where each entry points to the active URB for an epid. 
      114   For Bulk, Ctrl and Intr this is the URB that is currently added to the DMA 
      115   lists (Isoc URBs are all added directly to the DMA lists). As soon as a URB 
      116   has completed, the queue is examined and the first URB in the queue is 
      117   removed and moved to activeUrbList while its state changes to STARTED and 
      118   its transfer(s) get added to the DMA list (except for Isoc, where URBs enter 
      119   state STARTED directly and their transfers are added to the DMA lists). */ 
     120static struct urb *activeUrbList[NBR_OF_EPIDS]; 
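      /* Flow sketch (editorial illustration, using the helpers declared further
         down in this file):
           tc_urb_enqueue()         URB is queued for its epid
           urb_list_first()/del()   when the previous URB finishes, the first
                                    queued URB is taken off the queue
           activeUrbList[epid]=urb  and urb_priv->urb_state becomes STARTED
           create_sb_for_urb()      its SBs are linked into the Tx*EPList DMA list
         Isoc URBs skip the queue and are added to the DMA lists directly. */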
     121 
     122/* Additional software state info for each epid */ 
     123static struct etrax_epid epid_state[NBR_OF_EPIDS]; 
     124 
      125/* Timer handles for the bulk traffic timers, used to work around a DMA bug 
      126   where the DMA stops even if there is new data waiting to be processed */ 
     127static struct timer_list bulk_start_timer = TIMER_INITIALIZER(NULL, 0, 0); 
     128static struct timer_list bulk_eot_timer = TIMER_INITIALIZER(NULL, 0, 0); 
     129 
     130/* We want the start timer to expire before the eot timer, because the former 
     131   might start traffic, thus making it unnecessary for the latter to time 
     132   out. */ 
     133#define BULK_START_TIMER_INTERVAL (HZ/50) /* 20 ms */ 
     134#define BULK_EOT_TIMER_INTERVAL (HZ/16) /* 60 ms */ 
     135 
      137/* Delay before a URB completion happens when it's scheduled to be delayed */ 
     137#define LATER_TIMER_DELAY (HZ/50) /* 20 ms */ 
     138 
      140/* Simplifying macros for checking software state info of an epid */ 
     140/* ----------------------------------------------------------------------- */ 
     141#define epid_inuse(epid)       epid_state[epid].inuse 
     142#define epid_out_traffic(epid) epid_state[epid].out_traffic 
     143#define epid_isoc(epid)   (epid_state[epid].type == PIPE_ISOCHRONOUS ? 1 : 0) 
     144#define epid_intr(epid)   (epid_state[epid].type == PIPE_INTERRUPT ? 1 : 0) 
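      /* Usage sketch (editorial): a typical guard in the interrupt handlers is
         "if (epid_inuse(epid) && !epid_isoc(epid))", i.e. only act on epids that
         are allocated and carry non-isochronous traffic. */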
     145 
     146 
     147/***************************************************************************/ 
     148/***************************************************************************/ 
     149/* DEBUG FUNCTIONS                                                         */ 
     150/***************************************************************************/ 
     151/***************************************************************************/ 
     152/* Note that these functions are always available in their "__" variants, 
      153   for use in error situations. The variants without "__" are controlled by 
     154   the USB_DEBUG_DESC/USB_DEBUG_URB macros. */ 
     155static void __dump_urb(struct urb* purb) 
     156{ 
     157  struct crisv10_urb_priv *urb_priv = purb->hcpriv; 
     158  int urb_num = -1; 
     159  if(urb_priv) { 
     160    urb_num = urb_priv->urb_num; 
     161  } 
     162  printk("\nURB:0x%x[%d]\n", (unsigned int)purb, urb_num); 
     163  printk("dev                   :0x%08lx\n", (unsigned long)purb->dev); 
     164  printk("pipe                  :0x%08x\n", purb->pipe); 
     165  printk("status                :%d\n", purb->status); 
     166  printk("transfer_flags        :0x%08x\n", purb->transfer_flags); 
     167  printk("transfer_buffer       :0x%08lx\n", (unsigned long)purb->transfer_buffer); 
     168  printk("transfer_buffer_length:%d\n", purb->transfer_buffer_length); 
     169  printk("actual_length         :%d\n", purb->actual_length); 
     170  printk("setup_packet          :0x%08lx\n", (unsigned long)purb->setup_packet); 
     171  printk("start_frame           :%d\n", purb->start_frame); 
     172  printk("number_of_packets     :%d\n", purb->number_of_packets); 
     173  printk("interval              :%d\n", purb->interval); 
     174  printk("error_count           :%d\n", purb->error_count); 
     175  printk("context               :0x%08lx\n", (unsigned long)purb->context); 
     176  printk("complete              :0x%08lx\n\n", (unsigned long)purb->complete); 
     177} 
     178 
     179static void __dump_in_desc(volatile struct USB_IN_Desc *in) 
     180{ 
     181  printk("\nUSB_IN_Desc at 0x%08lx\n", (unsigned long)in); 
     182  printk("  sw_len  : 0x%04x (%d)\n", in->sw_len, in->sw_len); 
     183  printk("  command : 0x%04x\n", in->command); 
     184  printk("  next    : 0x%08lx\n", in->next); 
     185  printk("  buf     : 0x%08lx\n", in->buf); 
     186  printk("  hw_len  : 0x%04x (%d)\n", in->hw_len, in->hw_len); 
     187  printk("  status  : 0x%04x\n\n", in->status); 
     188} 
     189 
     190static void __dump_sb_desc(volatile struct USB_SB_Desc *sb) 
     191{ 
     192  char tt = (sb->command & 0x30) >> 4; 
     193  char *tt_string; 
     194 
     195  switch (tt) { 
     196  case 0: 
     197    tt_string = "zout"; 
     198    break; 
     199  case 1: 
     200    tt_string = "in"; 
     201    break; 
     202  case 2: 
     203    tt_string = "out"; 
     204    break; 
     205  case 3: 
     206    tt_string = "setup"; 
     207    break; 
     208  default: 
     209    tt_string = "unknown (weird)"; 
     210  } 
     211 
     212  printk(" USB_SB_Desc at 0x%08lx ", (unsigned long)sb); 
     213  printk(" command:0x%04x (", sb->command); 
     214  printk("rem:%d ", (sb->command & 0x3f00) >> 8); 
     215  printk("full:%d ", (sb->command & 0x40) >> 6); 
     216  printk("tt:%d(%s) ", tt, tt_string); 
     217  printk("intr:%d ", (sb->command & 0x8) >> 3); 
     218  printk("eot:%d ", (sb->command & 0x2) >> 1); 
     219  printk("eol:%d)", sb->command & 0x1); 
     220  printk(" sw_len:0x%04x(%d)", sb->sw_len, sb->sw_len); 
     221  printk(" next:0x%08lx", sb->next); 
     222  printk(" buf:0x%08lx\n", sb->buf); 
     223} 
     224 
     225 
     226static void __dump_ep_desc(volatile struct USB_EP_Desc *ep) 
     227{ 
     228  printk("USB_EP_Desc at 0x%08lx ", (unsigned long)ep); 
     229  printk(" command:0x%04x (", ep->command); 
     230  printk("ep_id:%d ", (ep->command & 0x1f00) >> 8); 
     231  printk("enable:%d ", (ep->command & 0x10) >> 4); 
     232  printk("intr:%d ", (ep->command & 0x8) >> 3); 
     233  printk("eof:%d ", (ep->command & 0x2) >> 1); 
     234  printk("eol:%d)", ep->command & 0x1); 
     235  printk(" hw_len:0x%04x(%d)", ep->hw_len, ep->hw_len); 
     236  printk(" next:0x%08lx", ep->next); 
     237  printk(" sub:0x%08lx\n", ep->sub); 
     238} 
     239 
     240static inline void __dump_ep_list(int pipe_type) 
     241{ 
     242  volatile struct USB_EP_Desc *ep; 
     243  volatile struct USB_EP_Desc *first_ep; 
     244  volatile struct USB_SB_Desc *sb; 
     245 
     246  switch (pipe_type) 
     247    { 
     248    case PIPE_BULK: 
     249      first_ep = &TxBulkEPList[0]; 
     250      break; 
     251    case PIPE_CONTROL: 
     252      first_ep = &TxCtrlEPList[0]; 
     253      break; 
     254    case PIPE_INTERRUPT: 
     255      first_ep = &TxIntrEPList[0]; 
     256      break; 
     257    case PIPE_ISOCHRONOUS: 
     258      first_ep = &TxIsocEPList[0]; 
     259      break; 
     260    default: 
     261      warn("Cannot dump unknown traffic type"); 
     262      return; 
     263    } 
     264  ep = first_ep; 
     265 
     266  printk("\n\nDumping EP list...\n\n"); 
     267 
     268  do { 
     269    __dump_ep_desc(ep); 
     270    /* Cannot phys_to_virt on 0 as it turns into 80000000, which is != 0. */ 
     271    sb = ep->sub ? phys_to_virt(ep->sub) : 0; 
     272    while (sb) { 
     273      __dump_sb_desc(sb); 
     274      sb = sb->next ? phys_to_virt(sb->next) : 0; 
     275    } 
     276    ep = (volatile struct USB_EP_Desc *)(phys_to_virt(ep->next)); 
     277 
     278  } while (ep != first_ep); 
     279} 
     280 
     281static inline void __dump_ept_data(int epid) 
     282{ 
     283  unsigned long flags; 
     284  __u32 r_usb_ept_data; 
     285 
     286  if (epid < 0 || epid > 31) { 
     287    printk("Cannot dump ept data for invalid epid %d\n", epid); 
     288    return; 
     289  } 
     290 
     291  local_irq_save(flags); 
     292  *R_USB_EPT_INDEX = IO_FIELD(R_USB_EPT_INDEX, value, epid); 
     293  nop(); 
     294  r_usb_ept_data = *R_USB_EPT_DATA; 
     295  local_irq_restore(flags); 
     296 
     297  printk(" R_USB_EPT_DATA = 0x%x for epid %d :\n", r_usb_ept_data, epid); 
     298  if (r_usb_ept_data == 0) { 
     299    /* No need for more detailed printing. */ 
     300    return; 
     301  } 
     302  printk("  valid           : %d\n", (r_usb_ept_data & 0x80000000) >> 31); 
     303  printk("  hold            : %d\n", (r_usb_ept_data & 0x40000000) >> 30); 
     304  printk("  error_count_in  : %d\n", (r_usb_ept_data & 0x30000000) >> 28); 
     305  printk("  t_in            : %d\n", (r_usb_ept_data & 0x08000000) >> 27); 
     306  printk("  low_speed       : %d\n", (r_usb_ept_data & 0x04000000) >> 26); 
     307  printk("  port            : %d\n", (r_usb_ept_data & 0x03000000) >> 24); 
     308  printk("  error_code      : %d\n", (r_usb_ept_data & 0x00c00000) >> 22); 
     309  printk("  t_out           : %d\n", (r_usb_ept_data & 0x00200000) >> 21); 
     310  printk("  error_count_out : %d\n", (r_usb_ept_data & 0x00180000) >> 19); 
     311  printk("  max_len         : %d\n", (r_usb_ept_data & 0x0003f800) >> 11); 
     312  printk("  ep              : %d\n", (r_usb_ept_data & 0x00000780) >> 7); 
     313  printk("  dev             : %d\n", (r_usb_ept_data & 0x0000003f)); 
     314} 
     315 
     316static inline void __dump_ept_data_iso(int epid) 
     317{ 
     318  unsigned long flags; 
     319  __u32 ept_data; 
     320 
     321  if (epid < 0 || epid > 31) { 
     322    printk("Cannot dump ept data for invalid epid %d\n", epid); 
     323    return; 
     324  } 
     325 
     326  local_irq_save(flags); 
     327  *R_USB_EPT_INDEX = IO_FIELD(R_USB_EPT_INDEX, value, epid); 
     328  nop(); 
     329  ept_data = *R_USB_EPT_DATA_ISO; 
     330  local_irq_restore(flags); 
     331 
     332  printk(" R_USB_EPT_DATA = 0x%x for epid %d :\n", ept_data, epid); 
     333  if (ept_data == 0) { 
     334    /* No need for more detailed printing. */ 
     335    return; 
     336  } 
     337  printk("  valid           : %d\n", IO_EXTRACT(R_USB_EPT_DATA_ISO, valid, 
     338                                                ept_data)); 
     339  printk("  port            : %d\n", IO_EXTRACT(R_USB_EPT_DATA_ISO, port, 
     340                                                ept_data)); 
     341  printk("  error_code      : %d\n", IO_EXTRACT(R_USB_EPT_DATA_ISO, error_code, 
     342                                                ept_data)); 
     343  printk("  max_len         : %d\n", IO_EXTRACT(R_USB_EPT_DATA_ISO, max_len, 
     344                                                ept_data)); 
     345  printk("  ep              : %d\n", IO_EXTRACT(R_USB_EPT_DATA_ISO, ep, 
     346                                                ept_data)); 
     347  printk("  dev             : %d\n", IO_EXTRACT(R_USB_EPT_DATA_ISO, dev, 
     348                                                ept_data)); 
     349} 
     350 
     351static inline void __dump_ept_data_list(void) 
     352{ 
     353  int i; 
     354 
     355  printk("Dumping the whole R_USB_EPT_DATA list\n"); 
     356 
     357  for (i = 0; i < 32; i++) { 
     358    __dump_ept_data(i); 
     359  } 
     360} 
     361 
     362static void debug_epid(int epid) { 
     363  int i; 
     364   
     365  if(epid_isoc(epid)) { 
     366    __dump_ept_data_iso(epid); 
     367  } else { 
     368    __dump_ept_data(epid); 
     369  } 
     370 
     371  printk("Bulk:\n"); 
     372  for(i = 0; i < 32; i++) { 
     373    if(IO_EXTRACT(USB_EP_command, epid, TxBulkEPList[i].command) == 
     374       epid) { 
     375      printk("%d: ", i); __dump_ep_desc(&(TxBulkEPList[i])); 
     376    } 
     377  } 
     378 
     379  printk("Ctrl:\n"); 
     380  for(i = 0; i < 32; i++) { 
     381    if(IO_EXTRACT(USB_EP_command, epid, TxCtrlEPList[i].command) == 
     382       epid) { 
     383      printk("%d: ", i); __dump_ep_desc(&(TxCtrlEPList[i])); 
     384    } 
     385  } 
     386 
     387  printk("Intr:\n"); 
     388  for(i = 0; i < MAX_INTR_INTERVAL; i++) { 
     389    if(IO_EXTRACT(USB_EP_command, epid, TxIntrEPList[i].command) == 
     390       epid) { 
     391      printk("%d: ", i); __dump_ep_desc(&(TxIntrEPList[i])); 
     392    } 
     393  } 
     394   
     395  printk("Isoc:\n"); 
     396  for(i = 0; i < 32; i++) { 
     397    if(IO_EXTRACT(USB_EP_command, epid, TxIsocEPList[i].command) == 
     398       epid) { 
     399      printk("%d: ", i); __dump_ep_desc(&(TxIsocEPList[i])); 
     400    } 
     401  } 
     402 
     403  __dump_ept_data_list(); 
     404  __dump_ep_list(PIPE_INTERRUPT); 
     405  printk("\n\n"); 
     406} 
     407 
     408 
     409 
     410char* hcd_status_to_str(__u8 bUsbStatus) { 
     411  static char hcd_status_str[128]; 
     412  hcd_status_str[0] = '\0'; 
     413  if(bUsbStatus & IO_STATE(R_USB_STATUS, ourun, yes)) { 
     414    strcat(hcd_status_str, "ourun "); 
     415  } 
     416  if(bUsbStatus & IO_STATE(R_USB_STATUS, perror, yes)) { 
     417    strcat(hcd_status_str, "perror "); 
     418  } 
     419  if(bUsbStatus & IO_STATE(R_USB_STATUS, device_mode, yes)) { 
     420    strcat(hcd_status_str, "device_mode "); 
     421  } 
     422  if(bUsbStatus & IO_STATE(R_USB_STATUS, host_mode, yes)) { 
     423    strcat(hcd_status_str, "host_mode "); 
     424  } 
     425  if(bUsbStatus & IO_STATE(R_USB_STATUS, started, yes)) { 
     426    strcat(hcd_status_str, "started "); 
     427  } 
     428  if(bUsbStatus & IO_STATE(R_USB_STATUS, running, yes)) { 
     429    strcat(hcd_status_str, "running "); 
     430  } 
     431  return hcd_status_str; 
     432} 
     433 
     434 
     435char* sblist_to_str(struct USB_SB_Desc* sb_desc) { 
     436  static char sblist_to_str_buff[128]; 
     437  char tmp[32], tmp2[32]; 
     438  sblist_to_str_buff[0] = '\0'; 
     439  while(sb_desc != NULL) { 
     440    switch(IO_EXTRACT(USB_SB_command, tt, sb_desc->command)) { 
     441    case 0: sprintf(tmp, "zout");  break; 
     442    case 1: sprintf(tmp, "in");    break; 
     443    case 2: sprintf(tmp, "out");   break; 
     444    case 3: sprintf(tmp, "setup"); break; 
     445    } 
     446    sprintf(tmp2, "(%s %d)", tmp, sb_desc->sw_len); 
     447    strcat(sblist_to_str_buff, tmp2); 
     448    if(sb_desc->next != 0) { 
     449      sb_desc = phys_to_virt(sb_desc->next); 
     450    } else { 
     451      sb_desc = NULL; 
     452    } 
     453  } 
     454  return sblist_to_str_buff; 
     455} 
     456 
     457char* port_status_to_str(__u16 wPortStatus) { 
     458  static char port_status_str[128]; 
     459  port_status_str[0] = '\0'; 
     460  if(wPortStatus & IO_STATE(R_USB_RH_PORT_STATUS_1, connected, yes)) { 
     461    strcat(port_status_str, "connected "); 
     462  } 
     463  if(wPortStatus & IO_STATE(R_USB_RH_PORT_STATUS_1, enabled, yes)) { 
     464    strcat(port_status_str, "enabled "); 
     465  } 
     466  if(wPortStatus & IO_STATE(R_USB_RH_PORT_STATUS_1, suspended, yes)) { 
     467    strcat(port_status_str, "suspended "); 
     468  } 
     469  if(wPortStatus & IO_STATE(R_USB_RH_PORT_STATUS_1, reset, yes)) { 
     470    strcat(port_status_str, "reset "); 
     471  } 
     472  if(wPortStatus & IO_STATE(R_USB_RH_PORT_STATUS_1, speed, full)) { 
     473    strcat(port_status_str, "full-speed "); 
     474  } else { 
     475    strcat(port_status_str, "low-speed "); 
     476  } 
     477  return port_status_str; 
     478} 
     479 
     480 
     481char* endpoint_to_str(struct usb_endpoint_descriptor *ed) { 
     482  static char endpoint_to_str_buff[128]; 
     483  char tmp[32]; 
     484  int epnum = ed->bEndpointAddress & 0x0F; 
     485  int dir = ed->bEndpointAddress & 0x80; 
     486  int type = ed->bmAttributes & 0x03; 
     487  endpoint_to_str_buff[0] = '\0'; 
     488  sprintf(endpoint_to_str_buff, "ep:%d ", epnum); 
     489  switch(type) { 
     490  case 0: 
     491    sprintf(tmp, " ctrl"); 
     492    break; 
     493  case 1: 
     494    sprintf(tmp, " isoc"); 
     495    break; 
     496  case 2: 
     497    sprintf(tmp, " bulk"); 
     498    break; 
     499  case 3: 
     500    sprintf(tmp, " intr"); 
     501    break; 
     502  } 
     503  strcat(endpoint_to_str_buff, tmp); 
     504  if(dir) { 
     505    sprintf(tmp, " in"); 
     506  } else { 
     507    sprintf(tmp, " out"); 
     508  } 
     509  strcat(endpoint_to_str_buff, tmp); 
     510 
     511  return endpoint_to_str_buff; 
     512} 
     513 
     514/* Debug helper functions for Transfer Controller */ 
     515char* pipe_to_str(unsigned int pipe) { 
     516  static char pipe_to_str_buff[128]; 
     517  char tmp[64]; 
     518  sprintf(pipe_to_str_buff, "dir:%s", str_dir(pipe)); 
     519  sprintf(tmp, " type:%s", str_type(pipe)); 
     520  strcat(pipe_to_str_buff, tmp); 
     521 
     522  sprintf(tmp, " dev:%d", usb_pipedevice(pipe)); 
     523  strcat(pipe_to_str_buff, tmp); 
     524  sprintf(tmp, " ep:%d", usb_pipeendpoint(pipe)); 
     525  strcat(pipe_to_str_buff, tmp); 
     526  return pipe_to_str_buff; 
     527} 
     528 
     529 
     530#define USB_DEBUG_DESC 1 
     531 
     532#ifdef USB_DEBUG_DESC 
     533#define dump_in_desc(x) __dump_in_desc(x) 
      534#define dump_sb_desc(...) __dump_sb_desc(__VA_ARGS__) 
     535#define dump_ep_desc(x) __dump_ep_desc(x) 
     536#define dump_ept_data(x) __dump_ept_data(x) 
    55537#else 
    56 #define dbg_rh(format, arg...) do {} while (0) 
     538#define dump_in_desc(...) do {} while (0) 
     539#define dump_sb_desc(...) do {} while (0) 
     540#define dump_ep_desc(...) do {} while (0) 
    57541#endif 
    58542 
    59 #ifdef USB_DEBUG_EPID 
    60 #define dbg_epid(format, arg...) printk(KERN_DEBUG __FILE__ ": (EPID) " format "\n" , ## arg) 
    61 #else 
    62 #define dbg_epid(format, arg...) do {} while (0) 
    63 #endif 
    64  
    65 #ifdef USB_DEBUG_SB 
    66 #define dbg_sb(format, arg...) printk(KERN_DEBUG __FILE__ ": (SB) " format "\n" , ## arg) 
    67 #else 
    68 #define dbg_sb(format, arg...) do {} while (0) 
    69 #endif 
    70  
    71 #ifdef USB_DEBUG_CTRL 
    72 #define dbg_ctrl(format, arg...) printk(KERN_DEBUG __FILE__ ": (CTRL) " format "\n" , ## arg) 
    73 #else 
    74 #define dbg_ctrl(format, arg...) do {} while (0) 
    75 #endif 
    76  
    77 #ifdef USB_DEBUG_BULK 
    78 #define dbg_bulk(format, arg...) printk(KERN_DEBUG __FILE__ ": (BULK) " format "\n" , ## arg) 
    79 #else 
    80 #define dbg_bulk(format, arg...) do {} while (0) 
    81 #endif 
    82  
    83 #ifdef USB_DEBUG_INTR 
    84 #define dbg_intr(format, arg...) printk(KERN_DEBUG __FILE__ ": (INTR) " format "\n" , ## arg) 
    85 #else 
    86 #define dbg_intr(format, arg...) do {} while (0) 
    87 #endif 
    88  
    89 #ifdef USB_DEBUG_ISOC 
    90 #define dbg_isoc(format, arg...) printk(KERN_DEBUG __FILE__ ": (ISOC) " format "\n" , ## arg) 
    91 #else 
    92 #define dbg_isoc(format, arg...) do {} while (0) 
    93 #endif 
     543 
     544/* Uncomment this to enable massive function call trace 
     545   #define USB_DEBUG_TRACE */ 
    94546 
    95547#ifdef USB_DEBUG_TRACE 
     
    101553#endif 
    102554 
    103 #define usb_pipeslow(pipe)      (((pipe) >> 26) & 1) 
    104  
    105 /*------------------------------------------------------------------- 
    106  Virtual Root Hub 
    107  -------------------------------------------------------------------*/ 
    108  
    109 static __u8 root_hub_dev_des[] = 
    110 { 
    111         0x12,  /*  __u8  bLength; */ 
    112         0x01,  /*  __u8  bDescriptorType; Device */ 
    113         0x00,  /*  __le16 bcdUSB; v1.0 */ 
    114         0x01, 
    115         0x09,  /*  __u8  bDeviceClass; HUB_CLASSCODE */ 
    116         0x00,  /*  __u8  bDeviceSubClass; */ 
    117         0x00,  /*  __u8  bDeviceProtocol; */ 
    118         0x08,  /*  __u8  bMaxPacketSize0; 8 Bytes */ 
    119         0x00,  /*  __le16 idVendor; */ 
    120         0x00, 
    121         0x00,  /*  __le16 idProduct; */ 
    122         0x00, 
    123         0x00,  /*  __le16 bcdDevice; */ 
    124         0x00, 
    125         0x00,  /*  __u8  iManufacturer; */ 
    126         0x02,  /*  __u8  iProduct; */ 
    127         0x01,  /*  __u8  iSerialNumber; */ 
    128         0x01   /*  __u8  bNumConfigurations; */ 
    129 }; 
    130  
    131 /* Configuration descriptor */ 
    132 static __u8 root_hub_config_des[] = 
    133 { 
    134         0x09,  /*  __u8  bLength; */ 
    135         0x02,  /*  __u8  bDescriptorType; Configuration */ 
    136         0x19,  /*  __le16 wTotalLength; */ 
    137         0x00, 
    138         0x01,  /*  __u8  bNumInterfaces; */ 
    139         0x01,  /*  __u8  bConfigurationValue; */ 
    140         0x00,  /*  __u8  iConfiguration; */ 
    141         0x40,  /*  __u8  bmAttributes; Bit 7: Bus-powered */ 
    142         0x00,  /*  __u8  MaxPower; */ 
    143  
    144      /* interface */ 
    145         0x09,  /*  __u8  if_bLength; */ 
    146         0x04,  /*  __u8  if_bDescriptorType; Interface */ 
    147         0x00,  /*  __u8  if_bInterfaceNumber; */ 
    148         0x00,  /*  __u8  if_bAlternateSetting; */ 
    149         0x01,  /*  __u8  if_bNumEndpoints; */ 
    150         0x09,  /*  __u8  if_bInterfaceClass; HUB_CLASSCODE */ 
    151         0x00,  /*  __u8  if_bInterfaceSubClass; */ 
    152         0x00,  /*  __u8  if_bInterfaceProtocol; */ 
    153         0x00,  /*  __u8  if_iInterface; */ 
    154  
    155      /* endpoint */ 
    156         0x07,  /*  __u8  ep_bLength; */ 
    157         0x05,  /*  __u8  ep_bDescriptorType; Endpoint */ 
    158         0x81,  /*  __u8  ep_bEndpointAddress; IN Endpoint 1 */ 
    159         0x03,  /*  __u8  ep_bmAttributes; Interrupt */ 
    160         0x08,  /*  __le16 ep_wMaxPacketSize; 8 Bytes */ 
    161         0x00, 
    162         0xff   /*  __u8  ep_bInterval; 255 ms */ 
    163 }; 
    164  
    165 static __u8 root_hub_hub_des[] = 
    166 { 
    167         0x09,  /*  __u8  bLength; */ 
    168         0x29,  /*  __u8  bDescriptorType; Hub-descriptor */ 
    169         0x02,  /*  __u8  bNbrPorts; */ 
    170         0x00,  /* __u16  wHubCharacteristics; */ 
    171         0x00, 
    172         0x01,  /*  __u8  bPwrOn2pwrGood; 2ms */ 
    173         0x00,  /*  __u8  bHubContrCurrent; 0 mA */ 
    174         0x00,  /*  __u8  DeviceRemovable; *** 7 Ports max *** */ 
    175         0xff   /*  __u8  PortPwrCtrlMask; *** 7 ports max *** */ 
    176 }; 
    177  
    178 static DEFINE_TIMER(bulk_start_timer, NULL, 0, 0); 
    179 static DEFINE_TIMER(bulk_eot_timer, NULL, 0, 0); 
    180  
    181 /* We want the start timer to expire before the eot timer, because the former might start 
    182    traffic, thus making it unnecessary for the latter to time out. */ 
    183 #define BULK_START_TIMER_INTERVAL (HZ/10) /* 100 ms */ 
    184 #define BULK_EOT_TIMER_INTERVAL (HZ/10+2) /* 120 ms */ 
    185  
    186 #define OK(x) len = (x); dbg_rh("OK(%d): line: %d", x, __LINE__); break 
    187555#define CHECK_ALIGN(x) if (((__u32)(x)) & 0x00000003) \ 
    188556{panic("Alignment check (DWORD) failed at %s:%s:%d\n", __FILE__, __FUNCTION__, __LINE__);} 
    189557 
    190 #define SLAB_FLAG     (in_interrupt() ? GFP_ATOMIC : GFP_KERNEL) 
    191 #define KMALLOC_FLAG  (in_interrupt() ? GFP_ATOMIC : GFP_KERNEL) 
    192  
    193558/* Most helpful debugging aid */ 
    194 #define assert(expr) ((void) ((expr) ? 0 : (err("assert failed at line %d",__LINE__)))) 
    195  
    196 /* Alternative assert define which stops after a failed assert. */ 
     559#define ASSERT(expr) ((void) ((expr) ? 0 : (err("assert failed at: %s %d",__FUNCTION__, __LINE__)))) 
     560 
     561 
     562/***************************************************************************/ 
     563/***************************************************************************/ 
     564/* Forward declarations                                                    */ 
     565/***************************************************************************/ 
     566/***************************************************************************/ 
     567void crisv10_hcd_epid_attn_irq(struct crisv10_irq_reg *reg); 
     568void crisv10_hcd_port_status_irq(struct crisv10_irq_reg *reg); 
     569void crisv10_hcd_ctl_status_irq(struct crisv10_irq_reg *reg); 
     570void crisv10_hcd_isoc_eof_irq(struct crisv10_irq_reg *reg); 
     571 
     572void rh_port_status_change(__u16[]); 
     573int  rh_clear_port_feature(__u8, __u16); 
     574int  rh_set_port_feature(__u8, __u16); 
     575static void rh_disable_port(unsigned int port); 
     576 
     577static void check_finished_bulk_tx_epids(struct usb_hcd *hcd, 
     578                                         int timer); 
     579 
     580static int  tc_setup_epid(struct usb_host_endpoint *ep, struct urb *urb, 
     581                         int mem_flags); 
     582static void tc_free_epid(struct usb_host_endpoint *ep); 
     583static int  tc_allocate_epid(void); 
     584static void tc_finish_urb(struct usb_hcd *hcd, struct urb *urb, int status); 
     585static void tc_finish_urb_later(struct usb_hcd *hcd, struct urb *urb, 
     586                                int status); 
     587 
     588static int  urb_priv_create(struct usb_hcd *hcd, struct urb *urb, int epid, 
     589                           int mem_flags); 
     590static void urb_priv_free(struct usb_hcd *hcd, struct urb *urb); 
     591 
     592static int crisv10_usb_check_bandwidth(struct usb_device *dev,struct urb *urb); 
     593static void crisv10_usb_claim_bandwidth( 
     594        struct usb_device *dev, struct urb *urb, int bustime, int isoc); 
     595static void crisv10_usb_release_bandwidth( 
     596        struct usb_hcd *hcd, int isoc, int bandwidth); 
     597 
     598static inline struct urb *urb_list_first(int epid); 
     599static inline void        urb_list_add(struct urb *urb, int epid, 
     600                                      int mem_flags); 
     601static inline urb_entry_t *urb_list_entry(struct urb *urb, int epid); 
     602static inline void        urb_list_del(struct urb *urb, int epid); 
     603static inline void        urb_list_move_last(struct urb *urb, int epid); 
     604static inline struct urb *urb_list_next(struct urb *urb, int epid); 
     605 
     606int create_sb_for_urb(struct urb *urb, int mem_flags); 
     607int init_intr_urb(struct urb *urb, int mem_flags); 
     608 
     609static inline void  etrax_epid_set(__u8 index, __u32 data); 
     610static inline void  etrax_epid_clear_error(__u8 index); 
     611static inline void  etrax_epid_set_toggle(__u8 index, __u8 dirout, 
     612                                              __u8 toggle); 
     613static inline __u8  etrax_epid_get_toggle(__u8 index, __u8 dirout); 
     614static inline __u32 etrax_epid_get(__u8 index); 
     615 
      616/* We're accessing the same register position in Etrax, so when we do a 
      617   full access the internal difference doesn't matter */ 
     618#define etrax_epid_iso_set(index, data) etrax_epid_set(index, data) 
     619#define etrax_epid_iso_get(index) etrax_epid_get(index) 
     620 
     621 
     622static void        tc_dma_process_isoc_urb(struct urb *urb); 
     623static void        tc_dma_process_queue(int epid); 
     624static void        tc_dma_unlink_intr_urb(struct urb *urb); 
     625static irqreturn_t tc_dma_tx_interrupt(int irq, void *vhc); 
     626static irqreturn_t tc_dma_rx_interrupt(int irq, void *vhc); 
     627 
     628static void tc_bulk_start_timer_func(unsigned long dummy); 
     629static void tc_bulk_eot_timer_func(unsigned long dummy); 
     630 
     631 
     632/*************************************************************/ 
     633/*************************************************************/ 
      634/* Host Controller Driver block                              */ 
     635/*************************************************************/ 
     636/*************************************************************/ 
     637 
     638/* HCD operations */ 
     639static irqreturn_t crisv10_hcd_top_irq(int irq, void*); 
     640static int crisv10_hcd_reset(struct usb_hcd *); 
     641static int crisv10_hcd_start(struct usb_hcd *); 
     642static void crisv10_hcd_stop(struct usb_hcd *); 
     643#ifdef CONFIG_PM 
     644static int crisv10_hcd_suspend(struct device *, u32, u32); 
     645static int crisv10_hcd_resume(struct device *, u32); 
     646#endif /* CONFIG_PM */ 
     647static int crisv10_hcd_get_frame(struct usb_hcd *); 
     648 
     649static int  tc_urb_enqueue(struct usb_hcd *, struct urb *, gfp_t mem_flags); 
     650static int  tc_urb_dequeue(struct usb_hcd *, struct urb *, int); 
     651static void tc_endpoint_disable(struct usb_hcd *, struct usb_host_endpoint *ep); 
     652 
     653static int rh_status_data_request(struct usb_hcd *, char *); 
     654static int rh_control_request(struct usb_hcd *, u16, u16, u16, char*, u16); 
     655 
     656#ifdef CONFIG_PM 
     657static int crisv10_hcd_hub_suspend(struct usb_hcd *); 
     658static int crisv10_hcd_hub_resume(struct usb_hcd *); 
     659#endif /* CONFIG_PM */ 
     660#ifdef CONFIG_USB_OTG 
     661static int crisv10_hcd_start_port_reset(struct usb_hcd *, unsigned); 
     662#endif /* CONFIG_USB_OTG */ 
     663 
     664/* host controller driver interface */ 
     665static const struct hc_driver crisv10_hc_driver =  
     666  { 
     667    .description =      hc_name, 
     668    .product_desc =     product_desc, 
     669    .hcd_priv_size =    sizeof(struct crisv10_hcd), 
     670 
      671    /* Attaching IRQ handler manually in probe() */ 
     672    /* .irq =           crisv10_hcd_irq, */ 
     673 
     674    .flags =            HCD_USB11, 
     675 
     676    /* called to init HCD and root hub */ 
     677    .reset =            crisv10_hcd_reset, 
     678    .start =            crisv10_hcd_start,       
     679 
     680    /* cleanly make HCD stop writing memory and doing I/O */ 
     681    .stop =             crisv10_hcd_stop, 
     682 
     683    /* return current frame number */ 
     684    .get_frame_number = crisv10_hcd_get_frame, 
     685 
     686 
     687    /* Manage i/o requests via the Transfer Controller */ 
     688    .urb_enqueue =      tc_urb_enqueue, 
     689    .urb_dequeue =      tc_urb_dequeue, 
     690 
     691    /* hw synch, freeing endpoint resources that urb_dequeue can't */ 
     692    .endpoint_disable = tc_endpoint_disable, 
     693 
     694 
     695    /* Root Hub support */ 
     696    .hub_status_data =  rh_status_data_request, 
     697    .hub_control =      rh_control_request, 
     698#ifdef CONFIG_PM 
     699    .hub_suspend =      rh_suspend_request, 
     700    .hub_resume =       rh_resume_request, 
     701#endif /* CONFIG_PM */ 
     702#ifdef  CONFIG_USB_OTG 
     703    .start_port_reset = crisv10_hcd_start_port_reset, 
     704#endif /* CONFIG_USB_OTG */ 
     705  }; 
     706 
     707 
    197708/* 
    198 #define assert(expr)                                      \ 
    199 {                                                         \ 
    200         if (!(expr)) {                                    \ 
    201                 err("assert failed at line %d",__LINE__); \ 
    202                 while (1);                                \ 
    203         }                                                 \ 
    204 } 
    205 */ 
    206  
    207  
    208 /* FIXME: Should RX_BUF_SIZE be a config option, or maybe we should adjust it dynamically? 
    209    To adjust it dynamically we would have to get an interrupt when we reach the end 
    210    of the rx descriptor list, or when we get close to the end, and then allocate more 
    211    descriptors. */ 
    212  
     709 * conversion between pointers to a hcd and the corresponding 
     710 * crisv10_hcd  
     711 */ 
     712 
     713static inline struct crisv10_hcd *hcd_to_crisv10_hcd(struct usb_hcd *hcd) 
     714{ 
     715        return (struct crisv10_hcd *) hcd->hcd_priv; 
     716} 
     717 
     718static inline struct usb_hcd *crisv10_hcd_to_hcd(struct crisv10_hcd *hcd) 
     719{ 
     720        return container_of((void *) hcd, struct usb_hcd, hcd_priv); 
     721} 
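      /* Editorial note: these conversions work because usb_create_hcd()
         allocates hcd_priv_size extra bytes (see crisv10_hc_driver above)
         directly after struct usb_hcd, and hcd_priv is the trailing member of
         that struct, so container_of() recovers the enclosing usb_hcd. */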
     722 
     723/* check if specified port is in use */ 
     724static inline int port_in_use(unsigned int port) 
     725{ 
     726        return ports & (1 << port); 
     727} 
     728 
     729/* number of ports in use */ 
     730static inline unsigned int num_ports(void) 
     731{ 
     732        unsigned int i, num = 0; 
     733        for (i = 0; i < USB_ROOT_HUB_PORTS; i++) 
     734                if (port_in_use(i)) 
     735                        num++; 
     736        return num; 
     737} 
     738 
     739/* map hub port number to the port number used internally by the HC */ 
     740static inline unsigned int map_port(unsigned int port) 
     741{ 
     742  unsigned int i, num = 0; 
     743  for (i = 0; i < USB_ROOT_HUB_PORTS; i++) 
     744    if (port_in_use(i)) 
     745      if (++num == port) 
     746        return i; 
     747  return -1; 
     748} 
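      /* Worked example (editorial, not in the original source): with ports=2
         (binary 10, only the external connector enabled) port_in_use(0) is 0,
         port_in_use(1) is non-zero, num_ports() returns 1, and the single hub
         port 1 is mapped back to physical port index 1 by map_port(1). */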
     749 
     750/* size of descriptors in slab cache */ 
     751#ifndef MAX 
     752#define MAX(x, y)               ((x) > (y) ? (x) : (y)) 
     753#endif 
     754 
     755 
     756/******************************************************************/ 
     757/* Hardware Interrupt functions                                   */ 
     758/******************************************************************/ 
     759 
     760/* Fast interrupt handler for HC */ 
     761static irqreturn_t crisv10_hcd_top_irq(int irq, void *vcd) 
     762{ 
     763  struct usb_hcd *hcd = vcd; 
     764  struct crisv10_irq_reg reg; 
     765  __u32 irq_mask; 
     766  unsigned long flags; 
     767 
     768  DBFENTER; 
     769 
     770  ASSERT(hcd != NULL); 
     771  reg.hcd = hcd; 
     772 
      773  /* Turn off other interrupts while handling these sensitive cases */ 
     774  local_irq_save(flags); 
     775   
      776  /* Read out which interrupts are flagged */ 
     777  irq_mask = *R_USB_IRQ_MASK_READ; 
     778  reg.r_usb_irq_mask_read = irq_mask; 
     779 
     780  /* Reading R_USB_STATUS clears the ctl_status interrupt. Note that 
     781     R_USB_STATUS must be read before R_USB_EPID_ATTN since reading the latter 
     782     clears the ourun and perror fields of R_USB_STATUS. */ 
     783  reg.r_usb_status = *R_USB_STATUS; 
     784   
     785  /* Reading R_USB_EPID_ATTN clears the iso_eof, bulk_eot and epid_attn 
     786     interrupts. */ 
     787  reg.r_usb_epid_attn = *R_USB_EPID_ATTN; 
     788   
     789  /* Reading R_USB_RH_PORT_STATUS_1 and R_USB_RH_PORT_STATUS_2 clears the 
     790     port_status interrupt. */ 
     791  reg.r_usb_rh_port_status_1 = *R_USB_RH_PORT_STATUS_1; 
     792  reg.r_usb_rh_port_status_2 = *R_USB_RH_PORT_STATUS_2; 
     793   
     794  /* Reading R_USB_FM_NUMBER clears the sof interrupt. */ 
     795  /* Note: the lower 11 bits contain the actual frame number, sent with each 
     796     sof. */ 
     797  reg.r_usb_fm_number = *R_USB_FM_NUMBER; 
     798 
     799  /* Interrupts are handled in order of priority. */ 
     800  if (irq_mask & IO_MASK(R_USB_IRQ_MASK_READ, port_status)) { 
     801    crisv10_hcd_port_status_irq(&reg); 
     802  } 
     803  if (irq_mask & IO_MASK(R_USB_IRQ_MASK_READ, epid_attn)) { 
     804    crisv10_hcd_epid_attn_irq(&reg); 
     805  } 
     806  if (irq_mask & IO_MASK(R_USB_IRQ_MASK_READ, ctl_status)) { 
     807    crisv10_hcd_ctl_status_irq(&reg); 
     808  } 
     809  if (irq_mask & IO_MASK(R_USB_IRQ_MASK_READ, iso_eof)) { 
     810    crisv10_hcd_isoc_eof_irq(&reg); 
     811  } 
     812  if (irq_mask & IO_MASK(R_USB_IRQ_MASK_READ, bulk_eot)) { 
     813    /* Update/restart the bulk start timer since obviously the channel is 
     814       running. */ 
     815    mod_timer(&bulk_start_timer, jiffies + BULK_START_TIMER_INTERVAL); 
      816    /* Update/restart the bulk eot timer since we just received a bulk eot 
     817       interrupt. */ 
     818    mod_timer(&bulk_eot_timer, jiffies + BULK_EOT_TIMER_INTERVAL); 
     819 
     820    /* Check for finished bulk transfers on epids */ 
     821    check_finished_bulk_tx_epids(hcd, 0); 
     822  } 
     823  local_irq_restore(flags); 
     824 
     825  DBFEXIT; 
     826  return IRQ_HANDLED; 
     827} 
     828 
     829 
     830void crisv10_hcd_epid_attn_irq(struct crisv10_irq_reg *reg) { 
     831  struct usb_hcd *hcd = reg->hcd; 
     832  struct crisv10_urb_priv *urb_priv; 
     833  int epid; 
     834  DBFENTER; 
     835 
     836  for (epid = 0; epid < NBR_OF_EPIDS; epid++) { 
     837    if (test_bit(epid, (void *)&reg->r_usb_epid_attn)) { 
     838      struct urb *urb; 
     839      __u32 ept_data; 
     840      int error_code; 
     841 
     842      if (epid == DUMMY_EPID || epid == INVALID_EPID) { 
     843        /* We definitely don't care about these ones. Besides, they are 
     844           always disabled, so any possible disabling caused by the 
     845           epid attention interrupt is irrelevant. */ 
     846        warn("Got epid_attn for INVALID_EPID or DUMMY_EPID (%d).", epid); 
     847        continue; 
     848      } 
     849 
     850      if(!epid_inuse(epid)) { 
     851        irq_err("Epid attention on epid:%d that isn't in use\n", epid); 
     852        printk("R_USB_STATUS: 0x%x\n", reg->r_usb_status); 
     853        debug_epid(epid); 
     854        continue; 
     855      } 
     856 
     857      /* Note that although there are separate R_USB_EPT_DATA and 
     858         R_USB_EPT_DATA_ISO registers, they are located at the same address and 
     859         are of the same size. In other words, this read should be ok for isoc 
     860         also. */ 
     861      ept_data = etrax_epid_get(epid); 
     862      error_code = IO_EXTRACT(R_USB_EPT_DATA, error_code, ept_data); 
     863 
     864      /* Get the active URB for this epid. We blatantly assume 
     865         that only this URB could have caused the epid attention. */ 
     866      urb = activeUrbList[epid]; 
     867      if (urb == NULL) { 
     868        irq_err("Attention on epid:%d error:%d with no active URB.\n", 
     869                epid, error_code); 
     870        printk("R_USB_STATUS: 0x%x\n", reg->r_usb_status); 
     871        debug_epid(epid); 
     872        continue; 
     873      } 
     874 
     875      urb_priv = (struct crisv10_urb_priv *)urb->hcpriv; 
     876      ASSERT(urb_priv); 
     877 
     878      /* Using IO_STATE_VALUE on R_USB_EPT_DATA should be ok for isoc also. */ 
     879      if (error_code == IO_STATE_VALUE(R_USB_EPT_DATA, error_code, no_error)) { 
     880 
     881        /* Isoc traffic doesn't have error_count_in/error_count_out. */ 
     882        if ((usb_pipetype(urb->pipe) != PIPE_ISOCHRONOUS) && 
     883            (IO_EXTRACT(R_USB_EPT_DATA, error_count_in, ept_data) == 3 || 
     884             IO_EXTRACT(R_USB_EPT_DATA, error_count_out, ept_data) == 3)) { 
      885          /* Check if the URB already is marked for late-finish; we can get 
      886             several 3rd errors for Intr traffic when a device is unplugged */ 
     887          if(urb_priv->later_data == NULL) { 
     888            /* 3rd error. */ 
     889            irq_warn("3rd error for epid:%d (%s %s) URB:0x%x[%d]\n", epid, 
     890                     str_dir(urb->pipe), str_type(urb->pipe), 
     891                     (unsigned int)urb, urb_priv->urb_num); 
     892           
     893            tc_finish_urb_later(hcd, urb, -EPROTO); 
     894          } 
     895 
     896        } else if (reg->r_usb_status & IO_MASK(R_USB_STATUS, perror)) { 
     897          irq_warn("Perror for epid:%d\n", epid); 
     898          printk("FM_NUMBER: %d\n", reg->r_usb_fm_number & 0x7ff); 
     899          printk("R_USB_STATUS: 0x%x\n", reg->r_usb_status); 
     900          __dump_urb(urb); 
     901          debug_epid(epid); 
     902 
     903          if (!(ept_data & IO_MASK(R_USB_EPT_DATA, valid))) { 
     904            /* invalid ep_id */ 
     905            panic("Perror because of invalid epid." 
     906                  " Deconfigured too early?"); 
     907          } else { 
     908            /* past eof1, near eof, zout transfer, setup transfer */ 
     909            /* Dump the urb and the relevant EP descriptor. */ 
     910            panic("Something wrong with DMA descriptor contents." 
     911                  " Too much traffic inserted?"); 
     912          } 
     913        } else if (reg->r_usb_status & IO_MASK(R_USB_STATUS, ourun)) { 
     914          /* buffer ourun */ 
     915          printk("FM_NUMBER: %d\n", reg->r_usb_fm_number & 0x7ff); 
     916          printk("R_USB_STATUS: 0x%x\n", reg->r_usb_status); 
     917          __dump_urb(urb); 
     918          debug_epid(epid); 
     919 
     920          panic("Buffer overrun/underrun for epid:%d. DMA too busy?", epid); 
     921        } else { 
     922          irq_warn("Attention on epid:%d (%s %s) with no error code\n", epid, 
     923                   str_dir(urb->pipe), str_type(urb->pipe)); 
     924          printk("R_USB_STATUS: 0x%x\n", reg->r_usb_status); 
     925          __dump_urb(urb); 
     926          debug_epid(epid); 
     927        } 
     928 
     929      } else if (error_code == IO_STATE_VALUE(R_USB_EPT_DATA, error_code, 
     930                                              stall)) { 
     931        /* Not really a protocol error, just says that the endpoint gave 
     932           a stall response. Note that error_code cannot be stall for isoc. */ 
     933        if (usb_pipetype(urb->pipe) == PIPE_ISOCHRONOUS) { 
     934          panic("Isoc traffic cannot stall"); 
     935        } 
     936 
     937        tc_dbg("Stall for epid:%d (%s %s) URB:0x%x\n", epid, 
     938               str_dir(urb->pipe), str_type(urb->pipe), (unsigned int)urb); 
     939        tc_finish_urb(hcd, urb, -EPIPE); 
     940 
     941      } else if (error_code == IO_STATE_VALUE(R_USB_EPT_DATA, error_code, 
     942                                              bus_error)) { 
     943        /* Two devices responded to a transaction request. Must be resolved 
     944           by software. FIXME: Reset ports? */ 
     945        panic("Bus error for epid %d." 
     946              " Two devices responded to transaction request\n", 
     947              epid); 
     948 
     949      } else if (error_code == IO_STATE_VALUE(R_USB_EPT_DATA, error_code, 
     950                                              buffer_error)) { 
     951        /* DMA overrun or underrun. */ 
     952        irq_warn("Buffer overrun/underrun for epid:%d (%s %s)\n", epid, 
     953                 str_dir(urb->pipe), str_type(urb->pipe)); 
     954 
     955        /* It seems that error_code = buffer_error in 
     956           R_USB_EPT_DATA/R_USB_EPT_DATA_ISO and ourun = yes in R_USB_STATUS 
     957           are the same error. */ 
     958        tc_finish_urb(hcd, urb, -EPROTO); 
     959      } else { 
     960          irq_warn("Unknown attention on epid:%d (%s %s)\n", epid, 
     961                   str_dir(urb->pipe), str_type(urb->pipe)); 
     962          dump_ept_data(epid); 
     963      } 
     964    } 
     965  } 
     966  DBFEXIT; 
     967} 
     968 
     969void crisv10_hcd_port_status_irq(struct crisv10_irq_reg *reg) 
     970{ 
     971  __u16 port_reg[USB_ROOT_HUB_PORTS]; 
     972  DBFENTER; 
     973  port_reg[0] = reg->r_usb_rh_port_status_1; 
     974  port_reg[1] = reg->r_usb_rh_port_status_2; 
     975  rh_port_status_change(port_reg); 
     976  DBFEXIT; 
     977} 
     978 
     979void crisv10_hcd_isoc_eof_irq(struct crisv10_irq_reg *reg) 
     980{ 
     981  int epid; 
     982  struct urb *urb; 
     983  struct crisv10_urb_priv *urb_priv; 
     984 
     985  DBFENTER; 
     986 
     987  for (epid = 0; epid < NBR_OF_EPIDS - 1; epid++) { 
     988 
      989    /* Only check epids that are in use, are valid and have an SB list */ 
     990    if (!epid_inuse(epid) || epid == INVALID_EPID || 
     991        TxIsocEPList[epid].sub == 0 || epid == DUMMY_EPID) { 
     992      /* Nothing here to see. */ 
     993      continue; 
     994    } 
     995    ASSERT(epid_isoc(epid)); 
     996 
     997    /* Get the active URB for this epid (if any). */ 
     998    urb = activeUrbList[epid]; 
     999    if (urb == 0) { 
     1000      isoc_warn("Ignoring NULL urb for epid:%d\n", epid); 
     1001      continue; 
     1002    } 
     1003    if(!epid_out_traffic(epid)) { 
     1004      /* Sanity check. */ 
     1005      ASSERT(usb_pipetype(urb->pipe) == PIPE_ISOCHRONOUS); 
     1006 
     1007      urb_priv = (struct crisv10_urb_priv *)urb->hcpriv; 
     1008      ASSERT(urb_priv); 
     1009 
     1010      if (urb_priv->urb_state == NOT_STARTED) { 
     1011        /* If ASAP is not set and urb->start_frame is the current frame, 
     1012           start the transfer. */ 
     1013        if (!(urb->transfer_flags & URB_ISO_ASAP) && 
     1014            (urb->start_frame == (*R_USB_FM_NUMBER & 0x7ff))) { 
     1015          /* EP should not be enabled if we're waiting for start_frame */ 
     1016          ASSERT((TxIsocEPList[epid].command & 
     1017                  IO_STATE(USB_EP_command, enable, yes)) == 0); 
     1018 
     1019          isoc_warn("Enabling isoc IN EP descr for epid %d\n", epid); 
     1020          TxIsocEPList[epid].command |= IO_STATE(USB_EP_command, enable, yes); 
     1021 
     1022          /* This urb is now active. */ 
     1023          urb_priv->urb_state = STARTED; 
     1024          continue; 
     1025        } 
     1026      } 
     1027    } 
     1028  } 
     1029 
     1030  DBFEXIT; 
     1031} 
     1032 
     1033void crisv10_hcd_ctl_status_irq(struct crisv10_irq_reg *reg) 
     1034{ 
     1035  struct crisv10_hcd* crisv10_hcd = hcd_to_crisv10_hcd(reg->hcd); 
     1036 
     1037  DBFENTER; 
     1038  ASSERT(crisv10_hcd); 
     1039 
     1040  irq_dbg("ctr_status_irq, controller status: %s\n", 
     1041          hcd_status_to_str(reg->r_usb_status)); 
     1042   
     1043  /* FIXME: What should we do if we get ourun or perror? Dump the EP and SB 
     1044     list for the corresponding epid? */ 
     1045  if (reg->r_usb_status & IO_MASK(R_USB_STATUS, ourun)) { 
     1046    panic("USB controller got ourun."); 
     1047  } 
     1048  if (reg->r_usb_status & IO_MASK(R_USB_STATUS, perror)) { 
     1049     
     1050    /* Before, etrax_usb_do_intr_recover was called on this epid if it was 
     1051       an interrupt pipe. I don't see how re-enabling all EP descriptors 
     1052       will help if there was a programming error. */ 
     1053    panic("USB controller got perror."); 
     1054  } 
     1055 
      1056  /* Keep track of whether the USB controller is running or not */ 
     1057  if(reg->r_usb_status & IO_STATE(R_USB_STATUS, running, yes)) { 
     1058    crisv10_hcd->running = 1; 
     1059  } else { 
     1060    crisv10_hcd->running = 0; 
     1061  } 
     1062   
     1063  if (reg->r_usb_status & IO_MASK(R_USB_STATUS, device_mode)) { 
     1064    /* We should never operate in device mode. */ 
     1065    panic("USB controller in device mode."); 
     1066  } 
     1067 
     1068  /* Set the flag to avoid getting "Unlink after no-IRQ? Controller is probably 
     1069     using the wrong IRQ" from hcd_unlink_urb() in drivers/usb/core/hcd.c */ 
     1070  set_bit(HCD_FLAG_SAW_IRQ, &reg->hcd->flags); 
     1071   
     1072  DBFEXIT; 
     1073} 
     1074 
     1075 
     1076/******************************************************************/ 
     1077/* Host Controller interface functions                            */ 
     1078/******************************************************************/ 
     1079 
     1080static inline void crisv10_ready_wait(void) { 
     1081  volatile int timeout = 10000; 
     1082  /* Check the busy bit of USB controller in Etrax */ 
     1083  while((*R_USB_COMMAND & IO_MASK(R_USB_COMMAND, busy)) && 
     1084        (timeout-- > 0)); 
      1085  if(timeout <= 0) { 
     1086    warn("Timeout while waiting for USB controller to be idle\n"); 
     1087  } 
     1088} 
     1089 
     1090/* reset host controller */ 
     1091static int crisv10_hcd_reset(struct usb_hcd *hcd) 
     1092{ 
     1093  DBFENTER; 
     1094  hcd_dbg(hcd, "reset\n"); 
     1095 
     1096 
     1097  /* Reset the USB interface. */ 
     1098  /* 
     1099  *R_USB_COMMAND = 
     1100    IO_STATE(R_USB_COMMAND, port_sel, nop) | 
     1101    IO_STATE(R_USB_COMMAND, port_cmd, reset) | 
     1102    IO_STATE(R_USB_COMMAND, ctrl_cmd, reset); 
     1103  nop(); 
     1104  */ 
     1105  DBFEXIT; 
     1106  return 0; 
     1107} 
     1108 
     1109/* start host controller */ 
     1110static int crisv10_hcd_start(struct usb_hcd *hcd) 
     1111{ 
     1112  DBFENTER; 
     1113  hcd_dbg(hcd, "start\n"); 
     1114 
     1115  crisv10_ready_wait(); 
     1116 
     1117  /* Start processing of USB traffic. */ 
     1118  *R_USB_COMMAND = 
     1119    IO_STATE(R_USB_COMMAND, port_sel, nop) | 
     1120    IO_STATE(R_USB_COMMAND, port_cmd, reset) | 
     1121    IO_STATE(R_USB_COMMAND, ctrl_cmd, host_run); 
     1122 
     1123  nop(); 
     1124 
     1125  hcd->state = HC_STATE_RUNNING; 
     1126 
     1127  DBFEXIT; 
     1128  return 0; 
     1129} 
     1130 
     1131/* stop host controller */ 
     1132static void crisv10_hcd_stop(struct usb_hcd *hcd) 
     1133{ 
     1134  DBFENTER; 
     1135  hcd_dbg(hcd, "stop\n"); 
     1136  crisv10_hcd_reset(hcd); 
     1137  DBFEXIT; 
     1138} 
     1139 
     1140/* return the current frame number */ 
     1141static int crisv10_hcd_get_frame(struct usb_hcd *hcd) 
     1142{ 
     1143  DBFENTER; 
     1144  DBFEXIT; 
     1145  return (*R_USB_FM_NUMBER & 0x7ff); 
     1146} 
     1147 
     1148#ifdef  CONFIG_USB_OTG 
     1149 
     1150static int crisv10_hcd_start_port_reset(struct usb_hcd *hcd, unsigned port) 
     1151{ 
     1152  return 0; /* no-op for now */ 
     1153} 
     1154 
     1155#endif /* CONFIG_USB_OTG */ 
     1156 
     1157 
     1158/******************************************************************/ 
     1159/* Root Hub functions                                             */ 
     1160/******************************************************************/ 
     1161 
     1162/* root hub status */ 
     1163static const struct usb_hub_status rh_hub_status =  
     1164  { 
     1165    .wHubStatus =               0, 
     1166    .wHubChange =               0, 
     1167  }; 
     1168 
     1169/* root hub descriptor */ 
     1170static const u8 rh_hub_descr[] = 
     1171  { 
     1172    0x09,                       /* bDescLength         */ 
     1173    0x29,                       /* bDescriptorType     */ 
     1174    USB_ROOT_HUB_PORTS,         /* bNbrPorts           */ 
     1175    0x00,                       /* wHubCharacteristics */ 
     1176    0x00,                 
     1177    0x01,                       /* bPwrOn2pwrGood      */ 
     1178    0x00,                       /* bHubContrCurrent    */ 
     1179    0x00,                       /* DeviceRemovable     */ 
     1180    0xff                        /* PortPwrCtrlMask     */ 
     1181  }; 
     1182 
      1183/* Actual holder of root hub status */ 
     1184struct crisv10_rh rh; 
     1185 
     1186/* Initialize root hub data structures (called from dvdrv_hcd_probe()) */ 
     1187int rh_init(void) { 
     1188  int i; 
     1189  /* Reset port status flags */ 
     1190  for (i = 0; i < USB_ROOT_HUB_PORTS; i++) { 
     1191    rh.wPortChange[i] = 0; 
     1192    rh.wPortStatusPrev[i] = 0; 
     1193  } 
     1194  return 0; 
     1195} 
     1196 
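          /* Port status bits whose changes the root-hub code tracks and reports. */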
     1197#define RH_FEAT_MASK ((1<<USB_PORT_FEAT_CONNECTION)|\ 
     1198                      (1<<USB_PORT_FEAT_ENABLE)|\ 
     1199                      (1<<USB_PORT_FEAT_SUSPEND)|\ 
     1200                      (1<<USB_PORT_FEAT_RESET)) 
     1201 
      1202/* Handle port status change interrupt (called from the bottom-half interrupt) */ 
     1203void rh_port_status_change(__u16 port_reg[]) { 
     1204  int i; 
     1205  __u16 wChange; 
     1206 
     1207  for(i = 0; i < USB_ROOT_HUB_PORTS; i++) { 
     1208    /* Xor out changes since last read, masked for important flags */ 
     1209    wChange = (port_reg[i] & RH_FEAT_MASK) ^ rh.wPortStatusPrev[i]; 
     1210    /* Or changes together with (if any) saved changes */ 
     1211    rh.wPortChange[i] |= wChange; 
     1212    /* Save new status */ 
     1213    rh.wPortStatusPrev[i] = port_reg[i]; 
     1214 
     1215    if(wChange) { 
     1216      rh_dbg("Interrupt port_status change port%d: %s  Current-status:%s\n", i+1, 
     1217             port_status_to_str(wChange), 
     1218             port_status_to_str(port_reg[i])); 
     1219    } 
     1220  } 
     1221} 
     1222 
     1223/* Construct port status change bitmap for the root hub */ 
     1224static int rh_status_data_request(struct usb_hcd *hcd, char *buf) 
     1225{ 
     1226  struct crisv10_hcd* crisv10_hcd = hcd_to_crisv10_hcd(hcd); 
     1227  unsigned int i; 
     1228 
     1229  DBFENTER; 
     1230  /* 
     1231   * corresponds to hub status change EP (USB 2.0 spec section 11.13.4) 
     1232   * return bitmap indicating ports with status change 
     1233   */ 
     1234  *buf = 0; 
     1235  spin_lock(&crisv10_hcd->lock); 
     1236  for (i = 1; i <= crisv10_hcd->num_ports; i++) { 
     1237    if (rh.wPortChange[map_port(i)]) { 
     1238      *buf |= (1 << i); 
     1239      rh_dbg("rh_status_data_request, change on port %d: %s  Current Status: %s\n", i, 
     1240             port_status_to_str(rh.wPortChange[map_port(i)]), 
     1241             port_status_to_str(rh.wPortStatusPrev[map_port(i)])); 
     1242    } 
     1243  } 
     1244  spin_unlock(&crisv10_hcd->lock); 
     1245  DBFEXIT; 
     1246  return *buf == 0 ? 0 : 1; 
     1247} 
     1248 
     1249/* Handle a control request for the root hub (called from hcd_driver) */ 
     1250static int rh_control_request(struct usb_hcd *hcd,  
     1251                              u16 typeReq,  
     1252                              u16 wValue,  
     1253                              u16 wIndex, 
     1254                              char *buf,  
     1255                              u16 wLength) { 
     1256 
     1257  struct crisv10_hcd *crisv10_hcd = hcd_to_crisv10_hcd(hcd); 
     1258  int retval = 0; 
     1259  int len; 
     1260  DBFENTER; 
     1261 
     1262  switch (typeReq) { 
     1263  case GetHubDescriptor: 
     1264    rh_dbg("GetHubDescriptor\n"); 
     1265    len = min_t(unsigned int, sizeof rh_hub_descr, wLength); 
     1266    memcpy(buf, rh_hub_descr, len); 
     1267    buf[2] = crisv10_hcd->num_ports; 
     1268    break; 
     1269  case GetHubStatus: 
     1270    rh_dbg("GetHubStatus\n"); 
     1271    len = min_t(unsigned int, sizeof rh_hub_status, wLength); 
     1272    memcpy(buf, &rh_hub_status, len); 
     1273    break; 
     1274  case GetPortStatus: 
     1275    if (!wIndex || wIndex > crisv10_hcd->num_ports) 
     1276      goto error; 
      1277    rh_dbg("GetPortStatus, port:%d change:%s  status:%s\n", wIndex, 
     1278           port_status_to_str(rh.wPortChange[map_port(wIndex)]), 
     1279           port_status_to_str(rh.wPortStatusPrev[map_port(wIndex)])); 
     1280    *(u16 *) buf = cpu_to_le16(rh.wPortStatusPrev[map_port(wIndex)]); 
     1281    *(u16 *) (buf + 2) = cpu_to_le16(rh.wPortChange[map_port(wIndex)]); 
     1282    break; 
     1283  case SetHubFeature: 
     1284    rh_dbg("SetHubFeature\n"); 
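              /* fall through: SetHubFeature and ClearHubFeature share the wValue handling below */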
     1285  case ClearHubFeature: 
     1286    rh_dbg("ClearHubFeature\n"); 
     1287    switch (wValue) { 
     1288    case C_HUB_OVER_CURRENT: 
     1289    case C_HUB_LOCAL_POWER: 
     1290      rh_warn("Not implemented hub request:%d \n", typeReq); 
     1291      /* not implemented */ 
     1292      break; 
     1293    default: 
     1294      goto error; 
     1295    } 
     1296    break; 
     1297  case SetPortFeature: 
     1298    if (!wIndex || wIndex > crisv10_hcd->num_ports) 
     1299      goto error; 
     1300    if(rh_set_port_feature(map_port(wIndex), wValue)) 
     1301      goto error; 
     1302    break; 
     1303  case ClearPortFeature: 
     1304    if (!wIndex || wIndex > crisv10_hcd->num_ports) 
     1305      goto error; 
     1306    if(rh_clear_port_feature(map_port(wIndex), wValue)) 
     1307      goto error; 
     1308    break; 
     1309  default: 
     1310    rh_warn("Unknown hub request: %d\n", typeReq); 
     1311  error: 
     1312    retval = -EPIPE; 
     1313  } 
     1314  DBFEXIT; 
     1315  return retval; 
     1316} 
     1317 
     1318int rh_set_port_feature(__u8 bPort, __u16 wFeature) { 
     1319  __u8 bUsbCommand = 0; 
     1320  __u8 reset_cnt; 
     1321  switch(wFeature) { 
     1322  case USB_PORT_FEAT_RESET: 
     1323    rh_dbg("SetPortFeature: reset\n"); 
     1324 
     1325    if (rh.wPortStatusPrev[bPort] & 
     1326        IO_STATE(R_USB_RH_PORT_STATUS_1, enabled, yes)) 
     1327    { 
     1328      __u8 restart_controller = 0; 
     1329 
     1330      if ( (rh.wPortStatusPrev[0] & 
     1331            IO_STATE(R_USB_RH_PORT_STATUS_1, enabled, yes)) && 
     1332           (rh.wPortStatusPrev[1] & 
     1333            IO_STATE(R_USB_RH_PORT_STATUS_2, enabled, yes)) ) 
     1334      { 
      1335        /* Both ports are enabled. The USB controller will not change state. */ 
     1336        restart_controller = 0; 
     1337      } 
     1338      else 
     1339      { 
      1340        /* Only one port is enabled. The USB controller will change state and 
     1341           must be restarted. */ 
     1342        restart_controller = 1; 
     1343      } 
     1344      /* 
     1345        In ETRAX 100LX it's not possible to reset an enabled root hub port. 
     1346        The workaround is to disable and enable the port before resetting it. 
     1347        Disabling the port can, if both ports are disabled at once, cause the 
      1348        USB controller to change to the HOST_MODE state.  
     1349        The USB controller state transition causes a lot of unwanted 
     1350        interrupts that must be avoided. 
     1351        Disabling the USB controller status and port status interrupts before 
     1352        disabling/resetting the port stops these interrupts. 
     1353 
     1354        These actions are performed: 
     1355        1. Disable USB controller status and port status interrupts. 
     1356        2. Disable the port 
     1357        3. Wait for the port to be disabled. 
     1358        4. Enable the port. 
     1359        5. Wait for the port to be enabled. 
     1360        6. Reset the port. 
      1361        7. Wait for the reset to end. 
     1362        8. Wait for the USB controller entering started state. 
     1363        9. Order the USB controller to running state. 
     1364        10. Wait for the USB controller reaching running state. 
     1365        11. Clear all interrupts generated during the disable/enable/reset 
     1366            procedure. 
     1367        12. Enable the USB controller status and port status interrupts. 
     1368      */ 
     1369 
     1370      /* 1. Disable USB controller status and USB port status interrupts. */ 
     1371      *R_USB_IRQ_MASK_CLR = IO_STATE(R_USB_IRQ_MASK_CLR, ctl_status, clr); 
     1372      __asm__ __volatile__ ("  nop"); 
     1373      *R_USB_IRQ_MASK_CLR = IO_STATE(R_USB_IRQ_MASK_CLR, port_status, clr); 
     1374      __asm__ __volatile__ ("  nop"); 
     1375       
     1376      { 
     1377 
      1378        /* Since a root hub port reset shall be 50 ms and the ETRAX 100LX 
     1379           root hub port reset is 10 ms we must perform 5 port resets to 
     1380           achieve a proper root hub port reset. */ 
     1381        for (reset_cnt = 0; reset_cnt < 5; reset_cnt ++) 
     1382        { 
     1383          rh_dbg("Disable Port %d\n", bPort + 1); 
     1384 
     1385          /* 2. Disable the port*/ 
     1386          if (bPort == 0) 
     1387          { 
     1388            *R_USB_PORT1_DISABLE = IO_STATE(R_USB_PORT1_DISABLE, disable, yes); 
     1389          } 
     1390          else 
     1391          { 
     1392            *R_USB_PORT2_DISABLE = IO_STATE(R_USB_PORT2_DISABLE, disable, yes); 
     1393          } 
     1394 
     1395          /* 3. Wait for the port to be disabled. */ 
     1396          while ( (bPort == 0) ? 
     1397                  *R_USB_RH_PORT_STATUS_1 & 
     1398                    IO_STATE(R_USB_RH_PORT_STATUS_1, enabled, yes) : 
     1399                  *R_USB_RH_PORT_STATUS_2 & 
     1400                    IO_STATE(R_USB_RH_PORT_STATUS_2, enabled, yes) ) {} 
     1401 
     1402          rh_dbg("Port %d is disabled. Enable it!\n", bPort + 1); 
     1403 
     1404          /* 4. Enable the port. */ 
     1405          if (bPort == 0) 
     1406          { 
     1407            *R_USB_PORT1_DISABLE = IO_STATE(R_USB_PORT1_DISABLE, disable, no); 
     1408          } 
     1409          else 
     1410          { 
     1411            *R_USB_PORT2_DISABLE = IO_STATE(R_USB_PORT2_DISABLE, disable, no); 
     1412          } 
     1413 
     1414          /* 5. Wait for the port to be enabled again.  */ 
     1415          while (!( (bPort == 0) ? 
     1416                    *R_USB_RH_PORT_STATUS_1 & 
     1417                      IO_STATE(R_USB_RH_PORT_STATUS_1, connected, yes) : 
     1418                    *R_USB_RH_PORT_STATUS_2 & 
     1419                      IO_STATE(R_USB_RH_PORT_STATUS_2, connected, yes) ) ) {} 
     1420 
     1421          rh_dbg("Port %d is enabled.\n", bPort + 1); 
     1422 
     1423          /* 6. Reset the port */ 
     1424          crisv10_ready_wait(); 
     1425          *R_USB_COMMAND = 
     1426            ( (bPort == 0) ? 
     1427              IO_STATE(R_USB_COMMAND, port_sel, port1): 
     1428              IO_STATE(R_USB_COMMAND, port_sel, port2) ) | 
     1429            IO_STATE(R_USB_COMMAND, port_cmd, reset) | 
     1430            IO_STATE(R_USB_COMMAND, busy,     no) | 
     1431            IO_STATE(R_USB_COMMAND, ctrl_cmd, nop); 
     1432          rh_dbg("Port %d is resetting.\n", bPort + 1); 
     1433 
     1434          /* 7. The USB specification says that we should wait for at least 
      1435             10 ms for device recovery. */ 
     1436          udelay(10500); /* 10,5ms blocking wait */ 
     1437     
     1438          crisv10_ready_wait(); 
     1439        } 
     1440      } 
     1441 
     1442 
     1443      /* Check if the USB controller needs to be restarted. */ 
     1444      if (restart_controller) 
     1445      { 
     1446        /* 8. Wait for the USB controller entering started state. */ 
     1447        while (!(*R_USB_STATUS & IO_STATE(R_USB_STATUS, started, yes))) {} 
     1448 
     1449        /* 9. Order the USB controller to running state. */ 
     1450        crisv10_ready_wait(); 
     1451        *R_USB_COMMAND = 
     1452          IO_STATE(R_USB_COMMAND, port_sel, nop) | 
     1453          IO_STATE(R_USB_COMMAND, port_cmd, reset) | 
     1454          IO_STATE(R_USB_COMMAND, busy,     no) | 
     1455          IO_STATE(R_USB_COMMAND, ctrl_cmd, host_run); 
     1456 
     1457        /* 10. Wait for the USB controller reaching running state. */ 
     1458        while (!(*R_USB_STATUS & IO_STATE(R_USB_STATUS, running, yes))) {} 
     1459      } 
     1460 
      1461      /* 11. Clear any controller or port status interrupts before enabling 
     1462             the interrupts. */ 
     1463      { 
     1464        u16 dummy; 
     1465 
     1466        /* Clear the port status interrupt of the reset port. */ 
     1467        if (bPort == 0) 
     1468        { 
     1469          rh_dbg("Clearing port 1 interrupts\n"); 
     1470          dummy = *R_USB_RH_PORT_STATUS_1; 
     1471        } 
     1472        else 
     1473        { 
     1474          rh_dbg("Clearing port 2 interrupts\n"); 
     1475          dummy = *R_USB_RH_PORT_STATUS_2; 
     1476        } 
     1477 
     1478        if (restart_controller) 
     1479        { 
      1480          /* The USB controller is restarted. Clear all interrupts. */ 
     1481          rh_dbg("Clearing all interrupts\n"); 
     1482          dummy = *R_USB_STATUS; 
     1483          dummy = *R_USB_RH_PORT_STATUS_1; 
     1484          dummy = *R_USB_RH_PORT_STATUS_2; 
     1485        } 
     1486      } 
     1487 
     1488      /* 12. Enable USB controller status and USB port status interrupts.  */ 
     1489      *R_USB_IRQ_MASK_SET = IO_STATE(R_USB_IRQ_MASK_SET, ctl_status, set); 
     1490      __asm__ __volatile__ ("  nop"); 
     1491      *R_USB_IRQ_MASK_SET = IO_STATE(R_USB_IRQ_MASK_SET, port_status, set); 
     1492      __asm__ __volatile__ ("  nop"); 
     1493 
     1494    } 
     1495    else 
     1496    { 
     1497 
     1498      bUsbCommand |= IO_STATE(R_USB_COMMAND, port_cmd, reset); 
     1499      /* Select which port via the port_sel field */ 
     1500      bUsbCommand |= IO_FIELD(R_USB_COMMAND, port_sel, bPort+1); 
     1501 
     1502      /* Make sure the controller isn't busy. */ 
     1503      crisv10_ready_wait(); 
     1504      /* Send out the actual command to the USB controller */ 
     1505      *R_USB_COMMAND = bUsbCommand; 
     1506 
     1507      /* Wait a while for controller to first become started after port reset */ 
     1508      udelay(12000); /* 12ms blocking wait */ 
     1509       
     1510      /* Make sure the controller isn't busy. */ 
     1511      crisv10_ready_wait(); 
     1512 
     1513      /* If all enabled ports were disabled the host controller goes down into 
     1514         started mode, so we need to bring it back into the running state. 
     1515         (This is safe even if it's already in the running state.) */ 
     1516      *R_USB_COMMAND = 
     1517        IO_STATE(R_USB_COMMAND, port_sel, nop) | 
     1518        IO_STATE(R_USB_COMMAND, port_cmd, reset) | 
     1519        IO_STATE(R_USB_COMMAND, ctrl_cmd, host_run); 
     1520    } 
     1521 
     1522    break; 
     1523  case USB_PORT_FEAT_SUSPEND: 
     1524    rh_dbg("SetPortFeature: suspend\n"); 
     1525    bUsbCommand |= IO_STATE(R_USB_COMMAND, port_cmd, suspend); 
     1526    goto set; 
     1527    break; 
     1528  case USB_PORT_FEAT_POWER: 
     1529    rh_dbg("SetPortFeature: power\n"); 
     1530    break; 
     1531  case USB_PORT_FEAT_C_CONNECTION: 
     1532    rh_dbg("SetPortFeature: c_connection\n"); 
     1533    break; 
     1534  case USB_PORT_FEAT_C_RESET: 
     1535    rh_dbg("SetPortFeature: c_reset\n"); 
     1536    break; 
     1537  case USB_PORT_FEAT_C_OVER_CURRENT: 
     1538    rh_dbg("SetPortFeature: c_over_current\n"); 
     1539    break; 
     1540 
     1541  set: 
     1542    /* Select which port via the port_sel field */ 
     1543    bUsbCommand |= IO_FIELD(R_USB_COMMAND, port_sel, bPort+1); 
     1544 
     1545    /* Make sure the controller isn't busy. */ 
     1546    crisv10_ready_wait(); 
     1547    /* Send out the actual command to the USB controller */ 
     1548    *R_USB_COMMAND = bUsbCommand; 
     1549    break; 
     1550  default: 
     1551    rh_dbg("SetPortFeature: unknown feature\n"); 
     1552    return -1; 
     1553  } 
     1554  return 0; 
     1555} 
     1556 
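          /* Handle ClearPortFeature requests. Change bits (the C_xxx features) are
             cleared in the local wPortChange cache rather than in hardware. */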
     1557int rh_clear_port_feature(__u8 bPort, __u16 wFeature) { 
     1558  switch(wFeature) { 
     1559  case USB_PORT_FEAT_ENABLE: 
     1560    rh_dbg("ClearPortFeature: enable\n"); 
     1561    rh_disable_port(bPort); 
     1562    break; 
     1563  case USB_PORT_FEAT_SUSPEND: 
     1564    rh_dbg("ClearPortFeature: suspend\n"); 
     1565    break; 
     1566  case USB_PORT_FEAT_POWER: 
     1567    rh_dbg("ClearPortFeature: power\n"); 
     1568    break; 
     1569 
     1570  case USB_PORT_FEAT_C_ENABLE: 
     1571    rh_dbg("ClearPortFeature: c_enable\n"); 
     1572    goto clear; 
     1573  case USB_PORT_FEAT_C_SUSPEND: 
     1574    rh_dbg("ClearPortFeature: c_suspend\n"); 
     1575    goto clear; 
     1576  case USB_PORT_FEAT_C_CONNECTION: 
     1577    rh_dbg("ClearPortFeature: c_connection\n"); 
     1578    goto clear; 
     1579  case USB_PORT_FEAT_C_OVER_CURRENT: 
     1580    rh_dbg("ClearPortFeature: c_over_current\n"); 
     1581    goto clear; 
     1582  case USB_PORT_FEAT_C_RESET: 
     1583    rh_dbg("ClearPortFeature: c_reset\n"); 
     1584    goto clear; 
     1585  clear: 
     1586    rh.wPortChange[bPort] &= ~(1 << (wFeature - 16)); 
     1587    break; 
     1588  default: 
     1589    rh_dbg("ClearPortFeature: unknown feature\n"); 
     1590    return -1; 
     1591  } 
     1592  return 0; 
     1593} 
     1594 
     1595 
     1596#ifdef  CONFIG_PM 
     1597/* Handle a suspend request for the root hub (called from hcd_driver) */ 
     1598static int rh_suspend_request(struct usb_hcd *hcd) 
     1599{ 
     1600  return 0; /* no-op for now */ 
     1601} 
     1602 
     1603/* Handle a resume request for the root hub (called from hcd_driver) */ 
     1604static int rh_resume_request(struct usb_hcd *hcd) 
     1605{ 
     1606  return 0; /* no-op for now */ 
     1607} 
     1608#endif /* CONFIG_PM */ 
     1609 
     1610 
     1611 
      1612/* Wrapper function for the workaround port-disable registers in the USB controller */ 
     1613static void rh_disable_port(unsigned int port) { 
     1614  volatile int timeout = 10000; 
     1615  volatile char* usb_portx_disable; 
     1616  switch(port) { 
     1617  case 0: 
     1618    usb_portx_disable = R_USB_PORT1_DISABLE; 
     1619    break; 
     1620  case 1: 
     1621    usb_portx_disable = R_USB_PORT2_DISABLE; 
     1622    break; 
     1623  default: 
     1624    /* Invalid port index */ 
     1625    return; 
     1626  } 
     1627  /* Set disable flag in special register  */ 
     1628  *usb_portx_disable = IO_STATE(R_USB_PORT1_DISABLE, disable, yes); 
     1629  /* Wait until not enabled anymore */ 
     1630  while((rh.wPortStatusPrev[port] & 
     1631        IO_STATE(R_USB_RH_PORT_STATUS_1, enabled, yes)) && 
     1632        (timeout-- > 0)); 
      1633  if(timeout <= 0) { 
     1634    warn("Timeout while waiting for port %d to become disabled\n", port); 
     1635  } 
     1636  /* clear disable flag in special register  */ 
     1637  *usb_portx_disable = IO_STATE(R_USB_PORT1_DISABLE, disable, no); 
     1638  rh_info("Physical port %d disabled\n", port+1); 
     1639} 
     1640 
     1641 
     1642/******************************************************************/ 
     1643/* Transfer Controller (TC) functions                             */ 
     1644/******************************************************************/ 
     1645 
     1646/* FIXME: Should RX_BUF_SIZE be a config option, or maybe we should adjust it 
     1647   dynamically? 
     1648   To adjust it dynamically we would have to get an interrupt when we reach 
     1649   the end of the rx descriptor list, or when we get close to the end, and 
     1650   then allocate more descriptors. */ 
    2131651#define NBR_OF_RX_DESC     512 
    2141652#define RX_DESC_BUF_SIZE   1024 
    2151653#define RX_BUF_SIZE        (NBR_OF_RX_DESC * RX_DESC_BUF_SIZE) 
    2161654 
    217 /* The number of epids is, among other things, used for pre-allocating 
    218    ctrl, bulk and isoc EP descriptors (one for each epid). 
    219    Assumed to be > 1 when initiating the DMA lists. */ 
    220 #define NBR_OF_EPIDS       32 
    221  
    222 /* Support interrupt traffic intervals up to 128 ms. */ 
    223 #define MAX_INTR_INTERVAL 128 
    224  
    225 /* If periodic traffic (intr or isoc) is to be used, then one entry in the EP table 
    226    must be "invalid". By this we mean that we shouldn't care about epid attentions 
    227    for this epid, or at least handle them differently from epid attentions for "valid" 
    228    epids. This define determines which one to use (don't change it). */ 
    229 #define INVALID_EPID     31 
    230 /* A special epid for the bulk dummys. */ 
    231 #define DUMMY_EPID       30 
    232  
    233 /* This is just a software cache for the valid entries in R_USB_EPT_DATA. */ 
    234 static __u32 epid_usage_bitmask; 
    235  
    236 /* A bitfield to keep information on in/out traffic is needed to uniquely identify 
    237    an endpoint on a device, since the most significant bit which indicates traffic 
    238    direction is lacking in the ep_id field (ETRAX epids can handle both in and 
    239    out traffic on endpoints that are otherwise identical). The USB framework, however, 
    240    relies on them to be handled separately.  For example, bulk IN and OUT urbs cannot 
    241    be queued in the same list, since they would block each other. */ 
    242 static __u32 epid_out_traffic; 
    243  
    244 /* DMA IN cache bug. Align the DMA IN buffers to 32 bytes, i.e. a cache line. 
    245    Since RX_DESC_BUF_SIZE is 1024 is a multiple of 32, all rx buffers will be cache aligned. */ 
    246 static volatile unsigned char RxBuf[RX_BUF_SIZE] __attribute__ ((aligned (32))); 
    247 static volatile USB_IN_Desc_t RxDescList[NBR_OF_RX_DESC] __attribute__ ((aligned (4))); 
    248  
    249 /* Pointers into RxDescList. */ 
    250 static volatile USB_IN_Desc_t *myNextRxDesc; 
    251 static volatile USB_IN_Desc_t *myLastRxDesc; 
    252 static volatile USB_IN_Desc_t *myPrevRxDesc; 
    253  
    254 /* EP descriptors must be 32-bit aligned. */ 
    255 static volatile USB_EP_Desc_t TxCtrlEPList[NBR_OF_EPIDS] __attribute__ ((aligned (4))); 
    256 static volatile USB_EP_Desc_t TxBulkEPList[NBR_OF_EPIDS] __attribute__ ((aligned (4))); 
    257 /* After each enabled bulk EP (IN or OUT) we put two disabled EP descriptors with the eol flag set, 
    258    causing the DMA to stop the DMA channel. The first of these two has the intr flag set, which 
    259    gives us a dma8_sub0_descr interrupt. When we receive this, we advance the DMA one step in the 
    260    EP list and then restart the bulk channel, thus forcing a switch between bulk EP descriptors 
    261    in each frame. */ 
    262 static volatile USB_EP_Desc_t TxBulkDummyEPList[NBR_OF_EPIDS][2] __attribute__ ((aligned (4))); 
    263  
    264 static volatile USB_EP_Desc_t TxIsocEPList[NBR_OF_EPIDS] __attribute__ ((aligned (4))); 
    265 static volatile USB_SB_Desc_t TxIsocSB_zout __attribute__ ((aligned (4))); 
    266  
    267 static volatile USB_EP_Desc_t TxIntrEPList[MAX_INTR_INTERVAL] __attribute__ ((aligned (4))); 
    268 static volatile USB_SB_Desc_t TxIntrSB_zout __attribute__ ((aligned (4))); 
    269  
    270 /* A zout transfer makes a memory access at the address of its buf pointer, which means that setting 
    271    this buf pointer to 0 will cause an access to the flash. In addition to this, setting sw_len to 0 
    272    results in a 16/32 bytes (depending on DMA burst size) transfer. Instead, we set it to 1, and point 
    273    it to this buffer. */ 
    274 static int zout_buffer[4] __attribute__ ((aligned (4))); 
    275  
    276 /* Cache for allocating new EP and SB descriptors. */ 
    277 static struct kmem_cache *usb_desc_cache; 
    278  
    279 /* Cache for the registers allocated in the top half. */ 
    280 static struct kmem_cache *top_half_reg_cache; 
    281  
    282 /* Cache for the data allocated in the isoc descr top half. */ 
    283 static struct kmem_cache *isoc_compl_cache; 
    284  
    285 static struct usb_bus *etrax_usb_bus; 
     1655 
     1656/* Local variables for Transfer Controller */ 
     1657/* --------------------------------------- */ 
    2861658 
    2871659/* This is a circular (double-linked) list of the active urbs for each epid. 
    2881660   The head is never removed, and new urbs are linked onto the list as 
    2891661   urb_entry_t elements. Don't reference urb_list directly; use the wrapper 
    290    functions instead. Note that working with these lists might require spinlock 
    291    protection. */ 
      1662   functions instead (which include the required spin locks). */ 
    2921663static struct list_head urb_list[NBR_OF_EPIDS]; 
    2931664 
    2941665/* Read about the need and usage of this lock in submit_ctrl_urb. */ 
     1666/* Lock for URB lists for each EPID */ 
    2951667static spinlock_t urb_list_lock; 
    2961668 
    297 /* Used when unlinking asynchronously. */ 
    298 static struct list_head urb_unlink_list; 
    299  
    300 /* for returning string descriptors in UTF-16LE */ 
    301 static int ascii2utf (char *ascii, __u8 *utf, int utfmax) 
     1669/* Lock for EPID array register (R_USB_EPT_x) in Etrax */ 
     1670static spinlock_t etrax_epid_lock; 
     1671 
     1672/* Lock for dma8 sub0 handling */ 
     1673static spinlock_t etrax_dma8_sub0_lock; 
     1674 
     1675/* DMA IN cache bug. Align the DMA IN buffers to 32 bytes, i.e. a cache line. 
      1676   Since RX_DESC_BUF_SIZE (1024) is a multiple of 32, all rx buffers will be 
     1677   cache aligned. */ 
     1678static volatile unsigned char RxBuf[RX_BUF_SIZE] __attribute__ ((aligned (32))); 
     1679static volatile struct USB_IN_Desc RxDescList[NBR_OF_RX_DESC] __attribute__ ((aligned (4))); 
     1680 
     1681/* Pointers into RxDescList. */ 
     1682static volatile struct USB_IN_Desc *myNextRxDesc; 
     1683static volatile struct USB_IN_Desc *myLastRxDesc; 
     1684 
     1685/* A zout transfer makes a memory access at the address of its buf pointer, 
     1686   which means that setting this buf pointer to 0 will cause an access to the 
     1687   flash. In addition to this, setting sw_len to 0 results in a 16/32 bytes 
     1688   (depending on DMA burst size) transfer. 
     1689   Instead, we set it to 1, and point it to this buffer. */ 
     1690static int zout_buffer[4] __attribute__ ((aligned (4))); 
     1691 
     1692/* Cache for allocating new EP and SB descriptors. */ 
     1693static struct kmem_cache *usb_desc_cache; 
     1694 
     1695/* Cache for the data allocated in the isoc descr top half. */ 
     1696static struct kmem_cache *isoc_compl_cache; 
     1697 
     1698/* Cache for the data allocated when delayed finishing of URBs */ 
     1699static struct kmem_cache *later_data_cache; 
     1700 
     1701 
      1702/* Counter to keep track of how many Isoc EPs we have set up. Used to enable 
      1703   and disable the iso_eof interrupt. We only need this interrupt when we have 
      1704   Isoc data endpoints (it consumes CPU cycles). 
      1705   FIXME: This could be more fine-grained, so the interrupt is only enabled 
      1706   when an In Isoc URB without the URB_ISO_ASAP flag is queued. */ 
     1707static int isoc_epid_counter; 
     1708 
     1709/* Protecting wrapper functions for R_USB_EPT_x */ 
     1710/* -------------------------------------------- */ 
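          /* R_USB_EPT_DATA is accessed indirectly: R_USB_EPT_INDEX selects the epid and
             the following access hits R_USB_EPT_DATA for that epid. The spinlock keeps
             the index/data pair atomic and the nop() gives the index write time to take
             effect before the data register is touched. */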
     1711static inline void etrax_epid_set(__u8 index, __u32 data) { 
     1712  unsigned long flags; 
     1713  spin_lock_irqsave(&etrax_epid_lock, flags); 
     1714  *R_USB_EPT_INDEX = IO_FIELD(R_USB_EPT_INDEX, value, index); 
     1715  nop(); 
     1716  *R_USB_EPT_DATA = data; 
     1717  spin_unlock_irqrestore(&etrax_epid_lock, flags); 
     1718} 
     1719 
     1720static inline void etrax_epid_clear_error(__u8 index) { 
     1721  unsigned long flags; 
     1722  spin_lock_irqsave(&etrax_epid_lock, flags); 
     1723  *R_USB_EPT_INDEX = IO_FIELD(R_USB_EPT_INDEX, value, index); 
     1724  nop(); 
     1725  *R_USB_EPT_DATA &= 
     1726    ~(IO_MASK(R_USB_EPT_DATA, error_count_in) | 
     1727      IO_MASK(R_USB_EPT_DATA, error_count_out) | 
     1728      IO_MASK(R_USB_EPT_DATA, error_code)); 
     1729  spin_unlock_irqrestore(&etrax_epid_lock, flags); 
     1730} 
     1731 
     1732static inline void etrax_epid_set_toggle(__u8 index, __u8 dirout, 
     1733                                             __u8 toggle) { 
     1734  unsigned long flags; 
     1735  spin_lock_irqsave(&etrax_epid_lock, flags); 
     1736  *R_USB_EPT_INDEX = IO_FIELD(R_USB_EPT_INDEX, value, index); 
     1737  nop(); 
     1738  if(dirout) { 
     1739    *R_USB_EPT_DATA &= ~IO_MASK(R_USB_EPT_DATA, t_out); 
     1740    *R_USB_EPT_DATA |= IO_FIELD(R_USB_EPT_DATA, t_out, toggle); 
     1741  } else { 
     1742    *R_USB_EPT_DATA &= ~IO_MASK(R_USB_EPT_DATA, t_in); 
     1743    *R_USB_EPT_DATA |= IO_FIELD(R_USB_EPT_DATA, t_in, toggle); 
     1744  } 
     1745  spin_unlock_irqrestore(&etrax_epid_lock, flags); 
     1746} 
     1747 
     1748static inline __u8 etrax_epid_get_toggle(__u8 index, __u8 dirout) { 
     1749  unsigned long flags; 
     1750  __u8 toggle; 
     1751  spin_lock_irqsave(&etrax_epid_lock, flags); 
     1752  *R_USB_EPT_INDEX = IO_FIELD(R_USB_EPT_INDEX, value, index); 
     1753  nop(); 
     1754  if (dirout) { 
     1755    toggle = IO_EXTRACT(R_USB_EPT_DATA, t_out, *R_USB_EPT_DATA); 
     1756  } else { 
     1757    toggle = IO_EXTRACT(R_USB_EPT_DATA, t_in, *R_USB_EPT_DATA); 
     1758  } 
     1759  spin_unlock_irqrestore(&etrax_epid_lock, flags); 
     1760  return toggle; 
     1761} 
     1762 
     1763 
     1764static inline __u32 etrax_epid_get(__u8 index) { 
     1765  unsigned long flags; 
     1766  __u32 data; 
     1767  spin_lock_irqsave(&etrax_epid_lock, flags); 
     1768  *R_USB_EPT_INDEX = IO_FIELD(R_USB_EPT_INDEX, value, index); 
     1769  nop(); 
     1770  data = *R_USB_EPT_DATA; 
     1771  spin_unlock_irqrestore(&etrax_epid_lock, flags); 
     1772  return data; 
     1773} 
     1774 
     1775 
     1776 
     1777 
     1778/* Main functions for Transfer Controller */ 
     1779/* -------------------------------------- */ 
     1780 
     1781/* Init structs, memories and lists used by Transfer Controller */ 
     1782int tc_init(struct usb_hcd *hcd) { 
     1783  int i; 
     1784  /* Clear software state info for all epids */ 
     1785  memset(epid_state, 0, sizeof(struct etrax_epid) * NBR_OF_EPIDS); 
     1786 
     1787  /* Set Invalid and Dummy as being in use and disabled */ 
     1788  epid_state[INVALID_EPID].inuse = 1; 
     1789  epid_state[DUMMY_EPID].inuse = 1; 
     1790  epid_state[INVALID_EPID].disabled = 1; 
     1791  epid_state[DUMMY_EPID].disabled = 1; 
     1792 
     1793  /* Clear counter for how many Isoc epids we have sat up */ 
     1794  isoc_epid_counter = 0; 
     1795 
     1796  /* Initialize the urb list by initiating a head for each list. 
      1797     Also reset the list holding the active URB for each epid. */ 
     1798  for (i = 0; i < NBR_OF_EPIDS; i++) { 
     1799    INIT_LIST_HEAD(&urb_list[i]); 
     1800    activeUrbList[i] = NULL; 
     1801  } 
     1802 
     1803  /* Init lock for URB lists */ 
     1804  spin_lock_init(&urb_list_lock); 
     1805  /* Init lock for Etrax R_USB_EPT register */ 
     1806  spin_lock_init(&etrax_epid_lock); 
     1807  /* Init lock for Etrax dma8 sub0 handling */ 
     1808  spin_lock_init(&etrax_dma8_sub0_lock); 
     1809 
     1810  /* We use kmem_cache_* to make sure that all DMA desc. are dword aligned */ 
     1811 
     1812  /* Note that we specify sizeof(struct USB_EP_Desc) as the size, but also 
     1813     allocate SB descriptors from this cache. This is ok since 
     1814     sizeof(struct USB_EP_Desc) == sizeof(struct USB_SB_Desc). */ 
     1815  usb_desc_cache = kmem_cache_create("usb_desc_cache", 
     1816                                     sizeof(struct USB_EP_Desc), 0, 
     1817                                     SLAB_HWCACHE_ALIGN, 0); 
     1818  if(usb_desc_cache == NULL) { 
     1819    return -ENOMEM; 
     1820  } 
     1821 
     1822  /* Create slab cache for speedy allocation of memory for isoc bottom-half 
     1823     interrupt handling */ 
     1824  isoc_compl_cache = 
     1825    kmem_cache_create("isoc_compl_cache", 
     1826                      sizeof(struct crisv10_isoc_complete_data), 
     1827                      0, SLAB_HWCACHE_ALIGN, 0); 
     1828  if(isoc_compl_cache == NULL) { 
     1829    return -ENOMEM; 
     1830  } 
     1831 
     1832  /* Create slab cache for speedy allocation of memory for later URB finish 
     1833     struct */ 
     1834  later_data_cache = 
     1835    kmem_cache_create("later_data_cache", 
     1836                      sizeof(struct urb_later_data), 
     1837                      0, SLAB_HWCACHE_ALIGN, 0); 
     1838  if(later_data_cache == NULL) { 
     1839    return -ENOMEM; 
     1840  } 
     1841 
     1842 
     1843  /* Initiate the bulk start timer. */ 
     1844  init_timer(&bulk_start_timer); 
     1845  bulk_start_timer.expires = jiffies + BULK_START_TIMER_INTERVAL; 
     1846  bulk_start_timer.function = tc_bulk_start_timer_func; 
     1847  add_timer(&bulk_start_timer); 
     1848 
     1849 
     1850  /* Initiate the bulk eot timer. */ 
     1851  init_timer(&bulk_eot_timer); 
     1852  bulk_eot_timer.expires = jiffies + BULK_EOT_TIMER_INTERVAL; 
     1853  bulk_eot_timer.function = tc_bulk_eot_timer_func; 
     1854  bulk_eot_timer.data = (unsigned long)hcd; 
     1855  add_timer(&bulk_eot_timer); 
     1856 
     1857  return 0; 
     1858} 
     1859 
     1860/* Uninitialize all resources used by Transfer Controller */ 
     1861void tc_destroy(void) { 
     1862 
     1863  /* Destroy all slab cache */ 
     1864  kmem_cache_destroy(usb_desc_cache); 
     1865  kmem_cache_destroy(isoc_compl_cache); 
     1866  kmem_cache_destroy(later_data_cache); 
     1867 
     1868  /* Remove timers */ 
     1869  del_timer(&bulk_start_timer); 
     1870  del_timer(&bulk_eot_timer); 
     1871} 
     1872 
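          /* Restart the bulk DMA channel (ch8 sub0) if it has stopped: advance its EP
             pointer past any DUMMY_EPID descriptors and issue a new start command. */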
     1873static void restart_dma8_sub0(void) { 
     1874  unsigned long flags; 
     1875  spin_lock_irqsave(&etrax_dma8_sub0_lock, flags); 
     1876  /* Verify that the dma is not running */ 
     1877  if ((*R_DMA_CH8_SUB0_CMD & IO_MASK(R_DMA_CH8_SUB0_CMD, cmd)) == 0) { 
     1878    struct USB_EP_Desc *ep = (struct USB_EP_Desc *)phys_to_virt(*R_DMA_CH8_SUB0_EP); 
     1879    while (DUMMY_EPID == IO_EXTRACT(USB_EP_command, epid, ep->command)) { 
     1880      ep = (struct USB_EP_Desc *)phys_to_virt(ep->next); 
     1881    } 
     1882    /* Advance the DMA to the next EP descriptor that is not a DUMMY_EPID. */ 
     1883    *R_DMA_CH8_SUB0_EP = virt_to_phys(ep); 
     1884    /* Restart the DMA */ 
     1885    *R_DMA_CH8_SUB0_CMD = IO_STATE(R_DMA_CH8_SUB0_CMD, cmd, start); 
     1886  } 
     1887  spin_unlock_irqrestore(&etrax_dma8_sub0_lock, flags); 
     1888} 
     1889 
     1890/* queue an URB with the transfer controller (called from hcd_driver) */ 
     1891static int tc_urb_enqueue(struct usb_hcd *hcd,  
     1892                          struct urb *urb,  
     1893                          gfp_t mem_flags) { 
     1894  int epid; 
     1895  int retval; 
     1896  int bustime = 0; 
     1897  int maxpacket; 
     1898  unsigned long flags; 
     1899  struct crisv10_urb_priv *urb_priv; 
     1900  struct crisv10_hcd* crisv10_hcd = hcd_to_crisv10_hcd(hcd); 
     1901  DBFENTER; 
     1902 
     1903  if(!(crisv10_hcd->running)) { 
     1904    /* The USB Controller is not running, probably because no device is  
      1905       attached. No point in enqueueing URBs then. */ 
     1906    tc_warn("Rejected enqueueing of URB:0x%x because no dev attached\n", 
     1907            (unsigned int)urb); 
     1908    return -ENOENT; 
     1909  } 
     1910 
     1911  maxpacket = usb_maxpacket(urb->dev, urb->pipe, usb_pipeout(urb->pipe)); 
     1912  /* Special case check for In Isoc transfers. Specification states that each 
     1913     In Isoc transfer consists of one packet and therefore it should fit into 
     1914     the transfer-buffer of an URB. 
     1915     We do the check here to be sure (an invalid scenario can be produced with 
     1916     parameters to the usbtest suite) */ 
     1917  if(usb_pipeisoc(urb->pipe) && usb_pipein(urb->pipe) && 
     1918     (urb->transfer_buffer_length < maxpacket)) { 
     1919    tc_err("Submit In Isoc URB with buffer length:%d to pipe with maxpacketlen: %d\n", urb->transfer_buffer_length, maxpacket); 
     1920    return -EMSGSIZE; 
     1921  } 
     1922 
      1923  /* Check if there is an epid for the URB's destination; if not, this function 
      1924     sets one up. */ 
     1925  epid = tc_setup_epid(urb->ep, urb, mem_flags); 
     1926  if (epid < 0) { 
     1927    tc_err("Failed setup epid:%d for URB:0x%x\n", epid, (unsigned int)urb); 
     1928    DBFEXIT; 
     1929    return -ENOMEM; 
     1930  } 
     1931 
     1932  if(urb == activeUrbList[epid]) { 
      1933    tc_err("Resubmission of already active URB:0x%x\n", (unsigned int)urb); 
     1934    return -ENXIO; 
     1935  } 
     1936 
     1937  if(urb_list_entry(urb, epid)) { 
      1938    tc_err("Resubmission of already queued URB:0x%x\n", (unsigned int)urb); 
     1939    return -ENXIO; 
     1940  } 
     1941 
      1942  /* If we have actively flagged the endpoint as disabled then refuse submission */ 
     1943  if(epid_state[epid].disabled) { 
     1944    return -ENOENT; 
     1945  } 
     1946 
     1947  /* Allocate and init HC-private data for URB */ 
     1948  if(urb_priv_create(hcd, urb, epid, mem_flags) != 0) { 
     1949    DBFEXIT; 
     1950    return -ENOMEM; 
     1951  } 
     1952  urb_priv = urb->hcpriv; 
     1953 
     1954  /* Check if there is enough bandwidth for periodic transfer  */ 
     1955  if(usb_pipeint(urb->pipe) || usb_pipeisoc(urb->pipe)) { 
     1956    /* only check (and later claim) if not already claimed */ 
     1957    if (urb_priv->bandwidth == 0) { 
     1958      bustime = crisv10_usb_check_bandwidth(urb->dev, urb); 
     1959      if (bustime < 0) { 
     1960        tc_err("Not enough periodic bandwidth\n"); 
     1961        urb_priv_free(hcd, urb); 
     1962        DBFEXIT; 
     1963        return -ENOSPC; 
     1964      } 
     1965    } 
     1966  } 
     1967 
     1968  tc_dbg("Enqueue URB:0x%x[%d] epid:%d (%s) bufflen:%d\n", 
     1969         (unsigned int)urb, urb_priv->urb_num, epid, 
     1970         pipe_to_str(urb->pipe), urb->transfer_buffer_length); 
     1971 
     1972  /* Create and link SBs required for this URB */ 
     1973  retval = create_sb_for_urb(urb, mem_flags); 
     1974  if(retval != 0) { 
     1975    tc_err("Failed to create SBs for URB:0x%x[%d]\n", (unsigned int)urb, 
     1976           urb_priv->urb_num); 
     1977    urb_priv_free(hcd, urb); 
     1978    DBFEXIT; 
     1979    return retval; 
     1980  } 
     1981 
      1982  /* Init intr EP pool if this URB is an INTR transfer. This pool is later 
     1983     used when inserting EPs in the TxIntrEPList. We do the alloc here 
     1984     so we can't run out of memory later */ 
     1985  if(usb_pipeint(urb->pipe)) { 
     1986    retval = init_intr_urb(urb, mem_flags); 
     1987    if(retval != 0) { 
     1988      tc_warn("Failed to init Intr URB\n"); 
     1989      urb_priv_free(hcd, urb); 
     1990      DBFEXIT; 
     1991      return retval; 
     1992    } 
     1993  } 
     1994 
      1995  /* Disable other access while inserting the URB */ 
     1996  local_irq_save(flags); 
     1997 
     1998  /* Claim bandwidth, if needed */ 
     1999  if(bustime) { 
     2000    crisv10_usb_claim_bandwidth(urb->dev, 
     2001                                urb, 
     2002                                bustime, 
     2003                                (usb_pipetype(urb->pipe) == PIPE_ISOCHRONOUS)); 
     2004  } 
     2005   
     2006  /* Add URB to EP queue */ 
     2007  urb_list_add(urb, epid, mem_flags); 
     2008 
     2009  if(usb_pipeisoc(urb->pipe)) { 
     2010    /* Special processing of Isoc URBs. */ 
     2011    tc_dma_process_isoc_urb(urb); 
     2012  } else { 
     2013    /* Process EP queue for rest of the URB types (Bulk, Ctrl, Intr) */ 
     2014    tc_dma_process_queue(epid); 
     2015  } 
     2016 
     2017  local_irq_restore(flags); 
     2018 
     2019  DBFEXIT; 
     2020  return 0; 
     2021} 
     2022 
     2023/* remove an URB from the transfer controller queues (called from hcd_driver)*/ 
     2024static int tc_urb_dequeue(struct usb_hcd *hcd, struct urb *urb, int status) { 
     2025  struct crisv10_urb_priv *urb_priv; 
     2026  unsigned long flags; 
     2027  int epid; 
     2028 
     2029  DBFENTER; 
     2030  /* Disable interrupts here since a descriptor interrupt for the isoc epid 
      2031     will modify the sb list.  This could possibly be done at a finer granularity, but 
     2032     urb_dequeue should not be used frequently anyway. 
     2033  */ 
     2034  local_irq_save(flags); 
     2035 
     2036  urb->status = status; 
     2037  urb_priv = urb->hcpriv; 
     2038 
     2039  if (!urb_priv) { 
     2040    /* This happens if a device driver calls unlink on an urb that 
     2041       was never submitted (lazy driver) or if the urb was completed 
     2042       while dequeue was being called. */ 
      2043    tc_warn("Dequeuing of not enqueued URB:0x%x\n", (unsigned int)urb); 
     2044    local_irq_restore(flags); 
     2045    return 0; 
     2046  } 
     2047  epid = urb_priv->epid; 
     2048 
      2049  tc_warn("Dequeuing %s URB:0x%x[%d] (%s %s epid:%d) status:%d %s\n", 
     2050          (urb == activeUrbList[epid]) ? "active" : "queued", 
     2051          (unsigned int)urb, urb_priv->urb_num, str_dir(urb->pipe), 
     2052          str_type(urb->pipe), epid, urb->status, 
     2053          (urb_priv->later_data) ? "later-sched" : ""); 
     2054 
      2055  /* For Bulk, Ctrl and Intr only one URB is active at a time, so any URB 
      2056     that isn't active can be dequeued by just removing it from the queue. */ 
     2057  if(usb_pipebulk(urb->pipe) || usb_pipecontrol(urb->pipe) || 
     2058     usb_pipeint(urb->pipe)) { 
     2059 
      2060    /* Check if the URB hasn't gone further than the queue */ 
     2061    if(urb != activeUrbList[epid]) { 
     2062      ASSERT(urb_priv->later_data == NULL); 
      2063      tc_warn("Dequeuing URB:0x%x[%d] (%s %s epid:%d) from queue" 
     2064              " (not active)\n", (unsigned int)urb, urb_priv->urb_num, 
     2065              str_dir(urb->pipe), str_type(urb->pipe), epid); 
     2066       
     2067      /* Finish the URB with error status from USB core */ 
     2068      tc_finish_urb(hcd, urb, urb->status); 
     2069      local_irq_restore(flags); 
     2070      return 0; 
     2071    } 
     2072  } 
     2073 
     2074  /* Set URB status to Unlink for handling when interrupt comes. */ 
     2075  urb_priv->urb_state = UNLINK; 
     2076 
      2077  /* Differentiate dequeuing of Bulk and Ctrl from Isoc and Intr */ 
     2078  switch(usb_pipetype(urb->pipe)) { 
     2079  case PIPE_BULK: 
     2080    /* Check if EP still is enabled */ 
     2081    if (TxBulkEPList[epid].command & IO_MASK(USB_EP_command, enable)) { 
     2082      /* The EP was enabled, disable it. */ 
     2083      TxBulkEPList[epid].command &= ~IO_MASK(USB_EP_command, enable); 
     2084    } 
     2085    /* Kicking dummy list out of the party. */ 
     2086    TxBulkEPList[epid].next = virt_to_phys(&TxBulkEPList[(epid + 1) % NBR_OF_EPIDS]); 
     2087    break; 
     2088  case PIPE_CONTROL: 
     2089    /* Check if EP still is enabled */ 
     2090    if (TxCtrlEPList[epid].command & IO_MASK(USB_EP_command, enable)) { 
     2091      /* The EP was enabled, disable it. */ 
     2092      TxCtrlEPList[epid].command &= ~IO_MASK(USB_EP_command, enable); 
     2093    } 
     2094    break; 
     2095  case PIPE_ISOCHRONOUS: 
     2096    /* Disabling, busy-wait and unlinking of Isoc SBs will be done in 
      2097       finish_isoc_urb(), because there might be the case where a URB is dequeued 
      2098       while other valid URBs are still waiting. */ 
     2099 
     2100    /* Check if In Isoc EP still is enabled */ 
     2101    if (TxIsocEPList[epid].command & IO_MASK(USB_EP_command, enable)) { 
     2102      /* The EP was enabled, disable it. */ 
     2103      TxIsocEPList[epid].command &= ~IO_MASK(USB_EP_command, enable); 
     2104    } 
     2105    break; 
     2106  case PIPE_INTERRUPT: 
     2107    /* Special care is taken for interrupt URBs. EPs are unlinked in 
     2108       tc_finish_urb */ 
     2109    break; 
     2110  default: 
     2111    break; 
     2112  } 
     2113 
      2114  /* Asynchronous unlink: finish the URB later, from scheduled work or another 
      2115     event (data finished, error). */ 
     2116  tc_finish_urb_later(hcd, urb, urb->status); 
     2117 
     2118  local_irq_restore(flags); 
     2119  DBFEXIT; 
     2120  return 0; 
     2121} 
     2122 
     2123 
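          /* Synchronously shut down an epid: mark it disabled, disable its EP
             descriptor(s), busy-wait until the DMA has left them and then finish any
             active or queued URBs with -ENOENT. */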
     2124static void tc_sync_finish_epid(struct usb_hcd *hcd, int epid) { 
     2125  volatile int timeout = 10000; 
     2126  struct urb* urb; 
     2127  struct crisv10_urb_priv* urb_priv; 
     2128  unsigned long flags; 
     2129   
     2130  volatile struct USB_EP_Desc *first_ep;  /* First EP in the list. */ 
     2131  volatile struct USB_EP_Desc *curr_ep;   /* Current EP, the iterator. */ 
     2132  volatile struct USB_EP_Desc *next_ep;   /* The EP after current. */ 
     2133 
     2134  int type = epid_state[epid].type; 
     2135 
     2136  /* Setting this flag will cause enqueue() to return -ENOENT for new 
      2137     submissions on this endpoint and finish_urb() won't process the queue further */ 
     2138  epid_state[epid].disabled = 1; 
     2139 
     2140  switch(type) { 
     2141  case PIPE_BULK: 
     2142    /* Check if EP still is enabled */ 
     2143    if (TxBulkEPList[epid].command & IO_MASK(USB_EP_command, enable)) { 
     2144      /* The EP was enabled, disable it. */ 
     2145      TxBulkEPList[epid].command &= ~IO_MASK(USB_EP_command, enable); 
     2146      tc_warn("sync_finish: Disabling EP for epid:%d\n", epid); 
     2147 
     2148      /* Do busy-wait until DMA not using this EP descriptor anymore */ 
     2149      while((*R_DMA_CH8_SUB0_EP == 
     2150             virt_to_phys(&TxBulkEPList[epid])) && 
     2151            (timeout-- > 0)); 
      2152      if(timeout <= 0) { 
     2153        warn("Timeout while waiting for DMA-TX-Bulk to leave EP for" 
     2154             " epid:%d\n", epid); 
     2155      } 
     2156    } 
     2157    break; 
     2158 
     2159  case PIPE_CONTROL: 
     2160    /* Check if EP still is enabled */ 
     2161    if (TxCtrlEPList[epid].command & IO_MASK(USB_EP_command, enable)) { 
     2162      /* The EP was enabled, disable it. */ 
     2163      TxCtrlEPList[epid].command &= ~IO_MASK(USB_EP_command, enable); 
     2164      tc_warn("sync_finish: Disabling EP for epid:%d\n", epid); 
     2165 
     2166      /* Do busy-wait until DMA not using this EP descriptor anymore */ 
     2167      while((*R_DMA_CH8_SUB1_EP == 
     2168             virt_to_phys(&TxCtrlEPList[epid])) && 
     2169            (timeout-- > 0)); 
      2170      if(timeout <= 0) { 
     2171        warn("Timeout while waiting for DMA-TX-Ctrl to leave EP for" 
     2172             " epid:%d\n", epid); 
     2173      } 
     2174    } 
     2175    break; 
     2176 
     2177  case PIPE_INTERRUPT: 
     2178    local_irq_save(flags); 
     2179    /* Disable all Intr EPs belonging to epid */ 
     2180    first_ep = &TxIntrEPList[0]; 
     2181    curr_ep = first_ep; 
     2182    do { 
     2183      next_ep = (struct USB_EP_Desc *)phys_to_virt(curr_ep->next); 
     2184      if (IO_EXTRACT(USB_EP_command, epid, next_ep->command) == epid) { 
     2185        /* Disable EP */ 
     2186        next_ep->command &= ~IO_MASK(USB_EP_command, enable); 
     2187      } 
     2188      curr_ep = phys_to_virt(curr_ep->next); 
     2189    } while (curr_ep != first_ep); 
     2190 
     2191    local_irq_restore(flags); 
     2192    break; 
     2193 
     2194  case PIPE_ISOCHRONOUS: 
     2195    /* Check if EP still is enabled */ 
     2196    if (TxIsocEPList[epid].command & IO_MASK(USB_EP_command, enable)) { 
     2197      tc_warn("sync_finish: Disabling Isoc EP for epid:%d\n", epid); 
     2198      /* The EP was enabled, disable it. */ 
     2199      TxIsocEPList[epid].command &= ~IO_MASK(USB_EP_command, enable); 
     2200       
     2201      while((*R_DMA_CH8_SUB3_EP == virt_to_phys(&TxIsocEPList[epid])) && 
     2202            (timeout-- > 0)); 
      2203      if(timeout <= 0) { 
     2204        warn("Timeout while waiting for DMA-TX-Isoc to leave EP for" 
     2205             " epid:%d\n", epid); 
     2206      } 
     2207    } 
     2208    break; 
     2209  } 
     2210 
     2211  local_irq_save(flags); 
     2212 
     2213  /* Finish if there is active URB for this endpoint */ 
     2214  if(activeUrbList[epid] != NULL) { 
     2215    urb = activeUrbList[epid]; 
     2216    urb_priv = urb->hcpriv; 
     2217    ASSERT(urb_priv); 
     2218    tc_warn("Sync finish %s URB:0x%x[%d] (%s %s epid:%d) status:%d %s\n", 
     2219            (urb == activeUrbList[epid]) ? "active" : "queued", 
     2220            (unsigned int)urb, urb_priv->urb_num, str_dir(urb->pipe), 
     2221            str_type(urb->pipe), epid, urb->status, 
     2222            (urb_priv->later_data) ? "later-sched" : ""); 
     2223 
     2224    tc_finish_urb(hcd, activeUrbList[epid], -ENOENT); 
     2225    ASSERT(activeUrbList[epid] == NULL); 
     2226  } 
     2227 
      2228  /* Finish any queued URBs for this endpoint. There won't be any resubmissions 
     2229     because epid_disabled causes enqueue() to fail for this endpoint */ 
     2230  while((urb = urb_list_first(epid)) != NULL) { 
     2231    urb_priv = urb->hcpriv; 
     2232    ASSERT(urb_priv); 
     2233 
     2234    tc_warn("Sync finish %s URB:0x%x[%d] (%s %s epid:%d) status:%d %s\n", 
     2235            (urb == activeUrbList[epid]) ? "active" : "queued", 
     2236            (unsigned int)urb, urb_priv->urb_num, str_dir(urb->pipe), 
     2237            str_type(urb->pipe), epid, urb->status, 
     2238            (urb_priv->later_data) ? "later-sched" : ""); 
     2239 
     2240    tc_finish_urb(hcd, urb, -ENOENT); 
     2241  } 
     2242  epid_state[epid].disabled = 0; 
     2243  local_irq_restore(flags); 
     2244} 
     2245 
     2246/* free resources associated with an endpoint (called from hcd_driver) */ 
     2247static void tc_endpoint_disable(struct usb_hcd *hcd,  
     2248                                struct usb_host_endpoint *ep) { 
     2249  DBFENTER; 
     2250  /* Only free epid if it has been allocated. We get two endpoint_disable 
     2251     requests for ctrl endpoints so ignore the second one */ 
     2252  if(ep->hcpriv != NULL) { 
     2253    struct crisv10_ep_priv *ep_priv = ep->hcpriv; 
     2254    int epid = ep_priv->epid; 
     2255    tc_warn("endpoint_disable ep:0x%x ep-priv:0x%x (%s) (epid:%d freed)\n", 
     2256           (unsigned int)ep, (unsigned int)ep->hcpriv, 
     2257           endpoint_to_str(&(ep->desc)), epid); 
     2258 
     2259    tc_sync_finish_epid(hcd, epid); 
     2260 
     2261    ASSERT(activeUrbList[epid] == NULL); 
     2262    ASSERT(list_empty(&urb_list[epid])); 
     2263 
     2264    tc_free_epid(ep); 
     2265  } else { 
     2266    tc_dbg("endpoint_disable ep:0x%x ep-priv:0x%x (%s)\n", (unsigned int)ep, 
     2267           (unsigned int)ep->hcpriv, endpoint_to_str(&(ep->desc))); 
     2268  } 
     2269  DBFEXIT; 
     2270} 
     2271 
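          /* Work-queue callback for the deferred URB completion scheduled by
             tc_finish_urb_later(). The urb_num check ensures we do not complete a URB
             that has already been finished and reused in the meantime. */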
     2272static void tc_finish_urb_later_proc(struct work_struct* work) { 
     2273  unsigned long flags; 
     2274  struct urb_later_data* uld; 
     2275 
     2276  local_irq_save(flags); 
     2277  uld = container_of(work, struct urb_later_data, dws.work); 
     2278  if(uld->urb == NULL) { 
      2279    late_dbg("Later finish of URB = NULL (already finished)\n"); 
     2280  } else { 
     2281    struct crisv10_urb_priv* urb_priv = uld->urb->hcpriv; 
     2282    ASSERT(urb_priv); 
     2283    if(urb_priv->urb_num == uld->urb_num) { 
     2284      late_dbg("Later finish of URB:0x%x[%d]\n", (unsigned int)(uld->urb), 
     2285               urb_priv->urb_num); 
     2286      if(uld->status != uld->urb->status) { 
     2287        errno_dbg("Later-finish URB with status:%d, later-status:%d\n", 
     2288                  uld->urb->status, uld->status); 
     2289      } 
     2290      if(uld != urb_priv->later_data) { 
     2291        panic("Scheduled uld not same as URBs uld\n"); 
     2292      } 
     2293      tc_finish_urb(uld->hcd, uld->urb, uld->status); 
     2294    } else { 
     2295      late_warn("Ignoring later finish of URB:0x%x[%d]" 
     2296                ", urb_num doesn't match current URB:0x%x[%d]", 
     2297                (unsigned int)(uld->urb), uld->urb_num, 
     2298                (unsigned int)(uld->urb), urb_priv->urb_num); 
     2299    } 
     2300  } 
     2301  local_irq_restore(flags); 
     2302  kmem_cache_free(later_data_cache, uld); 
     2303} 
     2304 
     2305static void tc_finish_urb_later(struct usb_hcd *hcd, struct urb *urb, 
     2306                                int status) { 
     2307  struct crisv10_urb_priv *urb_priv = urb->hcpriv; 
     2308  struct urb_later_data* uld; 
     2309 
     2310  ASSERT(urb_priv); 
     2311 
     2312  if(urb_priv->later_data != NULL) { 
      2313    /* Later-finish already scheduled for this URB, just update status to 
     2314       return when finishing later */ 
     2315    errno_dbg("Later-finish schedule change URB status:%d with new" 
     2316              " status:%d\n", urb_priv->later_data->status, status); 
     2317     
     2318    urb_priv->later_data->status = status; 
     2319    return; 
     2320  } 
     2321 
     2322  uld = kmem_cache_alloc(later_data_cache, GFP_ATOMIC); 
     2323  ASSERT(uld); 
     2324 
     2325  uld->hcd = hcd; 
     2326  uld->urb = urb; 
     2327  uld->urb_num = urb_priv->urb_num; 
     2328  uld->status = status; 
     2329 
     2330  INIT_DELAYED_WORK(&uld->dws, tc_finish_urb_later_proc); 
     2331  urb_priv->later_data = uld; 
     2332 
     2333  /* Schedule the finishing of the URB to happen later */ 
     2334  schedule_delayed_work(&uld->dws, LATER_TIMER_DELAY); 
     2335} 
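The struct urb_later_data itself is declared in hc-crisv10.h rather than in this file; a minimal sketch consistent with the fields used above (an assumption for illustration, not the header's exact definition) would be:

    struct urb_later_data {
      struct delayed_work dws;  /* runs tc_finish_urb_later_proc() after LATER_TIMER_DELAY */
      struct usb_hcd *hcd;      /* host controller the URB belongs to */
      struct urb *urb;          /* URB to finish; set to NULL if it was finished earlier */
      int urb_num;              /* sequence number guarding against a recycled URB */
      int status;               /* status to report when the URB is finally finished */
    };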
     2336 
     2337static void tc_finish_isoc_urb(struct usb_hcd *hcd, struct urb *urb, 
     2338                               int status); 
     2339 
     2340static void tc_finish_urb(struct usb_hcd *hcd, struct urb *urb, int status) { 
     2341  struct crisv10_hcd* crisv10_hcd = hcd_to_crisv10_hcd(hcd); 
     2342  struct crisv10_urb_priv *urb_priv = urb->hcpriv; 
     2343  int epid; 
     2344  char toggle; 
     2345  int urb_num; 
     2346 
     2347  DBFENTER; 
     2348  ASSERT(urb_priv != NULL); 
     2349  epid = urb_priv->epid; 
     2350  urb_num = urb_priv->urb_num; 
     2351 
     2352  if(urb != activeUrbList[epid]) { 
     2353    if(urb_list_entry(urb, epid)) { 
      2354      /* Remove this URB from the list. Only happens when URBs are finished 
      2355         before having been processed (dequeuing) */ 
     2356      urb_list_del(urb, epid); 
     2357    } else { 
      2358      tc_warn("Finishing of URB:0x%x[%d] neither active nor queued for" 
     2359              " epid:%d\n", (unsigned int)urb, urb_num, epid); 
     2360    } 
     2361  } 
     2362 
     2363  /* Cancel any pending later-finish of this URB */ 
     2364  if(urb_priv->later_data) { 
     2365    urb_priv->later_data->urb = NULL; 
     2366  } 
     2367 
     2368  /* For an IN pipe, we always set the actual length, regardless of whether 
     2369     there was an error or not (which means the device driver can use the data 
     2370     if it wants to). */ 
     2371  if(usb_pipein(urb->pipe)) { 
     2372    urb->actual_length = urb_priv->rx_offset; 
     2373  } else { 
     2374    /* Set actual_length for OUT urbs also; the USB mass storage driver seems 
     2375       to want that. */ 
     2376    if (status == 0 && urb->status == -EINPROGRESS) { 
     2377      urb->actual_length = urb->transfer_buffer_length; 
     2378    } else { 
     2379      /*  We wouldn't know of any partial writes if there was an error. */ 
     2380      urb->actual_length = 0; 
     2381    } 
     2382  } 
     2383 
     2384 
     2385  /* URB status mangling */ 
     2386  if(urb->status == -EINPROGRESS) { 
     2387    /* The USB core hasn't changed the status, let's set our finish status */ 
     2388    urb->status = status; 
     2389 
     2390    if ((status == 0) && (urb->transfer_flags & URB_SHORT_NOT_OK) && 
     2391        usb_pipein(urb->pipe) && 
     2392        (urb->actual_length != urb->transfer_buffer_length)) { 
      2393      /* URB_SHORT_NOT_OK means that short reads (shorter than the endpoint's 
      2394         max length) are to be treated as an error. */ 
     2395      errno_dbg("Finishing URB:0x%x[%d] with SHORT_NOT_OK flag and short" 
     2396                " data:%d\n", (unsigned int)urb, urb_num, 
     2397                urb->actual_length); 
     2398      urb->status = -EREMOTEIO; 
     2399    } 
     2400 
     2401    if(urb_priv->urb_state == UNLINK) { 
     2402      /* URB has been requested to be unlinked asynchronously */ 
     2403      urb->status = -ECONNRESET; 
     2404      errno_dbg("Fixing unlink status of URB:0x%x[%d] to:%d\n", 
     2405                (unsigned int)urb, urb_num, urb->status); 
     2406    } 
     2407  } else { 
     2408    /* The USB Core wants to signal some error via the URB, pass it through */ 
     2409  } 
     2410 
     2411  /* use completely different finish function for Isoc URBs */ 
     2412  if(usb_pipeisoc(urb->pipe)) { 
     2413    tc_finish_isoc_urb(hcd, urb, status); 
     2414    return; 
     2415  } 
     2416 
     2417  /* Do special unlinking of EPs for Intr traffic */ 
     2418  if(usb_pipeint(urb->pipe)) { 
     2419    tc_dma_unlink_intr_urb(urb); 
     2420  } 
     2421 
     2422  /* Release allocated bandwidth for periodic transfers */ 
     2423  if(usb_pipeint(urb->pipe) || usb_pipeisoc(urb->pipe)) 
     2424    crisv10_usb_release_bandwidth(hcd, 
     2425                                  usb_pipetype(urb->pipe) == PIPE_ISOCHRONOUS, 
     2426                                  urb_priv->bandwidth); 
     2427 
     2428  /* This URB is active on EP */ 
     2429  if(urb == activeUrbList[epid]) { 
     2430    /* We need to fiddle with the toggle bits because the hardware doesn't do 
     2431       it for us. */ 
     2432    toggle = etrax_epid_get_toggle(epid, usb_pipeout(urb->pipe)); 
     2433    usb_settoggle(urb->dev, usb_pipeendpoint(urb->pipe), 
     2434                  usb_pipeout(urb->pipe), toggle); 
     2435 
     2436    /* Checks for Ctrl and Bulk EPs */ 
     2437    switch(usb_pipetype(urb->pipe)) { 
     2438    case PIPE_BULK: 
      2439      /* Check that the Bulk EP really is disabled before finishing the active URB */ 
     2440      ASSERT((TxBulkEPList[epid].command & IO_MASK(USB_EP_command, enable)) == 
     2441             IO_STATE(USB_EP_command, enable, no)); 
      2442      /* Clear the sub-pointer for the EP so the next tx_interrupt() won't 
      2443         process the Bulk EP. */ 
     2444      TxBulkEPList[epid].sub = 0; 
     2445      /* No need to wait for the DMA before changing the next pointer. 
     2446         The modulo NBR_OF_EPIDS isn't actually necessary, since we will never use 
     2447         the last one (INVALID_EPID) for actual traffic. */ 
     2448      TxBulkEPList[epid].next =  
     2449        virt_to_phys(&TxBulkEPList[(epid + 1) % NBR_OF_EPIDS]); 
     2450      break; 
     2451    case PIPE_CONTROL: 
      2452      /* Check that the Ctrl EP really is disabled before finishing the active URB */ 
     2453      ASSERT((TxCtrlEPList[epid].command & IO_MASK(USB_EP_command, enable)) == 
     2454             IO_STATE(USB_EP_command, enable, no)); 
      2455      /* Clear the sub-pointer for the EP so the next tx_interrupt() won't 
      2456         process the Ctrl EP. */ 
     2457      TxCtrlEPList[epid].sub = 0; 
     2458      break; 
     2459    } 
     2460  } 
     2461 
     2462  /* Free HC-private URB data*/ 
     2463  urb_priv_free(hcd, urb); 
     2464 
     2465  if(urb->status) { 
     2466    errno_dbg("finish_urb (URB:0x%x[%d] %s %s) (data:%d) status:%d\n", 
     2467              (unsigned int)urb, urb_num, str_dir(urb->pipe), 
     2468              str_type(urb->pipe), urb->actual_length, urb->status); 
     2469  } else { 
     2470    tc_dbg("finish_urb (URB:0x%x[%d] %s %s) (data:%d) status:%d\n", 
     2471           (unsigned int)urb, urb_num, str_dir(urb->pipe), 
     2472           str_type(urb->pipe), urb->actual_length, urb->status); 
     2473  } 
     2474 
     2475  /* If we just finished an active URB, clear active pointer. */ 
     2476  if (urb == activeUrbList[epid]) { 
     2477    /* Make URB not active on EP anymore */ 
     2478    activeUrbList[epid] = NULL; 
     2479 
     2480    if(urb->status == 0) { 
      2481      /* URB finished successfully, process the queue to see if there are any 
      2482         more URBs waiting before we call the completion function. */ 
     2483      if(crisv10_hcd->running) { 
     2484        /* Only process queue if USB controller is running */ 
     2485        tc_dma_process_queue(epid); 
     2486      } else { 
     2487        tc_warn("No processing of queue for epid:%d, USB Controller not" 
     2488                " running\n", epid); 
     2489      } 
     2490    } 
     2491  } 
     2492 
     2493  /*  Hand the URB from HCD to its USB device driver, using its completion 
     2494      functions */ 
     2495  usb_hcd_giveback_urb (hcd, urb, status); 
     2496 
      2497  /* Check the queue once more if the URB returned with an error, because we 
      2498     didn't do it before the completion function: the specification states 
      2499     that the queue should not restart until all of its unlinked URBs have 
      2500     been fully retired, with their completion functions run */ 
     2501  if(crisv10_hcd->running) { 
     2502    /* Only process queue if USB controller is running */ 
     2503    tc_dma_process_queue(epid); 
     2504  } else { 
     2505    tc_warn("No processing of queue for epid:%d, USB Controller not running\n", 
     2506            epid); 
     2507  } 
     2508 
     2509  DBFEXIT; 
     2510} 
     2511 
     2512static void tc_finish_isoc_urb(struct usb_hcd *hcd, struct urb *urb, 
     2513                               int status) { 
     2514  struct crisv10_urb_priv *urb_priv = urb->hcpriv; 
     2515  int epid, i; 
     2516  volatile int timeout = 10000; 
     2517  int bandwidth = 0; 
     2518 
     2519  ASSERT(urb_priv); 
     2520  epid = urb_priv->epid; 
     2521 
     2522  ASSERT(usb_pipeisoc(urb->pipe)); 
     2523 
      2524  /* Make sure all isoc packets have status and length set before 
      2525     completing the URB. */ 
     2526  for (i = urb_priv->isoc_packet_counter; i < urb->number_of_packets; i++){ 
     2527    urb->iso_frame_desc[i].actual_length = 0; 
     2528    urb->iso_frame_desc[i].status = -EPROTO; 
     2529  } 
     2530 
     2531  /* Check if the URB is currently active (done or error) */ 
     2532  if(urb == activeUrbList[epid]) { 
      2533    /* Check if there is another In Isoc URB queued for this epid */ 
      2534    if (!list_empty(&urb_list[epid]) && !epid_state[epid].disabled) { 
     2535      /* Move it from queue to active and mark it started so Isoc transfers 
     2536         won't be interrupted. 
     2537         All Isoc URBs data transfers are already added to DMA lists so we 
     2538         don't have to insert anything in DMA lists here. */ 
     2539      activeUrbList[epid] = urb_list_first(epid); 
     2540      ((struct crisv10_urb_priv *)(activeUrbList[epid]->hcpriv))->urb_state = 
     2541        STARTED; 
     2542      urb_list_del(activeUrbList[epid], epid); 
     2543 
     2544      if(urb->status) { 
     2545        errno_dbg("finish_isoc_urb (URB:0x%x[%d] %s %s) (%d of %d packets)" 
     2546                  " status:%d, new waiting URB:0x%x[%d]\n", 
     2547                  (unsigned int)urb, urb_priv->urb_num, str_dir(urb->pipe), 
     2548                  str_type(urb->pipe), urb_priv->isoc_packet_counter, 
     2549                  urb->number_of_packets, urb->status, 
     2550                  (unsigned int)activeUrbList[epid], 
     2551                  ((struct crisv10_urb_priv *)(activeUrbList[epid]->hcpriv))->urb_num); 
     2552      } 
     2553 
     2554    } else { /* No other URB queued for this epid */ 
     2555      if(urb->status) { 
     2556        errno_dbg("finish_isoc_urb (URB:0x%x[%d] %s %s) (%d of %d packets)" 
     2557                  " status:%d, no new URB waiting\n", 
     2558                  (unsigned int)urb, urb_priv->urb_num, str_dir(urb->pipe), 
     2559                  str_type(urb->pipe), urb_priv->isoc_packet_counter, 
     2560                  urb->number_of_packets, urb->status); 
     2561      } 
     2562 
     2563      /* Check if EP is still enabled, then shut it down. */ 
     2564      if (TxIsocEPList[epid].command & IO_MASK(USB_EP_command, enable)) { 
     2565        isoc_dbg("Isoc EP enabled for epid:%d, disabling it\n", epid); 
     2566 
     2567        /* Should only occur for In Isoc EPs where SB isn't consumed. */ 
     2568        ASSERT(usb_pipein(urb->pipe)); 
     2569 
     2570        /* Disable it and wait for it to stop */ 
     2571        TxIsocEPList[epid].command &= ~IO_MASK(USB_EP_command, enable); 
     2572         
     2573        /* Ah, the luxury of busy-wait. */ 
     2574        while((*R_DMA_CH8_SUB3_EP == virt_to_phys(&TxIsocEPList[epid])) && 
     2575              (timeout-- > 0)); 
     2576        if(timeout == 0) { 
     2577          warn("Timeout while waiting for DMA-TX-Isoc to leave EP for epid:%d\n", epid); 
     2578        } 
     2579      } 
     2580 
     2581      /* Unlink SB to say that epid is finished. */ 
     2582      TxIsocEPList[epid].sub = 0; 
     2583      TxIsocEPList[epid].hw_len = 0; 
     2584 
     2585      /* No URB active for EP anymore */ 
     2586      activeUrbList[epid] = NULL; 
     2587    } 
      2588  } else { /* Finishing of a non-active URB (queued up with SBs though) */ 
     2589    isoc_warn("finish_isoc_urb (URB:0x%x %s) (%d of %d packets) status:%d," 
     2590              " SB queued but not active\n", 
     2591              (unsigned int)urb, str_dir(urb->pipe), 
     2592              urb_priv->isoc_packet_counter, urb->number_of_packets, 
     2593              urb->status); 
     2594    if(usb_pipeout(urb->pipe)) { 
      2595      /* Finishing of a not yet active Out Isoc URB needs unlinking of its SBs. */ 
     2596      struct USB_SB_Desc *iter_sb, *prev_sb, *next_sb; 
     2597 
     2598      iter_sb = TxIsocEPList[epid].sub ? 
     2599        phys_to_virt(TxIsocEPList[epid].sub) : 0; 
     2600      prev_sb = 0; 
     2601 
      2603      /* Find the SB that is linked before this URB's first SB */ 
     2603      while (iter_sb && (iter_sb != urb_priv->first_sb)) { 
     2604        prev_sb = iter_sb; 
     2605        iter_sb = iter_sb->next ? phys_to_virt(iter_sb->next) : 0; 
     2606      } 
     2607 
     2608      if (iter_sb == 0) { 
     2609        /* Unlink of the URB currently being transmitted. */ 
     2610        prev_sb = 0; 
     2611        iter_sb = TxIsocEPList[epid].sub ? phys_to_virt(TxIsocEPList[epid].sub) : 0; 
     2612      } 
     2613 
     2614      while (iter_sb && (iter_sb != urb_priv->last_sb)) { 
     2615        iter_sb = iter_sb->next ? phys_to_virt(iter_sb->next) : 0; 
     2616      } 
     2617 
     2618      if (iter_sb) { 
     2619        next_sb = iter_sb->next ? phys_to_virt(iter_sb->next) : 0; 
     2620      } else { 
     2621        /* This should only happen if the DMA has completed 
     2622           processing the SB list for this EP while interrupts 
     2623           are disabled. */ 
     2624        isoc_dbg("Isoc urb not found, already sent?\n"); 
     2625        next_sb = 0; 
     2626      } 
     2627      if (prev_sb) { 
     2628        prev_sb->next = next_sb ? virt_to_phys(next_sb) : 0; 
     2629      } else { 
     2630        TxIsocEPList[epid].sub = next_sb ? virt_to_phys(next_sb) : 0; 
     2631      } 
     2632    } 
     2633  } 
     2634 
     2635  /* Free HC-private URB data*/ 
     2636  bandwidth = urb_priv->bandwidth; 
     2637  urb_priv_free(hcd, urb); 
     2638 
     2639  crisv10_usb_release_bandwidth(hcd, usb_pipeisoc(urb->pipe), bandwidth); 
     2640 
     2641  /*  Hand the URB from HCD to its USB device driver, using its completion 
     2642      functions */ 
     2643  usb_hcd_giveback_urb (hcd, urb, status); 
     2644} 
     2645 
     2646static __u32 urb_num = 0; 
     2647 
     2648/* allocate and initialize URB private data */ 
     2649static int urb_priv_create(struct usb_hcd *hcd, struct urb *urb, int epid, 
     2650                           int mem_flags) { 
     2651  struct crisv10_urb_priv *urb_priv; 
     2652   
     2653  urb_priv = kmalloc(sizeof *urb_priv, mem_flags); 
     2654  if (!urb_priv) 
     2655    return -ENOMEM; 
     2656  memset(urb_priv, 0, sizeof *urb_priv); 
     2657 
     2658  urb_priv->epid = epid; 
     2659  urb_priv->urb_state = NOT_STARTED; 
     2660 
     2661  urb->hcpriv = urb_priv; 
     2662  /* Assign URB a sequence number, and increment counter */ 
     2663  urb_priv->urb_num = urb_num; 
     2664  urb_num++; 
     2665  urb_priv->bandwidth = 0; 
     2666  return 0; 
     2667} 
     2668 
     2669/* free URB private data */ 
     2670static void urb_priv_free(struct usb_hcd *hcd, struct urb *urb) { 
     2671  int i; 
     2672  struct crisv10_urb_priv *urb_priv = urb->hcpriv; 
     2673  ASSERT(urb_priv != 0); 
     2674 
      2675  /* Check if it has any linked SBs that need to be freed */ 
     2676  if(urb_priv->first_sb != NULL) { 
     2677    struct USB_SB_Desc *next_sb, *first_sb, *last_sb; 
     2678    int i = 0; 
     2679    first_sb = urb_priv->first_sb; 
     2680    last_sb = urb_priv->last_sb; 
     2681    ASSERT(last_sb); 
     2682    while(first_sb != last_sb) { 
     2683      next_sb = (struct USB_SB_Desc *)phys_to_virt(first_sb->next); 
     2684      kmem_cache_free(usb_desc_cache, first_sb); 
     2685      first_sb = next_sb; 
     2686      i++; 
     2687    } 
     2688    kmem_cache_free(usb_desc_cache, last_sb); 
     2689    i++; 
     2690  } 
     2691 
      2692  /* Check if it has any EPs in its Intr pool that also need to be freed */ 
     2693  if(urb_priv->intr_ep_pool_length > 0) { 
     2694    for(i = 0; i < urb_priv->intr_ep_pool_length; i++) { 
     2695      kfree(urb_priv->intr_ep_pool[i]); 
     2696    } 
     2697    /* 
     2698    tc_dbg("Freed %d EPs from URB:0x%x EP pool\n", 
     2699             urb_priv->intr_ep_pool_length, (unsigned int)urb); 
     2700    */ 
     2701  } 
     2702 
     2703  kfree(urb_priv); 
     2704  urb->hcpriv = NULL; 
     2705} 
     2706 
     2707static int ep_priv_create(struct usb_host_endpoint *ep, int mem_flags) { 
     2708  struct crisv10_ep_priv *ep_priv; 
     2709   
     2710  ep_priv = kmalloc(sizeof *ep_priv, mem_flags); 
     2711  if (!ep_priv) 
     2712    return -ENOMEM; 
     2713  memset(ep_priv, 0, sizeof *ep_priv); 
     2714 
     2715  ep->hcpriv = ep_priv; 
     2716  return 0; 
     2717} 
     2718 
     2719static void ep_priv_free(struct usb_host_endpoint *ep) { 
     2720  struct crisv10_ep_priv *ep_priv = ep->hcpriv; 
     2721  ASSERT(ep_priv); 
     2722  kfree(ep_priv); 
     2723  ep->hcpriv = NULL; 
     2724} 
     2725 
     2726/* 
     2727 * usb_check_bandwidth(): 
     2728 * 
     2729 * old_alloc is from host_controller->bandwidth_allocated in microseconds; 
     2730 * bustime is from calc_bus_time(), but converted to microseconds. 
     2731 * 
     2732 * returns <bustime in us> if successful, 
     2733 * or -ENOSPC if bandwidth request fails. 
     2734 * 
     2735 * FIXME: 
     2736 * This initial implementation does not use Endpoint.bInterval 
     2737 * in managing bandwidth allocation. 
     2738 * It probably needs to be expanded to use Endpoint.bInterval. 
     2739 * This can be done as a later enhancement (correction). 
     2740 * 
     2741 * This will also probably require some kind of 
     2742 * frame allocation tracking...meaning, for example, 
     2743 * that if multiple drivers request interrupts every 10 USB frames, 
     2744 * they don't all have to be allocated at 
     2745 * frame numbers N, N+10, N+20, etc.  Some of them could be at 
     2746 * N+11, N+21, N+31, etc., and others at 
     2747 * N+12, N+22, N+32, etc. 
     2748 * 
     2749 * Similarly for isochronous transfers... 
     2750 * 
     2751 * Individual HCDs can schedule more directly ... this logic 
     2752 * is not correct for high speed transfers. 
     2753 */ 
     2754static int crisv10_usb_check_bandwidth( 
     2755  struct usb_device *dev, 
     2756  struct urb *urb) 
    3022757{ 
    303         int retval; 
    304  
    305         for (retval = 0; *ascii && utfmax > 1; utfmax -= 2, retval += 2) { 
    306                 *utf++ = *ascii++ & 0x7f; 
    307                 *utf++ = 0; 
    308         } 
    309         return retval; 
    310 } 
    311  
    312 static int usb_root_hub_string (int id, int serial, char *type, __u8 *data, int len) 
     2758  unsigned int  pipe = urb->pipe; 
     2759  long                                  bustime; 
     2760  int                                           is_in = usb_pipein (pipe); 
     2761  int                                           is_iso = usb_pipeisoc (pipe); 
     2762  int                                           old_alloc = dev->bus->bandwidth_allocated; 
     2763  int                                           new_alloc; 
     2764 
     2765  bustime = NS_TO_US (usb_calc_bus_time (dev->speed, is_in, is_iso, 
     2766                                         usb_maxpacket (dev, pipe, !is_in))); 
     2767  if (is_iso) 
     2768    bustime /= urb->number_of_packets; 
     2769 
     2770  new_alloc = old_alloc + (int) bustime; 
     2771  if (new_alloc > FRAME_TIME_MAX_USECS_ALLOC) { 
     2772    dev_dbg (&dev->dev, "usb_check_bandwidth FAILED: %d + %ld = %d usec\n", 
     2773             old_alloc, bustime, new_alloc); 
     2774    bustime = -ENOSPC;      /* report error */ 
     2775  } 
     2776 
     2777  return bustime; 
     2778} 
     2779 
     2780/** 
     2781 * usb_claim_bandwidth - records bandwidth for a periodic transfer 
     2782 * @dev: source/target of request 
     2783 * @urb: request (urb->dev == dev) 
     2784 * @bustime: bandwidth consumed, in (average) microseconds per frame 
     2785 * @isoc: true iff the request is isochronous 
     2786 * 
     2787 * HCDs are expected not to overcommit periodic bandwidth, and to record such 
     2788 * reservations whenever endpoints are added to the periodic schedule. 
     2789 * 
     2790 * FIXME averaging per-frame is suboptimal.  Better to sum over the HCD's 
     2791 * entire periodic schedule ... 32 frames for OHCI, 1024 for UHCI, settable 
     2792 * for EHCI (256/512/1024 frames, default 1024) and have the bus expose how 
     2793 * large its periodic schedule is. 
     2794 */ 
     2795static void crisv10_usb_claim_bandwidth( 
     2796  struct usb_device *dev, 
     2797  struct urb *urb, int bustime, int isoc) 
    3132798{ 
    314         char buf [30]; 
    315  
    316         // assert (len > (2 * (sizeof (buf) + 1))); 
    317         // assert (strlen (type) <= 8); 
    318  
    319         // language ids 
    320         if (id == 0) { 
    321                 *data++ = 4; *data++ = 3;       /* 4 bytes data */ 
    322                 *data++ = 0; *data++ = 0;       /* some language id */ 
    323                 return 4; 
    324  
    325         // serial number 
    326         } else if (id == 1) { 
    327                 sprintf (buf, "%x", serial); 
    328  
    329         // product description 
    330         } else if (id == 2) { 
    331                 sprintf (buf, "USB %s Root Hub", type); 
    332  
    333         // id 3 == vendor description 
    334  
    335         // unsupported IDs --> "stall" 
    336         } else 
    337             return 0; 
    338  
    339         data [0] = 2 + ascii2utf (buf, data + 2, len - 2); 
    340         data [1] = 3; 
    341         return data [0]; 
    342 } 
      2799  struct crisv10_urb_priv *urb_priv = urb->hcpriv; 
      2800 
      2801  dev->bus->bandwidth_allocated += bustime; 
      2802  if (isoc) 
      2803    dev->bus->bandwidth_isoc_reqs++; 
      2804  else 
      2805    dev->bus->bandwidth_int_reqs++; 
      2806  urb_priv->bandwidth = bustime; 
     2807} 
     2808 
     2809/** 
     2810 * usb_release_bandwidth - reverses effect of usb_claim_bandwidth() 
     2811 * @hcd: host controller 
     2812 * @isoc: true iff the request is isochronous 
     2813 * @bandwidth: bandwidth returned 
     2814 * 
     2815 * This records that previously allocated bandwidth has been released. 
     2816 * Bandwidth is released when endpoints are removed from the host controller's 
     2817 * periodic schedule. 
     2818 */ 
     2819static void crisv10_usb_release_bandwidth( 
     2820  struct usb_hcd *hcd, 
     2821  int isoc, 
     2822  int bandwidth) 
     2823{ 
     2824  hcd_to_bus(hcd)->bandwidth_allocated -= bandwidth; 
     2825  if (isoc) 
     2826    hcd_to_bus(hcd)->bandwidth_isoc_reqs--; 
     2827  else 
     2828    hcd_to_bus(hcd)->bandwidth_int_reqs--; 
     2829} 
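Taken together, the three bandwidth helpers above are meant to be called in a fixed order around a periodic URB's lifetime. The following is only a sketch of that call order using the names defined in this file (the real enqueue path lives elsewhere in the driver and may differ in detail):

    /* On submit of an interrupt/isochronous URB: */
    int bustime = crisv10_usb_check_bandwidth(urb->dev, urb);
    if (bustime < 0)
      return bustime;  /* -ENOSPC: the frame is already fully booked */
    crisv10_usb_claim_bandwidth(urb->dev, urb, bustime, usb_pipeisoc(urb->pipe));

    /* On completion (see tc_finish_urb()/tc_finish_isoc_urb() above): */
    crisv10_usb_release_bandwidth(hcd, usb_pipeisoc(urb->pipe), urb_priv->bandwidth);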
     2830 
     2831 
     2832/* EPID handling functions, managing EP-list in Etrax through wrappers */ 
     2833/* ------------------------------------------------------------------- */ 
     2834 
     2835/* Sets up a new EPID for an endpoint or returns existing if found */ 
     2836static int tc_setup_epid(struct usb_host_endpoint *ep, struct urb *urb, 
     2837                         int mem_flags) { 
     2838  int epid; 
     2839  char devnum, endpoint, out_traffic, slow; 
     2840  int maxlen; 
     2841  __u32 epid_data; 
     2842  struct crisv10_ep_priv *ep_priv = ep->hcpriv; 
     2843   
     2844  DBFENTER; 
     2845   
      2846  /* Check if a valid epid is already set up for this endpoint */ 
     2847  if(ep_priv != NULL) { 
     2848    return ep_priv->epid; 
     2849  } 
     2850 
     2851  /* We must find and initiate a new epid for this urb. */ 
     2852  epid = tc_allocate_epid(); 
     2853   
     2854  if (epid == -1) { 
     2855    /* Failed to allocate a new epid. */ 
     2856    DBFEXIT; 
     2857    return epid; 
     2858  } 
     2859   
     2860  /* We now have a new epid to use. Claim it. */ 
     2861  epid_state[epid].inuse = 1; 
     2862   
     2863  /* Init private data for new endpoint */ 
     2864  if(ep_priv_create(ep, mem_flags) != 0) { 
     2865    return -ENOMEM; 
     2866  } 
     2867  ep_priv = ep->hcpriv; 
     2868  ep_priv->epid = epid; 
     2869 
     2870  devnum = usb_pipedevice(urb->pipe); 
     2871  endpoint = usb_pipeendpoint(urb->pipe); 
     2872  slow = (urb->dev->speed == USB_SPEED_LOW); 
     2873  maxlen = usb_maxpacket(urb->dev, urb->pipe, usb_pipeout(urb->pipe)); 
     2874 
     2875  if (usb_pipetype(urb->pipe) == PIPE_CONTROL) { 
     2876    /* We want both IN and OUT control traffic to be put on the same 
     2877       EP/SB list. */ 
     2878    out_traffic = 1; 
     2879  } else { 
     2880    out_traffic = usb_pipeout(urb->pipe); 
     2881  } 
     2882     
     2883  if (usb_pipetype(urb->pipe) == PIPE_ISOCHRONOUS) { 
     2884    epid_data = IO_STATE(R_USB_EPT_DATA_ISO, valid, yes) | 
     2885      /* FIXME: Change any to the actual port? */ 
     2886      IO_STATE(R_USB_EPT_DATA_ISO, port, any) | 
     2887      IO_FIELD(R_USB_EPT_DATA_ISO, max_len, maxlen) | 
     2888      IO_FIELD(R_USB_EPT_DATA_ISO, ep, endpoint) | 
     2889      IO_FIELD(R_USB_EPT_DATA_ISO, dev, devnum); 
     2890    etrax_epid_iso_set(epid, epid_data); 
     2891  } else { 
     2892    epid_data = IO_STATE(R_USB_EPT_DATA, valid, yes) | 
     2893      IO_FIELD(R_USB_EPT_DATA, low_speed, slow) | 
     2894      /* FIXME: Change any to the actual port? */ 
     2895      IO_STATE(R_USB_EPT_DATA, port, any) | 
     2896      IO_FIELD(R_USB_EPT_DATA, max_len, maxlen) | 
     2897      IO_FIELD(R_USB_EPT_DATA, ep, endpoint) | 
     2898      IO_FIELD(R_USB_EPT_DATA, dev, devnum); 
     2899    etrax_epid_set(epid, epid_data); 
     2900  } 
     2901   
     2902  epid_state[epid].out_traffic = out_traffic; 
     2903  epid_state[epid].type = usb_pipetype(urb->pipe); 
     2904 
     2905  tc_warn("Setting up ep:0x%x epid:%d (addr:%d endp:%d max_len:%d %s %s %s)\n", 
     2906          (unsigned int)ep, epid, devnum, endpoint, maxlen, 
     2907          str_type(urb->pipe), out_traffic ? "out" : "in", 
     2908          slow ? "low" : "full"); 
     2909 
     2910  /* Enable Isoc eof interrupt if we set up the first Isoc epid */ 
     2911  if(usb_pipeisoc(urb->pipe)) { 
     2912    isoc_epid_counter++; 
     2913    if(isoc_epid_counter == 1) { 
     2914      isoc_warn("Enabled Isoc eof interrupt\n"); 
     2915      *R_USB_IRQ_MASK_SET = IO_STATE(R_USB_IRQ_MASK_SET, iso_eof, set); 
     2916    } 
     2917  } 
     2918 
     2919  DBFEXIT; 
     2920  return epid; 
     2921} 
     2922 
     2923static void tc_free_epid(struct usb_host_endpoint *ep) { 
     2924  unsigned long flags; 
     2925  struct crisv10_ep_priv *ep_priv = ep->hcpriv; 
     2926  int epid; 
     2927  volatile int timeout = 10000; 
     2928 
     2929  DBFENTER; 
     2930 
     2931  if (ep_priv == NULL) { 
     2932    tc_warn("Trying to free unused epid on ep:0x%x\n", (unsigned int)ep); 
     2933    DBFEXIT; 
     2934    return; 
     2935  } 
     2936 
     2937  epid = ep_priv->epid; 
     2938 
     2939  /* Disable Isoc eof interrupt if we free the last Isoc epid */ 
     2940  if(epid_isoc(epid)) { 
     2941    ASSERT(isoc_epid_counter > 0); 
     2942    isoc_epid_counter--; 
     2943    if(isoc_epid_counter == 0) { 
     2944      *R_USB_IRQ_MASK_CLR = IO_STATE(R_USB_IRQ_MASK_CLR, iso_eof, clr); 
     2945      isoc_warn("Disabled Isoc eof interrupt\n"); 
     2946    } 
     2947  } 
     2948 
      2949  /* Take the lock manually instead of via the epid_x_x wrappers, 
      2950     because we need to poll here */ 
     2951  spin_lock_irqsave(&etrax_epid_lock, flags); 
     2952   
     2953  *R_USB_EPT_INDEX = IO_FIELD(R_USB_EPT_INDEX, value, epid); 
     2954  nop(); 
     2955  while((*R_USB_EPT_DATA & IO_MASK(R_USB_EPT_DATA, hold)) && 
     2956        (timeout-- > 0)); 
     2957  if(timeout == 0) { 
     2958    warn("Timeout while waiting for epid:%d to drop hold\n", epid); 
     2959  } 
     2960  /* This will, among other things, set the valid field to 0. */ 
     2961  *R_USB_EPT_DATA = 0; 
     2962  spin_unlock_irqrestore(&etrax_epid_lock, flags); 
     2963   
     2964  /* Free resource in software state info list */ 
     2965  epid_state[epid].inuse = 0; 
     2966 
     2967  /* Free private endpoint data */ 
     2968  ep_priv_free(ep); 
     2969   
     2970  DBFEXIT; 
     2971} 
     2972 
     2973static int tc_allocate_epid(void) { 
     2974  int i; 
     2975  DBFENTER; 
     2976  for (i = 0; i < NBR_OF_EPIDS; i++) { 
     2977    if (!epid_inuse(i)) { 
     2978      DBFEXIT; 
     2979      return i; 
     2980    } 
     2981  } 
     2982   
     2983  tc_warn("Found no free epids\n"); 
     2984  DBFEXIT; 
     2985  return -1; 
     2986} 
     2987 
    3432988 
    3442989/* Wrappers around the list functions (include/linux/list.h). */ 
    345  
    346 static inline int urb_list_empty(int epid) 
    347 { 
    348         return list_empty(&urb_list[epid]); 
     2990/* ---------------------------------------------------------- */ 
     2991static inline int __urb_list_empty(int epid) { 
     2992  int retval; 
     2993  retval = list_empty(&urb_list[epid]); 
     2994  return retval; 
    3492995} 
    3502996 
    3512997/* Returns first urb for this epid, or NULL if list is empty. */ 
    352 static inline struct urb *urb_list_first(int epid) 
    353 { 
    354         struct urb *first_urb = 0; 
    355  
    356         if (!urb_list_empty(epid)) { 
    357                 /* Get the first urb (i.e. head->next). */ 
    358                 urb_entry_t *urb_entry = list_entry((&urb_list[epid])->next, urb_entry_t, list); 
    359                 first_urb = urb_entry->urb; 
    360         } 
    361         return first_urb; 
     2998static inline struct urb *urb_list_first(int epid) { 
     2999  unsigned long flags; 
     3000  struct urb *first_urb = 0; 
     3001  spin_lock_irqsave(&urb_list_lock, flags); 
     3002  if (!__urb_list_empty(epid)) { 
     3003    /* Get the first urb (i.e. head->next). */ 
     3004    urb_entry_t *urb_entry = list_entry((&urb_list[epid])->next, urb_entry_t, list); 
     3005    first_urb = urb_entry->urb; 
     3006  } 
     3007  spin_unlock_irqrestore(&urb_list_lock, flags); 
     3008  return first_urb; 
    3623009} 
    3633010 
    3643011/* Adds an urb_entry last in the list for this epid. */ 
    365 static inline void urb_list_add(struct urb *urb, int epid) 
    366 { 
    367         urb_entry_t *urb_entry = kmalloc(sizeof(urb_entry_t), KMALLOC_FLAG); 
    368         assert(urb_entry); 
    369  
    370         urb_entry->urb = urb; 
    371         list_add_tail(&urb_entry->list, &urb_list[epid]); 
     3012static inline void urb_list_add(struct urb *urb, int epid, int mem_flags) { 
     3013  unsigned long flags; 
     3014  urb_entry_t *urb_entry = (urb_entry_t *)kmalloc(sizeof(urb_entry_t), mem_flags); 
     3015  ASSERT(urb_entry); 
     3016   
     3017  urb_entry->urb = urb; 
     3018  spin_lock_irqsave(&urb_list_lock, flags); 
     3019  list_add_tail(&urb_entry->list, &urb_list[epid]); 
     3020  spin_unlock_irqrestore(&urb_list_lock, flags); 
    3723021} 
    3733022 
    3743023/* Search through the list for an element that contains this urb. (The list 
    3753024   is expected to be short and the one we are about to delete will often be 
    376    the first in the list.) */ 
    377 static inline urb_entry_t *__urb_list_entry(struct urb *urb, int epid) 
     3025   the first in the list.) 
      3026   Should be protected by spin locks in the calling function */ 
     3027static inline urb_entry_t *__urb_list_entry(struct urb *urb, int epid) { 
     3028  struct list_head *entry; 
     3029  struct list_head *tmp; 
     3030  urb_entry_t *urb_entry; 
     3031   
     3032  list_for_each_safe(entry, tmp, &urb_list[epid]) { 
     3033    urb_entry = list_entry(entry, urb_entry_t, list); 
     3034    ASSERT(urb_entry); 
     3035    ASSERT(urb_entry->urb); 
     3036     
     3037    if (urb_entry->urb == urb) { 
     3038      return urb_entry; 
     3039    } 
     3040  } 
     3041  return 0; 
     3042} 
     3043 
     3044/* Same function as above but for global use. Protects list by spinlock */ 
     3045static inline urb_entry_t *urb_list_entry(struct urb *urb, int epid) { 
     3046  unsigned long flags; 
     3047  urb_entry_t *urb_entry; 
     3048  spin_lock_irqsave(&urb_list_lock, flags); 
     3049  urb_entry = __urb_list_entry(urb, epid); 
     3050  spin_unlock_irqrestore(&urb_list_lock, flags); 
     3051  return (urb_entry); 
     3052} 
     3053 
     3054/* Delete an urb from the list. */ 
     3055static inline void urb_list_del(struct urb *urb, int epid) { 
     3056  unsigned long flags; 
     3057  urb_entry_t *urb_entry; 
     3058 
     3059  /* Delete entry and free. */ 
     3060  spin_lock_irqsave(&urb_list_lock, flags); 
     3061  urb_entry = __urb_list_entry(urb, epid); 
     3062  ASSERT(urb_entry); 
     3063 
     3064  list_del(&urb_entry->list); 
     3065  spin_unlock_irqrestore(&urb_list_lock, flags); 
     3066  kfree(urb_entry); 
     3067} 
     3068 
     3069/* Move an urb to the end of the list. */ 
     3070static inline void urb_list_move_last(struct urb *urb, int epid) { 
     3071  unsigned long flags; 
     3072  urb_entry_t *urb_entry; 
     3073   
     3074  spin_lock_irqsave(&urb_list_lock, flags); 
     3075  urb_entry = __urb_list_entry(urb, epid); 
     3076  ASSERT(urb_entry); 
     3077 
     3078  list_del(&urb_entry->list); 
     3079  list_add_tail(&urb_entry->list, &urb_list[epid]); 
     3080  spin_unlock_irqrestore(&urb_list_lock, flags); 
     3081} 
     3082 
     3083/* Get the next urb in the list. */ 
     3084static inline struct urb *urb_list_next(struct urb *urb, int epid) { 
     3085  unsigned long flags; 
     3086  urb_entry_t *urb_entry; 
     3087 
     3088  spin_lock_irqsave(&urb_list_lock, flags); 
     3089  urb_entry = __urb_list_entry(urb, epid); 
     3090  ASSERT(urb_entry); 
     3091 
     3092  if (urb_entry->list.next != &urb_list[epid]) { 
     3093    struct list_head *elem = urb_entry->list.next; 
     3094    urb_entry = list_entry(elem, urb_entry_t, list); 
     3095    spin_unlock_irqrestore(&urb_list_lock, flags); 
     3096    return urb_entry->urb; 
     3097  } else { 
     3098    spin_unlock_irqrestore(&urb_list_lock, flags); 
     3099    return NULL; 
     3100  } 
     3101} 
     3102 
     3103struct USB_EP_Desc* create_ep(int epid, struct USB_SB_Desc* sb_desc, 
     3104                              int mem_flags) { 
     3105  struct USB_EP_Desc *ep_desc; 
     3106  ep_desc = (struct USB_EP_Desc *) kmem_cache_alloc(usb_desc_cache, mem_flags); 
     3107  if(ep_desc == NULL) 
     3108    return NULL; 
     3109  memset(ep_desc, 0, sizeof(struct USB_EP_Desc)); 
     3110 
     3111  ep_desc->hw_len = 0; 
     3112  ep_desc->command = (IO_FIELD(USB_EP_command, epid, epid) | 
     3113                      IO_STATE(USB_EP_command, enable, yes)); 
     3114  if(sb_desc == NULL) { 
     3115    ep_desc->sub = 0; 
     3116  } else { 
     3117    ep_desc->sub = virt_to_phys(sb_desc); 
     3118  } 
     3119  return ep_desc; 
     3120} 
     3121 
     3122#define TT_ZOUT  0 
     3123#define TT_IN    1 
     3124#define TT_OUT   2 
     3125#define TT_SETUP 3 
     3126 
     3127#define CMD_EOL  IO_STATE(USB_SB_command, eol, yes) 
     3128#define CMD_INTR IO_STATE(USB_SB_command, intr, yes) 
     3129#define CMD_FULL IO_STATE(USB_SB_command, full, yes) 
     3130 
      3131/* Allocation and setup of a generic SB. Used to create SETUP, OUT and ZOUT 
      3132   SBs. Also used by create_sb_in() to avoid duplicating the allocation 
      3133   procedure in two places */ 
     3134struct USB_SB_Desc* create_sb(struct USB_SB_Desc* sb_prev, int tt, void* data, 
     3135                              int datalen, int mem_flags) { 
     3136  struct USB_SB_Desc *sb_desc; 
     3137  sb_desc = (struct USB_SB_Desc*)kmem_cache_alloc(usb_desc_cache, mem_flags); 
     3138  if(sb_desc == NULL) 
     3139    return NULL; 
     3140  memset(sb_desc, 0, sizeof(struct USB_SB_Desc)); 
     3141 
     3142  sb_desc->command = IO_FIELD(USB_SB_command, tt, tt) | 
     3143                     IO_STATE(USB_SB_command, eot, yes); 
     3144 
     3145  sb_desc->sw_len = datalen; 
     3146  if(data != NULL) { 
     3147    sb_desc->buf = virt_to_phys(data); 
     3148  } else { 
     3149    sb_desc->buf = 0; 
     3150  } 
     3151  if(sb_prev != NULL) { 
     3152    sb_prev->next = virt_to_phys(sb_desc); 
     3153  } 
     3154  return sb_desc; 
     3155} 
     3156 
      3157/* Creates a copy of an existing SB by allocating space for it and copying 
      3158   its settings */ 
     3159struct USB_SB_Desc* create_sb_copy(struct USB_SB_Desc* sb_orig, int mem_flags) { 
     3160  struct USB_SB_Desc *sb_desc; 
     3161  sb_desc = (struct USB_SB_Desc*)kmem_cache_alloc(usb_desc_cache, mem_flags); 
     3162  if(sb_desc == NULL) 
     3163    return NULL; 
     3164 
     3165  memcpy(sb_desc, sb_orig, sizeof(struct USB_SB_Desc)); 
     3166  return sb_desc; 
     3167} 
     3168 
      3169/* A specific create_sb function for creating In SBs. This is because 
      3170   datalen in In SBs shows how many packets we are expecting. It also 
      3171   sets up the rem field to show how many bytes we expect in the last 
      3172   packet if it's not a full one */ 
     3173struct USB_SB_Desc* create_sb_in(struct USB_SB_Desc* sb_prev, int datalen, 
     3174                                 int maxlen, int mem_flags) { 
     3175  struct USB_SB_Desc *sb_desc; 
     3176  sb_desc = create_sb(sb_prev, TT_IN, NULL, 
     3177                      datalen ? (datalen - 1) / maxlen + 1 : 0, mem_flags); 
     3178  if(sb_desc == NULL) 
     3179    return NULL; 
     3180  sb_desc->command |= IO_FIELD(USB_SB_command, rem, datalen % maxlen); 
     3181  return sb_desc; 
     3182} 
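As a concrete illustration of the calculation above (values invented for the example): an In transfer of 100 bytes on an endpoint with maxlen 64 gives sw_len = (100 - 1) / 64 + 1 = 2 expected packets and rem = 100 % 64 = 36 bytes expected in the final short packet; a transfer length that is an exact multiple of maxlen gives rem = 0.

    /* Hypothetical call, for illustration only: */
    sb_desc = create_sb_in(NULL, 100, 64, GFP_KERNEL);
    /* sb_desc->sw_len == 2 packets, rem field == 36 bytes */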
     3183 
     3184void set_sb_cmds(struct USB_SB_Desc *sb_desc, __u16 flags) { 
     3185  sb_desc->command |= flags; 
     3186} 
     3187 
     3188int create_sb_for_urb(struct urb *urb, int mem_flags) { 
     3189  int is_out = !usb_pipein(urb->pipe); 
     3190  int type = usb_pipetype(urb->pipe); 
     3191  int maxlen = usb_maxpacket(urb->dev, urb->pipe, is_out); 
     3192  int buf_len = urb->transfer_buffer_length; 
     3193  void *buf = buf_len > 0 ? urb->transfer_buffer : NULL; 
     3194  struct USB_SB_Desc *sb_desc = NULL; 
     3195 
     3196  struct crisv10_urb_priv *urb_priv = (struct crisv10_urb_priv *)urb->hcpriv; 
     3197  ASSERT(urb_priv != NULL); 
     3198 
     3199  switch(type) { 
     3200  case PIPE_CONTROL: 
     3201    /* Setup stage */ 
     3202    sb_desc = create_sb(NULL, TT_SETUP, urb->setup_packet, 8, mem_flags); 
     3203    if(sb_desc == NULL) 
     3204      return -ENOMEM; 
     3205    set_sb_cmds(sb_desc, CMD_FULL); 
     3206 
     3207    /* Attach first SB to URB */ 
     3208    urb_priv->first_sb = sb_desc;     
     3209 
     3210    if (is_out) { /* Out Control URB */ 
     3211      /* If this Control OUT transfer has an optional data stage we add 
     3212         an OUT token before the mandatory IN (status) token */ 
     3213      if ((buf_len > 0) && buf) { 
     3214        sb_desc = create_sb(sb_desc, TT_OUT, buf, buf_len, mem_flags); 
     3215        if(sb_desc == NULL) 
     3216          return -ENOMEM; 
     3217        set_sb_cmds(sb_desc, CMD_FULL); 
     3218      } 
     3219 
     3220      /* Status stage */ 
     3221      /* The data length has to be exactly 1. This is due to a requirement 
     3222         of the USB specification that a host must be prepared to receive 
     3223         data in the status phase */ 
     3224      sb_desc = create_sb(sb_desc, TT_IN, NULL, 1, mem_flags); 
     3225      if(sb_desc == NULL) 
     3226        return -ENOMEM; 
     3227    } else { /* In control URB */ 
     3228      /* Data stage */ 
     3229      sb_desc = create_sb_in(sb_desc, buf_len, maxlen, mem_flags); 
     3230      if(sb_desc == NULL) 
     3231        return -ENOMEM; 
     3232 
     3233      /* Status stage */ 
      3234      /* Read the comment at the zout_buffer declaration for an explanation of this. */ 
     3235      sb_desc = create_sb(sb_desc, TT_ZOUT, &zout_buffer[0], 1, mem_flags); 
     3236      if(sb_desc == NULL) 
     3237        return -ENOMEM; 
     3238      /* Set descriptor interrupt flag for in URBs so we can finish URB after 
     3239         zout-packet has been sent */ 
     3240      set_sb_cmds(sb_desc, CMD_INTR | CMD_FULL); 
     3241    } 
     3242    /* Set end-of-list flag in last SB */ 
     3243    set_sb_cmds(sb_desc, CMD_EOL); 
     3244    /* Attach last SB to URB */ 
     3245    urb_priv->last_sb = sb_desc; 
     3246    break; 
     3247 
     3248  case PIPE_BULK: 
     3249    if (is_out) { /* Out Bulk URB */ 
     3250      sb_desc = create_sb(NULL, TT_OUT, buf, buf_len, mem_flags); 
     3251      if(sb_desc == NULL) 
     3252        return -ENOMEM; 
     3253      /* The full field is set to yes, even if we don't actually check that 
     3254         this is a full-length transfer (i.e., that transfer_buffer_length % 
     3255         maxlen = 0). 
     3256         Setting full prevents the USB controller from sending an empty packet 
     3257         in that case.  However, if URB_ZERO_PACKET was set we want that. */ 
     3258      if (!(urb->transfer_flags & URB_ZERO_PACKET)) { 
     3259        set_sb_cmds(sb_desc, CMD_FULL); 
     3260      } 
     3261    } else { /* In Bulk URB */ 
     3262      sb_desc = create_sb_in(NULL, buf_len, maxlen, mem_flags); 
     3263      if(sb_desc == NULL) 
     3264        return -ENOMEM; 
     3265    } 
     3266    /* Set end-of-list flag for last SB */ 
     3267    set_sb_cmds(sb_desc, CMD_EOL); 
     3268 
     3269    /* Attach SB to URB */ 
     3270    urb_priv->first_sb = sb_desc; 
     3271    urb_priv->last_sb = sb_desc; 
     3272    break; 
     3273 
     3274  case PIPE_INTERRUPT: 
     3275    if(is_out) { /* Out Intr URB */ 
     3276      sb_desc = create_sb(NULL, TT_OUT, buf, buf_len, mem_flags); 
     3277      if(sb_desc == NULL) 
     3278        return -ENOMEM; 
     3279 
     3280      /* The full field is set to yes, even if we don't actually check that 
     3281         this is a full-length transfer (i.e., that transfer_buffer_length % 
     3282         maxlen = 0). 
     3283         Setting full prevents the USB controller from sending an empty packet 
     3284         in that case.  However, if URB_ZERO_PACKET was set we want that. */ 
     3285      if (!(urb->transfer_flags & URB_ZERO_PACKET)) { 
     3286        set_sb_cmds(sb_desc, CMD_FULL); 
     3287      } 
      3288      /* Only generate a TX interrupt if it's an Out URB */ 
     3289      set_sb_cmds(sb_desc, CMD_INTR); 
     3290 
     3291    } else { /* In Intr URB */ 
     3292      sb_desc = create_sb_in(NULL, buf_len, maxlen, mem_flags); 
     3293      if(sb_desc == NULL) 
     3294        return -ENOMEM; 
     3295    } 
     3296    /* Set end-of-list flag for last SB */ 
     3297    set_sb_cmds(sb_desc, CMD_EOL); 
     3298 
     3299    /* Attach SB to URB */ 
     3300    urb_priv->first_sb = sb_desc; 
     3301    urb_priv->last_sb = sb_desc; 
     3302 
     3303    break; 
     3304  case PIPE_ISOCHRONOUS: 
     3305    if(is_out) { /* Out Isoc URB */ 
     3306      int i; 
     3307      if(urb->number_of_packets == 0) { 
     3308        tc_err("Can't create SBs for Isoc URB with zero packets\n"); 
     3309        return -EPIPE; 
     3310      } 
     3311      /* Create one SB descriptor for each packet and link them together. */ 
     3312      for(i = 0; i < urb->number_of_packets; i++) { 
     3313        if (urb->iso_frame_desc[i].length > 0) { 
     3314 
     3315          sb_desc = create_sb(sb_desc, TT_OUT, urb->transfer_buffer + 
     3316                              urb->iso_frame_desc[i].offset, 
     3317                              urb->iso_frame_desc[i].length, mem_flags); 
     3318          if(sb_desc == NULL) 
     3319            return -ENOMEM; 
     3320 
     3321          /* Check if it's a full length packet */ 
     3322          if (urb->iso_frame_desc[i].length == 
     3323              usb_maxpacket(urb->dev, urb->pipe, usb_pipeout(urb->pipe))) { 
     3324            set_sb_cmds(sb_desc, CMD_FULL); 
     3325          } 
     3326           
     3327        } else { /* zero length packet */ 
     3328          sb_desc = create_sb(sb_desc, TT_ZOUT, &zout_buffer[0], 1, mem_flags); 
     3329          if(sb_desc == NULL) 
     3330            return -ENOMEM; 
     3331          set_sb_cmds(sb_desc, CMD_FULL); 
     3332        } 
     3333        /* Attach first SB descriptor to URB */ 
     3334        if (i == 0) { 
     3335          urb_priv->first_sb = sb_desc; 
     3336        } 
     3337      } 
     3338      /* Set interrupt and end-of-list flags in last SB */ 
     3339      set_sb_cmds(sb_desc, CMD_INTR | CMD_EOL); 
     3340      /* Attach last SB descriptor to URB */ 
     3341      urb_priv->last_sb = sb_desc; 
     3342      tc_dbg("Created %d out SBs for Isoc URB:0x%x\n", 
     3343               urb->number_of_packets, (unsigned int)urb); 
     3344    } else { /* In Isoc URB */ 
     3345      /* Actual number of packets is not relevant for periodic in traffic as 
     3346         long as it is more than zero.  Set to 1 always. */ 
     3347      sb_desc = create_sb(sb_desc, TT_IN, NULL, 1, mem_flags); 
     3348      if(sb_desc == NULL) 
     3349        return -ENOMEM; 
     3350      /* Set end-of-list flags for SB */ 
     3351      set_sb_cmds(sb_desc, CMD_EOL); 
     3352 
     3353      /* Attach SB to URB */ 
     3354      urb_priv->first_sb = sb_desc; 
     3355      urb_priv->last_sb = sb_desc; 
     3356    } 
     3357    break; 
     3358  default: 
     3359    tc_err("Unknown pipe-type\n"); 
     3360    return -EPIPE; 
     3361    break; 
     3362  } 
     3363  return 0; 
     3364} 
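For reference, the PIPE_CONTROL case above ends up building the following SB chains (descriptive summary of the code paths above, not code from the driver):

    first_sb -> [SETUP  8 bytes, FULL]
             -> [IN     data stage, rem = buf_len % maxlen]
             -> [ZOUT   1 byte, INTR | FULL | EOL]          <- last_sb  (In control URB)

    first_sb -> [SETUP  8 bytes, FULL]
             -> [OUT    data stage, FULL]  (only if buf_len > 0)
             -> [IN     1 byte, EOL]                        <- last_sb  (Out control URB)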
     3365 
     3366int init_intr_urb(struct urb *urb, int mem_flags) { 
     3367  struct crisv10_urb_priv *urb_priv = (struct crisv10_urb_priv *)urb->hcpriv; 
     3368  struct USB_EP_Desc* ep_desc; 
     3369  int interval; 
     3370  int i; 
     3371  int ep_count; 
     3372 
     3373  ASSERT(urb_priv != NULL); 
     3374  ASSERT(usb_pipeint(urb->pipe)); 
      3375  /* We can't support an interval longer than the number of eof descriptors 
      3376     in TxIntrEPList */ 
     3377  if(urb->interval > MAX_INTR_INTERVAL) { 
     3378    tc_err("Interrupt interval %dms too big (max: %dms)\n", urb->interval, 
     3379           MAX_INTR_INTERVAL); 
     3380    return -EINVAL; 
     3381  } 
     3382 
      3383  /* We assume that the SB descriptors have already been set up */ 
     3384  ASSERT(urb_priv->first_sb != NULL); 
     3385 
      3386  /* Round the interval down to 2^n. It is obvious that this code favours 
      3387     smaller numbers, but that is actually a good thing */ 
      3388  /* FIXME: The "rounding error" for larger intervals will be quite 
      3389     large. For In traffic this shouldn't be a problem since it will only 
      3390     mean that we "poll" more often. */ 
     3391  interval = urb->interval; 
     3392  for (i = 0; interval; i++) { 
     3393    interval = interval >> 1; 
     3394  } 
     3395  urb_priv->interval = 1 << (i - 1); 
     3396 
      3397  /* We can only use the max interval for Out Interrupt because we can only 
      3398     handle one linked-in EP for a certain epid in the Intr descr array at a 
      3399     time. The USB Controller in the Etrax 100LX continues to process Intr EPs 
      3400     so we have no way of knowing which one caused the actual transfer if 
      3401     we have several linked in. */ 
     3402  if(usb_pipeout(urb->pipe)) { 
     3403    urb_priv->interval = MAX_INTR_INTERVAL; 
     3404  } 
     3405 
      3406  /* Calculate the number of EPs needed */ 
     3407  ep_count = MAX_INTR_INTERVAL / urb_priv->interval; 
     3408 
     3409  for(i = 0; i < ep_count; i++) { 
     3410    ep_desc = create_ep(urb_priv->epid, urb_priv->first_sb, mem_flags); 
     3411    if(ep_desc == NULL) { 
     3412      /* Free any descriptors that we may have allocated before failure */ 
     3413      while(i > 0) { 
     3414        i--; 
     3415        kfree(urb_priv->intr_ep_pool[i]); 
     3416      } 
     3417      return -ENOMEM; 
     3418    } 
     3419    urb_priv->intr_ep_pool[i] = ep_desc; 
     3420  } 
     3421  urb_priv->intr_ep_pool_length = ep_count; 
     3422  return 0; 
     3423} 
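A worked example of the interval rounding above (numbers chosen only for illustration): for an In interrupt URB with urb->interval = 10 the shift loop runs four times (10 is binary 1010, four significant bits), so urb_priv->interval becomes 1 << 3 = 8 ms, i.e. the requested interval is rounded down to the nearest power of two. An Out interrupt URB is then forced to MAX_INTR_INTERVAL regardless, and ep_count = MAX_INTR_INTERVAL / urb_priv->interval EP descriptors are allocated into the pool.

    /* Illustration of the rounding, mirroring the loop above: */
    interval = 10;                       /* binary 1010, four significant bits */
    for (i = 0; interval; i++)
      interval = interval >> 1;          /* loop body runs 4 times, so i == 4 */
    urb_priv->interval = 1 << (i - 1);   /* == 8 */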
     3424 
     3425/* DMA RX/TX functions */ 
     3426/* ----------------------- */ 
     3427 
     3428static void tc_dma_init_rx_list(void) { 
     3429  int i; 
     3430 
     3431  /* Setup descriptor list except last one */ 
     3432  for (i = 0; i < (NBR_OF_RX_DESC - 1); i++) { 
     3433    RxDescList[i].sw_len = RX_DESC_BUF_SIZE; 
     3434    RxDescList[i].command = 0; 
     3435    RxDescList[i].next = virt_to_phys(&RxDescList[i + 1]); 
     3436    RxDescList[i].buf = virt_to_phys(RxBuf + (i * RX_DESC_BUF_SIZE)); 
     3437    RxDescList[i].hw_len = 0; 
     3438    RxDescList[i].status = 0; 
     3439     
     3440    /* DMA IN cache bug. (struct etrax_dma_descr has the same layout as 
     3441       USB_IN_Desc for the relevant fields.) */ 
     3442    prepare_rx_descriptor((struct etrax_dma_descr*)&RxDescList[i]); 
     3443     
     3444  } 
     3445  /* Special handling of last descriptor */ 
     3446  RxDescList[i].sw_len = RX_DESC_BUF_SIZE; 
     3447  RxDescList[i].command = IO_STATE(USB_IN_command, eol, yes); 
     3448  RxDescList[i].next = virt_to_phys(&RxDescList[0]); 
     3449  RxDescList[i].buf = virt_to_phys(RxBuf + (i * RX_DESC_BUF_SIZE)); 
     3450  RxDescList[i].hw_len = 0; 
     3451  RxDescList[i].status = 0; 
     3452   
     3453  /* Setup list pointers that show progress in list */ 
     3454  myNextRxDesc = &RxDescList[0]; 
     3455  myLastRxDesc = &RxDescList[NBR_OF_RX_DESC - 1]; 
     3456   
     3457  flush_etrax_cache(); 
     3458  /* Point DMA to first descriptor in list and start it */ 
     3459  *R_DMA_CH9_FIRST = virt_to_phys(myNextRxDesc); 
     3460  *R_DMA_CH9_CMD = IO_STATE(R_DMA_CH9_CMD, cmd, start); 
     3461} 
     3462 
     3463 
     3464static void tc_dma_init_tx_bulk_list(void) { 
     3465  int i; 
     3466  volatile struct USB_EP_Desc *epDescr; 
     3467 
     3468  for (i = 0; i < (NBR_OF_EPIDS - 1); i++) { 
     3469    epDescr = &(TxBulkEPList[i]); 
     3470    CHECK_ALIGN(epDescr); 
     3471    epDescr->hw_len = 0; 
     3472    epDescr->command = IO_FIELD(USB_EP_command, epid, i); 
     3473    epDescr->sub = 0; 
     3474    epDescr->next = virt_to_phys(&TxBulkEPList[i + 1]); 
     3475 
     3476    /* Initiate two EPs, disabled and with the eol flag set. No need for any 
     3477       preserved epid. */ 
     3478     
     3479    /* The first one has the intr flag set so we get an interrupt when the DMA 
     3480       channel is about to become disabled. */ 
     3481    CHECK_ALIGN(&TxBulkDummyEPList[i][0]); 
     3482    TxBulkDummyEPList[i][0].hw_len = 0; 
     3483    TxBulkDummyEPList[i][0].command = (IO_FIELD(USB_EP_command, epid, DUMMY_EPID) | 
     3484                                       IO_STATE(USB_EP_command, eol, yes) | 
     3485                                       IO_STATE(USB_EP_command, intr, yes)); 
     3486    TxBulkDummyEPList[i][0].sub = 0; 
     3487    TxBulkDummyEPList[i][0].next = virt_to_phys(&TxBulkDummyEPList[i][1]); 
     3488     
     3489    /* The second one. */ 
     3490    CHECK_ALIGN(&TxBulkDummyEPList[i][1]); 
     3491    TxBulkDummyEPList[i][1].hw_len = 0; 
     3492    TxBulkDummyEPList[i][1].command = (IO_FIELD(USB_EP_command, epid, DUMMY_EPID) | 
     3493                                       IO_STATE(USB_EP_command, eol, yes)); 
     3494    TxBulkDummyEPList[i][1].sub = 0; 
     3495    /* The last dummy's next pointer is the same as the current EP's next pointer. */ 
     3496    TxBulkDummyEPList[i][1].next = virt_to_phys(&TxBulkEPList[i + 1]); 
     3497  } 
     3498 
     3499  /* Special handling of last descr in list, make list circular */ 
     3500  epDescr = &TxBulkEPList[i]; 
     3501  CHECK_ALIGN(epDescr); 
     3502  epDescr->hw_len = 0; 
     3503  epDescr->command = IO_STATE(USB_EP_command, eol, yes) | 
     3504    IO_FIELD(USB_EP_command, epid, i); 
     3505  epDescr->sub = 0; 
     3506  epDescr->next = virt_to_phys(&TxBulkEPList[0]); 
     3507   
     3508  /* Init DMA sub-channel pointers to last item in each list */ 
     3509  *R_DMA_CH8_SUB0_EP = virt_to_phys(&TxBulkEPList[i]); 
     3510  /* No point in starting the bulk channel yet. 
     3511   *R_DMA_CH8_SUB0_CMD = IO_STATE(R_DMA_CH8_SUB0_CMD, cmd, start); */ 
     3512} 
     3513 
     3514static void tc_dma_init_tx_ctrl_list(void) { 
     3515  int i; 
     3516  volatile struct USB_EP_Desc *epDescr; 
     3517 
     3518  for (i = 0; i < (NBR_OF_EPIDS - 1); i++) { 
     3519    epDescr = &(TxCtrlEPList[i]); 
     3520    CHECK_ALIGN(epDescr); 
     3521    epDescr->hw_len = 0; 
     3522    epDescr->command = IO_FIELD(USB_EP_command, epid, i); 
     3523    epDescr->sub = 0; 
     3524    epDescr->next = virt_to_phys(&TxCtrlEPList[i + 1]); 
     3525  } 
     3526  /* Special handling of last descr in list, make list circular */ 
     3527  epDescr = &TxCtrlEPList[i]; 
     3528  CHECK_ALIGN(epDescr); 
     3529  epDescr->hw_len = 0; 
     3530  epDescr->command = IO_STATE(USB_EP_command, eol, yes) | 
     3531    IO_FIELD(USB_EP_command, epid, i); 
     3532  epDescr->sub = 0; 
     3533  epDescr->next = virt_to_phys(&TxCtrlEPList[0]); 
     3534   
     3535  /* Init DMA sub-channel pointers to last item in each list */ 
     3536  *R_DMA_CH8_SUB1_EP = virt_to_phys(&TxCtrlEPList[i]); 
     3537  /* No point in starting the ctrl channel yet. 
      3538   *R_DMA_CH8_SUB1_CMD = IO_STATE(R_DMA_CH8_SUB1_CMD, cmd, start); */ 
     3539} 
     3540 
     3541 
     3542static void tc_dma_init_tx_intr_list(void) { 
     3543  int i; 
     3544 
     3545  TxIntrSB_zout.sw_len = 1; 
     3546  TxIntrSB_zout.next = 0; 
     3547  TxIntrSB_zout.buf = virt_to_phys(&zout_buffer[0]); 
     3548  TxIntrSB_zout.command = (IO_FIELD(USB_SB_command, rem, 0) | 
     3549                           IO_STATE(USB_SB_command, tt, zout) | 
     3550                           IO_STATE(USB_SB_command, full, yes) | 
     3551                           IO_STATE(USB_SB_command, eot, yes) | 
     3552                           IO_STATE(USB_SB_command, eol, yes)); 
     3553   
     3554  for (i = 0; i < (MAX_INTR_INTERVAL - 1); i++) { 
     3555    CHECK_ALIGN(&TxIntrEPList[i]); 
     3556    TxIntrEPList[i].hw_len = 0; 
     3557    TxIntrEPList[i].command = 
     3558      (IO_STATE(USB_EP_command, eof, yes) | 
     3559       IO_STATE(USB_EP_command, enable, yes) | 
     3560       IO_FIELD(USB_EP_command, epid, INVALID_EPID)); 
     3561    TxIntrEPList[i].sub = virt_to_phys(&TxIntrSB_zout); 
     3562    TxIntrEPList[i].next = virt_to_phys(&TxIntrEPList[i + 1]); 
     3563  } 
     3564 
     3565  /* Special handling of last descr in list, make list circular */ 
     3566  CHECK_ALIGN(&TxIntrEPList[i]); 
     3567  TxIntrEPList[i].hw_len = 0; 
     3568  TxIntrEPList[i].command = 
     3569    (IO_STATE(USB_EP_command, eof, yes) | 
     3570     IO_STATE(USB_EP_command, eol, yes) | 
     3571     IO_STATE(USB_EP_command, enable, yes) | 
     3572     IO_FIELD(USB_EP_command, epid, INVALID_EPID)); 
     3573  TxIntrEPList[i].sub = virt_to_phys(&TxIntrSB_zout); 
     3574  TxIntrEPList[i].next = virt_to_phys(&TxIntrEPList[0]); 
     3575 
     3576  intr_dbg("Initiated Intr EP descriptor list\n"); 
     3577 
     3578 
     3579  /* Connect DMA 8 sub-channel 2 to first in list */ 
     3580  *R_DMA_CH8_SUB2_EP = virt_to_phys(&TxIntrEPList[0]); 
     3581} 
     3582 
     3583static void tc_dma_init_tx_isoc_list(void) { 
     3584  int i; 
     3585 
     3586  DBFENTER; 
     3587 
      3588  /* Read the comment at the zout_buffer declaration for an explanation of this. */ 
     3589  TxIsocSB_zout.sw_len = 1; 
     3590  TxIsocSB_zout.next = 0; 
     3591  TxIsocSB_zout.buf = virt_to_phys(&zout_buffer[0]); 
     3592  TxIsocSB_zout.command = (IO_FIELD(USB_SB_command, rem, 0) | 
     3593                           IO_STATE(USB_SB_command, tt, zout) | 
     3594                           IO_STATE(USB_SB_command, full, yes) | 
     3595                           IO_STATE(USB_SB_command, eot, yes) | 
     3596                           IO_STATE(USB_SB_command, eol, yes)); 
     3597 
     3598  /* The last isochronous EP descriptor is a dummy. */ 
     3599  for (i = 0; i < (NBR_OF_EPIDS - 1); i++) { 
     3600    CHECK_ALIGN(&TxIsocEPList[i]); 
     3601    TxIsocEPList[i].hw_len = 0; 
     3602    TxIsocEPList[i].command = IO_FIELD(USB_EP_command, epid, i); 
     3603    TxIsocEPList[i].sub = 0; 
     3604    TxIsocEPList[i].next = virt_to_phys(&TxIsocEPList[i + 1]); 
     3605  } 
     3606 
     3607  CHECK_ALIGN(&TxIsocEPList[i]); 
     3608  TxIsocEPList[i].hw_len = 0; 
     3609 
     3610  /* Must enable the last EP descr to get eof interrupt. */ 
     3611  TxIsocEPList[i].command = (IO_STATE(USB_EP_command, enable, yes) | 
     3612                             IO_STATE(USB_EP_command, eof, yes) | 
     3613                             IO_STATE(USB_EP_command, eol, yes) | 
     3614                             IO_FIELD(USB_EP_command, epid, INVALID_EPID)); 
     3615  TxIsocEPList[i].sub = virt_to_phys(&TxIsocSB_zout); 
     3616  TxIsocEPList[i].next = virt_to_phys(&TxIsocEPList[0]); 
     3617 
     3618  *R_DMA_CH8_SUB3_EP = virt_to_phys(&TxIsocEPList[0]); 
     3619  *R_DMA_CH8_SUB3_CMD = IO_STATE(R_DMA_CH8_SUB3_CMD, cmd, start); 
     3620} 
     3621 
     3622static int tc_dma_init(struct usb_hcd *hcd) { 
     3623  tc_dma_init_rx_list(); 
     3624  tc_dma_init_tx_bulk_list(); 
     3625  tc_dma_init_tx_ctrl_list(); 
     3626  tc_dma_init_tx_intr_list(); 
     3627  tc_dma_init_tx_isoc_list(); 
     3628 
     3629  if (cris_request_dma(USB_TX_DMA_NBR, 
     3630                       "ETRAX 100LX built-in USB (Tx)", 
     3631                       DMA_VERBOSE_ON_ERROR, 
     3632                       dma_usb)) { 
     3633    err("Could not allocate DMA ch 8 for USB"); 
     3634    return -EBUSY; 
     3635  } 
     3636         
     3637  if (cris_request_dma(USB_RX_DMA_NBR, 
     3638                       "ETRAX 100LX built-in USB (Rx)", 
     3639                       DMA_VERBOSE_ON_ERROR, 
     3640                       dma_usb)) { 
     3641    err("Could not allocate DMA ch 9 for USB"); 
     3642    return -EBUSY; 
     3643  } 
     3644 
     3645  *R_IRQ_MASK2_SET = 
     3646    /* Note that these interrupts are not used. */ 
     3647    IO_STATE(R_IRQ_MASK2_SET, dma8_sub0_descr, set) | 
     3648    /* Sub channel 1 (ctrl) descr. interrupts are used. */ 
     3649    IO_STATE(R_IRQ_MASK2_SET, dma8_sub1_descr, set) | 
     3650    IO_STATE(R_IRQ_MASK2_SET, dma8_sub2_descr, set) | 
     3651    /* Sub channel 3 (isoc) descr. interrupts are used. */ 
     3652    IO_STATE(R_IRQ_MASK2_SET, dma8_sub3_descr, set); 
     3653   
     3654  /* Note that the dma9_descr interrupt is not used. */ 
     3655  *R_IRQ_MASK2_SET = 
     3656    IO_STATE(R_IRQ_MASK2_SET, dma9_eop, set) | 
     3657    IO_STATE(R_IRQ_MASK2_SET, dma9_descr, set); 
     3658 
     3659  if (request_irq(ETRAX_USB_RX_IRQ, tc_dma_rx_interrupt, 0, 
     3660                  "ETRAX 100LX built-in USB (Rx)", hcd)) { 
     3661    err("Could not allocate IRQ %d for USB", ETRAX_USB_RX_IRQ); 
     3662    return -EBUSY; 
     3663  } 
     3664   
     3665  if (request_irq(ETRAX_USB_TX_IRQ, tc_dma_tx_interrupt, 0, 
     3666                  "ETRAX 100LX built-in USB (Tx)", hcd)) { 
     3667    err("Could not allocate IRQ %d for USB", ETRAX_USB_TX_IRQ); 
     3668    return -EBUSY; 
     3669  } 
     3670 
     3671  return 0; 
     3672} 
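
/* A minimal, standalone sketch of the pattern the tx list init functions
 * above all share: a statically allocated ring of EP descriptors, each
 * carrying its epid, with the last entry marked end-of-list and pointing
 * back to entry 0.  Everything below (type layout, flag value, the
 * fake_phys() helper) is invented for illustration and is not part of the
 * driver. */

#include <stdint.h>
#include <stdio.h>

#define N_EPIDS  32          /* assumed pool size, like NBR_OF_EPIDS */
#define CMD_EOL  0x8000u     /* invented "end of list" flag          */

struct ep_desc {
  uint16_t hw_len;           /* cleared; the hardware fills this in  */
  uint16_t command;          /* epid in the low bits, flags above    */
  uint32_t sub;              /* 0 = no sub list attached yet         */
  uint32_t next;             /* "physical" address of the next entry */
};

static struct ep_desc ring[N_EPIDS];

/* Stand-in for virt_to_phys(): here we just use the index as an address. */
static uint32_t fake_phys(int idx) { return (uint32_t)idx; }

static void init_ring(void)
{
  int i;

  for (i = 0; i < N_EPIDS - 1; i++) {
    ring[i].hw_len  = 0;
    ring[i].command = (uint16_t)i;            /* epid field only      */
    ring[i].sub     = 0;
    ring[i].next    = fake_phys(i + 1);
  }
  /* Last entry: mark end-of-list and close the circle back to entry 0. */
  ring[i].hw_len  = 0;
  ring[i].command = (uint16_t)(CMD_EOL | (unsigned)i);
  ring[i].sub     = 0;
  ring[i].next    = fake_phys(0);
}

int main(void)
{
  init_ring();
  printf("last -> %u  eol=%d\n",
         (unsigned)ring[N_EPIDS - 1].next,
         !!(ring[N_EPIDS - 1].command & CMD_EOL));
  return 0;
}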
     3673 
     3674static void tc_dma_destroy(void) { 
     3675  free_irq(ETRAX_USB_RX_IRQ, NULL); 
     3676  free_irq(ETRAX_USB_TX_IRQ, NULL); 
     3677 
     3678  cris_free_dma(USB_TX_DMA_NBR, "ETRAX 100LX built-in USB (Tx)"); 
     3679  cris_free_dma(USB_RX_DMA_NBR, "ETRAX 100LX built-in USB (Rx)"); 
     3680 
     3681} 
     3682 
     3683static void tc_dma_link_intr_urb(struct urb *urb); 
     3684 
     3685/* Handle processing of Bulk, Ctrl and Intr queues */ 
     3686static void tc_dma_process_queue(int epid) { 
     3687  struct urb *urb; 
     3688  struct crisv10_urb_priv *urb_priv; 
     3689  unsigned long flags; 
     3690  char toggle; 
     3691 
     3692  if(epid_state[epid].disabled) { 
     3693    /* Don't process any URBs on a disabled endpoint */ 
     3694    return; 
     3695  } 
     3696 
     3697  /* Do not disturb us while fiddling with EPs and epids */ 
     3698  local_irq_save(flags); 
     3699 
      3700  /* For Bulk, Ctrl and Intr we can only have one URB active at a time for 
      3701     a specific EP. */ 
     3702  if(activeUrbList[epid] != NULL) { 
     3703    /* An URB is already active on EP, skip checking queue */ 
     3704    local_irq_restore(flags); 
     3705    return; 
     3706  } 
     3707 
     3708  urb = urb_list_first(epid); 
     3709  if(urb == NULL) { 
      3710    /* No URB waiting in EP queue. Nothing to do. */ 
     3711    local_irq_restore(flags); 
     3712    return; 
     3713  } 
     3714 
     3715  urb_priv = urb->hcpriv; 
     3716  ASSERT(urb_priv != NULL); 
     3717  ASSERT(urb_priv->urb_state == NOT_STARTED); 
     3718  ASSERT(!usb_pipeisoc(urb->pipe)); 
     3719 
     3720  /* Remove this URB from the queue and move it to active */ 
     3721  activeUrbList[epid] = urb; 
     3722  urb_list_del(urb, epid); 
     3723 
     3724  urb_priv->urb_state = STARTED; 
     3725 
     3726  /* Reset error counters (regardless of which direction this traffic is). */ 
     3727  etrax_epid_clear_error(epid); 
     3728 
     3729  /* Special handling of Intr EP lists */ 
     3730  if(usb_pipeint(urb->pipe)) { 
     3731    tc_dma_link_intr_urb(urb); 
     3732    local_irq_restore(flags); 
     3733    return; 
     3734  } 
     3735 
     3736  /* Software must preset the toggle bits for Bulk and Ctrl */ 
     3737  if(usb_pipecontrol(urb->pipe)) { 
     3738    /* Toggle bits are initialized only during setup transaction in a 
     3739       CTRL transfer */ 
     3740    etrax_epid_set_toggle(epid, 0, 0); 
     3741    etrax_epid_set_toggle(epid, 1, 0); 
     3742  } else { 
     3743    toggle = usb_gettoggle(urb->dev, usb_pipeendpoint(urb->pipe), 
     3744                           usb_pipeout(urb->pipe)); 
     3745    etrax_epid_set_toggle(epid, usb_pipeout(urb->pipe), toggle); 
     3746  } 
     3747 
     3748  tc_dbg("Added SBs from (URB:0x%x %s %s) to epid %d: %s\n", 
     3749         (unsigned int)urb, str_dir(urb->pipe), str_type(urb->pipe), epid, 
     3750         sblist_to_str(urb_priv->first_sb)); 
     3751 
     3752  /* We start the DMA sub channel without checking if it's running or not, 
     3753     because: 
     3754     1) If it's already running, issuing the start command is a nop. 
     3755     2) We avoid a test-and-set race condition. */ 
     3756  switch(usb_pipetype(urb->pipe)) { 
     3757  case PIPE_BULK: 
     3758    /* Assert that the EP descriptor is disabled. */ 
     3759    ASSERT(!(TxBulkEPList[epid].command & IO_MASK(USB_EP_command, enable))); 
     3760 
     3761    /* Set up and enable the EP descriptor. */ 
     3762    TxBulkEPList[epid].sub = virt_to_phys(urb_priv->first_sb); 
     3763    TxBulkEPList[epid].hw_len = 0; 
     3764    TxBulkEPList[epid].command |= IO_STATE(USB_EP_command, enable, yes); 
     3765 
     3766    /* Check if the dummy list is already with us (if several urbs were queued). */ 
     3767    if (usb_pipein(urb->pipe) && (TxBulkEPList[epid].next != virt_to_phys(&TxBulkDummyEPList[epid][0]))) { 
     3768      tc_dbg("Inviting dummy list to the party for urb 0x%lx, epid %d",  
     3769             (unsigned long)urb, epid); 
     3770       
     3771      /* We don't need to check if the DMA is at this EP or not before changing the 
     3772         next pointer, since we will do it in one 32-bit write (EP descriptors are 
     3773         32-bit aligned). */ 
     3774      TxBulkEPList[epid].next = virt_to_phys(&TxBulkDummyEPList[epid][0]); 
     3775    } 
     3776 
     3777    restart_dma8_sub0(); 
     3778 
     3779    /* Update/restart the bulk start timer since we just started the channel.*/ 
     3780    mod_timer(&bulk_start_timer, jiffies + BULK_START_TIMER_INTERVAL); 
     3781    /* Update/restart the bulk eot timer since we just inserted traffic. */ 
     3782    mod_timer(&bulk_eot_timer, jiffies + BULK_EOT_TIMER_INTERVAL); 
     3783    break; 
     3784  case PIPE_CONTROL: 
     3785    /* Assert that the EP descriptor is disabled. */ 
     3786    ASSERT(!(TxCtrlEPList[epid].command & IO_MASK(USB_EP_command, enable))); 
     3787 
     3788    /* Set up and enable the EP descriptor. */ 
     3789    TxCtrlEPList[epid].sub = virt_to_phys(urb_priv->first_sb); 
     3790    TxCtrlEPList[epid].hw_len = 0; 
     3791    TxCtrlEPList[epid].command |= IO_STATE(USB_EP_command, enable, yes); 
     3792 
     3793    *R_DMA_CH8_SUB1_CMD = IO_STATE(R_DMA_CH8_SUB1_CMD, cmd, start); 
     3794    break; 
     3795  } 
     3796  local_irq_restore(flags); 
     3797} 
     3798 
     3799static void tc_dma_link_intr_urb(struct urb *urb) { 
     3800  struct crisv10_urb_priv *urb_priv = urb->hcpriv; 
     3801  volatile struct USB_EP_Desc *tmp_ep; 
     3802  struct USB_EP_Desc *ep_desc; 
     3803  int i = 0, epid; 
     3804  int pool_idx = 0; 
     3805 
     3806  ASSERT(urb_priv != NULL); 
     3807  epid = urb_priv->epid; 
     3808  ASSERT(urb_priv->interval > 0); 
     3809  ASSERT(urb_priv->intr_ep_pool_length > 0); 
     3810 
     3811  tmp_ep = &TxIntrEPList[0]; 
     3812 
      3813  /* Only insert one EP descriptor in the list for Out Intr URBs. 
      3814     We can only handle Out Intr with an interval of 128 ms because 
      3815     it's not possible to insert several Out Intr EPs; they are not 
      3816     consumed by the DMA. */ 
     3817  if(usb_pipeout(urb->pipe)) { 
     3818    ep_desc = urb_priv->intr_ep_pool[0]; 
     3819    ASSERT(ep_desc); 
     3820    ep_desc->next = tmp_ep->next; 
     3821    tmp_ep->next = virt_to_phys(ep_desc); 
     3822    i++; 
     3823  } else { 
     3824    /* Loop through Intr EP descriptor list and insert EP for URB at 
     3825       specified interval */ 
     3826    do { 
      3827      /* Each EP descriptor with the eof flag set signals a new frame */ 
     3828      if (tmp_ep->command & IO_MASK(USB_EP_command, eof)) { 
      3829        /* Insert an EP from the URB's EP pool at the correct interval */ 
     3830        if ((i % urb_priv->interval) == 0) { 
     3831          ep_desc = urb_priv->intr_ep_pool[pool_idx]; 
     3832          ASSERT(ep_desc); 
     3833          ep_desc->next = tmp_ep->next; 
     3834          tmp_ep->next = virt_to_phys(ep_desc); 
     3835          pool_idx++; 
     3836          ASSERT(pool_idx <= urb_priv->intr_ep_pool_length); 
     3837        } 
     3838        i++; 
     3839      } 
     3840      tmp_ep = (struct USB_EP_Desc *)phys_to_virt(tmp_ep->next); 
     3841    } while(tmp_ep != &TxIntrEPList[0]); 
     3842  } 
     3843 
     3844  intr_dbg("Added SBs to intr epid %d: %s interval:%d (%d EP)\n", epid, 
     3845           sblist_to_str(urb_priv->first_sb), urb_priv->interval, pool_idx); 
     3846 
     3847  /* We start the DMA sub channel without checking if it's running or not, 
     3848     because: 
     3849     1) If it's already running, issuing the start command is a nop. 
     3850     2) We avoid a test-and-set race condition. */ 
     3851  *R_DMA_CH8_SUB2_CMD = IO_STATE(R_DMA_CH8_SUB2_CMD, cmd, start); 
     3852} 
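
/* tc_dma_link_intr_urb() above walks the circular interrupt ring, where a
 * descriptor with the eof flag marks a frame boundary, and splices one
 * descriptor from the URB's pool in after every `interval`-th frame.  A
 * minimal model of that splice-at-interval loop using ordinary pointers
 * instead of physical addresses; all names below are invented for
 * illustration. */

#include <stddef.h>
#include <stdio.h>

struct node {
  int eof;                  /* 1 = this node marks a frame boundary */
  int tag;                  /* just for printing                    */
  struct node *next;
};

/* Insert one node from `pool` after every `interval`-th frame boundary of
 * the circular list starting at `head`.  Returns how many nodes were used. */
static int link_at_interval(struct node *head, struct node *pool,
                            int pool_len, int interval)
{
  struct node *cur = head;
  int frame = 0, used = 0;

  do {
    if (cur->eof) {
      if ((frame % interval) == 0 && used < pool_len) {
        pool[used].next = cur->next;   /* splice after the boundary node */
        cur->next = &pool[used];
        used++;
      }
      frame++;
    }
    cur = cur->next;
  } while (cur != head);

  return used;
}

int main(void)
{
  struct node ring[8], pool[4];
  int i;

  for (i = 0; i < 8; i++) {
    ring[i].eof = 1;                   /* every slot is a frame boundary */
    ring[i].tag = i;
    ring[i].next = &ring[(i + 1) % 8];
  }
  for (i = 0; i < 4; i++) { pool[i].eof = 0; pool[i].tag = 100 + i; }

  printf("spliced %d pool nodes\n",
         link_at_interval(&ring[0], pool, 4, 2));  /* every 2nd frame */
  return 0;
}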
     3853 
     3854static void tc_dma_process_isoc_urb(struct urb *urb) { 
     3855  unsigned long flags; 
     3856  struct crisv10_urb_priv *urb_priv = urb->hcpriv; 
     3857  int epid; 
     3858 
     3859  /* Do not disturb us while fiddling with EPs and epids */ 
     3860  local_irq_save(flags); 
     3861 
     3862  ASSERT(urb_priv); 
     3863  ASSERT(urb_priv->first_sb); 
     3864  epid = urb_priv->epid; 
     3865 
     3866  if(activeUrbList[epid] == NULL) { 
     3867    /* EP is idle, so make this URB active */ 
     3868    activeUrbList[epid] = urb; 
     3869    urb_list_del(urb, epid); 
     3870    ASSERT(TxIsocEPList[epid].sub == 0); 
     3871    ASSERT(!(TxIsocEPList[epid].command & 
     3872             IO_STATE(USB_EP_command, enable, yes))); 
     3873 
      3874    /* Differentiate between In and Out Isoc, because In SBs are not consumed. */ 
     3875    if(usb_pipein(urb->pipe)) { 
      3876    /* Each EP for In Isoc will have only one SB descriptor, set up when 
      3877       submitting the first active urb. We do it here by copying from the 
      3878       URB's pre-allocated SB. */ 
     3879      memcpy((void *)&(TxIsocSBList[epid]), urb_priv->first_sb, 
     3880             sizeof(TxIsocSBList[epid])); 
     3881      TxIsocEPList[epid].hw_len = 0; 
     3882      TxIsocEPList[epid].sub = virt_to_phys(&(TxIsocSBList[epid])); 
     3883    } else { 
     3884      /* For Out Isoc we attach the pre-allocated list of SBs for the URB */ 
     3885      TxIsocEPList[epid].hw_len = 0; 
     3886      TxIsocEPList[epid].sub = virt_to_phys(urb_priv->first_sb); 
     3887 
     3888      isoc_dbg("Attached first URB:0x%x[%d] to epid:%d first_sb:0x%x" 
     3889               " last_sb::0x%x\n", 
     3890               (unsigned int)urb, urb_priv->urb_num, epid, 
     3891               (unsigned int)(urb_priv->first_sb), 
     3892               (unsigned int)(urb_priv->last_sb)); 
     3893    } 
     3894 
     3895    if (urb->transfer_flags & URB_ISO_ASAP) { 
     3896      /* The isoc transfer should be started as soon as possible. The 
     3897         start_frame field is a return value if URB_ISO_ASAP was set. Comparing 
     3898         R_USB_FM_NUMBER with a USB Chief trace shows that the first isoc IN 
     3899         token is sent 2 frames later. I'm not sure how this affects usage of 
     3900         the start_frame field by the device driver, or how it affects things 
      3901         when URB_ISO_ASAP is not set, so there's no compensation for 
     3902         the 2 frame "lag" here. */ 
     3903      urb->start_frame = (*R_USB_FM_NUMBER & 0x7ff); 
     3904      TxIsocEPList[epid].command |= IO_STATE(USB_EP_command, enable, yes); 
     3905      urb_priv->urb_state = STARTED; 
     3906      isoc_dbg("URB_ISO_ASAP set, urb->start_frame set to %d\n", 
     3907               urb->start_frame); 
     3908    } else { 
     3909      /* Not started yet. */ 
     3910      urb_priv->urb_state = NOT_STARTED; 
     3911      isoc_warn("urb_priv->urb_state set to NOT_STARTED for URB:0x%x\n", 
     3912                (unsigned int)urb); 
     3913    } 
     3914 
     3915  } else { 
     3916    /* An URB is already active on the EP. Leave URB in queue and let 
     3917       finish_isoc_urb process it after current active URB */ 
     3918    ASSERT(TxIsocEPList[epid].sub != 0); 
     3919 
     3920    if(usb_pipein(urb->pipe)) { 
      3921      /* Because there already is an active In URB on this epid we do nothing; 
      3922         the finish_isoc_urb() function will handle switching to the next URB. */ 
     3923 
     3924    } else { /* For Out Isoc, insert new URBs traffic last in SB-list. */ 
     3925      struct USB_SB_Desc *temp_sb_desc; 
     3926 
      3927      /* Set state STARTED on all Out Isoc URBs added to the SB list because we 
      3928         don't know how many of them are finished before the descr interrupt. */ 
     3929      urb_priv->urb_state = STARTED; 
     3930 
      3931      /* Find the end of the current SB list by looking for the SB with the eol flag set */ 
     3932      temp_sb_desc = phys_to_virt(TxIsocEPList[epid].sub); 
     3933      while ((temp_sb_desc->command & IO_MASK(USB_SB_command, eol)) != 
     3934             IO_STATE(USB_SB_command, eol, yes)) { 
     3935        ASSERT(temp_sb_desc->next); 
     3936        temp_sb_desc = phys_to_virt(temp_sb_desc->next); 
     3937      } 
     3938 
     3939      isoc_dbg("Appended URB:0x%x[%d] (first:0x%x last:0x%x) to epid:%d" 
     3940               " sub:0x%x eol:0x%x\n", 
     3941               (unsigned int)urb, urb_priv->urb_num, 
     3942               (unsigned int)(urb_priv->first_sb), 
     3943               (unsigned int)(urb_priv->last_sb), epid, 
     3944               (unsigned int)phys_to_virt(TxIsocEPList[epid].sub), 
     3945               (unsigned int)temp_sb_desc); 
     3946 
     3947      /* Next pointer must be set before eol is removed. */ 
     3948      temp_sb_desc->next = virt_to_phys(urb_priv->first_sb); 
      3949      /* Clear the previous end-of-list flag since there is a new one in the 
      3950         appended SB descriptor list. */ 
     3951      temp_sb_desc->command &= ~IO_MASK(USB_SB_command, eol); 
     3952 
     3953      if (!(TxIsocEPList[epid].command & IO_MASK(USB_EP_command, enable))) { 
     3954        __u32 epid_data; 
     3955        /* 8.8.5 in Designer's Reference says we should check for and correct 
     3956           any errors in the EP here.  That should not be necessary if 
     3957           epid_attn is handled correctly, so we assume all is ok. */ 
     3958        epid_data = etrax_epid_iso_get(epid); 
     3959        if (IO_EXTRACT(R_USB_EPT_DATA, error_code, epid_data) != 
     3960            IO_STATE_VALUE(R_USB_EPT_DATA, error_code, no_error)) { 
     3961          isoc_err("Disabled Isoc EP with error:%d on epid:%d when appending" 
     3962                   " URB:0x%x[%d]\n", 
     3963                   IO_EXTRACT(R_USB_EPT_DATA, error_code, epid_data), epid, 
     3964                   (unsigned int)urb, urb_priv->urb_num); 
     3965        } 
     3966 
     3967        /* The SB list was exhausted. */ 
     3968        if (virt_to_phys(urb_priv->last_sb) != TxIsocEPList[epid].sub) { 
     3969          /* The new sublist did not get processed before the EP was 
      3970             disabled.  Set up the EP again. */ 
     3971 
     3972          if(virt_to_phys(temp_sb_desc) == TxIsocEPList[epid].sub) { 
     3973            isoc_dbg("EP for epid:%d stoped at SB:0x%x before newly inserted" 
     3974                     ", restarting from this URBs SB:0x%x\n", 
     3975                     epid, (unsigned int)temp_sb_desc, 
     3976                     (unsigned int)(urb_priv->first_sb)); 
     3977            TxIsocEPList[epid].hw_len = 0; 
     3978            TxIsocEPList[epid].sub = virt_to_phys(urb_priv->first_sb); 
     3979            urb->start_frame = (*R_USB_FM_NUMBER & 0x7ff); 
     3980            /* Enable the EP again so data gets processed this time */ 
     3981            TxIsocEPList[epid].command |= 
     3982              IO_STATE(USB_EP_command, enable, yes); 
     3983 
     3984          } else { 
      3985            /* The EP has been disabled but not at the end of this URB (god knows 
      3986               where). This should generate an epid_attn, so we should not be 
      3987               here. */ 
     3988            isoc_warn("EP was disabled on sb:0x%x before SB list for" 
     3989                     " URB:0x%x[%d] got processed\n", 
     3990                     (unsigned int)phys_to_virt(TxIsocEPList[epid].sub), 
     3991                     (unsigned int)urb, urb_priv->urb_num); 
     3992          } 
     3993        } else { 
      3994          /* This might happen if we are slow in this function and is not 
      3995             an error. */ 
     3996          isoc_dbg("EP was disabled and finished with SBs from appended" 
     3997                   " URB:0x%x[%d]\n", (unsigned int)urb, urb_priv->urb_num); 
     3998        } 
     3999      } 
     4000    } 
     4001  } 
     4002   
     4003  /* Start the DMA sub channel */ 
     4004  *R_DMA_CH8_SUB3_CMD = IO_STATE(R_DMA_CH8_SUB3_CMD, cmd, start); 
     4005 
     4006  local_irq_restore(flags); 
     4007} 
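
/* When tc_dma_process_isoc_urb() above appends a second Out Isoc URB it
 * walks to the SB descriptor carrying the eol flag, writes the new
 * sub-list into its next pointer first, and only then clears eol, so the
 * DMA never sees a descriptor without eol but with a stale next pointer.
 * A small model of that ordering; types and names are invented for
 * illustration. */

#include <stddef.h>
#include <stdio.h>

#define SB_EOL 0x1u

struct sb_desc {
  unsigned command;
  struct sb_desc *next;
};

/* Append the list starting at new_head (whose last entry carries SB_EOL)
 * to the list starting at head. */
static void sb_append(struct sb_desc *head, struct sb_desc *new_head)
{
  struct sb_desc *tail = head;

  while (!(tail->command & SB_EOL))
    tail = tail->next;

  tail->next = new_head;          /* 1: link in the new sub-list first  */
  tail->command &= ~SB_EOL;       /* 2: only then drop end-of-list flag */
}

int main(void)
{
  struct sb_desc a = { SB_EOL, NULL };
  struct sb_desc b = { SB_EOL, NULL };

  sb_append(&a, &b);
  printf("a.eol=%u a.next==&b:%d b.eol=%u\n",
         a.command & SB_EOL, a.next == &b, b.command & SB_EOL);
  return 0;
}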
     4008 
     4009static void tc_dma_unlink_intr_urb(struct urb *urb) { 
     4010  struct crisv10_urb_priv *urb_priv = urb->hcpriv; 
     4011  volatile struct USB_EP_Desc *first_ep;  /* First EP in the list. */ 
     4012  volatile struct USB_EP_Desc *curr_ep;   /* Current EP, the iterator. */ 
     4013  volatile struct USB_EP_Desc *next_ep;   /* The EP after current. */ 
     4014  volatile struct USB_EP_Desc *unlink_ep; /* The one we should remove from 
     4015                                             the list. */ 
     4016  int count = 0; 
     4017  volatile int timeout = 10000; 
     4018  int epid; 
     4019 
     4020  /* Read 8.8.4 in Designer's Reference, "Removing an EP Descriptor from the 
     4021     List". */ 
     4022  ASSERT(urb_priv); 
     4023  ASSERT(urb_priv->intr_ep_pool_length > 0); 
     4024  epid = urb_priv->epid; 
     4025 
     4026  /* First disable all Intr EPs belonging to epid for this URB */ 
     4027  first_ep = &TxIntrEPList[0]; 
     4028  curr_ep = first_ep; 
     4029  do { 
     4030    next_ep = (struct USB_EP_Desc *)phys_to_virt(curr_ep->next); 
     4031    if (IO_EXTRACT(USB_EP_command, epid, next_ep->command) == epid) { 
     4032      /* Disable EP */ 
     4033      next_ep->command &= ~IO_MASK(USB_EP_command, enable); 
     4034    } 
     4035    curr_ep = phys_to_virt(curr_ep->next); 
     4036  } while (curr_ep != first_ep); 
     4037 
     4038 
     4039  /* Now unlink all EPs belonging to this epid from Descr list */ 
     4040  first_ep = &TxIntrEPList[0]; 
     4041  curr_ep = first_ep; 
     4042  do { 
     4043    next_ep = (struct USB_EP_Desc *)phys_to_virt(curr_ep->next); 
     4044    if (IO_EXTRACT(USB_EP_command, epid, next_ep->command) == epid) { 
     4045      /* This is the one we should unlink. */ 
     4046      unlink_ep = next_ep; 
     4047 
     4048      /* Actually unlink the EP from the DMA list. */ 
     4049      curr_ep->next = unlink_ep->next; 
     4050 
     4051      /* Wait until the DMA is no longer at this descriptor. */ 
     4052      while((*R_DMA_CH8_SUB2_EP == virt_to_phys(unlink_ep)) && 
     4053            (timeout-- > 0)); 
      4054      if(timeout &lt;= 0) { 
     4055        warn("Timeout while waiting for DMA-TX-Intr to leave unlink EP\n"); 
     4056      } 
     4057       
     4058      count++; 
     4059    } 
     4060    curr_ep = phys_to_virt(curr_ep->next); 
     4061  } while (curr_ep != first_ep); 
     4062 
     4063  if(count != urb_priv->intr_ep_pool_length) { 
     4064    intr_warn("Unlinked %d of %d Intr EPs for URB:0x%x[%d]\n", count, 
     4065              urb_priv->intr_ep_pool_length, (unsigned int)urb, 
     4066              urb_priv->urb_num); 
     4067  } else { 
     4068    intr_dbg("Unlinked %d of %d interrupt EPs for URB:0x%x\n", count, 
     4069             urb_priv->intr_ep_pool_length, (unsigned int)urb); 
     4070  } 
     4071} 
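
/* tc_dma_unlink_intr_urb() above removes descriptors in two passes: first
 * clear the enable bit on every descriptor belonging to the epid, then
 * splice each one out of the circular ring (the driver also busy-waits
 * until R_DMA_CH8_SUB2_EP has left a removed descriptor before reusing
 * it).  A sketch of the ring-splice part with ordinary pointers; names
 * are invented for illustration. */

#include <stdio.h>

struct ep {
  int epid;
  int enabled;
  struct ep *next;
};

/* Unlink every ring member (except the fixed head) whose epid matches.
 * Returns the number of descriptors removed. */
static int unlink_epid(struct ep *head, int epid)
{
  struct ep *cur = head;
  int count = 0;

  /* Pass 1: disable, so the descriptors stop being processed. */
  do {
    if (cur->next->epid == epid)
      cur->next->enabled = 0;
    cur = cur->next;
  } while (cur != head);

  /* Pass 2: splice them out of the ring (this is where the driver waits
   * for the DMA to move past each removed descriptor). */
  do {
    while (cur->next->epid == epid) {
      cur->next = cur->next->next;
      count++;
    }
    cur = cur->next;
  } while (cur != head);

  return count;
}

int main(void)
{
  struct ep n0 = { -1, 1, NULL }, n1 = { 5, 1, NULL }, n2 = { 7, 1, NULL };

  n0.next = &n1; n1.next = &n2; n2.next = &n0;   /* circular ring */
  printf("removed %d\n", unlink_epid(&n0, 5));   /* -> removed 1  */
  return 0;
}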
     4072 
     4073static void check_finished_bulk_tx_epids(struct usb_hcd *hcd, 
     4074                                                    int timer) { 
     4075  unsigned long flags; 
     4076  int epid; 
     4077  struct urb *urb; 
     4078  struct crisv10_urb_priv * urb_priv; 
     4079  __u32 epid_data; 
     4080 
     4081  /* Protect TxEPList */ 
     4082  local_irq_save(flags); 
     4083 
     4084  for (epid = 0; epid < NBR_OF_EPIDS; epid++) { 
     4085    /* A finished EP descriptor is disabled and has a valid sub pointer */ 
     4086    if (!(TxBulkEPList[epid].command & IO_MASK(USB_EP_command, enable)) && 
     4087        (TxBulkEPList[epid].sub != 0)) { 
     4088 
     4089      /* Get the active URB for this epid */ 
     4090      urb = activeUrbList[epid]; 
     4091      /* Sanity checks */ 
     4092      ASSERT(urb); 
     4093      urb_priv = (struct crisv10_urb_priv *)urb->hcpriv; 
     4094      ASSERT(urb_priv); 
     4095       
     4096      /* Only handle finished out Bulk EPs here, 
     4097         and let RX interrupt take care of the rest */ 
     4098      if(!epid_out_traffic(epid)) { 
     4099        continue; 
     4100      } 
     4101 
     4102      if(timer) { 
     4103        tc_warn("Found finished %s Bulk epid:%d URB:0x%x[%d] from timeout\n", 
     4104                epid_out_traffic(epid) ? "Out" : "In", epid, (unsigned int)urb, 
     4105                urb_priv->urb_num); 
     4106      } else { 
     4107        tc_dbg("Found finished %s Bulk epid:%d URB:0x%x[%d] from interrupt\n", 
     4108               epid_out_traffic(epid) ? "Out" : "In", epid, (unsigned int)urb, 
     4109               urb_priv->urb_num); 
     4110      } 
     4111 
     4112      if(urb_priv->urb_state == UNLINK) { 
     4113        /* This Bulk URB is requested to be unlinked, that means that the EP 
     4114           has been disabled and we might not have sent all data */ 
     4115        tc_finish_urb(hcd, urb, urb->status); 
     4116        continue; 
     4117      } 
     4118 
     4119      ASSERT(urb_priv->urb_state == STARTED); 
     4120      if (phys_to_virt(TxBulkEPList[epid].sub) != urb_priv->last_sb) { 
     4121        tc_err("Endpoint got disabled before reaching last sb\n"); 
     4122      } 
     4123         
     4124      epid_data = etrax_epid_get(epid); 
     4125      if (IO_EXTRACT(R_USB_EPT_DATA, error_code, epid_data) == 
     4126          IO_STATE_VALUE(R_USB_EPT_DATA, error_code, no_error)) { 
     4127        /* This means that the endpoint has no error, is disabled 
     4128           and had inserted traffic, i.e. transfer successfully completed. */ 
     4129        tc_finish_urb(hcd, urb, 0); 
     4130      } else { 
     4131        /* Shouldn't happen. We expect errors to be caught by epid 
     4132           attention. */ 
     4133        tc_err("Found disabled bulk EP desc (epid:%d error:%d)\n", 
     4134               epid, IO_EXTRACT(R_USB_EPT_DATA, error_code, epid_data)); 
     4135      } 
     4136    } else { 
     4137      tc_dbg("Ignoring In Bulk epid:%d, let RX interrupt handle it\n", epid); 
     4138    } 
     4139  } 
     4140 
     4141  local_irq_restore(flags); 
     4142} 
     4143 
     4144static void check_finished_ctrl_tx_epids(struct usb_hcd *hcd) { 
     4145  unsigned long flags; 
     4146  int epid; 
     4147  struct urb *urb; 
     4148  struct crisv10_urb_priv * urb_priv; 
     4149  __u32 epid_data; 
     4150 
     4151  /* Protect TxEPList */ 
     4152  local_irq_save(flags); 
     4153 
     4154  for (epid = 0; epid < NBR_OF_EPIDS; epid++) { 
     4155    if(epid == DUMMY_EPID) 
     4156      continue; 
     4157 
     4158    /* A finished EP descriptor is disabled and has a valid sub pointer */ 
     4159    if (!(TxCtrlEPList[epid].command & IO_MASK(USB_EP_command, enable)) && 
     4160        (TxCtrlEPList[epid].sub != 0)) { 
     4161       
     4162      /* Get the active URB for this epid */ 
     4163      urb = activeUrbList[epid]; 
     4164 
     4165      if(urb == NULL) { 
     4166        tc_warn("Found finished Ctrl epid:%d with no active URB\n", epid); 
     4167        continue; 
     4168      } 
     4169       
     4170      /* Sanity checks */ 
     4171      ASSERT(usb_pipein(urb->pipe)); 
     4172      urb_priv = (struct crisv10_urb_priv *)urb->hcpriv; 
     4173      ASSERT(urb_priv); 
     4174      if (phys_to_virt(TxCtrlEPList[epid].sub) != urb_priv->last_sb) { 
     4175        tc_err("Endpoint got disabled before reaching last sb\n"); 
     4176      } 
     4177 
     4178      epid_data = etrax_epid_get(epid); 
     4179      if (IO_EXTRACT(R_USB_EPT_DATA, error_code, epid_data) == 
     4180          IO_STATE_VALUE(R_USB_EPT_DATA, error_code, no_error)) { 
     4181        /* This means that the endpoint has no error, is disabled 
     4182           and had inserted traffic, i.e. transfer successfully completed. */ 
     4183 
     4184        /* Check if RX-interrupt for In Ctrl has been processed before 
     4185           finishing the URB */ 
     4186        if(urb_priv->ctrl_rx_done) { 
     4187          tc_dbg("Finishing In Ctrl URB:0x%x[%d] in tx_interrupt\n", 
     4188                 (unsigned int)urb, urb_priv->urb_num); 
     4189          tc_finish_urb(hcd, urb, 0); 
     4190        } else { 
     4191          /* If we get zout descriptor interrupt before RX was done for a 
     4192             In Ctrl transfer, then we flag that and it will be finished 
     4193             in the RX-Interrupt */ 
     4194          urb_priv->ctrl_zout_done = 1; 
     4195          tc_dbg("Got zout descr interrupt before RX interrupt\n"); 
     4196        } 
     4197      } else { 
     4198        /* Shouldn't happen. We expect errors to be caught by epid 
     4199           attention. */ 
     4200        tc_err("Found disabled Ctrl EP desc (epid:%d URB:0x%x[%d]) error_code:%d\n", epid, (unsigned int)urb, urb_priv->urb_num, IO_EXTRACT(R_USB_EPT_DATA, error_code, epid_data)); 
     4201        __dump_ep_desc(&(TxCtrlEPList[epid])); 
     4202        __dump_ept_data(epid); 
     4203      }       
     4204    } 
     4205  } 
     4206  local_irq_restore(flags); 
     4207} 
     4208 
      4209/* This function goes through all epids that are set up for Out Isoc transfers 
      4210   and marks (isoc_out_done) all queued URBs that the DMA has finished 
      4211   transferring. 
      4212   No URB completion is done here, so that the interrupt routine returns quickly. 
      4213   URBs are completed later with the help of complete_isoc_bottom_half(), which 
      4214   is scheduled when this function is finished. */ 
     4215static void check_finished_isoc_tx_epids(void) { 
     4216  unsigned long flags; 
     4217  int epid; 
     4218  struct urb *urb; 
     4219  struct crisv10_urb_priv * urb_priv; 
     4220  struct USB_SB_Desc* sb_desc; 
     4221  int epid_done; 
     4222 
     4223  /* Protect TxIsocEPList */ 
     4224  local_irq_save(flags); 
     4225 
     4226  for (epid = 0; epid < NBR_OF_EPIDS; epid++) { 
     4227    if (TxIsocEPList[epid].sub == 0 || epid == INVALID_EPID || 
     4228        !epid_out_traffic(epid)) { 
     4229      /* Nothing here to see. */ 
     4230      continue; 
     4231    } 
     4232    ASSERT(epid_inuse(epid)); 
     4233    ASSERT(epid_isoc(epid)); 
     4234 
     4235    sb_desc = phys_to_virt(TxIsocEPList[epid].sub); 
     4236    /* Find the last descriptor of the currently active URB for this ep. 
     4237       This is the first descriptor in the sub list marked for a descriptor 
     4238       interrupt. */ 
     4239    while (sb_desc && !IO_EXTRACT(USB_SB_command, intr, sb_desc->command)) { 
     4240      sb_desc = sb_desc->next ? phys_to_virt(sb_desc->next) : 0; 
     4241    } 
     4242    ASSERT(sb_desc); 
     4243 
     4244    isoc_dbg("Descr IRQ checking epid:%d sub:0x%x intr:0x%x\n", 
     4245             epid, (unsigned int)phys_to_virt(TxIsocEPList[epid].sub), 
     4246             (unsigned int)sb_desc); 
     4247 
     4248    urb = activeUrbList[epid]; 
     4249    if(urb == NULL) { 
     4250      isoc_err("Isoc Descr irq on epid:%d with no active URB\n", epid); 
     4251      continue; 
     4252    } 
     4253 
     4254    epid_done = 0; 
     4255    while(urb && !epid_done) { 
     4256      /* Sanity check. */ 
     4257      ASSERT(usb_pipetype(urb->pipe) == PIPE_ISOCHRONOUS); 
     4258      ASSERT(usb_pipeout(urb->pipe)); 
     4259       
     4260      urb_priv = (struct crisv10_urb_priv *)urb->hcpriv; 
     4261      ASSERT(urb_priv); 
     4262      ASSERT(urb_priv->urb_state == STARTED || 
     4263             urb_priv->urb_state == UNLINK); 
     4264       
     4265      if (sb_desc != urb_priv->last_sb) { 
     4266        /* This urb has been sent. */ 
     4267        urb_priv->isoc_out_done = 1; 
     4268 
     4269      } else { /* Found URB that has last_sb as the interrupt reason */ 
     4270 
     4271        /* Check if EP has been disabled, meaning that all transfers are done*/ 
     4272        if(!(TxIsocEPList[epid].command & IO_MASK(USB_EP_command, enable))) { 
     4273          ASSERT((sb_desc->command & IO_MASK(USB_SB_command, eol)) == 
     4274                 IO_STATE(USB_SB_command, eol, yes)); 
     4275          ASSERT(sb_desc->next == 0); 
     4276          urb_priv->isoc_out_done = 1; 
     4277        } else { 
     4278          isoc_dbg("Skipping URB:0x%x[%d] because EP not disabled yet\n", 
     4279                   (unsigned int)urb, urb_priv->urb_num); 
     4280        } 
     4281        /* Stop looking any further in queue */ 
     4282        epid_done = 1;   
     4283      } 
     4284 
     4285      if (!epid_done) { 
     4286        if(urb == activeUrbList[epid]) { 
     4287          urb = urb_list_first(epid); 
     4288        } else { 
     4289          urb = urb_list_next(urb, epid); 
     4290        } 
     4291      } 
     4292    } /* END: while(urb && !epid_done) */ 
     4293  } 
     4294 
     4295  local_irq_restore(flags); 
     4296} 
     4297 
     4298 
      4299/* This is where the Out Isoc URBs are really completed. This function is 
      4300   scheduled from tc_dma_tx_interrupt() when one or more Out Isoc transfers 
      4301   are done. This function completes all URBs earlier marked with 
      4302   isoc_out_done by the fast interrupt routine check_finished_isoc_tx_epids(). */ 
     4303 
     4304static void complete_isoc_bottom_half(struct work_struct* work) { 
     4305  struct crisv10_isoc_complete_data *comp_data; 
     4306  struct usb_iso_packet_descriptor *packet; 
     4307  struct crisv10_urb_priv * urb_priv; 
     4308  unsigned long flags; 
     4309  struct urb* urb; 
     4310  int epid_done; 
     4311  int epid; 
     4312  int i; 
     4313 
     4314  comp_data = container_of(work, struct crisv10_isoc_complete_data, usb_bh); 
     4315 
     4316  local_irq_save(flags); 
     4317 
     4318  for (epid = 0; epid < NBR_OF_EPIDS - 1; epid++) { 
     4319    if(!epid_inuse(epid) || !epid_isoc(epid) || !epid_out_traffic(epid) || epid == DUMMY_EPID) { 
     4320      /* Only check valid Out Isoc epids */ 
     4321      continue; 
     4322    } 
     4323 
     4324    isoc_dbg("Isoc bottom-half checking epid:%d, sub:0x%x\n", epid, 
     4325             (unsigned int)phys_to_virt(TxIsocEPList[epid].sub)); 
     4326 
     4327    /* The descriptor interrupt handler has marked all transmitted Out Isoc 
     4328       URBs with isoc_out_done.  Now we traverse all epids and for all that 
     4329       have out Isoc traffic we traverse its URB list and complete the 
     4330       transmitted URBs. */ 
     4331    epid_done = 0; 
     4332    while (!epid_done) { 
     4333 
     4334      /* Get the active urb (if any) */ 
     4335      urb = activeUrbList[epid]; 
     4336      if (urb == 0) { 
     4337        isoc_dbg("No active URB on epid:%d anymore\n", epid); 
     4338        epid_done = 1; 
     4339        continue; 
     4340      } 
     4341 
     4342      /* Sanity check. */ 
     4343      ASSERT(usb_pipetype(urb->pipe) == PIPE_ISOCHRONOUS); 
     4344      ASSERT(usb_pipeout(urb->pipe)); 
     4345 
     4346      urb_priv = (struct crisv10_urb_priv *)urb->hcpriv; 
     4347      ASSERT(urb_priv); 
     4348 
     4349      if (!(urb_priv->isoc_out_done)) { 
      4350        /* We have reached a URB that isn't flagged done yet, stop traversing. */ 
      4351        isoc_dbg("Stopped traversing Out Isoc URBs on epid:%d" 
      4352                 " before not yet flagged URB:0x%x[%d]\n", 
     4353                 epid, (unsigned int)urb, urb_priv->urb_num); 
     4354        epid_done = 1; 
     4355        continue; 
     4356      } 
     4357 
     4358      /* This urb has been sent. */ 
     4359      isoc_dbg("Found URB:0x%x[%d] that is flaged isoc_out_done\n", 
     4360               (unsigned int)urb, urb_priv->urb_num); 
     4361 
      4362      /* Set OK on transferred packets for this URB and finish it */ 
     4363      for (i = 0; i < urb->number_of_packets; i++) { 
     4364        packet = &urb->iso_frame_desc[i]; 
     4365        packet->status = 0; 
     4366        packet->actual_length = packet->length; 
     4367      } 
     4368      urb_priv->isoc_packet_counter = urb->number_of_packets; 
     4369      tc_finish_urb(comp_data->hcd, urb, 0); 
     4370 
     4371    } /* END: while(!epid_done) */ 
     4372  } /* END: for(epid...) */ 
     4373 
     4374  local_irq_restore(flags); 
     4375  kmem_cache_free(isoc_compl_cache, comp_data); 
     4376} 
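
/* The Out Isoc completion is split in two: the descriptor interrupt only
 * flags finished URBs (isoc_out_done) and schedules a work item, and
 * complete_isoc_bottom_half() later does the potentially slow URB
 * completion outside interrupt context.  A stripped-down sketch of that
 * top/bottom-half split, using the same workqueue calls as the driver;
 * the demo_* names are invented for illustration. */

#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/workqueue.h>

struct demo_complete_data {
  struct work_struct work;
  int epid;                        /* whatever the bottom half needs */
};

/* Runs later in process context, free to take its time. */
static void demo_bottom_half(struct work_struct *work)
{
  struct demo_complete_data *d =
    container_of(work, struct demo_complete_data, work);

  /* ...complete the URBs flagged for d->epid here... */
  kfree(d);
}

/* Called from the (fast) interrupt handler: just record what needs doing
 * and defer the actual completion. */
static void demo_flag_and_schedule(int epid)
{
  struct demo_complete_data *d = kmalloc(sizeof(*d), GFP_ATOMIC);

  if (!d)
    return;
  d->epid = epid;
  INIT_WORK(&d->work, demo_bottom_half);
  schedule_work(&d->work);
}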
     4377 
     4378 
     4379static void check_finished_intr_tx_epids(struct usb_hcd *hcd) { 
     4380  unsigned long flags; 
     4381  int epid; 
     4382  struct urb *urb; 
     4383  struct crisv10_urb_priv * urb_priv; 
     4384  volatile struct USB_EP_Desc *curr_ep;   /* Current EP, the iterator. */ 
     4385  volatile struct USB_EP_Desc *next_ep;   /* The EP after current. */ 
     4386 
     4387  /* Protect TxintrEPList */ 
     4388  local_irq_save(flags); 
     4389 
     4390  for (epid = 0; epid < NBR_OF_EPIDS; epid++) { 
     4391    if(!epid_inuse(epid) || !epid_intr(epid) || !epid_out_traffic(epid)) { 
     4392      /* Nothing to see on this epid. Only check valid Out Intr epids */ 
     4393      continue; 
     4394    } 
     4395 
     4396    urb = activeUrbList[epid]; 
     4397    if(urb == 0) { 
     4398      intr_warn("Found Out Intr epid:%d with no active URB\n", epid); 
     4399      continue; 
     4400    } 
     4401 
     4402    /* Sanity check. */ 
     4403    ASSERT(usb_pipetype(urb->pipe) == PIPE_INTERRUPT); 
     4404    ASSERT(usb_pipeout(urb->pipe)); 
     4405     
     4406    urb_priv = (struct crisv10_urb_priv *)urb->hcpriv; 
     4407    ASSERT(urb_priv); 
     4408 
      4409    /* Go through the EPs between the first and second sof-EP. This is where 
      4410       Out Intr EPs are inserted. */ 
     4411    curr_ep = &TxIntrEPList[0]; 
     4412    do { 
     4413      next_ep = (struct USB_EP_Desc *)phys_to_virt(curr_ep->next); 
     4414      if(next_ep == urb_priv->intr_ep_pool[0]) { 
     4415        /* We found the Out Intr EP for this epid */ 
     4416         
     4417        /* Disable it so it doesn't get processed again */ 
     4418        next_ep->command &= ~IO_MASK(USB_EP_command, enable); 
     4419 
     4420        /* Finish the active Out Intr URB with status OK */ 
     4421        tc_finish_urb(hcd, urb, 0); 
     4422      } 
     4423      curr_ep = phys_to_virt(curr_ep->next); 
     4424    } while (curr_ep != &TxIntrEPList[1]); 
     4425 
     4426  } 
     4427  local_irq_restore(flags); 
     4428} 
     4429 
     4430/* Interrupt handler for DMA8/IRQ24 with subchannels (called from hardware intr) */ 
     4431static irqreturn_t tc_dma_tx_interrupt(int irq, void *vhc) { 
     4432  struct usb_hcd *hcd = (struct usb_hcd*)vhc; 
     4433  ASSERT(hcd); 
     4434 
     4435  if (*R_IRQ_READ2 & IO_MASK(R_IRQ_READ2, dma8_sub0_descr)) { 
     4436    /* Clear this interrupt */ 
     4437    *R_DMA_CH8_SUB0_CLR_INTR = IO_STATE(R_DMA_CH8_SUB0_CLR_INTR, clr_descr, do); 
     4438    restart_dma8_sub0(); 
     4439  } 
     4440 
     4441  if (*R_IRQ_READ2 & IO_MASK(R_IRQ_READ2, dma8_sub1_descr)) { 
     4442    /* Clear this interrupt */ 
     4443    *R_DMA_CH8_SUB1_CLR_INTR = IO_STATE(R_DMA_CH8_SUB1_CLR_INTR, clr_descr, do); 
     4444    check_finished_ctrl_tx_epids(hcd); 
     4445  } 
     4446 
     4447  if (*R_IRQ_READ2 & IO_MASK(R_IRQ_READ2, dma8_sub2_descr)) { 
     4448    /* Clear this interrupt */ 
     4449    *R_DMA_CH8_SUB2_CLR_INTR = IO_STATE(R_DMA_CH8_SUB2_CLR_INTR, clr_descr, do); 
     4450    check_finished_intr_tx_epids(hcd); 
     4451  } 
     4452 
     4453  if (*R_IRQ_READ2 & IO_MASK(R_IRQ_READ2, dma8_sub3_descr)) { 
     4454    struct crisv10_isoc_complete_data* comp_data; 
     4455 
     4456    /* Flag done Out Isoc for later completion */ 
     4457    check_finished_isoc_tx_epids(); 
     4458 
     4459    /* Clear this interrupt */ 
     4460    *R_DMA_CH8_SUB3_CLR_INTR = IO_STATE(R_DMA_CH8_SUB3_CLR_INTR, clr_descr, do); 
     4461    /* Schedule bottom half of Out Isoc completion function. This function 
     4462       finishes the URBs marked with isoc_out_done */ 
     4463    comp_data = (struct crisv10_isoc_complete_data*) 
     4464      kmem_cache_alloc(isoc_compl_cache, GFP_ATOMIC); 
     4465    ASSERT(comp_data != NULL); 
      4466    comp_data->hcd = hcd; 
     4467 
     4468    INIT_WORK(&comp_data->usb_bh, complete_isoc_bottom_half); 
     4469    schedule_work(&comp_data->usb_bh); 
     4470  } 
     4471 
     4472  return IRQ_HANDLED; 
     4473} 
     4474 
     4475/* Interrupt handler for DMA9/IRQ25 (called from hardware intr) */ 
     4476static irqreturn_t tc_dma_rx_interrupt(int irq, void *vhc) { 
     4477  unsigned long flags; 
     4478  struct urb *urb; 
     4479  struct usb_hcd *hcd = (struct usb_hcd*)vhc; 
     4480  struct crisv10_urb_priv *urb_priv; 
     4481  int epid = 0; 
     4482  int real_error; 
     4483 
     4484  ASSERT(hcd); 
     4485 
     4486  /* Clear this interrupt. */ 
     4487  *R_DMA_CH9_CLR_INTR = IO_STATE(R_DMA_CH9_CLR_INTR, clr_eop, do); 
     4488 
     4489  /* Custom clear interrupt for this interrupt */ 
     4490  /* The reason we cli here is that we call the driver's callback functions. */ 
     4491  local_irq_save(flags); 
     4492 
     4493  /* Note that this while loop assumes that all packets span only 
     4494     one rx descriptor. */ 
     4495  while(myNextRxDesc->status & IO_MASK(USB_IN_status, eop)) { 
     4496    epid = IO_EXTRACT(USB_IN_status, epid, myNextRxDesc->status); 
     4497    /* Get the active URB for this epid */ 
     4498    urb = activeUrbList[epid]; 
     4499 
     4500    ASSERT(epid_inuse(epid)); 
     4501    if (!urb) { 
     4502      dma_err("No urb for epid %d in rx interrupt\n", epid); 
     4503      goto skip_out; 
     4504    } 
     4505 
     4506    /* Check if any errors on epid */ 
     4507    real_error = 0; 
     4508    if (myNextRxDesc->status & IO_MASK(USB_IN_status, error)) { 
     4509      __u32 r_usb_ept_data; 
     4510 
     4511      if (usb_pipeisoc(urb->pipe)) { 
     4512        r_usb_ept_data = etrax_epid_iso_get(epid); 
     4513        if((r_usb_ept_data & IO_MASK(R_USB_EPT_DATA_ISO, valid)) && 
     4514           (IO_EXTRACT(R_USB_EPT_DATA_ISO, error_code, r_usb_ept_data) == 0) && 
     4515           (myNextRxDesc->status & IO_MASK(USB_IN_status, nodata))) { 
      4516          /* Not an error, just a failure to receive an expected Isoc 
      4517             In packet in this frame.  This is not documented 
      4518             in the Designer's Reference. Continue processing. 
      4519          */ 
     4520        } else real_error = 1; 
     4521      } else real_error = 1; 
     4522    } 
     4523 
     4524    if(real_error) { 
     4525      dma_err("Error in RX descr on epid:%d for URB 0x%x", 
     4526              epid, (unsigned int)urb); 
     4527      dump_ept_data(epid); 
     4528      dump_in_desc(myNextRxDesc); 
     4529      goto skip_out; 
     4530    } 
     4531 
     4532    urb_priv = (struct crisv10_urb_priv *)urb->hcpriv; 
     4533    ASSERT(urb_priv); 
     4534    ASSERT(urb_priv->urb_state == STARTED || 
     4535           urb_priv->urb_state == UNLINK); 
     4536 
     4537    if ((usb_pipetype(urb->pipe) == PIPE_BULK) || 
     4538        (usb_pipetype(urb->pipe) == PIPE_CONTROL) || 
     4539        (usb_pipetype(urb->pipe) == PIPE_INTERRUPT)) { 
     4540 
     4541      /* We get nodata for empty data transactions, and the rx descriptor's 
     4542         hw_len field is not valid in that case. No data to copy in other 
     4543         words. */ 
     4544      if (myNextRxDesc->status & IO_MASK(USB_IN_status, nodata)) { 
     4545        /* No data to copy */ 
     4546      } else { 
     4547        /* 
     4548        dma_dbg("Processing RX for URB:0x%x epid:%d (data:%d ofs:%d)\n", 
     4549                (unsigned int)urb, epid, myNextRxDesc->hw_len, 
     4550                urb_priv->rx_offset); 
     4551        */ 
      4552        /* Only copy data if the URB isn't flagged to be unlinked. */ 
     4553        if(urb_priv->urb_state != UNLINK) { 
     4554          /* Make sure the data fits in the buffer. */ 
     4555          if(urb_priv->rx_offset + myNextRxDesc->hw_len 
     4556             <= urb->transfer_buffer_length) { 
     4557 
     4558            /* Copy the data to URBs buffer */ 
     4559            memcpy(urb->transfer_buffer + urb_priv->rx_offset, 
     4560                   phys_to_virt(myNextRxDesc->buf), myNextRxDesc->hw_len); 
     4561            urb_priv->rx_offset += myNextRxDesc->hw_len; 
     4562          } else { 
     4563            /* Signal overflow when returning URB */ 
     4564            urb->status = -EOVERFLOW; 
     4565            tc_finish_urb_later(hcd, urb, urb->status); 
     4566          } 
     4567        } 
     4568      } 
     4569 
     4570      /* Check if it was the last packet in the transfer */ 
     4571      if (myNextRxDesc->status & IO_MASK(USB_IN_status, eot)) { 
     4572        /* Special handling for In Ctrl URBs. */ 
     4573        if(usb_pipecontrol(urb->pipe) && usb_pipein(urb->pipe) && 
     4574           !(urb_priv->ctrl_zout_done)) { 
      4575          /* Flag that the RX part of the Ctrl transfer is done. Because the zout 
      4576             descr interrupt hasn't happened yet, the URB will be finished in the 
      4577             TX interrupt. */ 
     4578          urb_priv->ctrl_rx_done = 1; 
     4579          tc_dbg("Not finishing In Ctrl URB:0x%x from rx_interrupt, waiting" 
     4580                 " for zout\n", (unsigned int)urb); 
     4581        } else { 
     4582          tc_finish_urb(hcd, urb, 0); 
     4583        } 
     4584      } 
     4585    } else { /* ISOC RX */ 
     4586      /* 
     4587      isoc_dbg("Processing RX for epid:%d (URB:0x%x) ISOC pipe\n", 
     4588               epid, (unsigned int)urb); 
     4589      */ 
     4590 
     4591      struct usb_iso_packet_descriptor *packet; 
     4592 
     4593      if (urb_priv->urb_state == UNLINK) { 
     4594        isoc_warn("Ignoring Isoc Rx data for urb being unlinked.\n"); 
     4595        goto skip_out; 
     4596      } else if (urb_priv->urb_state == NOT_STARTED) { 
     4597        isoc_err("What? Got Rx data for Isoc urb that isn't started?\n"); 
     4598        goto skip_out; 
     4599      } 
     4600 
     4601      packet = &urb->iso_frame_desc[urb_priv->isoc_packet_counter]; 
     4602      ASSERT(packet); 
     4603      packet->status = 0; 
     4604 
     4605      if (myNextRxDesc->status & IO_MASK(USB_IN_status, nodata)) { 
      4606        /* We get nodata for empty data transactions, and the rx descriptor's 
      4607           hw_len field is not valid in that case. We copy 0 bytes, however, to 
      4608           stay in sync. */ 
     4609        packet->actual_length = 0; 
     4610      } else { 
     4611        packet->actual_length = myNextRxDesc->hw_len; 
     4612        /* Make sure the data fits in the buffer. */ 
     4613        ASSERT(packet->actual_length <= packet->length); 
     4614        memcpy(urb->transfer_buffer + packet->offset, 
     4615               phys_to_virt(myNextRxDesc->buf), packet->actual_length); 
     4616        if(packet->actual_length > 0) 
     4617          isoc_dbg("Copied %d bytes, packet %d for URB:0x%x[%d]\n", 
     4618                   packet->actual_length, urb_priv->isoc_packet_counter, 
     4619                   (unsigned int)urb, urb_priv->urb_num); 
     4620      } 
     4621 
     4622      /* Increment the packet counter. */ 
     4623      urb_priv->isoc_packet_counter++; 
     4624 
     4625      /* Note that we don't care about the eot field in the rx descriptor's 
     4626         status. It will always be set for isoc traffic. */ 
     4627      if (urb->number_of_packets == urb_priv->isoc_packet_counter) { 
     4628        /* Complete the urb with status OK. */ 
     4629        tc_finish_urb(hcd, urb, 0); 
     4630      } 
     4631    } 
     4632 
     4633  skip_out: 
     4634    myNextRxDesc->status = 0; 
     4635    myNextRxDesc->command |= IO_MASK(USB_IN_command, eol); 
     4636    myLastRxDesc->command &= ~IO_MASK(USB_IN_command, eol); 
     4637    myLastRxDesc = myNextRxDesc; 
     4638    myNextRxDesc = phys_to_virt(myNextRxDesc->next); 
     4639    flush_etrax_cache(); 
     4640    *R_DMA_CH9_CMD = IO_STATE(R_DMA_CH9_CMD, cmd, restart); 
     4641  } 
     4642 
     4643  local_irq_restore(flags); 
     4644 
     4645  return IRQ_HANDLED; 
     4646} 
     4647 
     4648static void tc_bulk_start_timer_func(unsigned long dummy) { 
      4649  /* We might enable an EP descriptor behind the current DMA position when 
      4650     it's about to decide that there is no more bulk traffic and it should 
      4651     stop the bulk channel. 
     4652     Therefore we periodically check if the bulk channel is stopped and there 
     4653     is an enabled bulk EP descriptor, in which case we start the bulk 
     4654     channel. */ 
     4655   
     4656  if (!(*R_DMA_CH8_SUB0_CMD & IO_MASK(R_DMA_CH8_SUB0_CMD, cmd))) { 
     4657    int epid; 
     4658 
     4659    timer_dbg("bulk_start_timer: Bulk DMA channel not running.\n"); 
     4660 
     4661    for (epid = 0; epid < NBR_OF_EPIDS; epid++) { 
     4662      if (TxBulkEPList[epid].command & IO_MASK(USB_EP_command, enable)) { 
     4663        timer_warn("Found enabled EP for epid %d, starting bulk channel.\n", 
     4664                   epid); 
     4665        restart_dma8_sub0(); 
     4666 
     4667        /* Restart the bulk eot timer since we just started the bulk channel.*/ 
     4668        mod_timer(&bulk_eot_timer, jiffies + BULK_EOT_TIMER_INTERVAL); 
     4669 
     4670        /* No need to search any further. */ 
     4671        break; 
     4672      } 
     4673    } 
     4674  } else { 
     4675    timer_dbg("bulk_start_timer: Bulk DMA channel running.\n"); 
     4676  } 
     4677} 
     4678 
     4679static void tc_bulk_eot_timer_func(unsigned long dummy) { 
     4680  struct usb_hcd *hcd = (struct usb_hcd*)dummy; 
     4681  ASSERT(hcd); 
     4682  /* Because of a race condition in the top half, we might miss a bulk eot. 
     4683     This timer "simulates" a bulk eot if we don't get one for a while, 
     4684     hopefully correcting the situation. */ 
     4685  timer_dbg("bulk_eot_timer timed out.\n"); 
     4686  check_finished_bulk_tx_epids(hcd, 1); 
     4687} 
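
/* The two bulk timers above act as watchdogs for races in the top half:
 * tc_bulk_start_timer_func() restarts a stopped channel that still has an
 * enabled EP, and tc_bulk_eot_timer_func() simulates a missed bulk eot.
 * A generic sketch of such a watchdog with the same timer API the driver
 * uses; the demo_* names and the interval are invented for illustration,
 * and unlike this sketch the driver re-arms its timers from the submit
 * path rather than from the handler itself. */

#include <linux/timer.h>
#include <linux/jiffies.h>

#define DEMO_WATCHDOG_INTERVAL (HZ / 2)   /* invented check interval */

static struct timer_list demo_watchdog;

static void demo_watchdog_func(unsigned long data)
{
  /* ...re-check the condition a racy top half might have missed, e.g.
   * "is the DMA channel stopped while an EP descriptor is still enabled?",
   * and repair it... */

  /* Re-arm so the check keeps running periodically. */
  mod_timer(&demo_watchdog, jiffies + DEMO_WATCHDOG_INTERVAL);
}

static void demo_watchdog_start(void)
{
  init_timer(&demo_watchdog);
  demo_watchdog.function = demo_watchdog_func;
  demo_watchdog.data = 0;
  mod_timer(&demo_watchdog, jiffies + DEMO_WATCHDOG_INTERVAL);
}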
     4688 
     4689 
     4690/*************************************************************/ 
     4691/*************************************************************/ 
     4692/* Device driver block                                       */ 
     4693/*************************************************************/ 
     4694/*************************************************************/ 
     4695 
     4696/* Forward declarations for device driver functions */ 
     4697static int devdrv_hcd_probe(struct device *); 
     4698static int devdrv_hcd_remove(struct device *); 
     4699#ifdef CONFIG_PM 
     4700static int devdrv_hcd_suspend(struct device *, u32, u32); 
     4701static int devdrv_hcd_resume(struct device *, u32); 
     4702#endif /* CONFIG_PM */ 
     4703 
     4704/* the device */ 
     4705static struct platform_device *devdrv_hc_platform_device; 
     4706 
     4707/* device driver interface */ 
     4708static struct device_driver devdrv_hc_device_driver = { 
     4709  .name =                       (char *) hc_name, 
     4710  .bus =                        &platform_bus_type, 
     4711 
     4712  .probe =              devdrv_hcd_probe, 
     4713  .remove =             devdrv_hcd_remove, 
     4714 
     4715#ifdef CONFIG_PM 
     4716  .suspend =            devdrv_hcd_suspend, 
     4717  .resume =             devdrv_hcd_resume, 
     4718#endif /* CONFIG_PM */ 
     4719}; 
     4720 
     4721/* initialize the host controller and driver  */ 
     4722static int __init_or_module devdrv_hcd_probe(struct device *dev) 
    3784723{ 
    379         struct list_head *entry; 
    380         struct list_head *tmp; 
    381         urb_entry_t *urb_entry; 
    382  
    383         list_for_each_safe(entry, tmp, &urb_list[epid]) { 
    384                 urb_entry = list_entry(entry, urb_entry_t, list); 
    385                 assert(urb_entry); 
    386                 assert(urb_entry->urb); 
    387  
    388                 if (urb_entry->urb == urb) { 
    389                         return urb_entry; 
    390                 } 
    391         } 
    392         return 0; 
    393 } 
    394  
    395 /* Delete an urb from the list. */ 
    396 static inline void urb_list_del(struct urb *urb, int epid) 
     4724  struct usb_hcd *hcd; 
     4725  struct crisv10_hcd *crisv10_hcd; 
     4726  int retval; 
     4727 
     4728  /* Check DMA burst length */ 
     4729  if(IO_EXTRACT(R_BUS_CONFIG, dma_burst, *R_BUS_CONFIG) != 
     4730     IO_STATE(R_BUS_CONFIG, dma_burst, burst32)) { 
     4731    devdrv_err("Invalid DMA burst length in Etrax 100LX," 
     4732               " needs to be 32\n"); 
     4733    return -EPERM; 
     4734  } 
     4735 
     4736  hcd = usb_create_hcd(&crisv10_hc_driver, dev, dev->bus_id); 
     4737  if (!hcd) 
     4738    return -ENOMEM; 
     4739 
     4740  crisv10_hcd = hcd_to_crisv10_hcd(hcd); 
     4741  spin_lock_init(&crisv10_hcd->lock); 
     4742  crisv10_hcd->num_ports = num_ports(); 
     4743  crisv10_hcd->running = 0; 
     4744 
     4745  dev_set_drvdata(dev, crisv10_hcd); 
     4746 
     4747  devdrv_dbg("ETRAX USB IRQs HC:%d  RX:%d  TX:%d\n", ETRAX_USB_HC_IRQ, 
     4748          ETRAX_USB_RX_IRQ, ETRAX_USB_TX_IRQ); 
     4749 
     4750  /* Print out chip version read from registers */ 
     4751  int rev_maj = *R_USB_REVISION & IO_MASK(R_USB_REVISION, major); 
     4752  int rev_min = *R_USB_REVISION & IO_MASK(R_USB_REVISION, minor); 
     4753  if(rev_min == 0) { 
     4754    devdrv_info("Etrax 100LX USB Revision %d v1,2\n", rev_maj); 
     4755  } else { 
     4756    devdrv_info("Etrax 100LX USB Revision %d v%d\n", rev_maj, rev_min); 
     4757  } 
     4758 
     4759  devdrv_info("Bulk timer interval, start:%d eot:%d\n", 
     4760              BULK_START_TIMER_INTERVAL, 
     4761              BULK_EOT_TIMER_INTERVAL); 
     4762 
     4763 
     4764  /* Init root hub data structures */ 
     4765  if(rh_init()) { 
     4766    devdrv_err("Failed init data for Root Hub\n"); 
     4767    retval = -ENOMEM; 
     4768  } 
     4769 
     4770  if(port_in_use(0)) { 
     4771    if (cris_request_io_interface(if_usb_1, "ETRAX100LX USB-HCD")) { 
     4772      printk(KERN_CRIT "usb-host: request IO interface usb1 failed"); 
     4773      retval = -EBUSY; 
     4774      goto out; 
     4775    } 
     4776    devdrv_info("Claimed interface for USB physical port 1\n"); 
     4777  } 
     4778  if(port_in_use(1)) { 
     4779    if (cris_request_io_interface(if_usb_2, "ETRAX100LX USB-HCD")) { 
     4780      /* Free first interface if second failed to be claimed */ 
     4781      if(port_in_use(0)) { 
     4782        cris_free_io_interface(if_usb_1); 
     4783      } 
     4784      printk(KERN_CRIT "usb-host: request IO interface usb2 failed"); 
     4785      retval = -EBUSY; 
     4786      goto out; 
     4787    } 
     4788    devdrv_info("Claimed interface for USB physical port 2\n"); 
     4789  } 
     4790   
     4791  /* Init transfer controller structs and locks */ 
     4792  if((retval = tc_init(hcd)) != 0) { 
     4793    goto out; 
     4794  } 
     4795 
     4796  /* Attach interrupt functions for DMA and init DMA controller */ 
     4797  if((retval = tc_dma_init(hcd)) != 0) { 
     4798    goto out; 
     4799  } 
     4800 
     4801  /* Attach the top IRQ handler for USB controller interrupts */ 
     4802  if (request_irq(ETRAX_USB_HC_IRQ, crisv10_hcd_top_irq, 0, 
     4803                  "ETRAX 100LX built-in USB (HC)", hcd)) { 
     4804    err("Could not allocate IRQ %d for USB", ETRAX_USB_HC_IRQ); 
     4805    retval = -EBUSY; 
     4806    goto out; 
     4807  } 
     4808 
     4809  /* iso_eof is only enabled when isoc traffic is running. */ 
     4810  *R_USB_IRQ_MASK_SET = 
     4811    /* IO_STATE(R_USB_IRQ_MASK_SET, iso_eof, set) | */ 
     4812    IO_STATE(R_USB_IRQ_MASK_SET, bulk_eot, set) | 
     4813    IO_STATE(R_USB_IRQ_MASK_SET, epid_attn, set) | 
     4814    IO_STATE(R_USB_IRQ_MASK_SET, port_status, set) | 
     4815    IO_STATE(R_USB_IRQ_MASK_SET, ctl_status, set); 
     4816 
     4817 
     4818  crisv10_ready_wait(); 
     4819  /* Reset the USB interface. */ 
     4820  *R_USB_COMMAND = 
     4821    IO_STATE(R_USB_COMMAND, port_sel, nop) | 
     4822    IO_STATE(R_USB_COMMAND, port_cmd, reset) | 
     4823    IO_STATE(R_USB_COMMAND, ctrl_cmd, reset); 
     4824 
      4825  /* Designer's Reference, p. 8 - 10 says we should initiate R_USB_FM_PSTART to 
      4826     0x2A30 (10800), to guarantee that control traffic gets 10% of the 
      4827     bandwidth, and periodic transfers may allocate the rest (90%). 
     4828     This doesn't work though. 
     4829     The value 11960 is chosen to be just after the SOF token, with a couple 
     4830     of bit times extra for possible bit stuffing. */ 
     4831  *R_USB_FM_PSTART = IO_FIELD(R_USB_FM_PSTART, value, 11960); 
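  /* Rough arithmetic behind the values above (editor's sketch, not from the
     Designer's Reference): a full-speed frame is 1 ms at 12 Mbit/s, i.e.
     12000 bit times. The register appears to be compared against the
     down-counting frame remainder (cf. R_USB_FM_REMAINING), so 0x2A30 = 10800
     would start periodic traffic 12000 - 10800 = 1200 bit times (10%) into
     the frame, leaving that leading 10% for control transfers. With 11960,
     periodic traffic starts only 12000 - 11960 = 40 bit times in, i.e. right
     after the ~32-bit-time SOF packet plus some margin for bit stuffing. */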
     4832 
     4833  crisv10_ready_wait(); 
     4834  /* Configure the USB interface as a host controller. */ 
     4835  *R_USB_COMMAND = 
     4836    IO_STATE(R_USB_COMMAND, port_sel, nop) | 
     4837    IO_STATE(R_USB_COMMAND, port_cmd, reset) | 
     4838    IO_STATE(R_USB_COMMAND, ctrl_cmd, host_config); 
     4839 
     4840 
      4841  /* Check that the controller is not busy before enabling ports */ 
     4842  crisv10_ready_wait(); 
     4843 
     4844  /* Enable selected USB ports */ 
     4845  if(port_in_use(0)) { 
     4846    *R_USB_PORT1_DISABLE = IO_STATE(R_USB_PORT1_DISABLE, disable, no); 
     4847  } else { 
     4848    *R_USB_PORT1_DISABLE = IO_STATE(R_USB_PORT1_DISABLE, disable, yes); 
     4849  } 
     4850  if(port_in_use(1)) { 
     4851    *R_USB_PORT2_DISABLE = IO_STATE(R_USB_PORT2_DISABLE, disable, no); 
     4852  } else { 
     4853    *R_USB_PORT2_DISABLE = IO_STATE(R_USB_PORT2_DISABLE, disable, yes); 
     4854  } 
     4855 
     4856  crisv10_ready_wait(); 
     4857  /* Start processing of USB traffic. */ 
     4858  *R_USB_COMMAND = 
     4859    IO_STATE(R_USB_COMMAND, port_sel, nop) | 
     4860    IO_STATE(R_USB_COMMAND, port_cmd, reset) | 
     4861    IO_STATE(R_USB_COMMAND, ctrl_cmd, host_run); 
     4862 
      4863  /* Do not continue probe initialization until the USB interface is ready */ 
     4864  crisv10_ready_wait(); 
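  /* Summary of the bring-up sequence above: each R_USB_COMMAND write
     (ctrl_cmd=reset, then host_config, then host_run) is preceded by a
     crisv10_ready_wait() so the previous command has completed, with
     R_USB_FM_PSTART and the per-port disable registers programmed in
     between. */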
     4865 
      4866  /* Register our host controller with the USB core. 
      4867   * Finish the remaining parts of generic HCD initialization: allocate the 
      4868   * buffers of consistent memory, register the bus, 
      4869   * and call the driver's reset() and start() routines. */ 
     4870  retval = usb_add_hcd(hcd, ETRAX_USB_HC_IRQ, IRQF_DISABLED); 
     4871  if (retval != 0) { 
     4872    devdrv_err("Failed registering HCD driver\n"); 
     4873    goto out; 
     4874  } 
     4875 
     4876  return 0; 
     4877 
     4878 out: 
     4879  devdrv_hcd_remove(dev); 
     4880  return retval; 
     4881} 
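The probe/remove pair above is normally bound to the bus through a struct device_driver on this kernel generation; the actual registration code is outside this hunk, so the following is only an illustrative sketch with assumed names.

/* Illustrative sketch only -- field names follow the 2.6-era driver model;
   devdrv_hcd_driver_sketch is an assumed name, not part of this changeset. */
static struct device_driver devdrv_hcd_driver_sketch = {
        .name   = hc_name,
        .bus    = &platform_bus_type,
        .probe  = devdrv_hcd_probe,
        .remove = devdrv_hcd_remove,
};

/* module_hcd_init() would then typically do
        return driver_register(&devdrv_hcd_driver_sketch);
   and the module exit path the matching driver_unregister(). */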
     4882 
     4883 
     4884/* cleanup after the host controller and driver */ 
     4885static int __init_or_module devdrv_hcd_remove(struct device *dev) 
    3974886{ 
    398         urb_entry_t *urb_entry = __urb_list_entry(urb, epid); 
    399         assert(urb_entry); 
    400  
    401         /* Delete entry and free. */ 
    402         list_del(&urb_entry->list); 
    403         kfree(urb_entry); 
    404 } 
    405  
    406 /* Move an urb to the end of the list. */ 
    407 static inline void urb_list_move_last(struct urb *urb, int epid) 
     4887  struct crisv10_hcd *crisv10_hcd = dev_get_drvdata(dev); 
     4888  struct usb_hcd *hcd; 
     4889 
     4890  if (!crisv10_hcd) 
     4891    return 0; 
     4892  hcd = crisv10_hcd_to_hcd(crisv10_hcd); 
     4893 
     4894 
     4895  /* Stop USB Controller in Etrax 100LX */ 
     4896  crisv10_hcd_reset(hcd); 
     4897 
     4898  usb_remove_hcd(hcd); 
     4899  devdrv_dbg("Removed HCD from USB Core\n"); 
     4900 
      4901  /* Free USB Controller IRQ (dev_id must match the one given to request_irq) */ 
      4902  free_irq(ETRAX_USB_HC_IRQ, hcd); 
     4903 
     4904  /* Free resources */ 
     4905  tc_dma_destroy(); 
     4906  tc_destroy(); 
     4907 
     4908 
     4909  if(port_in_use(0)) { 
     4910    cris_free_io_interface(if_usb_1); 
     4911  } 
     4912  if(port_in_use(1)) { 
     4913    cris_free_io_interface(if_usb_2); 
     4914  } 
     4915 
     4916  devdrv_dbg("Freed all claimed resources\n"); 
     4917 
     4918  return 0; 
     4919} 
     4920 
     4921 
     4922#ifdef  CONFIG_PM 
     4923 
     4924static int devdrv_hcd_suspend(struct usb_hcd *hcd, u32 state, u32 level) 
    4084925{ 
    409         urb_entry_t *urb_entry = __urb_list_entry(urb, epid); 
    410         assert(urb_entry); 
    411  
    412         list_move_tail(&urb_entry->list, &urb_list[epid]); 
    413 } 
    414  
    415 /* Get the next urb in the list. */ 
    416 static inline struct urb *urb_list_next(struct urb *urb, int epid) 
     4926  return 0; /* no-op for now */ 
     4927} 
     4928 
     4929static int devdrv_hcd_resume(struct usb_hcd *hcd, u32 level) 
    4174930{ 
    418         urb_entry_t *urb_entry = __urb_list_entry(urb, epid); 
    419  
    420         assert(urb_entry); 
    421  
    422         if (urb_entry->list.next != &urb_list[epid]) { 
    423                 struct list_head *elem = urb_entry->list.next; 
    424                 urb_entry = list_entry(elem, urb_entry_t, list); 
    425                 return urb_entry->urb; 
    426         } else { 
    427                 return NULL; 
    428         } 
    429 } 
    430  
    431  
    432  
    433 /* For debug purposes only. */ 
    434 static inline void urb_list_dump(int epid) 
     4931  return 0; /* no-op for now */ 
     4932} 
     4933 
     4934#endif /* CONFIG_PM */ 
     4935 
     4936 
     4937/*************************************************************/ 
     4938/*************************************************************/ 
     4939/* Module block                                              */ 
     4940/*************************************************************/ 
     4941/*************************************************************/ 
     4942  
     4943/* register driver */ 
     4944static int __init module_hcd_init(void)  
    4354945{ 
    436         struct list_head *entry; 
    437         struct list_head *tmp; 
    438         urb_entry_t *urb_entry; 
    439         int i = 0; 
    440  
    441         info("Dumping urb list for epid %d", epid); 
    442  
    443         list_for_each_safe(entry, tmp, &urb_list[epid]) { 
    444                 urb_entry = list_entry(entry, urb_entry_t, list); 
    445                 info("   entry %d, urb = 0x%lx", i, (unsigned long)urb_entry->urb); 
    446         } 
    447 } 
    448  
    449 static void init_rx_buffers(void); 
    450 static int etrax_rh_unlink_urb(struct urb *urb); 
    451 static void etrax_rh_send_irq(struct urb *urb); 
    452 static void etrax_rh_init_int_timer(struct urb *urb); 
    453 static void etrax_rh_int_timer_do(unsigned long ptr); 
    454  
    455 static int etrax_usb_setup_epid(struct urb *urb); 
    456 static int etrax_usb_lookup_epid(struct urb *urb); 
    457 static int etrax_usb_allocate_epid(void); 
    458 static void etrax_usb_free_epid(int epid); 
    459  
    460 static int etrax_remove_from_sb_list(struct urb *urb); 
    461  
    462 static void* etrax_usb_buffer_alloc(struct usb_bus* bus, size_t size, 
    463         unsigned mem_flags, dma_addr_t *dma); 
    464 static void etrax_usb_buffer_free(struct usb_bus *bus, size_t size, void *addr, dma_addr_t dma); 
    465  
    466 static void etrax_usb_add_to_bulk_sb_list(struct urb *urb, int epid); 
    467 static void etrax_usb_add_to_ctrl_sb_list(struct urb *urb, int epid); 
    468 static void etrax_usb_add_to_intr_sb_list(struct urb *urb, int epid); 
    469 static void etrax_usb_add_to_isoc_sb_list(struct urb *urb, int epid); 
    470  
    471 static int etrax_usb_submit_bulk_urb(struct urb *urb); 
    472 static int etrax_usb_submit_ctrl_urb(struct urb *urb); 
    473 static int etrax_usb_submit_intr_urb(struct urb *urb); 
    474 static int etrax_usb_submit_isoc_urb(struct urb *urb); 
    475  
    476 static int etrax_usb_submit_urb(struct urb *urb, unsigned mem_flags); 
    477 static int etrax_usb_unlink_urb(struct urb *urb, int status); 
    478 static int etrax_usb_get_frame_number(struct usb_device *usb_dev); 
    479  
    480 static irqreturn_t etrax_usb_tx_interrupt(int irq, void *vhc); 
    481 static irqreturn_t etrax_usb_rx_interrupt(int irq, void *vhc); 
    482 static irqreturn_t etrax_usb_hc_interrupt_top_half(int irq, void *vhc); 
    483 static void etrax_usb_hc_interrupt_bottom_half(void *data); 
    484  
    485 static void etrax_usb_isoc_descr_interrupt_bottom_half(void *data); 
    486  
    487  
    488 /* The following is a list of interrupt handlers for the host controller interrupts we use. 
    489    They are called from etrax_usb_hc_interrupt_bottom_half. */ 
    490 static void etrax_usb_hc_isoc_eof_interrupt(void); 
    491 static void etrax_usb_hc_bulk_eot_interrupt(int timer_induced); 
    492 static void etrax_usb_hc_epid_attn_interrupt(usb_interrupt_registers_t *reg); 
    493 static void etrax_usb_hc_port_status_interrupt(usb_interrupt_registers_t *reg); 
    494 static void etrax_usb_hc_ctl_status_interrupt(usb_interrupt_registers_t *reg); 
    495  
    496 static int etrax_rh_submit_urb (struct urb *urb); 
    497  
    498 /* Forward declaration needed because they are used in the rx interrupt routine. */ 
    499 static void etrax_usb_complete_urb(struct urb *urb, int status); 
    500 static void etrax_usb_complete_bulk_urb(struct urb *urb, int status); 
    501 static void etrax_usb_complete_ctrl_urb(struct urb *urb, int status); 
    502 static void etrax_usb_complete_intr_urb(struct urb *urb, int status); 
    503 static void etrax_usb_complete_isoc_urb(struct urb *urb, int status); 
    504  
    505 static int etrax_usb_hc_init(void); 
    506 static void etrax_usb_hc_cleanup(void); 
    507  
    508 static struct usb_operations etrax_usb_device_operations = 
    509 { 
    510         .get_frame_number = etrax_usb_get_frame_number, 
    511         .submit_urb = etrax_usb_submit_urb, 
    512         .unlink_urb = etrax_usb_unlink_urb, 
    513         .buffer_alloc = etrax_usb_buffer_alloc, 
    514         .buffer_free = etrax_usb_buffer_free 
    515 }; 
    516  
     517         /* Note that these functions are always available in their "__" variants, for use in 
     518            error situations. The variants without "__" are controlled by the USB_DEBUG_DESC/ 
     519            USB_DEBUG_URB macros. */ 
    520 static void __dump_urb(struct urb* purb) 
    521 { 
    522         printk("\nurb                  :0x%08lx\n", (unsigned long)purb); 
    523         printk("dev                   :0x%08lx\n", (unsigned long)purb->dev); 
    524         printk("pipe                  :0x%08x\n", purb->pipe); 
    525         printk("status                :%d\n", purb->status); 
    526         printk("transfer_flags        :0x%08x\n", purb->transfer_flags); 
    527         printk("transfer_buffer       :0x%08lx\n", (unsigned long)purb->transfer_buffer); 
    528         printk("transfer_buffer_length:%d\n", purb->transfer_buffer_length); 
    529         printk("actual_length         :%d\n", purb->actual_length); 
    530         printk("setup_packet          :0x%08lx\n", (unsigned long)purb->setup_packet); 
    531         printk("start_frame           :%d\n", purb->start_frame); 
    532         printk("number_of_packets     :%d\n", purb->number_of_packets); 
    533         printk("interval              :%d\n", purb->interval); 
    534         printk("error_count           :%d\n", purb->error_count); 
    535         printk("context               :0x%08lx\n", (unsigned long)purb->context); 
    536         printk("complete              :0x%08lx\n\n", (unsigned long)purb->complete); 
    537 } 
    538  
    539 static void __dump_in_desc(volatile USB_IN_Desc_t *in) 
    540 { 
    541         printk("\nUSB_IN_Desc at 0x%08lx\n", (unsigned long)in); 
    542         printk("  sw_len  : 0x%04x (%d)\n", in->sw_len, in->sw_len); 
    543         printk("  command : 0x%04x\n", in->command); 
    544         printk("  next    : 0x%08lx\n", in->next); 
    545         printk("  buf     : 0x%08lx\n", in->buf); 
    546         printk("  hw_len  : 0x%04x (%d)\n", in->hw_len, in->hw_len); 
    547         printk("  status  : 0x%04x\n\n", in->status); 
    548 } 
    549  
    550 static void __dump_sb_desc(volatile USB_SB_Desc_t *sb) 
    551 { 
    552         char tt = (sb->command & 0x30) >> 4; 
    553         char *tt_string; 
    554  
    555         switch (tt) { 
    556         case 0: 
    557                 tt_string = "zout"; 
    558                 break; 
    559         case 1: 
    560                 tt_string = "in"; 
    561                 break; 
    562         case 2: 
    563                 tt_string = "out"; 
    564                 break; 
    565         case 3: 
    566                 tt_string = "setup"; 
    567                 break; 
    568         default: 
    569                 tt_string = "unknown (weird)"; 
    570         } 
    571  
    572         printk("\n   USB_SB_Desc at 0x%08lx\n", (unsigned long)sb); 
    573         printk("     command : 0x%04x\n", sb->command); 
    574         printk("        rem     : %d\n", (sb->command & 0x3f00) >> 8); 
    575         printk("        full    : %d\n", (sb->command & 0x40) >> 6); 
    576         printk("        tt      : %d (%s)\n", tt, tt_string); 
    577         printk("        intr    : %d\n", (sb->command & 0x8) >> 3); 
    578         printk("        eot     : %d\n", (sb->command & 0x2) >> 1); 
    579         printk("        eol     : %d\n", sb->command & 0x1); 
    580         printk("     sw_len  : 0x%04x (%d)\n", sb->sw_len, sb->sw_len); 
    581         printk("     next    : 0x%08lx\n", sb->next); 
    582         printk("     buf     : 0x%08lx\n\n", sb->buf); 
    583 } 
    584  
    585  
    586 static void __dump_ep_desc(volatile USB_EP_Desc_t *ep) 
    587 { 
    588         printk("\nUSB_EP_Desc at 0x%08lx\n", (unsigned long)ep); 
    589         printk("  command : 0x%04x\n", ep->command); 
    590         printk("     ep_id   : %d\n", (ep->command & 0x1f00) >> 8); 
    591         printk("     enable  : %d\n", (ep->command & 0x10) >> 4); 
    592         printk("     intr    : %d\n", (ep->command & 0x8) >> 3); 
    593         printk("     eof     : %d\n", (ep->command & 0x2) >> 1); 
    594         printk("     eol     : %d\n", ep->command & 0x1); 
    595         printk("  hw_len  : 0x%04x (%d)\n", ep->hw_len, ep->hw_len); 
    596         printk("  next    : 0x%08lx\n", ep->next); 
    597         printk("  sub     : 0x%08lx\n\n", ep->sub); 
    598 } 
    599  
    600 static inline void __dump_ep_list(int pipe_type) 
    601 { 
    602         volatile USB_EP_Desc_t *ep; 
    603         volatile USB_EP_Desc_t *first_ep; 
    604         volatile USB_SB_Desc_t *sb; 
    605  
    606         switch (pipe_type) 
    607         { 
    608         case PIPE_BULK: 
    609                 first_ep = &TxBulkEPList[0]; 
    610                 break; 
    611         case PIPE_CONTROL: 
    612                 first_ep = &TxCtrlEPList[0]; 
    613                 break; 
    614         case PIPE_INTERRUPT: 
    615                 first_ep = &TxIntrEPList[0]; 
    616                 break; 
    617         case PIPE_ISOCHRONOUS: 
    618                 first_ep = &TxIsocEPList[0]; 
    619                 break; 
    620         default: 
    621                 warn("Cannot dump unknown traffic type"); 
    622                 return; 
    623         } 
    624         ep = first_ep; 
    625  
    626         printk("\n\nDumping EP list...\n\n"); 
    627  
    628         do { 
    629                 __dump_ep_desc(ep); 
    630                 /* Cannot phys_to_virt on 0 as it turns into 80000000, which is != 0. */ 
    631                 sb = ep->sub ? phys_to_virt(ep->sub) : 0; 
    632                 while (sb) { 
    633                         __dump_sb_desc(sb); 
    634                         sb = sb->next ? phys_to_virt(sb->next) : 0; 
    635                 } 
    636                 ep = (volatile USB_EP_Desc_t *)(phys_to_virt(ep->next)); 
    637  
    638         } while (ep != first_ep); 
    639 } 
    640  
    641 static inline void __dump_ept_data(int epid) 
    642 { 
    643         unsigned long flags; 
    644         __u32 r_usb_ept_data; 
    645  
    646         if (epid < 0 || epid > 31) { 
    647                 printk("Cannot dump ept data for invalid epid %d\n", epid); 
    648                 return; 
    649         } 
    650  
    651         save_flags(flags); 
    652         cli(); 
    653         *R_USB_EPT_INDEX = IO_FIELD(R_USB_EPT_INDEX, value, epid); 
    654         nop(); 
    655         r_usb_ept_data = *R_USB_EPT_DATA; 
    656         restore_flags(flags); 
    657  
    658         printk("\nR_USB_EPT_DATA = 0x%x for epid %d :\n", r_usb_ept_data, epid); 
    659         if (r_usb_ept_data == 0) { 
    660                 /* No need for more detailed printing. */ 
    661                 return; 
    662         } 
    663         printk("  valid           : %d\n", (r_usb_ept_data & 0x80000000) >> 31); 
    664         printk("  hold            : %d\n", (r_usb_ept_data & 0x40000000) >> 30); 
    665         printk("  error_count_in  : %d\n", (r_usb_ept_data & 0x30000000) >> 28); 
    666         printk("  t_in            : %d\n", (r_usb_ept_data & 0x08000000) >> 27); 
    667         printk("  low_speed       : %d\n", (r_usb_ept_data & 0x04000000) >> 26); 
    668         printk("  port            : %d\n", (r_usb_ept_data & 0x03000000) >> 24); 
    669         printk("  error_code      : %d\n", (r_usb_ept_data & 0x00c00000) >> 22); 
    670         printk("  t_out           : %d\n", (r_usb_ept_data & 0x00200000) >> 21); 
    671         printk("  error_count_out : %d\n", (r_usb_ept_data & 0x00180000) >> 19); 
    672         printk("  max_len         : %d\n", (r_usb_ept_data & 0x0003f800) >> 11); 
    673         printk("  ep              : %d\n", (r_usb_ept_data & 0x00000780) >> 7); 
    674         printk("  dev             : %d\n", (r_usb_ept_data & 0x0000003f)); 
    675 } 
    676  
    677 static inline void __dump_ept_data_list(void) 
    678 { 
    679         int i; 
    680  
    681         printk("Dumping the whole R_USB_EPT_DATA list\n"); 
    682  
    683         for (i = 0; i < 32; i++) { 
    684                 __dump_ept_data(i); 
    685         } 
    686 } 
    687 #ifdef USB_DEBUG_DESC 
     688 #define dump_in_desc(...) __dump_in_desc(__VA_ARGS__) 
     689 #define dump_sb_desc(...) __dump_sb_desc(__VA_ARGS__) 
     690 #define dump_ep_desc(...) __dump_ep_desc(__VA_ARGS__) 
    691 #else 
    692 #define dump_in_desc(...) do {} while (0) 
    693 #define dump_sb_desc(...) do {} while (0) 
    694 #define dump_ep_desc(...) do {} while (0) 
     4946   
     4947  if (usb_disabled()) 
     4948    return -ENODEV; 
     4949 
      4950  /* Select the enabled ports based on the defines generated by 
      4951     menuconfig */ 
     4952#ifndef CONFIG_ETRAX_USB_HOST_PORT1 
     4953  ports &= ~(1<<0); 
    6954954#endif 
    696  
    697 #ifdef USB_DEBUG_URB 
    698 #define dump_urb(x)     __dump_urb(x) 
    699 #else 
    700 #define dump_urb(x)     do {} while (0) 
     4955#ifndef CONFIG_ETRAX_USB_HOST_PORT2 
     4956  ports &= ~(1<<1); 
    7014957#endif 
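For context, `ports` is a module-level bitmask where bit n selects physical port n+1; port_in_use(), used by the probe/remove code above, presumably just tests that bit. A minimal sketch under that assumption (the real definition is outside this hunk):

/* Sketch only -- assumed shape of the port selection helpers. */
static unsigned int ports = 3;  /* both ports enabled by default (assumed) */

static int port_in_use(unsigned int port)
{
        return ports & (1 << port);
}

Clearing a bit above therefore keeps devdrv_hcd_probe() from claiming the corresponding IO interface and enabling that port.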
    7024958 
    703 static void init_rx_buffers(void) 
    704 { 
    705         int i; 
    706  
    707         DBFENTER; 
    708  
    709         for (i = 0; i < (NBR_OF_RX_DESC - 1); i++) { 
    710                 RxDescList[i].sw_len = RX_DESC_BUF_SIZE; 
    711                 RxDescList[i].command = 0; 
    712                 RxDescList[i].next = virt_to_phys(&RxDescList[i + 1]); 
    713                 RxDescList[i].buf = virt_to_phys(RxBuf + (i * RX_DESC_BUF_SIZE)); 
    714                 RxDescList[i].hw_len = 0; 
    715                 RxDescList[i].status = 0; 
    716  
    717                 /* DMA IN cache bug. (struct etrax_dma_descr has the same layout as USB_IN_Desc 
    718                    for the relevant fields.) */ 
    719                 prepare_rx_descriptor((struct etrax_dma_descr*)&RxDescList[i]); 
    720  
    721         } 
    722  
    723         RxDescList[i].sw_len = RX_DESC_BUF_SIZE; 
    724         RxDescList[i].command = IO_STATE(USB_IN_command, eol, yes); 
    725         RxDescList[i].next = virt_to_phys(&RxDescList[0]); 
    726         RxDescList[i].buf = virt_to_phys(RxBuf + (i * RX_DESC_BUF_SIZE)); 
    727         RxDescList[i].hw_len = 0; 
    728         RxDescList[i].status = 0; 
    729  
    730         myNextRxDesc = &RxDescList[0]; 
    731         myLastRxDesc = &RxDescList[NBR_OF_RX_DESC - 1]; 
    732         myPrevRxDesc = &RxDescList[NBR_OF_RX_DESC - 1]; 
    733  
    734         *R_DMA_CH9_FIRST = virt_to_phys(myNextRxDesc); 
    735         *R_DMA_CH9_CMD = IO_STATE(R_DMA_CH9_CMD, cmd, start); 
    736  
    737         DBFEXIT; 
    738 } 
    739  
    740 static void init_tx_bulk_ep(void) 
    741 { 
    742         int i; 
    743  
    744         DBFENTER; 
    745  
    746         for (i = 0; i < (NBR_OF_EPIDS - 1); i++) { 
    747                 CHECK_ALIGN(&TxBulkEPList[i]); 
    748                 TxBulkEPList[i].hw_len = 0; 
    749                 TxBulkEPList[i].command = IO_FIELD(USB_EP_command, epid, i); 
    750                 TxBulkEPList[i].sub = 0; 
    751                 TxBulkEPList[i].next = virt_to_phys(&TxBulkEPList[i + 1]); 
    752  
    753                 /* Initiate two EPs, disabled and with the eol flag set. No need for any 
    754                    preserved epid. */ 
    755  
    756                 /* The first one has the intr flag set so we get an interrupt when the DMA 
    757                    channel is about to become disabled. */ 
    758                 CHECK_ALIGN(&TxBulkDummyEPList[i][0]); 
    759                 TxBulkDummyEPList[i][0].hw_len = 0; 
    760                 TxBulkDummyEPList[i][0].command = (IO_FIELD(USB_EP_command, epid, DUMMY_EPID) | 
    761                                                    IO_STATE(USB_EP_command, eol, yes) | 
    762                                                    IO_STATE(USB_EP_command, intr, yes)); 
    763                 TxBulkDummyEPList[i][0].sub = 0; 
    764                 TxBulkDummyEPList[i][0].next = virt_to_phys(&TxBulkDummyEPList[i][1]); 
    765  
    766                 /* The second one. */ 
    767                 CHECK_ALIGN(&TxBulkDummyEPList[i][1]); 
    768                 TxBulkDummyEPList[i][1].hw_len = 0; 
    769                 TxBulkDummyEPList[i][1].command = (IO_FIELD(USB_EP_command, epid, DUMMY_EPID) | 
    770                                                    IO_STATE(USB_EP_command, eol, yes)); 
    771                 TxBulkDummyEPList[i][1].sub = 0; 
    772                 /* The last dummy's next pointer is the same as the current EP's next pointer. */ 
    773                 TxBulkDummyEPList[i][1].next = virt_to_phys(&TxBulkEPList[i + 1]); 
    774         } 
    775  
    776         /* Configure the last one. */ 
    777         CHECK_ALIGN(&TxBulkEPList[i]); 
    778         TxBulkEPList[i].hw_len = 0; 
    779         TxBulkEPList[i].command = (IO_STATE(USB_EP_command, eol, yes) | 
    780                                    IO_FIELD(USB_EP_command, epid, i)); 
    781         TxBulkEPList[i].sub = 0; 
    782         TxBulkEPList[i].next = virt_to_phys(&TxBulkEPList[0]); 
    783  
    784         /* No need configuring dummy EPs for the last one as it will never be used for 
     785            bulk traffic (i == INVALID_EPID at this point). 
    786  
    787         /* Set up to start on the last EP so we will enable it when inserting traffic 
    788            for the first time (imitating the situation where the DMA has stopped 
    789            because there was no more traffic). */ 
    790         *R_DMA_CH8_SUB0_EP = virt_to_phys(&TxBulkEPList[i]); 
    791         /* No point in starting the bulk channel yet. 
    792          *R_DMA_CH8_SUB0_CMD = IO_STATE(R_DMA_CH8_SUB0_CMD, cmd, start); */ 
    793         DBFEXIT; 
    794 } 
    795  
    796 static void init_tx_ctrl_ep(void) 
    797 { 
    798         int i; 
    799  
    800         DBFENTER; 
    801  
    802         for (i = 0; i < (NBR_OF_EPIDS - 1); i++) { 
    803                 CHECK_ALIGN(&TxCtrlEPList[i]); 
    804                 TxCtrlEPList[i].hw_len = 0; 
    805                 TxCtrlEPList[i].command = IO_FIELD(USB_EP_command, epid, i); 
    806                 TxCtrlEPList[i].sub = 0; 
    807                 TxCtrlEPList[i].next = virt_to_phys(&TxCtrlEPList[i + 1]); 
    808         } 
    809  
    810         CHECK_ALIGN(&TxCtrlEPList[i]); 
    811         TxCtrlEPList[i].hw_len = 0; 
    812         TxCtrlEPList[i].command = (IO_STATE(USB_EP_command, eol, yes) | 
    813                                    IO_FIELD(USB_EP_command, epid, i)); 
    814  
    815         TxCtrlEPList[i].sub = 0; 
    816         TxCtrlEPList[i].next = virt_to_phys(&TxCtrlEPList[0]); 
    817  
    818         *R_DMA_CH8_SUB1_EP = virt_to_phys(&TxCtrlEPList[0]); 
    819         *R_DMA_CH8_SUB1_CMD = IO_STATE(R_DMA_CH8_SUB1_CMD, cmd, start); 
    820  
    821         DBFEXIT; 
    822 } 
    823  
    824  
    825 static void init_tx_intr_ep(void) 
    826 { 
    827         int i; 
    828  
    829         DBFENTER; 
    830  
    831         /* Read comment at zout_buffer declaration for an explanation to this. */ 
    832         TxIntrSB_zout.sw_len = 1; 
    833         TxIntrSB_zout.next = 0; 
    834         TxIntrSB_zout.buf = virt_to_phys(&zout_buffer[0]); 
    835         TxIntrSB_zout.command = (IO_FIELD(USB_SB_command, rem, 0) | 
    836                                  IO_STATE(USB_SB_command, tt, zout) | 
    837                                  IO_STATE(USB_SB_command, full, yes) | 
    838                                  IO_STATE(USB_SB_command, eot, yes) | 
    839                                  IO_STATE(USB_SB_command, eol, yes)); 
    840  
    841         for (i = 0; i < (MAX_INTR_INTERVAL - 1); i++) { 
    842                 CHECK_ALIGN(&TxIntrEPList[i]); 
    843                 TxIntrEPList[i].hw_len = 0; 
    844                 TxIntrEPList[i].command = 
    845                         (IO_STATE(USB_EP_command, eof, yes) | 
    846                          IO_STATE(USB_EP_command, enable, yes) | 
    847                          IO_FIELD(USB_EP_command, epid, INVALID_EPID)); 
    848                 TxIntrEPList[i].sub = virt_to_phys(&TxIntrSB_zout); 
    849                 TxIntrEPList[i].next = virt_to_phys(&TxIntrEPList[i + 1]); 
    850         } 
    851  
    852         CHECK_ALIGN(&TxIntrEPList[i]); 
    853         TxIntrEPList[i].hw_len = 0; 
    854         TxIntrEPList[i].command = 
    855                 (IO_STATE(USB_EP_command, eof, yes) | 
    856                  IO_STATE(USB_EP_command, eol, yes) | 
    857                  IO_STATE(USB_EP_command, enable, yes) | 
    858                  IO_FIELD(USB_EP_command, epid, INVALID_EPID)); 
    859         TxIntrEPList[i].sub = virt_to_phys(&TxIntrSB_zout); 
    860         TxIntrEPList[i].next = virt_to_phys(&TxIntrEPList[0]); 
    861  
    862         *R_DMA_CH8_SUB2_EP = virt_to_phys(&TxIntrEPList[0]); 
    863         *R_DMA_CH8_SUB2_CMD = IO_STATE(R_DMA_CH8_SUB2_CMD, cmd, start); 
    864         DBFEXIT; 
    865 } 
    866  
    867 static void init_tx_isoc_ep(void) 
    868 { 
    869         int i; 
    870  
    871         DBFENTER; 
    872  
    873         /* Read comment at zout_buffer declaration for an explanation to this. */ 
    874         TxIsocSB_zout.sw_len = 1; 
    875         TxIsocSB_zout.next = 0; 
    876         TxIsocSB_zout.buf = virt_to_phys(&zout_buffer[0]); 
    877         TxIsocSB_zout.command = (IO_FIELD(USB_SB_command, rem, 0) | 
    878                                  IO_STATE(USB_SB_command, tt, zout) | 
    879                                  IO_STATE(USB_SB_command, full, yes) | 
    880                                  IO_STATE(USB_SB_command, eot, yes) | 
    881                                  IO_STATE(USB_SB_command, eol, yes)); 
    882  
    883         /* The last isochronous EP descriptor is a dummy. */ 
    884  
    885         for (i = 0; i < (NBR_OF_EPIDS - 1); i++) { 
    886                 CHECK_ALIGN(&TxIsocEPList[i]); 
    887                 TxIsocEPList[i].hw_len = 0; 
    888                 TxIsocEPList[i].command = IO_FIELD(USB_EP_command, epid, i); 
    889                 TxIsocEPList[i].sub = 0; 
    890                 TxIsocEPList[i].next = virt_to_phys(&TxIsocEPList[i + 1]); 
    891         } 
    892  
    893         CHECK_ALIGN(&TxIsocEPList[i]); 
    894         TxIsocEPList[i].hw_len = 0; 
    895  
    896         /* Must enable the last EP descr to get eof interrupt. */ 
    897         TxIsocEPList[i].command = (IO_STATE(USB_EP_command, enable, yes) | 
    898                                    IO_STATE(USB_EP_command, eof, yes) | 
    899                                    IO_STATE(USB_EP_command, eol, yes) | 
    900                                    IO_FIELD(USB_EP_command, epid, INVALID_EPID)); 
    901         TxIsocEPList[i].sub = virt_to_phys(&TxIsocSB_zout); 
    902         TxIsocEPList[i].next = virt_to_phys(&TxIsocEPList[0]); 
    903  
    904         *R_DMA_CH8_SUB3_EP = virt_to_phys(&TxIsocEPList[0]); 
    905         *R_DMA_CH8_SUB3_CMD = IO_STATE(R_DMA_CH8_SUB3_CMD, cmd, start); 
    906  
    907         DBFEXIT; 
    908 } 
    909  
    910 static void etrax_usb_unlink_intr_urb(struct urb *urb) 
    911 { 
    912         volatile USB_EP_Desc_t *first_ep;  /* First EP in the list. */ 
    913         volatile USB_EP_Desc_t *curr_ep;   /* Current EP, the iterator. */ 
    914         volatile USB_EP_Desc_t *next_ep;   /* The EP after current. */ 
    915         volatile USB_EP_Desc_t *unlink_ep; /* The one we should remove from the list. */ 
    916  
    917         int epid; 
    918  
    919         /* Read 8.8.4 in Designer's Reference, "Removing an EP Descriptor from the List". */ 
    920  
    921         DBFENTER; 
    922  
    923         epid = ((etrax_urb_priv_t *)urb->hcpriv)->epid; 
    924  
    925         first_ep = &TxIntrEPList[0]; 
    926         curr_ep = first_ep; 
    927  
    928  
    929         /* Note that this loop removes all EP descriptors with this epid. This assumes 
    930            that all EP descriptors belong to the one and only urb for this epid. */ 
    931  
    932         do { 
    933                 next_ep = (USB_EP_Desc_t *)phys_to_virt(curr_ep->next); 
    934  
    935                 if (IO_EXTRACT(USB_EP_command, epid, next_ep->command) == epid) { 
    936  
    937                         dbg_intr("Found EP to unlink for epid %d", epid); 
    938  
    939                         /* This is the one we should unlink. */ 
    940                         unlink_ep = next_ep; 
    941  
    942                         /* Actually unlink the EP from the DMA list. */ 
    943                         curr_ep->next = unlink_ep->next; 
    944  
    945                         /* Wait until the DMA is no longer at this descriptor. */ 
    946                         while (*R_DMA_CH8_SUB2_EP == virt_to_phys(unlink_ep)); 
    947  
    948                         /* Now we are free to remove it and its SB descriptor. 
    949                            Note that it is assumed here that there is only one sb in the 
    950                            sb list for this ep. */ 
    951                         kmem_cache_free(usb_desc_cache, phys_to_virt(unlink_ep->sub)); 
    952                         kmem_cache_free(usb_desc_cache, (USB_EP_Desc_t *)unlink_ep); 
    953                 } 
    954  
    955                 curr_ep = phys_to_virt(curr_ep->next); 
    956  
    957         } while (curr_ep != first_ep); 
    958         urb->hcpriv = NULL; 
    959 } 
    960  
    961 void etrax_usb_do_intr_recover(int epid) 
    962 { 
    963         USB_EP_Desc_t *first_ep, *tmp_ep; 
    964  
    965         DBFENTER; 
    966  
    967         first_ep = (USB_EP_Desc_t *)phys_to_virt(*R_DMA_CH8_SUB2_EP); 
    968         tmp_ep = first_ep; 
    969  
    970         /* What this does is simply to walk the list of interrupt 
    971            ep descriptors and enable those that are disabled. */ 
    972  
    973         do { 
    974                 if (IO_EXTRACT(USB_EP_command, epid, tmp_ep->command) == epid && 
    975                     !(tmp_ep->command & IO_MASK(USB_EP_command, enable))) { 
    976                         tmp_ep->command |= IO_STATE(USB_EP_command, enable, yes); 
    977                 } 
    978  
    979                 tmp_ep = (USB_EP_Desc_t *)phys_to_virt(tmp_ep->next); 
    980  
    981         } while (tmp_ep != first_ep); 
    982  
    983  
    984         DBFEXIT; 
    985 } 
    986  
    987 static int etrax_rh_unlink_urb (struct urb *urb) 
    988 { 
    989         etrax_hc_t *hc; 
    990  
    991         DBFENTER; 
    992  
    993         hc = urb->dev->bus->hcpriv; 
    994  
    995         if (hc->rh.urb == urb) { 
    996                 hc->rh.send = 0; 
    997                 del_timer(&hc->rh.rh_int_timer); 
    998         } 
    999  
    1000         DBFEXIT; 
    1001         return 0; 
    1002 } 
    1003  
    1004 static void etrax_rh_send_irq(struct urb *urb) 
    1005 { 
    1006         __u16 data = 0; 
    1007         etrax_hc_t *hc = urb->dev->bus->hcpriv; 
    1008         DBFENTER; 
    1009  
    1010 /* 
    1011   dbg_rh("R_USB_FM_NUMBER   : 0x%08X", *R_USB_FM_NUMBER); 
    1012   dbg_rh("R_USB_FM_REMAINING: 0x%08X", *R_USB_FM_REMAINING); 
    1013 */ 
    1014  
    1015         data |= (hc->rh.wPortChange_1) ? (1 << 1) : 0; 
    1016         data |= (hc->rh.wPortChange_2) ? (1 << 2) : 0; 
    1017  
    1018         *((__u16 *)urb->transfer_buffer) = cpu_to_le16(data); 
    1019         /* FIXME: Why is actual_length set to 1 when data is 2 bytes? 
    1020            Since only 1 byte is used, why not declare data as __u8? */ 
    1021         urb->actual_length = 1; 
    1022         urb->status = 0; 
    1023  
    1024         if (hc->rh.send && urb->complete) { 
    1025                 dbg_rh("wPortChange_1: 0x%04X", hc->rh.wPortChange_1); 
    1026                 dbg_rh("wPortChange_2: 0x%04X", hc->rh.wPortChange_2); 
    1027  
    1028                 urb->complete(urb, NULL); 
    1029         } 
    1030  
    1031         DBFEXIT; 
    1032 } 
    1033  
    1034 static void etrax_rh_init_int_timer(struct urb *urb) 
    1035 { 
    1036         etrax_hc_t *hc; 
    1037  
    1038         DBFENTER; 
    1039  
    1040         hc = urb->dev->bus->hcpriv; 
    1041         hc->rh.interval = urb->interval; 
    1042         init_timer(&hc->rh.rh_int_timer); 
    1043         hc->rh.rh_int_timer.function = etrax_rh_int_timer_do; 
    1044         hc->rh.rh_int_timer.data = (unsigned long)urb; 
    1045         /* FIXME: Is the jiffies resolution enough? All intervals < 10 ms will be mapped 
    1046            to 0, and the rest to the nearest lower 10 ms. */ 
    1047         hc->rh.rh_int_timer.expires = jiffies + ((HZ * hc->rh.interval) / 1000); 
    1048         add_timer(&hc->rh.rh_int_timer); 
    1049  
    1050         DBFEXIT; 
    1051 } 
    1052  
    1053 static void etrax_rh_int_timer_do(unsigned long ptr) 
    1054 { 
    1055         struct urb *urb; 
    1056         etrax_hc_t *hc; 
    1057  
    1058         DBFENTER; 
    1059  
    1060         urb = (struct urb*)ptr; 
    1061         hc = urb->dev->bus->hcpriv; 
    1062  
    1063         if (hc->rh.send) { 
    1064                 etrax_rh_send_irq(urb); 
    1065         } 
    1066  
    1067         DBFEXIT; 
    1068 } 
    1069  
    1070 static int etrax_usb_setup_epid(struct urb *urb) 
    1071 { 
    1072         int epid; 
    1073         char devnum, endpoint, out_traffic, slow; 
    1074         int maxlen; 
    1075         unsigned long flags; 
    1076  
    1077         DBFENTER; 
    1078  
    1079         epid = etrax_usb_lookup_epid(urb); 
    1080         if ((epid != -1)){ 
    1081                 /* An epid that fits this urb has been found. */ 
    1082                 DBFEXIT; 
    1083                 return epid; 
    1084         } 
    1085  
    1086         /* We must find and initiate a new epid for this urb. */ 
    1087         epid = etrax_usb_allocate_epid(); 
    1088  
    1089         if (epid == -1) { 
    1090                 /* Failed to allocate a new epid. */ 
    1091                 DBFEXIT; 
    1092                 return epid; 
    1093         } 
    1094  
    1095         /* We now have a new epid to use. Initiate it. */ 
    1096         set_bit(epid, (void *)&epid_usage_bitmask); 
    1097  
    1098         devnum = usb_pipedevice(urb->pipe); 
    1099         endpoint = usb_pipeendpoint(urb->pipe); 
    1100         slow = usb_pipeslow(urb->pipe); 
    1101         maxlen = usb_maxpacket(urb->dev, urb->pipe, usb_pipeout(urb->pipe)); 
    1102         if (usb_pipetype(urb->pipe) == PIPE_CONTROL) { 
    1103                 /* We want both IN and OUT control traffic to be put on the same EP/SB list. */ 
    1104                 out_traffic = 1; 
    1105         } else { 
    1106                 out_traffic = usb_pipeout(urb->pipe); 
    1107         } 
    1108  
    1109         save_flags(flags); 
    1110         cli(); 
    1111  
    1112         *R_USB_EPT_INDEX = IO_FIELD(R_USB_EPT_INDEX, value, epid); 
    1113         nop(); 
    1114  
    1115         if (usb_pipetype(urb->pipe) == PIPE_ISOCHRONOUS) { 
    1116                 *R_USB_EPT_DATA_ISO = IO_STATE(R_USB_EPT_DATA_ISO, valid, yes) | 
    1117                         /* FIXME: Change any to the actual port? */ 
    1118                         IO_STATE(R_USB_EPT_DATA_ISO, port, any) | 
    1119                         IO_FIELD(R_USB_EPT_DATA_ISO, max_len, maxlen) | 
    1120                         IO_FIELD(R_USB_EPT_DATA_ISO, ep, endpoint) | 
    1121                         IO_FIELD(R_USB_EPT_DATA_ISO, dev, devnum); 
    1122         } else { 
    1123                 *R_USB_EPT_DATA = IO_STATE(R_USB_EPT_DATA, valid, yes) | 
    1124                         IO_FIELD(R_USB_EPT_DATA, low_speed, slow) | 
    1125                         /* FIXME: Change any to the actual port? */ 
    1126                         IO_STATE(R_USB_EPT_DATA, port, any) | 
    1127                         IO_FIELD(R_USB_EPT_DATA, max_len, maxlen) | 
    1128                         IO_FIELD(R_USB_EPT_DATA, ep, endpoint) | 
    1129                         IO_FIELD(R_USB_EPT_DATA, dev, devnum); 
    1130         } 
    1131  
    1132         restore_flags(flags); 
    1133  
    1134         if (out_traffic) { 
    1135                 set_bit(epid, (void *)&epid_out_traffic); 
    1136         } else { 
    1137                 clear_bit(epid, (void *)&epid_out_traffic); 
    1138         } 
    1139  
    1140         dbg_epid("Setting up epid %d with devnum %d, endpoint %d and max_len %d (%s)", 
    1141                  epid, devnum, endpoint, maxlen, out_traffic ? "OUT" : "IN"); 
    1142  
    1143         DBFEXIT; 
    1144         return epid; 
    1145 } 
    1146  
    1147 static void etrax_usb_free_epid(int epid) 
    1148 { 
    1149         unsigned long flags; 
    1150  
    1151         DBFENTER; 
    1152  
    1153         if (!test_bit(epid, (void *)&epid_usage_bitmask)) { 
    1154                 warn("Trying to free unused epid %d", epid); 
    1155                 DBFEXIT; 
    1156                 return; 
    1157         } 
    1158  
    1159         save_flags(flags); 
    1160         cli(); 
    1161  
    1162         *R_USB_EPT_INDEX = IO_FIELD(R_USB_EPT_INDEX, value, epid); 
    1163         nop(); 
    1164         while (*R_USB_EPT_DATA & IO_MASK(R_USB_EPT_DATA, hold)); 
    1165         /* This will, among other things, set the valid field to 0. */ 
    1166         *R_USB_EPT_DATA = 0; 
    1167         restore_flags(flags); 
    1168  
    1169         clear_bit(epid, (void *)&epid_usage_bitmask); 
    1170  
    1171  
    1172         dbg_epid("Freed epid %d", epid); 
    1173  
    1174         DBFEXIT; 
    1175 } 
    1176  
    1177 static int etrax_usb_lookup_epid(struct urb *urb) 
    1178 { 
    1179         int i; 
    1180         __u32 data; 
    1181         char devnum, endpoint, slow, out_traffic; 
    1182         int maxlen; 
    1183         unsigned long flags; 
    1184  
    1185         DBFENTER; 
    1186  
    1187         devnum = usb_pipedevice(urb->pipe); 
    1188         endpoint = usb_pipeendpoint(urb->pipe); 
    1189         slow = usb_pipeslow(urb->pipe); 
    1190         maxlen = usb_maxpacket(urb->dev, urb->pipe, usb_pipeout(urb->pipe)); 
    1191         if (usb_pipetype(urb->pipe) == PIPE_CONTROL) { 
    1192                 /* We want both IN and OUT control traffic to be put on the same EP/SB list. */ 
    1193                 out_traffic = 1; 
    1194         } else { 
    1195                 out_traffic = usb_pipeout(urb->pipe); 
    1196         } 
    1197  
     1198         /* Step through all epids. */ 
    1199         for (i = 0; i < NBR_OF_EPIDS; i++) { 
    1200                 if (test_bit(i, (void *)&epid_usage_bitmask) && 
    1201                     test_bit(i, (void *)&epid_out_traffic) == out_traffic) { 
    1202  
    1203                         save_flags(flags); 
    1204                         cli(); 
    1205                         *R_USB_EPT_INDEX = IO_FIELD(R_USB_EPT_INDEX, value, i); 
    1206                         nop(); 
    1207  
    1208                         if (usb_pipetype(urb->pipe) == PIPE_ISOCHRONOUS) { 
    1209                                 data = *R_USB_EPT_DATA_ISO; 
    1210                                 restore_flags(flags); 
    1211  
    1212                                 if ((IO_MASK(R_USB_EPT_DATA_ISO, valid) & data) && 
    1213                                     (IO_EXTRACT(R_USB_EPT_DATA_ISO, dev, data) == devnum) && 
    1214                                     (IO_EXTRACT(R_USB_EPT_DATA_ISO, ep, data) == endpoint) && 
    1215                                     (IO_EXTRACT(R_USB_EPT_DATA_ISO, max_len, data) == maxlen)) { 
    1216                                         dbg_epid("Found epid %d for devnum %d, endpoint %d (%s)", 
    1217                                                  i, devnum, endpoint, out_traffic ? "OUT" : "IN"); 
    1218                                         DBFEXIT; 
    1219                                         return i; 
    1220                                 } 
    1221                         } else { 
    1222                                 data = *R_USB_EPT_DATA; 
    1223                                 restore_flags(flags); 
    1224  
    1225                                 if ((IO_MASK(R_USB_EPT_DATA, valid) & data) && 
    1226                                     (IO_EXTRACT(R_USB_EPT_DATA, dev, data) == devnum) && 
    1227                                     (IO_EXTRACT(R_USB_EPT_DATA, ep, data) == endpoint) && 
    1228                                     (IO_EXTRACT(R_USB_EPT_DATA, low_speed, data) == slow) && 
    1229                                     (IO_EXTRACT(R_USB_EPT_DATA, max_len, data) == maxlen)) { 
    1230                                         dbg_epid("Found epid %d for devnum %d, endpoint %d (%s)", 
    1231                                                  i, devnum, endpoint, out_traffic ? "OUT" : "IN"); 
    1232                                         DBFEXIT; 
    1233                                         return i; 
    1234                                 } 
    1235                         } 
    1236                 } 
    1237         } 
    1238  
    1239         DBFEXIT; 
    1240         return -1; 
    1241 } 
    1242  
    1243 static int etrax_usb_allocate_epid(void) 
    1244 { 
    1245         int i; 
    1246  
    1247         DBFENTER; 
    1248  
    1249         for (i = 0; i < NBR_OF_EPIDS; i++) { 
    1250                 if (!test_bit(i, (void *)&epid_usage_bitmask)) { 
    1251                         dbg_epid("Found free epid %d", i); 
    1252                         DBFEXIT; 
    1253                         return i; 
    1254                 } 
    1255         } 
    1256  
    1257         dbg_epid("Found no free epids"); 
    1258         DBFEXIT; 
    1259         return -1; 
    1260 } 
    1261  
    1262 static int etrax_usb_submit_urb(struct urb *urb, unsigned mem_flags) 
    1263 { 
    1264         etrax_hc_t *hc; 
    1265         int ret = -EINVAL; 
    1266  
    1267         DBFENTER; 
    1268  
    1269         if (!urb->dev || !urb->dev->bus) { 
    1270                 return -ENODEV; 
    1271         } 
    1272         if (usb_maxpacket(urb->dev, urb->pipe, usb_pipeout(urb->pipe)) <= 0) { 
    1273                 info("Submit urb to pipe with maxpacketlen 0, pipe 0x%X\n", urb->pipe); 
    1274                 return -EMSGSIZE; 
    1275         } 
    1276  
    1277         if (urb->timeout) { 
    1278                 /* FIXME. */ 
    1279                 warn("urb->timeout specified, ignoring."); 
    1280         } 
    1281  
    1282         hc = (etrax_hc_t*)urb->dev->bus->hcpriv; 
    1283  
    1284         if (usb_pipedevice(urb->pipe) == hc->rh.devnum) { 
    1285                 /* This request is for the Virtual Root Hub. */ 
    1286                 ret = etrax_rh_submit_urb(urb); 
    1287  
    1288         } else if (usb_pipetype(urb->pipe) == PIPE_BULK) { 
    1289  
    1290                 ret = etrax_usb_submit_bulk_urb(urb); 
    1291  
    1292         } else if (usb_pipetype(urb->pipe) == PIPE_CONTROL) { 
    1293  
    1294                 ret = etrax_usb_submit_ctrl_urb(urb); 
    1295  
    1296         } else if (usb_pipetype(urb->pipe) == PIPE_INTERRUPT) { 
    1297                 int bustime; 
    1298  
    1299                 if (urb->bandwidth == 0) { 
    1300                         bustime = usb_check_bandwidth(urb->dev, urb); 
    1301                         if (bustime < 0) { 
    1302                                 ret = bustime; 
    1303                         } else { 
    1304                                 ret = etrax_usb_submit_intr_urb(urb); 
    1305                                 if (ret == 0) 
    1306                                         usb_claim_bandwidth(urb->dev, urb, bustime, 0); 
    1307                         } 
    1308                 } else { 
    1309                         /* Bandwidth already set. */ 
    1310                         ret = etrax_usb_submit_intr_urb(urb); 
    1311                 } 
    1312  
    1313         } else if (usb_pipetype(urb->pipe) == PIPE_ISOCHRONOUS) { 
    1314                 int bustime; 
    1315  
    1316                 if (urb->bandwidth == 0) { 
    1317                         bustime = usb_check_bandwidth(urb->dev, urb); 
    1318                         if (bustime < 0) { 
    1319                                 ret = bustime; 
    1320                         } else { 
    1321                                 ret = etrax_usb_submit_isoc_urb(urb); 
    1322                                 if (ret == 0) 
    1323                                         usb_claim_bandwidth(urb->dev, urb, bustime, 0); 
    1324                         } 
    1325                 } else { 
    1326                         /* Bandwidth already set. */ 
    1327                         ret = etrax_usb_submit_isoc_urb(urb); 
    1328                 } 
    1329         } 
    1330  
    1331         DBFEXIT; 
    1332  
    1333         if (ret != 0) 
    1334           printk("Submit URB error %d\n", ret); 
    1335  
    1336         return ret; 
    1337 } 
    1338  
    1339 static int etrax_usb_unlink_urb(struct urb *urb, int status) 
    1340 { 
    1341         etrax_hc_t *hc; 
    1342         etrax_urb_priv_t *urb_priv; 
    1343         int epid; 
    1344         unsigned int flags; 
    1345  
    1346         DBFENTER; 
    1347  
    1348         if (!urb) { 
    1349                 return -EINVAL; 
    1350         } 
    1351  
    1352         /* Disable interrupts here since a descriptor interrupt for the isoc epid 
     1353            will modify the sb list.  This could possibly be done at a finer granularity, but 
    1354            unlink_urb should not be used frequently anyway. 
    1355         */ 
    1356  
    1357         save_flags(flags); 
    1358         cli(); 
    1359  
    1360         if (!urb->dev || !urb->dev->bus) { 
    1361                 restore_flags(flags); 
    1362                 return -ENODEV; 
    1363         } 
    1364         if (!urb->hcpriv) { 
    1365                 /* This happens if a device driver calls unlink on an urb that 
    1366                    was never submitted (lazy driver) or if the urb was completed 
    1367                    while unlink was being called. */ 
    1368                 restore_flags(flags); 
    1369                 return 0; 
    1370         } 
    1371         if (urb->transfer_flags & URB_ASYNC_UNLINK) { 
    1372                 /* FIXME. */ 
    1373                 /* If URB_ASYNC_UNLINK is set: 
    1374                    unlink 
    1375                    move to a separate urb list 
    1376                    call complete at next sof with ECONNRESET 
    1377  
    1378                    If not: 
    1379                    wait 1 ms 
    1380                    unlink 
    1381                    call complete with ENOENT 
    1382                 */ 
    1383                 warn("URB_ASYNC_UNLINK set, ignoring."); 
    1384         } 
    1385  
    1386         /* One might think that urb->status = -EINPROGRESS would be a requirement for unlinking, 
    1387            but that doesn't work for interrupt and isochronous traffic since they are completed 
    1388            repeatedly, and urb->status is set then. That may in itself be a bug though. */ 
    1389  
    1390         hc = urb->dev->bus->hcpriv; 
    1391         urb_priv = (etrax_urb_priv_t *)urb->hcpriv; 
    1392         epid = urb_priv->epid; 
    1393  
    1394         /* Set the urb status (synchronous unlink). */ 
    1395         urb->status = -ENOENT; 
    1396         urb_priv->urb_state = UNLINK; 
    1397  
    1398         if (usb_pipedevice(urb->pipe) == hc->rh.devnum) { 
    1399                 int ret; 
    1400                 ret = etrax_rh_unlink_urb(urb); 
    1401                 DBFEXIT; 
    1402                 restore_flags(flags); 
    1403                 return ret; 
    1404  
    1405         } else if (usb_pipetype(urb->pipe) == PIPE_BULK) { 
    1406  
    1407                 dbg_bulk("Unlink of bulk urb (0x%lx)", (unsigned long)urb); 
    1408  
    1409                 if (TxBulkEPList[epid].command & IO_MASK(USB_EP_command, enable)) { 
    1410                         /* The EP was enabled, disable it and wait. */ 
    1411                         TxBulkEPList[epid].command &= ~IO_MASK(USB_EP_command, enable); 
    1412  
    1413                         /* Ah, the luxury of busy-wait. */ 
    1414                         while (*R_DMA_CH8_SUB0_EP == virt_to_phys(&TxBulkEPList[epid])); 
    1415                 } 
    1416                 /* Kicking dummy list out of the party. */ 
    1417                 TxBulkEPList[epid].next = virt_to_phys(&TxBulkEPList[(epid + 1) % NBR_OF_EPIDS]); 
    1418  
    1419         } else if (usb_pipetype(urb->pipe) == PIPE_CONTROL) { 
    1420  
    1421                 dbg_ctrl("Unlink of ctrl urb (0x%lx)", (unsigned long)urb); 
    1422  
    1423                 if (TxCtrlEPList[epid].command & IO_MASK(USB_EP_command, enable)) { 
    1424                         /* The EP was enabled, disable it and wait. */ 
    1425                         TxCtrlEPList[epid].command &= ~IO_MASK(USB_EP_command, enable); 
    1426  
    1427                         /* Ah, the luxury of busy-wait. */ 
    1428                         while (*R_DMA_CH8_SUB1_EP == virt_to_phys(&TxCtrlEPList[epid])); 
    1429                 } 
    1430  
    1431         } else if (usb_pipetype(urb->pipe) == PIPE_INTERRUPT) { 
    1432  
    1433                 dbg_intr("Unlink of intr urb (0x%lx)", (unsigned long)urb); 
    1434  
    1435                 /* Separate function because it's a tad more complicated. */ 
    1436                 etrax_usb_unlink_intr_urb(urb); 
    1437  
    1438         } else if (usb_pipetype(urb->pipe) == PIPE_ISOCHRONOUS) { 
    1439  
    1440                 dbg_isoc("Unlink of isoc urb (0x%lx)", (unsigned long)urb); 
    1441  
    1442                 if (TxIsocEPList[epid].command & IO_MASK(USB_EP_command, enable)) { 
    1443                         /* The EP was enabled, disable it and wait. */ 
    1444                         TxIsocEPList[epid].command &= ~IO_MASK(USB_EP_command, enable); 
    1445  
    1446                         /* Ah, the luxury of busy-wait. */ 
    1447                         while (*R_DMA_CH8_SUB3_EP == virt_to_phys(&TxIsocEPList[epid])); 
    1448                 } 
    1449         } 
    1450  
    1451         /* Note that we need to remove the urb from the urb list *before* removing its SB 
    1452            descriptors. (This means that the isoc eof handler might get a null urb when we 