Changeset 33489
- Timestamp: 2012-09-20T01:50:12+02:00
- File: 1 edited
Legend:
- Unchanged lines are prefixed with a space
- Lines removed since r33487 are prefixed with "-"
- Lines added in r33489 are prefixed with "+"
trunk/target/linux/cns3xxx/patches-3.3/051-cns3xxx_gigabit.patch
r33487 → r33489

 --- /dev/null
 +++ b/drivers/net/ethernet/cavium/cns3xxx_eth.c
-@@ -0,0 +1,1270 @@
+@@ -0,0 +1,1298 @@
 +/*
 + * Cavium CNS3xxx Gigabit driver for Linux
…
 +#define TX_POOL_ALLOC_SIZE (sizeof(struct tx_desc) * TX_DESCS)
 +#define REGS_SIZE 336
-+#define MAX_MRU 9500
++#define MAX_MRU (1536 + SKB_DMA_REALIGN)
++#define CNS3XXX_MAX_MTU (1536)
 +
 +#define NAPI_WEIGHT 64
…
 +
 +/* Port Config Defines */
++#define PORT_BP_ENABLE 0x00020000
 +#define PORT_DISABLE 0x00040000
++#define PORT_LEARN_DIS 0x00080000
++#define PORT_BLOCK_STATE 0x00100000
++#define PORT_BLOCK_MODE 0x00200000
++
 +#define PROMISC_OFFSET 29
 +
…
 +#define QUEUE_THRESHOLD 0x000000f0
 +#define CLR_FS_STATE 0x80000000
++
++/* Interrupt Status Defines */
++#define MAC0_STATUS_CHANGE 0x00004000
++#define MAC1_STATUS_CHANGE 0x00008000
++#define MAC2_STATUS_CHANGE 0x00010000
++#define MAC0_RX_ERROR 0x00100000
++#define MAC1_RX_ERROR 0x00200000
++#define MAC2_RX_ERROR 0x00400000
 +
 +struct tx_desc
…
 +	u8 alignment[16]; /* for 32 byte alignment */
 +};
++
 +
 +struct switch_regs {
…
 +	u32 fs_desc_ptr1;
 +	u32 fs_desc_base_addr1;
++	u32 __res7[109];
++	u32 mac_counter0[13];
 +};
 +
…
 +	struct tx_desc *cur_addr;
 +	struct sk_buff *buff_tab[TX_DESCS];
++	unsigned int phys_tab[TX_DESCS];
 +	u32 free_index;
 +	u32 count_index;
…
 +	struct rx_desc *cur_addr;
 +	struct sk_buff *buff_tab[RX_DESCS];
++	unsigned int phys_tab[RX_DESCS];
 +	u32 cur_index;
 +	u32 alloc_index;
…
 +	struct _tx_ring *tx_ring;
 +	struct _rx_ring *rx_ring;
-+	u32 mtu;
 +};
 +
…
 +	int id; /* logical port ID */
 +	int speed, duplex;
-+	u32 mtu;
 +};
 +
 +static spinlock_t mdio_lock;
-+static spinlock_t tx_lock;
-+static spinlock_t stat_lock;
++static DEFINE_SPINLOCK(tx_lock);
 +static struct switch_regs __iomem *mdio_regs; /* mdio command and status only */
 +struct mii_bus *mdio_bus;
 +static int ports_open;
-+static struct port *switch_port_tab[3];
++static struct port *switch_port_tab[4];
 +static struct dma_pool *rx_dma_pool;
 +static struct dma_pool *tx_dma_pool;
…
 +}
 +
++static void enable_tx_dma(struct sw *sw)
++{
++	__raw_writel(0x1, &sw->regs->ts_dma_ctrl0);
++}
++
++static void enable_rx_dma(struct sw *sw)
++{
++	__raw_writel(0x1, &sw->regs->fs_dma_ctrl0);
++}
++
 +static void cns3xxx_adjust_link(struct net_device *dev)
 +{
…
 +}
 +
++irqreturn_t eth_stat_irq(int irq, void *pdev)
++{
++	struct net_device *dev = pdev;
++	struct sw *sw = netdev_priv(dev);
++	u32 cfg;
++	u32 stat = __raw_readl(&sw->regs->intr_stat);
++	__raw_writel(0xffffffff, &sw->regs->intr_stat);
++
++	if (stat & MAC2_RX_ERROR)
++		switch_port_tab[3]->netdev->stats.rx_dropped++;
++	if (stat & MAC1_RX_ERROR)
++		switch_port_tab[1]->netdev->stats.rx_dropped++;
++	if (stat & MAC0_RX_ERROR)
++		switch_port_tab[0]->netdev->stats.rx_dropped++;
++
++	if (stat & MAC0_STATUS_CHANGE) {
++		cfg = __raw_readl(&sw->regs->mac_cfg[0]);
++		switch_port_tab[0]->phydev->link = (cfg & 0x1);
++		switch_port_tab[0]->phydev->duplex = ((cfg >> 4) & 0x1);
++		if (((cfg >> 2) & 0x3) == 2)
++			switch_port_tab[0]->phydev->speed = 1000;
++		else if (((cfg >> 2) & 0x3) == 1)
++			switch_port_tab[0]->phydev->speed = 100;
++		else
++			switch_port_tab[0]->phydev->speed = 10;
++		cns3xxx_adjust_link(switch_port_tab[0]->netdev);
++	}
++
++	if (stat & MAC1_STATUS_CHANGE) {
++		cfg = __raw_readl(&sw->regs->mac_cfg[1]);
++		switch_port_tab[1]->phydev->link = (cfg & 0x1);
++		switch_port_tab[1]->phydev->duplex = ((cfg >> 4) & 0x1);
++		if (((cfg >> 2) & 0x3) == 2)
++			switch_port_tab[1]->phydev->speed = 1000;
++		else if (((cfg >> 2) & 0x3) == 1)
++			switch_port_tab[1]->phydev->speed = 100;
++		else
++			switch_port_tab[1]->phydev->speed = 10;
++		cns3xxx_adjust_link(switch_port_tab[1]->netdev);
++	}
++
++	if (stat & MAC2_STATUS_CHANGE) {
++		cfg = __raw_readl(&sw->regs->mac_cfg[3]);
++		switch_port_tab[3]->phydev->link = (cfg & 0x1);
++		switch_port_tab[3]->phydev->duplex = ((cfg >> 4) & 0x1);
++		if (((cfg >> 2) & 0x3) == 2)
++			switch_port_tab[3]->phydev->speed = 1000;
++		else if (((cfg >> 2) & 0x3) == 1)
++			switch_port_tab[3]->phydev->speed = 100;
++		else
++			switch_port_tab[3]->phydev->speed = 10;
++		cns3xxx_adjust_link(switch_port_tab[3]->netdev);
++	}
++
++	return (IRQ_HANDLED);
++}
++
++
 +static void cns3xxx_alloc_rx_buf(struct sw *sw, int received)
 +{
 +	struct _rx_ring *rx_ring = sw->rx_ring;
 +	unsigned int i = rx_ring->alloc_index;
-+	struct rx_desc *desc;
++	struct rx_desc *desc = &(rx_ring)->desc[i];
 +	struct sk_buff *skb;
-+	u32 mtu = sw->mtu;
-+
-+	rx_ring->alloc_count += received;
-+
-+	for (received = rx_ring->alloc_count; received > 0; received--) {
-+		desc = &(rx_ring)->desc[i];
-+
-+		if ((skb = dev_alloc_skb(mtu))) {
++	unsigned int phys;
++
++	for (received += rx_ring->alloc_count; received > 0; received--) {
++		if ((skb = dev_alloc_skb(MAX_MRU))) {
 +			if (SKB_DMA_REALIGN)
 +				skb_reserve(skb, SKB_DMA_REALIGN);
 +			skb_reserve(skb, NET_IP_ALIGN);
-+			desc->sdp = dma_map_single(NULL, skb->data,
-+					mtu, DMA_FROM_DEVICE);
-+			if (dma_mapping_error(NULL, desc->sdp)) {
++			phys = dma_map_single(NULL, skb->data,
++					CNS3XXX_MAX_MTU, DMA_FROM_DEVICE);
++			if (dma_mapping_error(NULL, phys)) {
 +				dev_kfree_skb(skb);
 +				/* Failed to map, better luck next time */
 +				goto out;;
 +			}
++			desc->sdp = phys;
 +		} else {
 +			/* Failed to allocate skb, try again next time */
…
 +		/* put the new buffer on RX-free queue */
 +		rx_ring->buff_tab[i] = skb;
-+
-+		if (++i == RX_DESCS) {
++		rx_ring->phys_tab[i] = phys;
++		if (i == RX_DESCS - 1) {
 +			i = 0;
 +			desc->config0 = END_OF_RING | FIRST_SEGMENT |
-+					LAST_SEGMENT | mtu;
++					LAST_SEGMENT | CNS3XXX_MAX_MTU;
++			desc = &(rx_ring)->desc[i];
 +		} else {
-+			desc->config0 = FIRST_SEGMENT | LAST_SEGMENT | mtu;
++			desc->config0 = FIRST_SEGMENT | LAST_SEGMENT | CNS3XXX_MAX_MTU;
++			i++;
++			desc++;
 +		}
 +	}
…
 +}
 +
-+static void update_tx_stats(struct sw *sw)
++static void clear_tx_desc(struct sw *sw)
 +{
 +	struct _tx_ring *tx_ring = sw->tx_ring;
 +	struct tx_desc *desc;
-+	struct tx_desc *next_desc;
-+	struct sk_buff *skb;
 +	int i;
 +	int index;
-+	int num_count;
-+
-+	spin_lock_bh(&stat_lock);
-+
-+	num_count = tx_ring->num_count;
-+
-+	if (!num_count) {
-+		spin_unlock_bh(&stat_lock);
-+		return;
-+	}
-+
-+	index = tx_ring->count_index;
-+	desc = &(tx_ring)->desc[index];
-+	for (i = 0; i < num_count; i++) {
-+		skb = tx_ring->buff_tab[index];
-+		if (desc->cown) {
-+			tx_ring->buff_tab[index] = 0;
-+			if (unlikely(++index == TX_DESCS)) index = 0;
-+			next_desc = &(tx_ring)->desc[index];
-+			prefetch(next_desc + 4);
-+			if (likely(skb)) {
-+				skb->dev->stats.tx_packets++;
-+				skb->dev->stats.tx_bytes += skb->len;
-+				dev_kfree_skb_any(skb);
-+			}
-+			desc = next_desc;
-+		} else {
-+			break;
-+		}
-+	}
-+	tx_ring->num_count -= i;
-+	tx_ring->count_index = index;
-+
-+	spin_unlock_bh(&stat_lock);
-+}
-+
-+static void clear_tx_desc(struct sw *sw)
-+{
-+	struct _tx_ring *tx_ring = sw->tx_ring;
-+	struct tx_desc *desc;
-+	struct tx_desc *next_desc;
-+	int i;
-+	int index;
-+	int num_used = tx_ring->num_used - tx_ring->num_count;
++	int num_used = tx_ring->num_used;
++	struct sk_buff *skb;
 +
 +	if (num_used < (TX_DESCS >> 1))
…
 +	for (i = 0; i < num_used; i++) {
 +		if (desc->cown) {
-+			if (unlikely(++index == TX_DESCS)) index = 0;
-+			next_desc = &(tx_ring)->desc[index];
-+			prefetch(next_desc);
-+			prefetch(next_desc + 4);
-+			if (likely(desc->sdp))
-+				dma_unmap_single(NULL, desc->sdp,
-+					desc->sdl, DMA_TO_DEVICE);
-+			desc = next_desc;
++			skb = tx_ring->buff_tab[index];
++			tx_ring->buff_tab[index] = 0;
++			if (skb)
++				dev_kfree_skb_any(skb);
++			dma_unmap_single(NULL, tx_ring->phys_tab[index],
++				desc->sdl, DMA_TO_DEVICE);
++			if (++index == TX_DESCS) {
++				index = 0;
++				desc = &(tx_ring)->desc[index];
++			} else {
++				desc++;
++			}
 +		} else {
 +			break;
…
 +	unsigned int length;
 +	unsigned int i = rx_ring->cur_index;
-+	struct rx_desc *next_desc;
 +	struct rx_desc *desc = &(rx_ring)->desc[i];
-+	int port_id;
 +
 +	while (desc->cown) {
…
 +		skb = rx_ring->buff_tab[i];
 +
-+		if (++i == RX_DESCS) i = 0;
-+		next_desc = &(rx_ring)->desc[i];
-+		prefetch(next_desc);
-+
-+		port_id = desc->sp;
-+		if (port_id == 4)
-+			dev = switch_port_tab[2]->netdev;
-+		else
-+			dev = switch_port_tab[port_id]->netdev;
++		dev = switch_port_tab[desc->sp]->netdev;
 +
 +		length = desc->sdl;
 +		/* process received frame */
-+		dma_unmap_single(&dev->dev, desc->sdp,
++		dma_unmap_single(&dev->dev, rx_ring->phys_tab[i],
 +				length, DMA_FROM_DEVICE);
 +
…
 +		dev->stats.rx_bytes += length;
 +
++		/* RX Hardware checksum offload */
 +		switch (desc->prot) {
 +			case 1:
…
 +
 +		received++;
-+		desc = next_desc;
++
++		if (++i == RX_DESCS) {
++			i = 0;
++			desc = &(rx_ring)->desc[i];
++		} else {
++			desc++;
++		}
 +	}
 +
 +	cns3xxx_alloc_rx_buf(sw, received);
++
 +	rx_ring->cur_index = i;
 +
…
 +		enable_irq(IRQ_CNS3XXX_SW_R0RXC);
 +	}
++
++	enable_rx_dma(sw);
 +
 +	return received;
…
 +	struct tx_desc *tx_desc;
 +	int index;
-+	int len = skb->len;
++	int len;
 +	char pmap = (1 << port->id);
++	unsigned int phys;
++	int nr_frags = skb_shinfo(skb)->nr_frags;
++	struct skb_frag_struct *frag;
++	unsigned int i;
 +
 +	if (pmap == 8)
 +		pmap = (1 << 4);
 +
-+	if (unlikely(len > sw->mtu)) {
++	if (skb->len > CNS3XXX_MAX_MTU) {
 +		dev_kfree_skb(skb);
 +		dev->stats.tx_errors++;
…
 +	}
 +
-+	update_tx_stats(sw);
-+
-+	spin_lock_bh(&tx_lock);
-+
-+	clear_tx_desc(sw);
-+
-+	if (unlikely(tx_ring->num_used == TX_DESCS)) {
-+		spin_unlock_bh(&tx_lock);
-+		return NETDEV_TX_BUSY;
++	spin_lock(&tx_lock);
++
++	if ((tx_ring->num_used + nr_frags) >= TX_DESCS) {
++		clear_tx_desc(sw);
++		if ((tx_ring->num_used + nr_frags) >= TX_DESCS) {
++			spin_unlock(&tx_lock);
++			return NETDEV_TX_BUSY;
++		}
 +	}
 +
 +	index = tx_ring->cur_index;
-+
-+	if (unlikely(++tx_ring->cur_index == TX_DESCS))
-+		tx_ring->cur_index = 0;
-+
-+	tx_ring->num_used++;
-+	tx_ring->num_count++;
-+
-+	spin_unlock_bh(&tx_lock);
-+
-+	tx_desc = &(tx_ring)->desc[index];
-+
-+	tx_desc->sdp = dma_map_single(NULL, skb->data, len,
-+			DMA_TO_DEVICE);
-+
-+	if (dma_mapping_error(NULL, tx_desc->sdp)) {
-+		dev_kfree_skb(skb);
-+		dev->stats.tx_errors++;
-+		return NETDEV_TX_OK;
-+	}
-+
-+	tx_desc->pmap = pmap;
-+	tx_ring->buff_tab[index] = skb;
-+
-+	if (index == TX_DESCS - 1) {
-+		tx_desc->config0 = END_OF_RING | FIRST_SEGMENT | LAST_SEGMENT |
-+				FORCE_ROUTE | IP_CHECKSUM | UDP_CHECKSUM |
-+				TCP_CHECKSUM | len;
++	tx_ring->cur_index = ((tx_ring->cur_index + nr_frags + 1) % TX_DESCS);
++
++	spin_unlock(&tx_lock);
++
++	if (!nr_frags) {
++		tx_desc = &(tx_ring)->desc[index];
++
++		len = skb->len;
++
++		phys = dma_map_single(NULL, skb->data, len,
++				DMA_TO_DEVICE);
++
++		tx_desc->sdp = phys;
++		tx_desc->pmap = pmap;
++		tx_ring->phys_tab[index] = phys;
++
++		tx_ring->buff_tab[index] = skb;
++		if (index == TX_DESCS - 1) {
++			tx_desc->config0 = END_OF_RING | FIRST_SEGMENT | LAST_SEGMENT |
++					FORCE_ROUTE | IP_CHECKSUM | UDP_CHECKSUM |
++					TCP_CHECKSUM | len;
++		} else {
++			tx_desc->config0 = FIRST_SEGMENT | LAST_SEGMENT |
++					FORCE_ROUTE | IP_CHECKSUM | UDP_CHECKSUM |
++					TCP_CHECKSUM | len;
++		}
 +	} else {
-+		tx_desc->config0 = FIRST_SEGMENT | LAST_SEGMENT |
-+				FORCE_ROUTE | IP_CHECKSUM | UDP_CHECKSUM |
-+				TCP_CHECKSUM | len;
-+	}
++		unsigned int config;
++
++		index = ((index + nr_frags) % TX_DESCS);
++		tx_desc = &(tx_ring)->desc[index];
++
++		/* fragments */
++		for (i = nr_frags; i > 0; i--) {
++			void *addr;
++
++			frag = &skb_shinfo(skb)->frags[i-1];
++			len = frag->size;
++
++			addr = page_address(skb_frag_page(frag)) +
++				frag->page_offset;
++			phys = dma_map_single(NULL, addr, len, DMA_TO_DEVICE);
++
++			tx_desc->sdp = phys;
++
++			tx_desc->pmap = pmap;
++			tx_ring->phys_tab[index] = phys;
++
++			config = FORCE_ROUTE | IP_CHECKSUM | UDP_CHECKSUM |
++				TCP_CHECKSUM | len;
++			if (i == nr_frags) {
++				config |= LAST_SEGMENT;
++				tx_ring->buff_tab[index] = skb;
++			}
++			if (index == TX_DESCS - 1)
++				config |= END_OF_RING;
++			tx_desc->config0 = config;
++
++			if (index == 0) {
++				index = TX_DESCS - 1;
++				tx_desc = &(tx_ring)->desc[index];
++			} else {
++				index--;
++				tx_desc--;
++			}
++		}
++
++		/* header */
++		len = skb->len - skb->data_len;
++
++		phys = dma_map_single(NULL, skb->data, len,
++				DMA_TO_DEVICE);
++
++		tx_desc->sdp = phys;
++		tx_desc->pmap = pmap;
++		tx_ring->phys_tab[index] = phys;
++
++		if (index == TX_DESCS - 1) {
++			tx_desc->config0 = END_OF_RING | FIRST_SEGMENT |
++					FORCE_ROUTE | IP_CHECKSUM | UDP_CHECKSUM |
++					TCP_CHECKSUM | len;
++		} else {
++			tx_desc->config0 = FIRST_SEGMENT |
++					FORCE_ROUTE | IP_CHECKSUM | UDP_CHECKSUM |
++					TCP_CHECKSUM | len;
++		}
++	}
++
++	mb();
++
++	spin_lock(&tx_lock);
++	tx_ring->num_used += nr_frags + 1;
++	spin_unlock(&tx_lock);
++
++	dev->stats.tx_packets++;
++	dev->stats.tx_bytes += skb->len;
++
++	enable_tx_dma(sw);
 +
 +	return NETDEV_TX_OK;
…
 +		struct rx_desc *desc = &(rx_ring)->desc[i];
 +		struct sk_buff *skb;
-+		if (!(skb = dev_alloc_skb(sw->mtu)))
++		if (!(skb = dev_alloc_skb(MAX_MRU)))
 +			return -ENOMEM;
 +		if (SKB_DMA_REALIGN)
 +			skb_reserve(skb, SKB_DMA_REALIGN);
 +		skb_reserve(skb, NET_IP_ALIGN);
-+		desc->sdl = sw->mtu;
++		desc->sdl = CNS3XXX_MAX_MTU;
 +		if (i == (RX_DESCS - 1))
 +			desc->eor = 1;
…
 +
 +		desc->sdp = dma_map_single(NULL, skb->data,
-+				sw->mtu, DMA_FROM_DEVICE);
++				CNS3XXX_MAX_MTU, DMA_FROM_DEVICE);
 +		if (dma_mapping_error(NULL, desc->sdp)) {
 +			return -EIO;
 +		}
 +		rx_ring->buff_tab[i] = skb;
++		rx_ring->phys_tab[i] = desc->sdp;
 +		desc->cown = 0;
 +	}
…
 +			dma_unmap_single(NULL,
 +					desc->sdp,
-+					sw->mtu, DMA_FROM_DEVICE);
++					CNS3XXX_MAX_MTU, DMA_FROM_DEVICE);
 +			dev_kfree_skb(skb);
 +		}
…
 +	if (!ports_open) {
 +		request_irq(IRQ_CNS3XXX_SW_R0RXC, eth_rx_irq, IRQF_SHARED, "gig_switch", napi_dev);
++		request_irq(IRQ_CNS3XXX_SW_STATUS, eth_stat_irq, IRQF_SHARED, "gig_stat", napi_dev);
 +		napi_enable(&sw->napi);
 +		netif_start_queue(napi_dev);
-+		//enable_irq(IRQ_CNS3XXX_SW_R0RXC);
++
++		__raw_writel(~(MAC0_STATUS_CHANGE | MAC1_STATUS_CHANGE | MAC2_STATUS_CHANGE |
++				MAC0_RX_ERROR | MAC1_RX_ERROR | MAC2_RX_ERROR), &sw->regs->intr_mask);
 +
 +		temp = __raw_readl(&sw->regs->mac_cfg[2]);
…
 +		__raw_writel(temp, &sw->regs->dma_auto_poll_cfg);
 +
-+		__raw_writel((TS_POLL_EN | FS_POLL_EN), &sw->regs->dma_auto_poll_cfg);
++		enable_rx_dma(sw);
 +	}
 +	temp = __raw_readl(&sw->regs->mac_cfg[port->id]);
…
 +		disable_irq(IRQ_CNS3XXX_SW_R0RXC);
 +		free_irq(IRQ_CNS3XXX_SW_R0RXC, napi_dev);
++		disable_irq(IRQ_CNS3XXX_SW_STATUS);
++		free_irq(IRQ_CNS3XXX_SW_STATUS, napi_dev);
 +		napi_disable(&sw->napi);
 +		netif_stop_queue(napi_dev);
…
 +}
 +
-+static int cns3xxx_change_mtu(struct net_device *netdev, int new_mtu)
-+{
-+	struct port *port = netdev_priv(netdev);
-+	struct sw *sw = port->sw;
-+	u32 temp;
-+	int i;
-+	struct _rx_ring *rx_ring = sw->rx_ring;
-+	struct rx_desc *desc;
-+	struct sk_buff *skb;
-+
-+	if (new_mtu > MAX_MRU)
-+		return -EINVAL;
-+
-+	netdev->mtu = new_mtu;
-+
-+	new_mtu += 36 + SKB_DMA_REALIGN;
-+	port->mtu = new_mtu;
-+
-+	new_mtu = 0;
-+	for (i = 0; i < 3; i++) {
-+		if (switch_port_tab[i]) {
-+			if (switch_port_tab[i]->mtu > new_mtu)
-+				new_mtu = switch_port_tab[i]->mtu;
-+		}
-+	}
-+
-+
-+	if (new_mtu == sw->mtu)
-+		return 0;
-+
-+	disable_irq(IRQ_CNS3XXX_SW_R0RXC);
-+
-+	sw->mtu = new_mtu;
-+
-+	/* Disable DMA */
-+	__raw_writel(TS_SUSPEND | FS_SUSPEND, &sw->regs->dma_auto_poll_cfg);
-+
-+	for (i = 0; i < RX_DESCS; i++) {
-+		desc = &(rx_ring)->desc[i];
-+		/* Check if we own it, if we do, it will get set correctly
-+		 * when it is re-used */
-+		if (!desc->cown) {
-+			skb = rx_ring->buff_tab[i];
-+			dma_unmap_single(NULL, desc->sdp, desc->sdl,
-+					DMA_FROM_DEVICE);
-+			dev_kfree_skb(skb);
-+
-+			if ((skb = dev_alloc_skb(new_mtu))) {
-+				if (SKB_DMA_REALIGN)
-+					skb_reserve(skb, SKB_DMA_REALIGN);
-+				skb_reserve(skb, NET_IP_ALIGN);
-+				desc->sdp = dma_map_single(NULL, skb->data,
-+						new_mtu, DMA_FROM_DEVICE);
-+				if (dma_mapping_error(NULL, desc->sdp)) {
-+					dev_kfree_skb(skb);
-+					skb = NULL;
-+				}
-+			}
-+
-+			/* put the new buffer on RX-free queue */
-+			rx_ring->buff_tab[i] = skb;
-+
-+			if (i == RX_DESCS - 1)
-+				desc->config0 = END_OF_RING | FIRST_SEGMENT |
-+						LAST_SEGMENT | new_mtu;
-+			else
-+				desc->config0 = FIRST_SEGMENT |
-+						LAST_SEGMENT | new_mtu;
-+		}
-+	}
-+
-+	/* Re-ENABLE DMA */
-+	temp = __raw_readl(&sw->regs->dma_auto_poll_cfg);
-+	temp &= ~(TS_SUSPEND | FS_SUSPEND);
-+	__raw_writel(temp, &sw->regs->dma_auto_poll_cfg);
-+
-+	__raw_writel((TS_POLL_EN | FS_POLL_EN), &sw->regs->dma_auto_poll_cfg);
-+
-+	enable_irq(IRQ_CNS3XXX_SW_R0RXC);
-+
-+	return 0;
-+}
-+
 +static const struct net_device_ops cns3xxx_netdev_ops = {
 +	.ndo_open = eth_open,
…
 +	.ndo_set_rx_mode = eth_rx_mode,
 +	.ndo_do_ioctl = eth_ioctl,
-+	.ndo_change_mtu = cns3xxx_change_mtu,
++	.ndo_change_mtu = eth_change_mtu,
 +	.ndo_set_mac_address = eth_set_mac,
 +	.ndo_validate_addr = eth_validate_addr,
…
 +	u32 temp;
 +
-+	spin_lock_init(&tx_lock);
-+	spin_lock_init(&stat_lock);
-+
 +	if (!(napi_dev = alloc_etherdev(sizeof(struct sw))))
 +		return -ENOMEM;
 +	strcpy(napi_dev->name, "switch%d");
++	napi_dev->features = NETIF_F_IP_CSUM | NETIF_F_SG;
 +
 +	SET_NETDEV_DEV(napi_dev, &pdev->dev);
…
 +	}
 +
-+	sw->mtu = 1536 + SKB_DMA_REALIGN;
-+
 +	for (i = 0; i < 4; i++) {
 +		temp = __raw_readl(&sw->regs->mac_cfg[i]);
-+		temp |= (PORT_DISABLE) | 0x80000000;
++		temp |= (PORT_DISABLE);
 +		__raw_writel(temp, &sw->regs->mac_cfg[i]);
 +	}
…
 +	__raw_writel(temp, &sw->regs->vlan_cfg);
 +
-+	__raw_writel(UNKNOWN_VLAN_TO_CPU | ACCEPT_CRC_PACKET |
++	__raw_writel(UNKNOWN_VLAN_TO_CPU |
 +		CRC_STRIPPING, &sw->regs->mac_glob_cfg);
 +
…
 +	}
 +
-+	//SET_NETDEV_DEV(dev, &pdev->dev);
 +	port = netdev_priv(dev);
 +	port->netdev = dev;
…
 +	port->id = i;
 +	port->sw = sw;
-+	port->mtu = sw->mtu;
 +
 +	temp = __raw_readl(&sw->regs->mac_cfg[port->id]);
-+	temp |= (PORT_DISABLE);
++	temp |= (PORT_DISABLE | PORT_BLOCK_STATE | PORT_LEARN_DIS);
 +	__raw_writel(temp, &sw->regs->mac_cfg[port->id]);
 +
…
 +	dev->ethtool_ops = &cns3xxx_ethtool_ops;
 +	dev->tx_queue_len = 1000;
-+	dev->features = NETIF_F_HW_CSUM;
-+
-+	dev->vlan_features = NETIF_F_HW_CSUM;
-+
-+	switch_port_tab[i] = port;
++	dev->features = NETIF_F_IP_CSUM | NETIF_F_SG;
++
++	switch_port_tab[port->id] = port;
 +	memcpy(dev->dev_addr, &plat->hwaddr[i], ETH_ALEN);
 +
…
 +			PHY_INTERFACE_MODE_RGMII);
 +	if ((err = IS_ERR(port->phydev))) {
-+		switch_port_tab[i] = 0;
++		switch_port_tab[port->id] = 0;
 +		free_netdev(dev);
 +		goto free_ports;
 +	}
 +
-+	port->phydev->irq = PHY_POLL;
++	port->phydev->irq = PHY_IGNORE_INTERRUPT;
 +
 +	if ((err = register_netdev(dev))) {
 +		phy_disconnect(port->phydev);
-+		switch_port_tab[i] = 0;
++		switch_port_tab[port->id] = 0;
 +		free_netdev(dev);
 +		goto free_ports;
…
 +	destroy_rings(sw);
 +
-+	for (i = 2; i >= 0; i--) {
++	for (i = 3; i >= 0; i--) {
 +		if (switch_port_tab[i]) {
 +			struct port *port = switch_port_tab[i];