qla3xxx: Add support for Qlogic 4032 chip.
Qlogic 4032 chip is an incremental change from the 4022.

Signed-off-by: Ron Mercer <ron.mercer@qlogic.com>
Signed-off-by: Jeff Garzik <jeff@garzik.org>
commit bd36b0ac5d
parent 83d98b401c
2 changed files with 379 additions and 72 deletions
drivers/net/qla3xxx.c | 321  (normal file → executable file)
@@ -22,6 +22,7 @@
 #include <linux/errno.h>
 #include <linux/ioport.h>
 #include <linux/ip.h>
+#include <linux/in.h>
 #include <linux/if_arp.h>
 #include <linux/if_ether.h>
 #include <linux/netdevice.h>
@@ -63,6 +64,7 @@ MODULE_PARM_DESC(msi, "Turn on Message Signaled Interrupts.");
 
 static struct pci_device_id ql3xxx_pci_tbl[] __devinitdata = {
     {PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, QL3022_DEVICE_ID)},
+    {PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, QL3032_DEVICE_ID)},
     /* required last entry */
     {0,}
 };
@@ -1475,6 +1477,10 @@ static int ql_mii_setup(struct ql3_adapter *qdev)
             2) << 7))
         return -1;
 
+    if (qdev->device_id == QL3032_DEVICE_ID)
+        ql_write_page0_reg(qdev,
+            &port_regs->macMIIMgmtControlReg, 0x0f00000);
+
     /* Divide 125MHz clock by 28 to meet PHY timing requirements */
     reg = MAC_MII_CONTROL_CLK_SEL_DIV28;
 
@@ -1706,18 +1712,42 @@ static void ql_process_mac_tx_intr(struct ql3_adapter *qdev,
                    struct ob_mac_iocb_rsp *mac_rsp)
 {
     struct ql_tx_buf_cb *tx_cb;
+    int i;
 
     tx_cb = &qdev->tx_buf[mac_rsp->transaction_id];
     pci_unmap_single(qdev->pdev,
-             pci_unmap_addr(tx_cb, mapaddr),
-             pci_unmap_len(tx_cb, maplen), PCI_DMA_TODEVICE);
-    dev_kfree_skb_irq(tx_cb->skb);
+             pci_unmap_addr(&tx_cb->map[0], mapaddr),
+             pci_unmap_len(&tx_cb->map[0], maplen),
+             PCI_DMA_TODEVICE);
+    tx_cb->seg_count--;
+    if (tx_cb->seg_count) {
+        for (i = 1; i < tx_cb->seg_count; i++) {
+            pci_unmap_page(qdev->pdev,
+                       pci_unmap_addr(&tx_cb->map[i],
+                              mapaddr),
+                       pci_unmap_len(&tx_cb->map[i], maplen),
+                       PCI_DMA_TODEVICE);
+        }
+    }
     qdev->stats.tx_packets++;
     qdev->stats.tx_bytes += tx_cb->skb->len;
+    dev_kfree_skb_irq(tx_cb->skb);
     tx_cb->skb = NULL;
     atomic_inc(&qdev->tx_count);
 }
 
+/*
+ * The difference between 3022 and 3032 for inbound completions:
+ * 3022 uses two buffers per completion. The first buffer contains
+ * (some) header info, the second the remainder of the headers plus
+ * the data. For this chip we reserve some space at the top of the
+ * receive buffer so that the header info in buffer one can be
+ * prepended to the buffer two. Buffer two is the sent up while
+ * buffer one is returned to the hardware to be reused.
+ * 3032 receives all of it's data and headers in one buffer for a
+ * simpler process. 3032 also supports checksum verification as
+ * can be seen in ql_process_macip_rx_intr().
+ */
 static void ql_process_mac_rx_intr(struct ql3_adapter *qdev,
                    struct ib_mac_iocb_rsp *ib_mac_rsp_ptr)
 {
@@ -1740,14 +1770,17 @@ static void ql_process_mac_rx_intr(struct ql3_adapter *qdev,
     qdev->last_rsp_offset = qdev->small_buf_phy_addr_low + offset;
     qdev->small_buf_release_cnt++;
 
-    /* start of first buffer */
-    lrg_buf_phy_addr_low = le32_to_cpu(*curr_ial_ptr);
-    lrg_buf_cb1 = &qdev->lrg_buf[qdev->lrg_buf_index];
-    qdev->lrg_buf_release_cnt++;
-    if (++qdev->lrg_buf_index == NUM_LARGE_BUFFERS)
-        qdev->lrg_buf_index = 0;
-    curr_ial_ptr++;    /* 64-bit pointers require two incs. */
-    curr_ial_ptr++;
+    if (qdev->device_id == QL3022_DEVICE_ID) {
+        /* start of first buffer (3022 only) */
+        lrg_buf_phy_addr_low = le32_to_cpu(*curr_ial_ptr);
+        lrg_buf_cb1 = &qdev->lrg_buf[qdev->lrg_buf_index];
+        qdev->lrg_buf_release_cnt++;
+        if (++qdev->lrg_buf_index == NUM_LARGE_BUFFERS) {
+            qdev->lrg_buf_index = 0;
+        }
+        curr_ial_ptr++;    /* 64-bit pointers require two incs. */
+        curr_ial_ptr++;
+    }
 
     /* start of second buffer */
     lrg_buf_phy_addr_low = le32_to_cpu(*curr_ial_ptr);
@@ -1778,6 +1811,7 @@ static void ql_process_mac_rx_intr(struct ql3_adapter *qdev,
     qdev->ndev->last_rx = jiffies;
     lrg_buf_cb2->skb = NULL;
 
-    ql_release_to_lrg_buf_free_list(qdev, lrg_buf_cb1);
+    if (qdev->device_id == QL3022_DEVICE_ID)
+        ql_release_to_lrg_buf_free_list(qdev, lrg_buf_cb1);
     ql_release_to_lrg_buf_free_list(qdev, lrg_buf_cb2);
 }
@@ -1790,7 +1824,7 @@ static void ql_process_macip_rx_intr(struct ql3_adapter *qdev,
     struct ql_rcv_buf_cb *lrg_buf_cb1 = NULL;
     struct ql_rcv_buf_cb *lrg_buf_cb2 = NULL;
     u32 *curr_ial_ptr;
-    struct sk_buff *skb1, *skb2;
+    struct sk_buff *skb1 = NULL, *skb2;
     struct net_device *ndev = qdev->ndev;
     u16 length = le16_to_cpu(ib_ip_rsp_ptr->length);
     u16 size = 0;
@@ -1806,16 +1840,20 @@ static void ql_process_macip_rx_intr(struct ql3_adapter *qdev,
     qdev->last_rsp_offset = qdev->small_buf_phy_addr_low + offset;
     qdev->small_buf_release_cnt++;
 
-    /* start of first buffer */
-    lrg_buf_phy_addr_low = le32_to_cpu(*curr_ial_ptr);
-    lrg_buf_cb1 = &qdev->lrg_buf[qdev->lrg_buf_index];
-
-    qdev->lrg_buf_release_cnt++;
-    if (++qdev->lrg_buf_index == NUM_LARGE_BUFFERS)
-        qdev->lrg_buf_index = 0;
-    skb1 = lrg_buf_cb1->skb;
-    curr_ial_ptr++;    /* 64-bit pointers require two incs. */
-    curr_ial_ptr++;
+    if (qdev->device_id == QL3022_DEVICE_ID) {
+        /* start of first buffer on 3022 */
+        lrg_buf_phy_addr_low = le32_to_cpu(*curr_ial_ptr);
+        lrg_buf_cb1 = &qdev->lrg_buf[qdev->lrg_buf_index];
+        qdev->lrg_buf_release_cnt++;
+        if (++qdev->lrg_buf_index == NUM_LARGE_BUFFERS)
+            qdev->lrg_buf_index = 0;
+        skb1 = lrg_buf_cb1->skb;
+        curr_ial_ptr++;    /* 64-bit pointers require two incs. */
+        curr_ial_ptr++;
+        size = ETH_HLEN;
+        if (*((u16 *) skb1->data) != 0xFFFF)
+            size += VLAN_ETH_HLEN - ETH_HLEN;
+    }
 
     /* start of second buffer */
     lrg_buf_phy_addr_low = le32_to_cpu(*curr_ial_ptr);
@@ -1825,18 +1863,6 @@ static void ql_process_macip_rx_intr(struct ql3_adapter *qdev,
     if (++qdev->lrg_buf_index == NUM_LARGE_BUFFERS)
         qdev->lrg_buf_index = 0;
-
-    qdev->stats.rx_packets++;
-    qdev->stats.rx_bytes += length;
-
-    /*
-     * Copy the ethhdr from first buffer to second. This
-     * is necessary for IP completions.
-     */
-    if (*((u16 *) skb1->data) != 0xFFFF)
-        size = VLAN_ETH_HLEN;
-    else
-        size = ETH_HLEN;
 
     skb_put(skb2, length);    /* Just the second buffer length here. */
     pci_unmap_single(qdev->pdev,
              pci_unmap_addr(lrg_buf_cb2, mapaddr),
@@ -1844,15 +1870,39 @@ static void ql_process_macip_rx_intr(struct ql3_adapter *qdev,
              PCI_DMA_FROMDEVICE);
     prefetch(skb2->data);
 
-    memcpy(skb_push(skb2, size), skb1->data + VLAN_ID_LEN, size);
-    skb2->dev = qdev->ndev;
     skb2->ip_summed = CHECKSUM_NONE;
+    if (qdev->device_id == QL3022_DEVICE_ID) {
+        /*
+         * Copy the ethhdr from first buffer to second. This
+         * is necessary for 3022 IP completions.
+         */
+        memcpy(skb_push(skb2, size), skb1->data + VLAN_ID_LEN, size);
+    } else {
+        u16 checksum = le16_to_cpu(ib_ip_rsp_ptr->checksum);
+        if (checksum &
+            (IB_IP_IOCB_RSP_3032_ICE |
+             IB_IP_IOCB_RSP_3032_CE |
+             IB_IP_IOCB_RSP_3032_NUC)) {
+            printk(KERN_ERR
+                   "%s: Bad checksum for this %s packet, checksum = %x.\n",
+                   __func__,
+                   ((checksum &
+                    IB_IP_IOCB_RSP_3032_TCP) ? "TCP" :
+                    "UDP"),checksum);
+        } else if (checksum & IB_IP_IOCB_RSP_3032_TCP) {
+            skb2->ip_summed = CHECKSUM_UNNECESSARY;
+        }
+    }
+    skb2->dev = qdev->ndev;
     skb2->protocol = eth_type_trans(skb2, qdev->ndev);
 
     netif_receive_skb(skb2);
+    qdev->stats.rx_packets++;
+    qdev->stats.rx_bytes += length;
     ndev->last_rx = jiffies;
     lrg_buf_cb2->skb = NULL;
 
+    if (qdev->device_id == QL3022_DEVICE_ID)
     ql_release_to_lrg_buf_free_list(qdev, lrg_buf_cb1);
     ql_release_to_lrg_buf_free_list(qdev, lrg_buf_cb2);
 }
@@ -1880,12 +1930,14 @@ static int ql_tx_rx_clean(struct ql3_adapter *qdev,
             break;
 
         case OPCODE_IB_MAC_IOCB:
+        case OPCODE_IB_3032_MAC_IOCB:
             ql_process_mac_rx_intr(qdev, (struct ib_mac_iocb_rsp *)
                            net_rsp);
             (*rx_cleaned)++;
             break;
 
         case OPCODE_IB_IP_IOCB:
+        case OPCODE_IB_3032_IP_IOCB:
             ql_process_macip_rx_intr(qdev, (struct ib_ip_iocb_rsp *)
                          net_rsp);
             (*rx_cleaned)++;
@@ -2032,13 +2084,96 @@ static irqreturn_t ql3xxx_isr(int irq, void *dev_id)
     return IRQ_RETVAL(handled);
 }
 
+/*
+ * Get the total number of segments needed for the
+ * given number of fragments. This is necessary because
+ * outbound address lists (OAL) will be used when more than
+ * two frags are given. Each address list has 5 addr/len
+ * pairs. The 5th pair in each AOL is used to point to
+ * the next AOL if more frags are coming.
+ * That is why the frags:segment count ratio is not linear.
+ */
+static int ql_get_seg_count(unsigned short frags)
+{
+    switch(frags) {
+    case 0: return 1;    /* just the skb->data seg */
+    case 1: return 2;    /* skb->data + 1 frag */
+    case 2: return 3;    /* skb->data + 2 frags */
+    case 3: return 5;    /* skb->data + 1 frag + 1 AOL containting 2 frags */
+    case 4: return 6;
+    case 5: return 7;
+    case 6: return 8;
+    case 7: return 10;
+    case 8: return 11;
+    case 9: return 12;
+    case 10: return 13;
+    case 11: return 15;
+    case 12: return 16;
+    case 13: return 17;
+    case 14: return 18;
+    case 15: return 20;
+    case 16: return 21;
+    case 17: return 22;
+    case 18: return 23;
+    }
+    return -1;
+}
+
+static void ql_hw_csum_setup(struct sk_buff *skb,
+                 struct ob_mac_iocb_req *mac_iocb_ptr)
+{
+    struct ethhdr *eth;
+    struct iphdr *ip = NULL;
+    u8 offset = ETH_HLEN;
+
+    eth = (struct ethhdr *)(skb->data);
+
+    if (eth->h_proto == __constant_htons(ETH_P_IP)) {
+        ip = (struct iphdr *)&skb->data[ETH_HLEN];
+    } else if (eth->h_proto == htons(ETH_P_8021Q) &&
+           ((struct vlan_ethhdr *)skb->data)->
+           h_vlan_encapsulated_proto == __constant_htons(ETH_P_IP)) {
+        ip = (struct iphdr *)&skb->data[VLAN_ETH_HLEN];
+        offset = VLAN_ETH_HLEN;
+    }
+
+    if (ip) {
+        if (ip->protocol == IPPROTO_TCP) {
+            mac_iocb_ptr->flags1 |= OB_3032MAC_IOCB_REQ_TC;
+            mac_iocb_ptr->ip_hdr_off = offset;
+            mac_iocb_ptr->ip_hdr_len = ip->ihl;
+        } else if (ip->protocol == IPPROTO_UDP) {
+            mac_iocb_ptr->flags1 |= OB_3032MAC_IOCB_REQ_UC;
+            mac_iocb_ptr->ip_hdr_off = offset;
+            mac_iocb_ptr->ip_hdr_len = ip->ihl;
+        }
+    }
+}
+
+/*
+ * The difference between 3022 and 3032 sends:
+ * 3022 only supports a simple single segment transmission.
+ * 3032 supports checksumming and scatter/gather lists (fragments).
+ * The 3032 supports sglists by using the 3 addr/len pairs (ALP)
+ * in the IOCB plus a chain of outbound address lists (OAL) that
+ * each contain 5 ALPs. The last ALP of the IOCB (3rd) or OAL (5th)
+ * will used to point to an OAL when more ALP entries are required.
+ * The IOCB is always the top of the chain followed by one or more
+ * OALs (when necessary).
+ */
 static int ql3xxx_send(struct sk_buff *skb, struct net_device *ndev)
 {
     struct ql3_adapter *qdev = (struct ql3_adapter *)netdev_priv(ndev);
     struct ql3xxx_port_registers __iomem *port_regs = qdev->mem_map_registers;
     struct ql_tx_buf_cb *tx_cb;
+    u32 tot_len = skb->len;
+    struct oal *oal;
+    struct oal_entry *oal_entry;
+    int len;
     struct ob_mac_iocb_req *mac_iocb_ptr;
     u64 map;
+    int seg_cnt, seg = 0;
+    int frag_cnt = (int)skb_shinfo(skb)->nr_frags;
 
     if (unlikely(atomic_read(&qdev->tx_count) < 2)) {
         if (!netif_queue_stopped(ndev))
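[Editor's note, not part of the commit] The lookup table in ql_get_seg_count() is a consequence of the OAL layout described in the comment above it: the IOCB carries 3 addr/len pairs and each OAL carries 5, with the last pair turned into a continuation pointer whenever more entries still have to be placed. The standalone sketch below (the function name and the main() harness are mine, not the driver's) reproduces the same frags-to-segments mapping from that layout and prints it next to the driver's table.

#include <stdio.h>

static int seg_count_from_layout(int frags)
{
    int data = frags + 1;  /* skb->data plus one entry per frag */
    int segs = data;       /* each data entry is one segment */
    int left;

    if (data <= 3)         /* everything fits in the IOCB's 3 pairs */
        return segs;

    segs++;                /* 3rd IOCB pair becomes a continuation */
    left = data - 2;       /* data entries still to place in OALs */
    while (left > 5) {     /* non-final OAL: 4 data + 1 continuation */
        segs++;
        left -= 4;
    }
    return segs;
}

int main(void)
{
    /* values returned by ql_get_seg_count() for frags 0..18 */
    static const int table[] = { 1, 2, 3, 5, 6, 7, 8, 10, 11, 12,
                                 13, 15, 16, 17, 18, 20, 21, 22, 23 };
    int f;

    for (f = 0; f <= 18; f++)
        printf("frags=%2d  computed=%2d  driver=%2d\n",
               f, seg_count_from_layout(f), table[f]);
    return 0;
}

Running it shows the two columns agree for every supported fragment count, which is why the ratio is "not linear": each extra OAL costs one pair that cannot hold data.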
@@ -2046,21 +2181,79 @@ static int ql3xxx_send(struct sk_buff *skb, struct net_device *ndev)
         return NETDEV_TX_BUSY;
     }
     tx_cb = &qdev->tx_buf[qdev->req_producer_index] ;
+    seg_cnt = tx_cb->seg_count = ql_get_seg_count((skb_shinfo(skb)->nr_frags));
+    if(seg_cnt == -1) {
+        printk(KERN_ERR PFX"%s: invalid segment count!\n",__func__);
+        return NETDEV_TX_OK;
+
+    }
     mac_iocb_ptr = tx_cb->queue_entry;
     memset((void *)mac_iocb_ptr, 0, sizeof(struct ob_mac_iocb_req));
     mac_iocb_ptr->opcode = qdev->mac_ob_opcode;
     mac_iocb_ptr->flags |= qdev->mb_bit_mask;
     mac_iocb_ptr->transaction_id = qdev->req_producer_index;
-    mac_iocb_ptr->data_len = cpu_to_le16((u16) skb->len);
+    mac_iocb_ptr->data_len = cpu_to_le16((u16) tot_len);
     tx_cb->skb = skb;
-    map = pci_map_single(qdev->pdev, skb->data, skb->len, PCI_DMA_TODEVICE);
-    mac_iocb_ptr->buf_addr0_low = cpu_to_le32(LS_64BITS(map));
-    mac_iocb_ptr->buf_addr0_high = cpu_to_le32(MS_64BITS(map));
-    mac_iocb_ptr->buf_0_len = cpu_to_le32(skb->len | OB_MAC_IOCB_REQ_E);
-    pci_unmap_addr_set(tx_cb, mapaddr, map);
-    pci_unmap_len_set(tx_cb, maplen, skb->len);
-    atomic_dec(&qdev->tx_count);
-
+    if (skb->ip_summed == CHECKSUM_PARTIAL)
+        ql_hw_csum_setup(skb, mac_iocb_ptr);
+    len = skb_headlen(skb);
+    map = pci_map_single(qdev->pdev, skb->data, len, PCI_DMA_TODEVICE);
+    oal_entry = (struct oal_entry *)&mac_iocb_ptr->buf_addr0_low;
+    oal_entry->dma_lo = cpu_to_le32(LS_64BITS(map));
+    oal_entry->dma_hi = cpu_to_le32(MS_64BITS(map));
+    oal_entry->len = cpu_to_le32(len);
+    pci_unmap_addr_set(&tx_cb->map[seg], mapaddr, map);
+    pci_unmap_len_set(&tx_cb->map[seg], maplen, len);
+    seg++;
+
+    if (!skb_shinfo(skb)->nr_frags) {
+        /* Terminate the last segment. */
+        oal_entry->len =
+            cpu_to_le32(le32_to_cpu(oal_entry->len) | OAL_LAST_ENTRY);
+    } else {
+        int i;
+        oal = tx_cb->oal;
+        for (i=0; i<frag_cnt; i++,seg++) {
+            skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
+            oal_entry++;
+            if ((seg == 2 && seg_cnt > 3) ||    /* Check for continuation */
+                (seg == 7 && seg_cnt > 8) ||    /* requirements. It's strange */
+                (seg == 12 && seg_cnt > 13) ||  /* but necessary. */
+                (seg == 17 && seg_cnt > 18)) {
+                /* Continuation entry points to outbound address list. */
+                map = pci_map_single(qdev->pdev, oal,
+                             sizeof(struct oal),
+                             PCI_DMA_TODEVICE);
+                oal_entry->dma_lo = cpu_to_le32(LS_64BITS(map));
+                oal_entry->dma_hi = cpu_to_le32(MS_64BITS(map));
+                oal_entry->len =
+                    cpu_to_le32(sizeof(struct oal) |
+                        OAL_CONT_ENTRY);
+                pci_unmap_addr_set(&tx_cb->map[seg], mapaddr,
+                           map);
+                pci_unmap_len_set(&tx_cb->map[seg], maplen,
+                          len);
+                oal_entry = (struct oal_entry *)oal;
+                oal++;
+                seg++;
+            }
+
+            map =
+                pci_map_page(qdev->pdev, frag->page,
+                         frag->page_offset, frag->size,
+                         PCI_DMA_TODEVICE);
+            oal_entry->dma_lo = cpu_to_le32(LS_64BITS(map));
+            oal_entry->dma_hi = cpu_to_le32(MS_64BITS(map));
+            oal_entry->len = cpu_to_le32(frag->size);
+            pci_unmap_addr_set(&tx_cb->map[seg], mapaddr, map);
+            pci_unmap_len_set(&tx_cb->map[seg], maplen,
+                      frag->size);
+        }
+        /* Terminate the last segment. */
+        oal_entry->len =
+            cpu_to_le32(le32_to_cpu(oal_entry->len) | OAL_LAST_ENTRY);
+    }
+    wmb();
     qdev->req_producer_index++;
     if (qdev->req_producer_index == NUM_REQ_Q_ENTRIES)
         qdev->req_producer_index = 0;
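[Editor's note, not part of the commit] The hard-coded tests in the loop above, (seg == 2 && seg_cnt > 3), (seg == 7 && seg_cnt > 8), (seg == 12 && seg_cnt > 13) and (seg == 17 && seg_cnt > 18), mark the points where the current addr/len pair has to become a pointer to the next OAL: slot 2 is the last pair of the IOCB and every 5th slot after that is the last pair of an OAL, and a continuation is only needed if more entries still follow. The short sketch below (helper name and harness are illustrative, not driver code) states that rule in closed form and lists the continuation slots for each possible segment count.

#include <stdio.h>
#include <stdbool.h>

static bool needs_continuation(int seg, int seg_cnt)
{
    /* last pair of the IOCB (seg 2) or of an OAL (seg 7, 12, 17, ...) */
    bool last_pair = (seg >= 2) && ((seg - 2) % 5 == 0);

    /* ...and only when entries still remain to be placed */
    return last_pair && (seg_cnt > seg + 1);
}

int main(void)
{
    int seg_cnt, seg;

    for (seg_cnt = 1; seg_cnt <= 23; seg_cnt++) {
        printf("seg_cnt=%2d continuation slots:", seg_cnt);
        for (seg = 0; seg < seg_cnt; seg++)
            if (needs_continuation(seg, seg_cnt))
                printf(" %d", seg);
        printf("\n");
    }
    return 0;
}

For the segment counts ql_get_seg_count() can return (at most 23), the rule fires exactly at seg 2, 7, 12 and 17, matching the four explicit checks in ql3xxx_send().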
@@ -2074,8 +2267,10 @@ static int ql3xxx_send(struct sk_buff *skb, struct net_device *ndev)
     printk(KERN_DEBUG PFX "%s: tx queued, slot %d, len %d\n",
            ndev->name, qdev->req_producer_index, skb->len);
 
+    atomic_dec(&qdev->tx_count);
     return NETDEV_TX_OK;
 }
 
 static int ql_alloc_net_req_rsp_queues(struct ql3_adapter *qdev)
 {
     qdev->req_q_size =
@@ -2359,7 +2554,22 @@ static int ql_alloc_large_buffers(struct ql3_adapter *qdev)
     return 0;
 }
 
-static void ql_create_send_free_list(struct ql3_adapter *qdev)
+static void ql_free_send_free_list(struct ql3_adapter *qdev)
+{
+    struct ql_tx_buf_cb *tx_cb;
+    int i;
+
+    tx_cb = &qdev->tx_buf[0];
+    for (i = 0; i < NUM_REQ_Q_ENTRIES; i++) {
+        if (tx_cb->oal) {
+            kfree(tx_cb->oal);
+            tx_cb->oal = NULL;
+        }
+        tx_cb++;
+    }
+}
+
+static int ql_create_send_free_list(struct ql3_adapter *qdev)
 {
     struct ql_tx_buf_cb *tx_cb;
     int i;
@@ -2368,11 +2578,16 @@ static void ql_create_send_free_list(struct ql3_adapter *qdev)
 
     /* Create free list of transmit buffers */
     for (i = 0; i < NUM_REQ_Q_ENTRIES; i++) {
+
         tx_cb = &qdev->tx_buf[i];
         tx_cb->skb = NULL;
         tx_cb->queue_entry = req_q_curr;
         req_q_curr++;
+        tx_cb->oal = kmalloc(512, GFP_KERNEL);
+        if (tx_cb->oal == NULL)
+            return -1;
     }
+    return 0;
 }
 
 static int ql_alloc_mem_resources(struct ql3_adapter *qdev)
@@ -2447,12 +2662,14 @@ static int ql_alloc_mem_resources(struct ql3_adapter *qdev)
 
     /* Initialize the large buffer queue. */
     ql_init_large_buffers(qdev);
-    ql_create_send_free_list(qdev);
+    if (ql_create_send_free_list(qdev))
+        goto err_free_list;
 
     qdev->rsp_current = qdev->rsp_q_virt_addr;
 
     return 0;
-
+err_free_list:
+    ql_free_send_free_list(qdev);
 err_small_buffers:
     ql_free_buffer_queues(qdev);
 err_buffer_queues:
@@ -2468,6 +2685,7 @@ err_req_rsp:
 
 static void ql_free_mem_resources(struct ql3_adapter *qdev)
 {
+    ql_free_send_free_list(qdev);
     ql_free_large_buffers(qdev);
     ql_free_small_buffers(qdev);
     ql_free_buffer_queues(qdev);
@@ -2766,11 +2984,20 @@ static int ql_adapter_initialize(struct ql3_adapter *qdev)
     }
 
     /* Enable Ethernet Function */
-    value =
-        (PORT_CONTROL_EF | PORT_CONTROL_ET | PORT_CONTROL_EI |
-         PORT_CONTROL_HH);
-    ql_write_page0_reg(qdev, &port_regs->portControl,
-               ((value << 16) | value));
+    if (qdev->device_id == QL3032_DEVICE_ID) {
+        value =
+            (QL3032_PORT_CONTROL_EF | QL3032_PORT_CONTROL_KIE |
+             QL3032_PORT_CONTROL_EIv6 | QL3032_PORT_CONTROL_EIv4);
+        ql_write_page0_reg(qdev, &port_regs->functionControl,
+                   ((value << 16) | value));
+    } else {
+        value =
+            (PORT_CONTROL_EF | PORT_CONTROL_ET | PORT_CONTROL_EI |
+             PORT_CONTROL_HH);
+        ql_write_page0_reg(qdev, &port_regs->portControl,
+                   ((value << 16) | value));
+    }
+
 
 out:
     return status;
@@ -2917,8 +3144,10 @@ static void ql_display_dev_info(struct net_device *ndev)
     struct pci_dev *pdev = qdev->pdev;
 
     printk(KERN_INFO PFX
-           "\n%s Adapter %d RevisionID %d found on PCI slot %d.\n",
-           DRV_NAME, qdev->index, qdev->chip_rev_id, qdev->pci_slot);
+           "\n%s Adapter %d RevisionID %d found %s on PCI slot %d.\n",
+           DRV_NAME, qdev->index, qdev->chip_rev_id,
+           (qdev->device_id == QL3032_DEVICE_ID) ? "QLA3032" : "QLA3022",
+           qdev->pci_slot);
     printk(KERN_INFO PFX
            "%s Interface.\n",
            test_bit(QL_LINK_OPTICAL,&qdev->flags) ? "OPTICAL" : "COPPER");
@@ -3212,15 +3441,22 @@ static void ql_reset_work(struct work_struct *work)
      * Loop through the active list and return the skb.
      */
     for (i = 0; i < NUM_REQ_Q_ENTRIES; i++) {
+        int j;
         tx_cb = &qdev->tx_buf[i];
         if (tx_cb->skb) {
-
             printk(KERN_DEBUG PFX
                    "%s: Freeing lost SKB.\n",
                    qdev->ndev->name);
             pci_unmap_single(qdev->pdev,
-                 pci_unmap_addr(tx_cb, mapaddr),
-                 pci_unmap_len(tx_cb, maplen), PCI_DMA_TODEVICE);
+                 pci_unmap_addr(&tx_cb->map[0], mapaddr),
+                 pci_unmap_len(&tx_cb->map[0], maplen),
+                 PCI_DMA_TODEVICE);
+            for(j=1;j<tx_cb->seg_count;j++) {
+                pci_unmap_page(qdev->pdev,
+                       pci_unmap_addr(&tx_cb->map[j],mapaddr),
+                       pci_unmap_len(&tx_cb->map[j],maplen),
+                       PCI_DMA_TODEVICE);
+            }
             dev_kfree_skb(tx_cb->skb);
             tx_cb->skb = NULL;
         }
@@ -3379,21 +3615,24 @@ static int __devinit ql3xxx_probe(struct pci_dev *pdev,
     SET_MODULE_OWNER(ndev);
     SET_NETDEV_DEV(ndev, &pdev->dev);
 
-    if (pci_using_dac)
-        ndev->features |= NETIF_F_HIGHDMA;
-
     pci_set_drvdata(pdev, ndev);
 
     qdev = netdev_priv(ndev);
     qdev->index = cards_found;
     qdev->ndev = ndev;
     qdev->pdev = pdev;
+    qdev->device_id = pci_entry->device;
     qdev->port_link_state = LS_DOWN;
     if (msi)
         qdev->msi = 1;
 
     qdev->msg_enable = netif_msg_init(debug, default_msg);
 
+    if (pci_using_dac)
+        ndev->features |= NETIF_F_HIGHDMA;
+    if (qdev->device_id == QL3032_DEVICE_ID)
+        ndev->features |= (NETIF_F_HW_CSUM | NETIF_F_SG);
+
     qdev->mem_map_registers =
         ioremap_nocache(pci_resource_start(pdev, 1),
                 pci_resource_len(qdev->pdev, 1));
drivers/net/qla3xxx.h | 88  (normal file → executable file)
@@ -21,7 +21,9 @@
 
 #define OPCODE_UPDATE_NCB_IOCB      0xF0
 #define OPCODE_IB_MAC_IOCB          0xF9
+#define OPCODE_IB_3032_MAC_IOCB     0x09
 #define OPCODE_IB_IP_IOCB           0xFA
+#define OPCODE_IB_3032_IP_IOCB      0x0A
 #define OPCODE_IB_TCP_IOCB          0xFB
 #define OPCODE_DUMP_PROTO_IOCB      0xFE
 #define OPCODE_BUFFER_ALERT_IOCB    0xFB
@@ -37,18 +39,23 @@
 struct ob_mac_iocb_req {
     u8 opcode;
     u8 flags;
-#define OB_MAC_IOCB_REQ_MA  0xC0
-#define OB_MAC_IOCB_REQ_F   0x20
-#define OB_MAC_IOCB_REQ_X   0x10
+#define OB_MAC_IOCB_REQ_MA  0xe0
+#define OB_MAC_IOCB_REQ_F   0x10
+#define OB_MAC_IOCB_REQ_X   0x08
 #define OB_MAC_IOCB_REQ_D   0x02
 #define OB_MAC_IOCB_REQ_I   0x01
-    __le16 reserved0;
+    u8 flags1;
+#define OB_3032MAC_IOCB_REQ_IC  0x04
+#define OB_3032MAC_IOCB_REQ_TC  0x02
+#define OB_3032MAC_IOCB_REQ_UC  0x01
+    u8 reserved0;
 
     __le32 transaction_id;
     __le16 data_len;
-    __le16 reserved1;
+    u8 ip_hdr_off;
+    u8 ip_hdr_len;
+    __le32 reserved1;
     __le32 reserved2;
-    __le32 reserved3;
     __le32 buf_addr0_low;
     __le32 buf_addr0_high;
     __le32 buf_0_len;
@@ -58,8 +65,8 @@ struct ob_mac_iocb_req {
     __le32 buf_addr2_low;
     __le32 buf_addr2_high;
     __le32 buf_2_len;
+    __le32 reserved3;
     __le32 reserved4;
-    __le32 reserved5;
 };
 /*
  * The following constants define control bits for buffer
@@ -74,6 +81,7 @@ struct ob_mac_iocb_rsp {
     u8 opcode;
     u8 flags;
 #define OB_MAC_IOCB_RSP_P   0x08
+#define OB_MAC_IOCB_RSP_L   0x04
 #define OB_MAC_IOCB_RSP_S   0x02
 #define OB_MAC_IOCB_RSP_I   0x01
 
@@ -85,6 +93,7 @@ struct ob_mac_iocb_rsp {
 
 struct ib_mac_iocb_rsp {
     u8 opcode;
+#define IB_MAC_IOCB_RSP_V   0x80
     u8 flags;
 #define IB_MAC_IOCB_RSP_S   0x80
 #define IB_MAC_IOCB_RSP_H1  0x40
@@ -138,6 +147,7 @@ struct ob_ip_iocb_req {
 struct ob_ip_iocb_rsp {
     u8 opcode;
     u8 flags;
+#define OB_MAC_IOCB_RSP_H   0x10
 #define OB_MAC_IOCB_RSP_E   0x08
 #define OB_MAC_IOCB_RSP_L   0x04
 #define OB_MAC_IOCB_RSP_S   0x02
@@ -220,6 +230,10 @@ struct ob_tcp_iocb_rsp {
 
 struct ib_ip_iocb_rsp {
     u8 opcode;
+#define IB_IP_IOCB_RSP_3032_V   0x80
+#define IB_IP_IOCB_RSP_3032_O   0x40
+#define IB_IP_IOCB_RSP_3032_I   0x20
+#define IB_IP_IOCB_RSP_3032_R   0x10
     u8 flags;
 #define IB_IP_IOCB_RSP_S    0x80
 #define IB_IP_IOCB_RSP_H1   0x40
@@ -230,6 +244,12 @@ struct ib_ip_iocb_rsp {
 
     __le16 length;
     __le16 checksum;
+#define IB_IP_IOCB_RSP_3032_ICE     0x01
+#define IB_IP_IOCB_RSP_3032_CE      0x02
+#define IB_IP_IOCB_RSP_3032_NUC     0x04
+#define IB_IP_IOCB_RSP_3032_UDP     0x08
+#define IB_IP_IOCB_RSP_3032_TCP     0x10
+#define IB_IP_IOCB_RSP_3032_IPE     0x20
     __le16 reserved;
 #define IB_IP_IOCB_RSP_R    0x01
     __le32 ial_low;
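[Editor's note, not part of the commit] These new completion bits are what the 3032 branch added to ql_process_macip_rx_intr() in qla3xxx.c inspects: any of ICE, CE or NUC marks the frame's checksum as bad (the driver logs an error and leaves ip_summed at CHECKSUM_NONE), otherwise a set TCP bit lets the driver report CHECKSUM_UNNECESSARY. The condensed standalone sketch below mirrors that decision; the function, enum and harness are illustrative only, not driver code.

#include <stdio.h>
#include <stdint.h>

#define IB_IP_IOCB_RSP_3032_ICE     0x01
#define IB_IP_IOCB_RSP_3032_CE      0x02
#define IB_IP_IOCB_RSP_3032_NUC     0x04
#define IB_IP_IOCB_RSP_3032_UDP     0x08
#define IB_IP_IOCB_RSP_3032_TCP     0x10
#define IB_IP_IOCB_RSP_3032_IPE     0x20

enum rx_csum_result { RX_CSUM_NONE, RX_CSUM_GOOD, RX_CSUM_BAD };

static enum rx_csum_result classify_rx_csum(uint16_t checksum)
{
    if (checksum & (IB_IP_IOCB_RSP_3032_ICE |
                    IB_IP_IOCB_RSP_3032_CE |
                    IB_IP_IOCB_RSP_3032_NUC))
        return RX_CSUM_BAD;   /* driver prints "Bad checksum" and keeps CHECKSUM_NONE */
    if (checksum & IB_IP_IOCB_RSP_3032_TCP)
        return RX_CSUM_GOOD;  /* driver sets CHECKSUM_UNNECESSARY */
    return RX_CSUM_NONE;      /* e.g. UDP: the stack verifies in software */
}

int main(void)
{
    printf("tcp=%d tcp+ce=%d udp=%d\n",
           classify_rx_csum(IB_IP_IOCB_RSP_3032_TCP),
           classify_rx_csum(IB_IP_IOCB_RSP_3032_TCP | IB_IP_IOCB_RSP_3032_CE),
           classify_rx_csum(IB_IP_IOCB_RSP_3032_UDP));
    return 0;
}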
@@ -524,6 +544,21 @@ enum {
     IP_ADDR_INDEX_REG_FUNC_2_SEC = 0x0005,
     IP_ADDR_INDEX_REG_FUNC_3_PRI = 0x0006,
     IP_ADDR_INDEX_REG_FUNC_3_SEC = 0x0007,
+    IP_ADDR_INDEX_REG_6 = 0x0008,
+    IP_ADDR_INDEX_REG_OFFSET_MASK = 0x0030,
+    IP_ADDR_INDEX_REG_E = 0x0040,
+};
+enum {
+    QL3032_PORT_CONTROL_DS = 0x0001,
+    QL3032_PORT_CONTROL_HH = 0x0002,
+    QL3032_PORT_CONTROL_EIv6 = 0x0004,
+    QL3032_PORT_CONTROL_EIv4 = 0x0008,
+    QL3032_PORT_CONTROL_ET = 0x0010,
+    QL3032_PORT_CONTROL_EF = 0x0020,
+    QL3032_PORT_CONTROL_DRM = 0x0040,
+    QL3032_PORT_CONTROL_RLB = 0x0080,
+    QL3032_PORT_CONTROL_RCB = 0x0100,
+    QL3032_PORT_CONTROL_KIE = 0x0200,
 };
 
 enum {
@@ -657,7 +692,8 @@ struct ql3xxx_port_registers {
     u32 internalRamWDataReg;
     u32 reclaimedBufferAddrRegLow;
     u32 reclaimedBufferAddrRegHigh;
-    u32 reserved[2];
+    u32 tcpConfiguration;
+    u32 functionControl;
     u32 fpgaRevID;
     u32 localRamAddr;
     u32 localRamDataAutoIncr;
@@ -963,6 +999,7 @@ struct eeprom_data {
 
 #define QL3XXX_VENDOR_ID    0x1077
 #define QL3022_DEVICE_ID    0x3022
+#define QL3032_DEVICE_ID    0x3032
 
 /* MTU & Frame Size stuff */
 #define NORMAL_MTU_SIZE     ETH_DATA_LEN
@@ -1038,11 +1075,41 @@ struct ql_rcv_buf_cb {
     int index;
 };
 
+/*
+ * Original IOCB has 3 sg entries:
+ * first points to skb-data area
+ * second points to first frag
+ * third points to next oal.
+ * OAL has 5 entries:
+ * 1 thru 4 point to frags
+ * fifth points to next oal.
+ */
+#define MAX_OAL_CNT ((MAX_SKB_FRAGS-1)/4 + 1)
+
+struct oal_entry {
+    u32 dma_lo;
+    u32 dma_hi;
+    u32 len;
+#define OAL_LAST_ENTRY   0x80000000    /* Last valid buffer in list. */
+#define OAL_CONT_ENTRY   0x40000000    /* points to an OAL. (continuation) */
+    u32 reserved;
+};
+
+struct oal {
+    struct oal_entry oal_entry[5];
+};
+
+struct map_list {
+    DECLARE_PCI_UNMAP_ADDR(mapaddr);
+    DECLARE_PCI_UNMAP_LEN(maplen);
+};
+
 struct ql_tx_buf_cb {
     struct sk_buff *skb;
     struct ob_mac_iocb_req *queue_entry ;
-    DECLARE_PCI_UNMAP_ADDR(mapaddr);
-    DECLARE_PCI_UNMAP_LEN(maplen);
+    int seg_count;
+    struct oal *oal;
+    struct map_list map[MAX_SKB_FRAGS+1];
 };
 
 /* definitions for type field */
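[Editor's note, not part of the commit] The sizes implied by these definitions explain the fixed kmalloc(512, GFP_KERNEL) that ql_create_send_free_list() now does per ql_tx_buf_cb. The sketch below assumes MAX_SKB_FRAGS is 18 (its usual value with 4 KB pages in kernels of this era); with 16-byte oal_entry structures and 5 entries per OAL, the worst case of MAX_OAL_CNT chained OALs still fits in one 512-byte buffer.

#include <stdio.h>
#include <stdint.h>

#define MAX_SKB_FRAGS 18                        /* assumption, see note above */
#define MAX_OAL_CNT ((MAX_SKB_FRAGS - 1) / 4 + 1)

struct oal_entry {
    uint32_t dma_lo;
    uint32_t dma_hi;
    uint32_t len;
    uint32_t reserved;
};

struct oal {
    struct oal_entry oal_entry[5];
};

int main(void)
{
    printf("sizeof(struct oal_entry) = %zu\n", sizeof(struct oal_entry));
    printf("sizeof(struct oal)       = %zu\n", sizeof(struct oal));
    printf("MAX_OAL_CNT              = %d\n", MAX_OAL_CNT);
    printf("worst-case OAL bytes     = %zu (fits in the 512-byte kmalloc)\n",
           MAX_OAL_CNT * sizeof(struct oal));
    return 0;
}

With these assumptions the worst case is 5 OALs of 80 bytes each, i.e. 400 bytes, which is why a single 512-byte allocation per transmit control block is enough for the send path's OAL chain.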
@@ -1189,6 +1256,7 @@ struct ql3_adapter {
     struct delayed_work reset_work;
     struct delayed_work tx_timeout_work;
     u32 max_frame_size;
+    u32 device_id;
 };
 
 #endif    /* _QLA3XXX_H_ */