Merge branch 's390-next'
Julian Wiedmann says:

====================
s390/qeth: updates 2020-03-25

please apply the following patch series for qeth to netdev's net-next tree.
Same series as yesterday, with one minor update to patch 1 as per your review.

This adds
1) NAPI poll support for the async-Completion Queue (with one qdio layer patch
   acked by Heiko),
2) ethtool support for per-queue TX IRQ coalescing, and
3) various cleanups.
====================

Signed-off-by: David S. Miller <davem@davemloft.net>
commit 1455ea1d8a
@@ -338,7 +338,7 @@ typedef void qdio_handler_t(struct ccw_device *, unsigned int, int,
  * @no_output_qs: number of output queues
  * @input_handler: handler to be called for input queues
  * @output_handler: handler to be called for output queues
- * @queue_start_poll_array: polling handlers (one per input queue or NULL)
+ * @irq_poll: Data IRQ polling handler (NULL when not supported)
  * @scan_threshold: # of in-use buffers that triggers scan on output queue
  * @int_parm: interruption parameter
  * @input_sbal_addr_array: address of no_input_qs * 128 pointers
@@ -359,8 +359,7 @@ struct qdio_initialize {
     unsigned int no_output_qs;
     qdio_handler_t *input_handler;
     qdio_handler_t *output_handler;
-    void (**queue_start_poll_array) (struct ccw_device *, int,
-                      unsigned long);
+    void (*irq_poll)(struct ccw_device *cdev, unsigned long data);
     unsigned int scan_threshold;
     unsigned long int_parm;
     struct qdio_buffer **input_sbal_addr_array;
@@ -415,8 +414,8 @@ extern int qdio_activate(struct ccw_device *);
 extern void qdio_release_aob(struct qaob *);
 extern int do_QDIO(struct ccw_device *, unsigned int, int, unsigned int,
            unsigned int);
-extern int qdio_start_irq(struct ccw_device *, int);
-extern int qdio_stop_irq(struct ccw_device *, int);
+extern int qdio_start_irq(struct ccw_device *cdev);
+extern int qdio_stop_irq(struct ccw_device *cdev);
 extern int qdio_get_next_buffers(struct ccw_device *, int, int *, int *);
 extern int qdio_inspect_queue(struct ccw_device *cdev, unsigned int nr,
                   bool is_input, unsigned int *bufnr,

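To put the new qdio.h interface in context: instead of one queue_start_poll callback per input queue, a driver now registers a single per-device irq_poll handler and re-arms interrupts per device. The following is a minimal sketch of that wiring, modeled on the qeth conversion further down in this merge; my_card, my_poll and my_fill_init_data are illustrative names that are not part of the series, and error handling plus the untouched fields of struct qdio_initialize are omitted.

/* Sketch only - illustrative driver glue, not code from this series. */
#include <linux/netdevice.h>
#include <asm/qdio.h>

struct my_card {
    struct napi_struct napi;
};

/* Called by qdio in IRQ context when data arrives while IRQs are armed. */
static void my_poll(struct ccw_device *cdev, unsigned long data)
{
    struct my_card *card = (struct my_card *)data;

    napi_schedule(&card->napi);     /* defer the real work to NAPI */
}

static void my_fill_init_data(struct my_card *card,
                              struct qdio_initialize *init_data)
{
    init_data->irq_poll = my_poll;              /* one callback per device */
    init_data->int_parm = (unsigned long)card;  /* passed back as 'data' */
    /* queue counts, handlers and SBAL arrays are filled in as before */
}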
@@ -177,8 +177,8 @@ struct qdio_queue_perf_stat {
     unsigned int nr_sbal_total;
 };

-enum qdio_queue_irq_states {
-    QDIO_QUEUE_IRQS_DISABLED,
+enum qdio_irq_poll_states {
+    QDIO_IRQ_DISABLED,
 };

 struct qdio_input_q {
@@ -188,10 +188,6 @@ struct qdio_input_q {
     int ack_count;
     /* last time of noticing incoming data */
     u64 timestamp;
-    /* upper-layer polling flag */
-    unsigned long queue_irq_state;
-    /* callback to start upper-layer polling */
-    void (*queue_start_poll) (struct ccw_device *, int, unsigned long);
 };

 struct qdio_output_q {
@@ -299,6 +295,9 @@ struct qdio_irq {
     struct qdio_q *input_qs[QDIO_MAX_QUEUES_PER_IRQ];
     struct qdio_q *output_qs[QDIO_MAX_QUEUES_PER_IRQ];

+    void (*irq_poll)(struct ccw_device *cdev, unsigned long data);
+    unsigned long poll_state;
+
     debug_info_t *debug_area;
     struct mutex setup_mutex;
     struct qdio_dev_perf_stat perf_stat;

@@ -128,8 +128,8 @@ static int qstat_show(struct seq_file *m, void *v)
                q->u.in.ack_start, q->u.in.ack_count);
         seq_printf(m, "DSCI: %x IRQs disabled: %u\n",
                *(u8 *)q->irq_ptr->dsci,
-               test_bit(QDIO_QUEUE_IRQS_DISABLED,
-                    &q->u.in.queue_irq_state));
+               test_bit(QDIO_IRQ_DISABLED,
+                    &q->irq_ptr->poll_state));
     }
     seq_printf(m, "SBAL states:\n");
     seq_printf(m, "|0 |8 |16 |24 |32 |40 |48 |56 63|\n");

@@ -950,19 +950,14 @@ static void qdio_int_handler_pci(struct qdio_irq *irq_ptr)
     if (unlikely(irq_ptr->state != QDIO_IRQ_STATE_ACTIVE))
         return;

-    for_each_input_queue(irq_ptr, q, i) {
-        if (q->u.in.queue_start_poll) {
-            /* skip if polling is enabled or already in work */
-            if (test_and_set_bit(QDIO_QUEUE_IRQS_DISABLED,
-                         &q->u.in.queue_irq_state)) {
-                QDIO_PERF_STAT_INC(irq_ptr, int_discarded);
-                continue;
-            }
-            q->u.in.queue_start_poll(q->irq_ptr->cdev, q->nr,
-                         q->irq_ptr->int_parm);
-        } else {
-            tasklet_schedule(&q->tasklet);
-        }
-    }
+    if (irq_ptr->irq_poll) {
+        if (!test_and_set_bit(QDIO_IRQ_DISABLED, &irq_ptr->poll_state))
+            irq_ptr->irq_poll(irq_ptr->cdev, irq_ptr->int_parm);
+        else
+            QDIO_PERF_STAT_INC(irq_ptr, int_discarded);
+    } else {
+        for_each_input_queue(irq_ptr, q, i)
+            tasklet_schedule(&q->tasklet);
+    }

     if (!pci_out_supported(irq_ptr) || !irq_ptr->scan_threshold)
@@ -1610,24 +1605,26 @@ EXPORT_SYMBOL_GPL(do_QDIO);
 /**
  * qdio_start_irq - process input buffers
  * @cdev: associated ccw_device for the qdio subchannel
- * @nr: input queue number
  *
  * Return codes
  *   0 - success
  *   1 - irqs not started since new data is available
  */
-int qdio_start_irq(struct ccw_device *cdev, int nr)
+int qdio_start_irq(struct ccw_device *cdev)
 {
     struct qdio_q *q;
     struct qdio_irq *irq_ptr = cdev->private->qdio_data;
+    unsigned int i;

     if (!irq_ptr)
         return -ENODEV;
-    q = irq_ptr->input_qs[nr];

     clear_nonshared_ind(irq_ptr);
-    qdio_stop_polling(q);
-    clear_bit(QDIO_QUEUE_IRQS_DISABLED, &q->u.in.queue_irq_state);
+
+    for_each_input_queue(irq_ptr, q, i)
+        qdio_stop_polling(q);
+
+    clear_bit(QDIO_IRQ_DISABLED, &irq_ptr->poll_state);

     /*
      * We need to check again to not lose initiative after
@@ -1635,13 +1632,16 @@ int qdio_start_irq(struct ccw_device *cdev, int nr)
      */
     if (test_nonshared_ind(irq_ptr))
         goto rescan;
-    if (!qdio_inbound_q_done(q, q->first_to_check))
-        goto rescan;
+
+    for_each_input_queue(irq_ptr, q, i) {
+        if (!qdio_inbound_q_done(q, q->first_to_check))
+            goto rescan;
+    }
+
     return 0;

 rescan:
-    if (test_and_set_bit(QDIO_QUEUE_IRQS_DISABLED,
-                 &q->u.in.queue_irq_state))
+    if (test_and_set_bit(QDIO_IRQ_DISABLED, &irq_ptr->poll_state))
         return 0;
     else
         return 1;
@@ -1729,23 +1729,19 @@ EXPORT_SYMBOL(qdio_get_next_buffers);
 /**
  * qdio_stop_irq - disable interrupt processing for the device
  * @cdev: associated ccw_device for the qdio subchannel
- * @nr: input queue number
  *
  * Return codes
  *   0 - interrupts were already disabled
  *   1 - interrupts successfully disabled
  */
-int qdio_stop_irq(struct ccw_device *cdev, int nr)
+int qdio_stop_irq(struct ccw_device *cdev)
 {
-    struct qdio_q *q;
     struct qdio_irq *irq_ptr = cdev->private->qdio_data;

     if (!irq_ptr)
         return -ENODEV;
-    q = irq_ptr->input_qs[nr];

-    if (test_and_set_bit(QDIO_QUEUE_IRQS_DISABLED,
-            &q->u.in.queue_irq_state))
+    if (test_and_set_bit(QDIO_IRQ_DISABLED, &irq_ptr->poll_state))
         return 0;
     else
         return 1;

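The kerneldoc above spells out the return-code contract: qdio_start_irq() returns 0 on success and 1 when new data already arrived (so IRQs were not re-armed), while qdio_stop_irq() returns 1 when it actually disabled IRQs and 0 when they were already off. Below is a minimal sketch of how a NAPI poll routine consumes that contract, mirroring qeth_poll()/qeth_stop() later in this merge; my_card, my_process_rx and my_shutdown are illustrative names, and my_card is assumed to hold the ccw_device and the NAPI instance.

/* Sketch only - not code from this series. */
static int my_napi_poll(struct napi_struct *napi, int budget)
{
    struct my_card *card = container_of(napi, struct my_card, napi);
    int work_done = my_process_rx(card, budget);    /* driver-specific RX work */

    if (work_done >= budget)
        return work_done;       /* budget used up: stay in polling mode */

    /* qdio_start_irq() == 1 means data raced in, so poll once more. */
    if (napi_complete_done(napi, work_done) &&
        qdio_start_irq(card->cdev))
        napi_schedule(napi);

    return work_done;
}

static void my_shutdown(struct my_card *card)
{
    napi_disable(&card->napi);
    qdio_stop_irq(card->cdev);  /* returns 0 if IRQs were already disabled */
}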
@@ -224,15 +224,6 @@ static void setup_queues(struct qdio_irq *irq_ptr,
         setup_queues_misc(q, irq_ptr, qdio_init->input_handler, i);

         q->is_input_q = 1;
-        if (qdio_init->queue_start_poll_array &&
-            qdio_init->queue_start_poll_array[i]) {
-            q->u.in.queue_start_poll =
-                qdio_init->queue_start_poll_array[i];
-            set_bit(QDIO_QUEUE_IRQS_DISABLED,
-                &q->u.in.queue_irq_state);
-        } else {
-            q->u.in.queue_start_poll = NULL;
-        }

         setup_storage_lists(q, irq_ptr, input_sbal_array, i);
         input_sbal_array += QDIO_MAX_BUFFERS_PER_Q;
@@ -483,6 +474,13 @@ int qdio_setup_irq(struct qdio_initialize *init_data)
     ccw_device_get_schid(irq_ptr->cdev, &irq_ptr->schid);
     setup_queues(irq_ptr, init_data);

+    if (init_data->irq_poll) {
+        irq_ptr->irq_poll = init_data->irq_poll;
+        set_bit(QDIO_IRQ_DISABLED, &irq_ptr->poll_state);
+    } else {
+        irq_ptr->irq_poll = NULL;
+    }
+
     setup_qib(irq_ptr, init_data);
     qdio_setup_thinint(irq_ptr);
     set_impl_params(irq_ptr, init_data->qib_param_field_format,

@@ -135,28 +135,24 @@ static inline void tiqdio_call_inq_handlers(struct qdio_irq *irq)
         has_multiple_inq_on_dsci(irq))
         xchg(irq->dsci, 0);

+    if (irq->irq_poll) {
+        if (!test_and_set_bit(QDIO_IRQ_DISABLED, &irq->poll_state))
+            irq->irq_poll(irq->cdev, irq->int_parm);
+        else
+            QDIO_PERF_STAT_INC(irq, int_discarded);
+
+        return;
+    }
+
     for_each_input_queue(irq, q, i) {
-        if (q->u.in.queue_start_poll) {
-            /* skip if polling is enabled or already in work */
-            if (test_and_set_bit(QDIO_QUEUE_IRQS_DISABLED,
-                         &q->u.in.queue_irq_state)) {
-                QDIO_PERF_STAT_INC(irq, int_discarded);
-                continue;
-            }
+        if (!shared_ind(irq))
+            xchg(irq->dsci, 0);

-            /* avoid dsci clear here, done after processing */
-            q->u.in.queue_start_poll(irq->cdev, q->nr,
-                         irq->int_parm);
-        } else {
-            if (!shared_ind(irq))
-                xchg(irq->dsci, 0);
-
-            /*
-             * Call inbound processing but not directly
-             * since that could starve other thinint queues.
-             */
-            tasklet_schedule(&q->tasklet);
-        }
+        /*
+         * Call inbound processing but not directly
+         * since that could starve other thinint queues.
+         */
+        tasklet_schedule(&q->tasklet);
     }
 }

@@ -178,10 +178,6 @@ struct qeth_vnicc_info {
 #define QETH_RECLAIM_WORK_TIME HZ
 #define QETH_MAX_PORTNO 15

-/*IPv6 address autoconfiguration stuff*/
-#define UNIQUE_ID_IF_CREATE_ADDR_FAILED 0xfffe
-#define UNIQUE_ID_NOT_BY_CARD 0x10000
-
 /*****************************************************************************/
 /* QDIO queue and buffer handling */
 /*****************************************************************************/
@@ -215,6 +211,7 @@ struct qeth_vnicc_info {
 #define QETH_PRIO_Q_ING_TOS 2
 #define QETH_PRIO_Q_ING_SKB 3
 #define QETH_PRIO_Q_ING_VLAN 4
+#define QETH_PRIO_Q_ING_FIXED 5

 /* Packing */
 #define QETH_LOW_WATERMARK_PACK 2
@@ -406,6 +403,7 @@ struct qeth_qdio_out_buffer {
     struct qdio_buffer *buffer;
     atomic_t state;
     int next_element_to_fill;
+    unsigned int frames;
     unsigned int bytes;
     struct sk_buff_head skb_list;
     int is_header[QDIO_MAX_ELEMENTS_PER_BUFFER];
@@ -461,6 +459,8 @@ struct qeth_out_q_stats {
     u64 tso_bytes;
     u64 packing_mode_switch;
     u64 stopped;
+    u64 doorbell;
+    u64 coal_frames;
     u64 completion_yield;
     u64 completion_timer;

@@ -471,6 +471,8 @@ struct qeth_out_q_stats {
     u64 tx_dropped;
 };

+#define QETH_TX_MAX_COALESCED_FRAMES 1
+#define QETH_TX_COALESCE_USECS 25
 #define QETH_TX_TIMER_USECS 500

 struct qeth_qdio_out_q {
@@ -494,9 +496,13 @@ struct qeth_qdio_out_q {
     struct napi_struct napi;
     struct timer_list timer;
     struct qeth_hdr *prev_hdr;
+    unsigned int coalesced_frames;
     u8 bulk_start;
     u8 bulk_count;
     u8 bulk_max;
+
+    unsigned int coalesce_usecs;
+    unsigned int max_coalesced_frames;
 };

 #define qeth_for_each_output_queue(card, q, i) \
@@ -505,12 +511,10 @@ struct qeth_qdio_out_q {

 #define qeth_napi_to_out_queue(n) container_of(n, struct qeth_qdio_out_q, napi)

-static inline void qeth_tx_arm_timer(struct qeth_qdio_out_q *queue)
+static inline void qeth_tx_arm_timer(struct qeth_qdio_out_q *queue,
+                     unsigned long usecs)
 {
-    if (timer_pending(&queue->timer))
-        return;
-    mod_timer(&queue->timer, usecs_to_jiffies(QETH_TX_TIMER_USECS) +
-                 jiffies);
+    timer_reduce(&queue->timer, usecs_to_jiffies(usecs) + jiffies);
 }

 static inline bool qeth_out_queue_is_full(struct qeth_qdio_out_q *queue)
@@ -672,22 +676,20 @@ struct qeth_card_blkt {

 #define QETH_BROADCAST_WITH_ECHO 0x01
 #define QETH_BROADCAST_WITHOUT_ECHO 0x02
-#define QETH_LAYER2_MAC_REGISTERED 0x02
 struct qeth_card_info {
     unsigned short unit_addr2;
     unsigned short cula;
     u8 chpid;
     __u16 func_level;
     char mcl_level[QETH_MCL_LENGTH + 1];
+    u8 dev_addr_is_registered:1;
     u8 open_when_online:1;
     u8 promisc_mode:1;
     u8 use_v1_blkt:1;
     u8 is_vm_nic:1;
-    int mac_bits;
     enum qeth_card_types type;
     enum qeth_link_types link_type;
     int broadcast_capable;
     int unique_id;
     bool layer_enforced;
     struct qeth_card_blkt blkt;
     __u32 diagass_support;
@@ -752,7 +754,7 @@ enum qeth_addr_disposition {
 struct qeth_rx {
     int b_count;
     int b_index;
-    struct qdio_buffer_element *b_element;
+    u8 buf_element;
     int e_offset;
     int qdio_err;
 };

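The header changes above carry the TX coalescing state: each output queue now counts coalesced_frames and has per-queue coalesce_usecs / max_coalesced_frames thresholds, and qeth_tx_arm_timer() switches from mod_timer() guarded by timer_pending() to timer_reduce(), which arms an inactive timer and otherwise only moves the expiry earlier, so the shortest requested delay wins. Below is a condensed sketch of the decision the TX flush path makes with these fields; my_tx_doorbell_done is an illustrative name, the real logic lives in the qeth_flush_buffers() hunk further down.

/* Sketch only - condensed from the qeth_flush_buffers() change below. */
static void my_tx_doorbell_done(struct qeth_qdio_out_q *queue)
{
    unsigned int frames = READ_ONCE(queue->max_coalesced_frames);
    unsigned int usecs = READ_ONCE(queue->coalesce_usecs);

    if (frames && queue->coalesced_frames >= frames) {
        /* enough frames are pending: fake the TX completion IRQ now */
        napi_schedule(&queue->napi);
        queue->coalesced_frames = 0;
    } else if (usecs) {
        /* otherwise guarantee a completion scan within 'usecs';
         * timer_reduce() keeps the earliest requested deadline
         */
        qeth_tx_arm_timer(queue, usecs);
    }
}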
@@ -548,14 +548,6 @@ static void qeth_qdio_handle_aob(struct qeth_card *card,
     qdio_release_aob(aob);
 }

-static inline int qeth_is_cq(struct qeth_card *card, unsigned int queue)
-{
-    return card->options.cq == QETH_CQ_ENABLED &&
-        card->qdio.c_q != NULL &&
-        queue != 0 &&
-        queue == card->qdio.no_in_queues - 1;
-}
-
 static void qeth_setup_ccw(struct ccw1 *ccw, u8 cmd_code, u8 flags, u32 len,
                void *data)
 {
@@ -1163,17 +1155,20 @@ static void qeth_tx_complete_buf(struct qeth_qdio_out_buffer *buf, bool error,

     QETH_TXQ_STAT_INC(queue, bufs);
     QETH_TXQ_STAT_ADD(queue, buf_elements, buf->next_element_to_fill);
+    if (error) {
+        QETH_TXQ_STAT_ADD(queue, tx_errors, buf->frames);
+    } else {
+        QETH_TXQ_STAT_ADD(queue, tx_packets, buf->frames);
+        QETH_TXQ_STAT_ADD(queue, tx_bytes, buf->bytes);
+    }
+
     while ((skb = __skb_dequeue(&buf->skb_list)) != NULL) {
         unsigned int bytes = qdisc_pkt_len(skb);
         bool is_tso = skb_is_gso(skb);
         unsigned int packets;

         packets = is_tso ? skb_shinfo(skb)->gso_segs : 1;
-        if (error) {
-            QETH_TXQ_STAT_ADD(queue, tx_errors, packets);
-        } else {
-            QETH_TXQ_STAT_ADD(queue, tx_packets, packets);
-            QETH_TXQ_STAT_ADD(queue, tx_bytes, bytes);
+        if (!error) {
             if (skb->ip_summed == CHECKSUM_PARTIAL)
                 QETH_TXQ_STAT_ADD(queue, skbs_csum, packets);
             if (skb_is_nonlinear(skb))
@@ -1210,6 +1205,7 @@ static void qeth_clear_output_buffer(struct qeth_qdio_out_q *queue,

     qeth_scrub_qdio_buffer(buf->buffer, queue->max_elements);
     buf->next_element_to_fill = 0;
+    buf->frames = 0;
     buf->bytes = 0;
     atomic_set(&buf->state, QETH_QDIO_BUF_EMPTY);
 }
@@ -2408,6 +2404,8 @@ static int qeth_alloc_qdio_queues(struct qeth_card *card)
         queue->card = card;
         queue->queue_no = i;
         timer_setup(&queue->timer, qeth_tx_completion_timer, 0);
+        queue->coalesce_usecs = QETH_TX_COALESCE_USECS;
+        queue->max_coalesced_frames = QETH_TX_MAX_COALESCED_FRAMES;

         /* give outbound qeth_qdio_buffers their qdio_buffers */
         for (j = 0; j < QDIO_MAX_BUFFERS_PER_Q; ++j) {
@@ -2631,15 +2629,13 @@ static void qeth_initialize_working_pool_list(struct qeth_card *card)
 static struct qeth_buffer_pool_entry *qeth_find_free_buffer_pool_entry(
                     struct qeth_card *card)
 {
-    struct list_head *plh;
     struct qeth_buffer_pool_entry *entry;
     int i, free;

     if (list_empty(&card->qdio.in_buf_pool.entry_list))
         return NULL;

-    list_for_each(plh, &card->qdio.in_buf_pool.entry_list) {
-        entry = list_entry(plh, struct qeth_buffer_pool_entry, list);
+    list_for_each_entry(entry, &card->qdio.in_buf_pool.entry_list, list) {
         free = 1;
         for (i = 0; i < QETH_MAX_BUFFER_ELEMENTS(card); ++i) {
             if (page_count(entry->elements[i]) > 1) {
@@ -2654,8 +2650,8 @@ static struct qeth_buffer_pool_entry *qeth_find_free_buffer_pool_entry(
     }

     /* no free buffer in pool so take first one and swap pages */
-    entry = list_entry(card->qdio.in_buf_pool.entry_list.next,
-            struct qeth_buffer_pool_entry, list);
+    entry = list_first_entry(&card->qdio.in_buf_pool.entry_list,
+                 struct qeth_buffer_pool_entry, list);
     for (i = 0; i < QETH_MAX_BUFFER_ELEMENTS(card); ++i) {
         if (page_count(entry->elements[i]) > 1) {
             struct page *page = dev_alloc_page();
@@ -2766,6 +2762,7 @@ static int qeth_init_qdio_queues(struct qeth_card *card)
         queue->next_buf_to_fill = 0;
         queue->do_pack = 0;
         queue->prev_hdr = NULL;
+        queue->coalesced_frames = 0;
         queue->bulk_start = 0;
         queue->bulk_count = 0;
         queue->bulk_max = qeth_tx_select_bulk_max(card, queue);
@@ -3361,6 +3358,7 @@ static void qeth_flush_buffers(struct qeth_qdio_out_q *queue, int index,
         buf = queue->bufs[bidx];
         buf->buffer->element[buf->next_element_to_fill - 1].eflags |=
                 SBAL_EFLAGS_LAST_ENTRY;
+        queue->coalesced_frames += buf->frames;

         if (queue->bufstates)
             queue->bufstates[bidx].user = buf;
@@ -3397,6 +3395,7 @@ static void qeth_flush_buffers(struct qeth_qdio_out_q *queue, int index,
         }
     }

+    QETH_TXQ_STAT_INC(queue, doorbell);
     qdio_flags = QDIO_FLAG_SYNC_OUTPUT;
     if (atomic_read(&queue->set_pci_flags_count))
         qdio_flags |= QDIO_FLAG_PCI_OUT;
@@ -3404,8 +3403,18 @@ static void qeth_flush_buffers(struct qeth_qdio_out_q *queue, int index,
                queue->queue_no, index, count);

     /* Fake the TX completion interrupt: */
-    if (IS_IQD(card))
-        napi_schedule(&queue->napi);
+    if (IS_IQD(card)) {
+        unsigned int frames = READ_ONCE(queue->max_coalesced_frames);
+        unsigned int usecs = READ_ONCE(queue->coalesce_usecs);
+
+        if (frames && queue->coalesced_frames >= frames) {
+            napi_schedule(&queue->napi);
+            queue->coalesced_frames = 0;
+            QETH_TXQ_STAT_INC(queue, coal_frames);
+        } else if (usecs) {
+            qeth_tx_arm_timer(queue, usecs);
+        }
+    }

     if (rc) {
         /* ignore temporary SIGA errors without busy condition */
@@ -3469,8 +3478,7 @@ static void qeth_check_outbound_queue(struct qeth_qdio_out_q *queue)
     }
 }

-static void qeth_qdio_start_poll(struct ccw_device *ccwdev, int queue,
-                 unsigned long card_ptr)
+static void qeth_qdio_poll(struct ccw_device *cdev, unsigned long card_ptr)
 {
     struct qeth_card *card = (struct qeth_card *)card_ptr;

@@ -3508,9 +3516,6 @@ static void qeth_qdio_cq_handler(struct qeth_card *card, unsigned int qdio_err,
     int i;
     int rc;

-    if (!qeth_is_cq(card, queue))
-        return;
-
     QETH_CARD_TEXT_(card, 5, "qcqhe%d", first_element);
     QETH_CARD_TEXT_(card, 5, "qcqhc%d", count);
     QETH_CARD_TEXT_(card, 5, "qcqherr%d", qdio_err);
@@ -3556,9 +3561,7 @@ static void qeth_qdio_input_handler(struct ccw_device *ccwdev,
     QETH_CARD_TEXT_(card, 2, "qihq%d", queue);
     QETH_CARD_TEXT_(card, 2, "qiec%d", qdio_err);

-    if (qeth_is_cq(card, queue))
-        qeth_qdio_cq_handler(card, qdio_err, queue, first_elem, count);
-    else if (qdio_err)
+    if (qdio_err)
         qeth_schedule_recovery(card);
 }

@@ -3641,6 +3644,8 @@ int qeth_get_priority_queue(struct qeth_card *card, struct sk_buff *skb)
             return ~ntohs(veth->h_vlan_TCI) >>
                    (VLAN_PRIO_SHIFT + 1) & 3;
         break;
+    case QETH_PRIO_Q_ING_FIXED:
+        return card->qdio.default_out_queue;
     default:
         break;
     }
@@ -3956,6 +3961,7 @@ static int __qeth_xmit(struct qeth_card *card, struct qeth_qdio_out_q *queue,

     next_element = qeth_fill_buffer(buffer, skb, hdr, offset, hd_len);
     buffer->bytes += bytes;
+    buffer->frames += skb_is_gso(skb) ? skb_shinfo(skb)->gso_segs : 1;
     queue->prev_hdr = hdr;

     flush = __netdev_tx_sent_queue(txq, bytes,
@@ -4046,6 +4052,8 @@ int qeth_do_send_packet(struct qeth_card *card, struct qeth_qdio_out_q *queue,
     }

     next_element = qeth_fill_buffer(buffer, skb, hdr, offset, hd_len);
+    buffer->bytes += qdisc_pkt_len(skb);
+    buffer->frames += skb_is_gso(skb) ? skb_shinfo(skb)->gso_segs : 1;

     if (queue->do_pack)
         QETH_TXQ_STAT_INC(queue, skbs_pack);
@@ -4805,10 +4813,7 @@ static void qeth_determine_capabilities(struct qeth_card *card)
 }

 static void qeth_qdio_establish_cq(struct qeth_card *card,
-                   struct qdio_buffer **in_sbal_ptrs,
-                   void (**queue_start_poll)
-                   (struct ccw_device *, int,
-                    unsigned long))
+                   struct qdio_buffer **in_sbal_ptrs)
 {
     int i;

@@ -4819,8 +4824,6 @@ static void qeth_qdio_establish_cq(struct qeth_card *card,
         for (i = 0; i < QDIO_MAX_BUFFERS_PER_Q; i++)
             in_sbal_ptrs[offset + i] =
                 card->qdio.c_q->bufs[i].buffer;
-
-        queue_start_poll[card->qdio.no_in_queues - 1] = NULL;
     }
 }

@@ -4829,7 +4832,6 @@ static int qeth_qdio_establish(struct qeth_card *card)
     struct qdio_initialize init_data;
     char *qib_param_field;
     struct qdio_buffer **in_sbal_ptrs;
-    void (**queue_start_poll) (struct ccw_device *, int, unsigned long);
     struct qdio_buffer **out_sbal_ptrs;
     int i, j, k;
     int rc = 0;
@@ -4856,16 +4858,7 @@ static int qeth_qdio_establish(struct qeth_card *card)
     for (i = 0; i < QDIO_MAX_BUFFERS_PER_Q; i++)
         in_sbal_ptrs[i] = card->qdio.in_q->bufs[i].buffer;

-    queue_start_poll = kcalloc(card->qdio.no_in_queues, sizeof(void *),
-                   GFP_KERNEL);
-    if (!queue_start_poll) {
-        rc = -ENOMEM;
-        goto out_free_in_sbals;
-    }
-    for (i = 0; i < card->qdio.no_in_queues; ++i)
-        queue_start_poll[i] = qeth_qdio_start_poll;
-
-    qeth_qdio_establish_cq(card, in_sbal_ptrs, queue_start_poll);
+    qeth_qdio_establish_cq(card, in_sbal_ptrs);

     out_sbal_ptrs =
         kcalloc(card->qdio.no_out_queues * QDIO_MAX_BUFFERS_PER_Q,
@@ -4873,7 +4866,7 @@ static int qeth_qdio_establish(struct qeth_card *card)
            GFP_KERNEL);
     if (!out_sbal_ptrs) {
         rc = -ENOMEM;
-        goto out_free_queue_start_poll;
+        goto out_free_in_sbals;
     }

     for (i = 0, k = 0; i < card->qdio.no_out_queues; ++i)
@@ -4891,7 +4884,7 @@ static int qeth_qdio_establish(struct qeth_card *card)
     init_data.no_output_qs = card->qdio.no_out_queues;
     init_data.input_handler = qeth_qdio_input_handler;
     init_data.output_handler = qeth_qdio_output_handler;
-    init_data.queue_start_poll_array = queue_start_poll;
+    init_data.irq_poll = qeth_qdio_poll;
     init_data.int_parm = (unsigned long) card;
     init_data.input_sbal_addr_array = in_sbal_ptrs;
     init_data.output_sbal_addr_array = out_sbal_ptrs;
@@ -4924,8 +4917,6 @@ static int qeth_qdio_establish(struct qeth_card *card)
     }
 out:
     kfree(out_sbal_ptrs);
-out_free_queue_start_poll:
-    kfree(queue_start_poll);
 out_free_in_sbals:
     kfree(in_sbal_ptrs);
 out_free_qib_param:
@@ -5332,14 +5323,13 @@ static inline int qeth_is_last_sbale(struct qdio_buffer_element *sbale)
 }

 static int qeth_extract_skb(struct qeth_card *card,
-                struct qeth_qdio_buffer *qethbuffer,
-                struct qdio_buffer_element **__element,
+                struct qeth_qdio_buffer *qethbuffer, u8 *element_no,
                 int *__offset)
 {
-    struct qdio_buffer_element *element = *__element;
     struct qeth_priv *priv = netdev_priv(card->dev);
     struct qdio_buffer *buffer = qethbuffer->buffer;
     struct napi_struct *napi = &card->napi;
+    struct qdio_buffer_element *element;
     unsigned int linear_len = 0;
     bool uses_frags = false;
     int offset = *__offset;
@@ -5349,6 +5339,8 @@ static int qeth_extract_skb(struct qeth_card *card,
     struct sk_buff *skb;
     int skb_len = 0;

+    element = &buffer->element[*element_no];
+
 next_packet:
     /* qeth_hdr must not cross element boundaries */
     while (element->length < offset + sizeof(struct qeth_hdr)) {
@@ -5504,22 +5496,20 @@ static int qeth_extract_skb(struct qeth_card *card,
     if (!skb)
         goto next_packet;

-    *__element = element;
+    *element_no = element - &buffer->element[0];
     *__offset = offset;

     qeth_receive_skb(card, skb, hdr, uses_frags);
     return 0;
 }

-static int qeth_extract_skbs(struct qeth_card *card, int budget,
-                 struct qeth_qdio_buffer *buf, bool *done)
+static unsigned int qeth_extract_skbs(struct qeth_card *card, int budget,
+                      struct qeth_qdio_buffer *buf, bool *done)
 {
-    int work_done = 0;
-
-    *done = false;
+    unsigned int work_done = 0;

     while (budget) {
-        if (qeth_extract_skb(card, buf, &card->rx.b_element,
+        if (qeth_extract_skb(card, buf, &card->rx.buf_element,
                      &card->rx.e_offset)) {
             *done = true;
             break;
@@ -5532,15 +5522,16 @@ static int qeth_extract_skbs(struct qeth_card *card, int budget,
     return work_done;
 }

-int qeth_poll(struct napi_struct *napi, int budget)
+static unsigned int qeth_rx_poll(struct qeth_card *card, int budget)
 {
-    struct qeth_card *card = container_of(napi, struct qeth_card, napi);
-    int work_done = 0;
-    struct qeth_qdio_buffer *buffer;
-    int new_budget = budget;
-    bool done;
+    unsigned int work_done = 0;
+
+    while (budget > 0) {
+        struct qeth_qdio_buffer *buffer;
+        unsigned int skbs_done = 0;
+        bool done = false;

-    while (1) {
+        /* Fetch completed RX buffers: */
         if (!card->rx.b_count) {
             card->rx.qdio_err = 0;
             card->rx.b_count = qdio_get_next_buffers(
@@ -5550,50 +5541,73 @@ int qeth_poll(struct napi_struct *napi, int budget)
                 card->rx.b_count = 0;
                 break;
             }
-            card->rx.b_element =
-                &card->qdio.in_q->bufs[card->rx.b_index]
-                .buffer->element[0];
-            card->rx.e_offset = 0;
         }

-        while (card->rx.b_count) {
-            buffer = &card->qdio.in_q->bufs[card->rx.b_index];
-            if (!(card->rx.qdio_err &&
-                qeth_check_qdio_errors(card, buffer->buffer,
-                card->rx.qdio_err, "qinerr")))
-                work_done += qeth_extract_skbs(card, new_budget,
-                                   buffer, &done);
-            else
-                done = true;
-
-            if (done) {
-                QETH_CARD_STAT_INC(card, rx_bufs);
-                qeth_put_buffer_pool_entry(card,
-                               buffer->pool_entry);
-                qeth_queue_input_buffer(card, card->rx.b_index);
-                card->rx.b_count--;
-                if (card->rx.b_count) {
-                    card->rx.b_index =
-                        QDIO_BUFNR(card->rx.b_index + 1);
-                    card->rx.b_element =
-                        &card->qdio.in_q
-                        ->bufs[card->rx.b_index]
-                        .buffer->element[0];
-                    card->rx.e_offset = 0;
-                }
-            }
-
-            if (work_done >= budget)
-                goto out;
-            else
-                new_budget = budget - work_done;
-        }
+        /* Process one completed RX buffer: */
+        buffer = &card->qdio.in_q->bufs[card->rx.b_index];
+        if (!(card->rx.qdio_err &&
+              qeth_check_qdio_errors(card, buffer->buffer,
+                         card->rx.qdio_err, "qinerr")))
+            skbs_done = qeth_extract_skbs(card, budget, buffer,
+                              &done);
+        else
+            done = true;
+
+        work_done += skbs_done;
+        budget -= skbs_done;
+
+        if (done) {
+            QETH_CARD_STAT_INC(card, rx_bufs);
+            qeth_put_buffer_pool_entry(card, buffer->pool_entry);
+            qeth_queue_input_buffer(card, card->rx.b_index);
+            card->rx.b_count--;
+
+            /* Step forward to next buffer: */
+            card->rx.b_index = QDIO_BUFNR(card->rx.b_index + 1);
+            card->rx.buf_element = 0;
+            card->rx.e_offset = 0;
+        }
     }

+    return work_done;
+}
+
+static void qeth_cq_poll(struct qeth_card *card)
+{
+    unsigned int work_done = 0;
+
+    while (work_done < QDIO_MAX_BUFFERS_PER_Q) {
+        unsigned int start, error;
+        int completed;
+
+        completed = qdio_inspect_queue(CARD_DDEV(card), 1, true, &start,
+                           &error);
+        if (completed <= 0)
+            return;
+
+        qeth_qdio_cq_handler(card, error, 1, start, completed);
+        work_done += completed;
+    }
+}
+
+int qeth_poll(struct napi_struct *napi, int budget)
+{
+    struct qeth_card *card = container_of(napi, struct qeth_card, napi);
+    unsigned int work_done;
+
+    work_done = qeth_rx_poll(card, budget);
+
+    if (card->options.cq == QETH_CQ_ENABLED)
+        qeth_cq_poll(card);
+
+    /* Exhausted the RX budget. Keep IRQ disabled, we get called again. */
+    if (budget && work_done >= budget)
+        return work_done;
+
     if (napi_complete_done(napi, work_done) &&
-        qdio_start_irq(CARD_DDEV(card), 0))
+        qdio_start_irq(CARD_DDEV(card)))
         napi_schedule(napi);
-out:
+
     return work_done;
 }
 EXPORT_SYMBOL_GPL(qeth_poll);
@@ -5667,7 +5681,7 @@ static int qeth_tx_poll(struct napi_struct *napi, int budget)
         if (completed <= 0) {
             /* Ensure we see TX completion for pending work: */
             if (napi_complete_done(napi, 0))
-                qeth_tx_arm_timer(queue);
+                qeth_tx_arm_timer(queue, QETH_TX_TIMER_USECS);
             return 0;
         }

@@ -5676,7 +5690,7 @@ static int qeth_tx_poll(struct napi_struct *napi, int budget)
         unsigned int bidx = QDIO_BUFNR(i);

         buffer = queue->bufs[bidx];
-        packets += skb_queue_len(&buffer->skb_list);
+        packets += buffer->frames;
         bytes += buffer->bytes;

         qeth_handle_send_error(card, buffer, error);
@@ -6754,7 +6768,7 @@ int qeth_stop(struct net_device *dev)
     }

     napi_disable(&card->napi);
-    qdio_stop_irq(CARD_DDEV(card), 0);
+    qdio_stop_irq(CARD_DDEV(card));

     return 0;
 }

@@ -211,16 +211,16 @@ static ssize_t qeth_dev_prioqing_store(struct device *dev,
         card->qdio.do_prio_queueing = QETH_PRIO_Q_ING_VLAN;
         card->qdio.default_out_queue = QETH_DEFAULT_QUEUE;
     } else if (sysfs_streq(buf, "no_prio_queueing:0")) {
-        card->qdio.do_prio_queueing = QETH_NO_PRIO_QUEUEING;
+        card->qdio.do_prio_queueing = QETH_PRIO_Q_ING_FIXED;
         card->qdio.default_out_queue = 0;
     } else if (sysfs_streq(buf, "no_prio_queueing:1")) {
-        card->qdio.do_prio_queueing = QETH_NO_PRIO_QUEUEING;
+        card->qdio.do_prio_queueing = QETH_PRIO_Q_ING_FIXED;
         card->qdio.default_out_queue = 1;
     } else if (sysfs_streq(buf, "no_prio_queueing:2")) {
-        card->qdio.do_prio_queueing = QETH_NO_PRIO_QUEUEING;
+        card->qdio.do_prio_queueing = QETH_PRIO_Q_ING_FIXED;
         card->qdio.default_out_queue = 2;
     } else if (sysfs_streq(buf, "no_prio_queueing:3")) {
-        card->qdio.do_prio_queueing = QETH_NO_PRIO_QUEUEING;
+        card->qdio.do_prio_queueing = QETH_PRIO_Q_ING_FIXED;
         card->qdio.default_out_queue = 3;
     } else if (sysfs_streq(buf, "no_prio_queueing")) {
         card->qdio.do_prio_queueing = QETH_NO_PRIO_QUEUEING;

@@ -39,6 +39,8 @@ static const struct qeth_stats txq_stats[] = {
     QETH_TXQ_STAT("TSO bytes", tso_bytes),
     QETH_TXQ_STAT("Packing mode switches", packing_mode_switch),
     QETH_TXQ_STAT("Queue stopped", stopped),
+    QETH_TXQ_STAT("Doorbell", doorbell),
+    QETH_TXQ_STAT("IRQ for frames", coal_frames),
     QETH_TXQ_STAT("Completion yield", completion_yield),
     QETH_TXQ_STAT("Completion timer", completion_timer),
 };
@@ -108,6 +110,38 @@ static void qeth_get_ethtool_stats(struct net_device *dev,
                txq_stats, TXQ_STATS_LEN);
 }

+static void __qeth_set_coalesce(struct net_device *dev,
+                struct qeth_qdio_out_q *queue,
+                struct ethtool_coalesce *coal)
+{
+    WRITE_ONCE(queue->coalesce_usecs, coal->tx_coalesce_usecs);
+    WRITE_ONCE(queue->max_coalesced_frames, coal->tx_max_coalesced_frames);
+
+    if (coal->tx_coalesce_usecs &&
+        netif_running(dev) &&
+        !qeth_out_queue_is_empty(queue))
+        qeth_tx_arm_timer(queue, coal->tx_coalesce_usecs);
+}
+
+static int qeth_set_coalesce(struct net_device *dev,
+                 struct ethtool_coalesce *coal)
+{
+    struct qeth_card *card = dev->ml_priv;
+    struct qeth_qdio_out_q *queue;
+    unsigned int i;
+
+    if (!IS_IQD(card))
+        return -EOPNOTSUPP;
+
+    if (!coal->tx_coalesce_usecs && !coal->tx_max_coalesced_frames)
+        return -EINVAL;
+
+    qeth_for_each_output_queue(card, queue, i)
+        __qeth_set_coalesce(dev, queue, coal);
+
+    return 0;
+}
+
 static void qeth_get_ringparam(struct net_device *dev,
                    struct ethtool_ringparam *param)
 {
@@ -243,6 +277,43 @@ static int qeth_set_tunable(struct net_device *dev,
     }
 }

+static int qeth_get_per_queue_coalesce(struct net_device *dev, u32 __queue,
+                       struct ethtool_coalesce *coal)
+{
+    struct qeth_card *card = dev->ml_priv;
+    struct qeth_qdio_out_q *queue;
+
+    if (!IS_IQD(card))
+        return -EOPNOTSUPP;
+
+    if (__queue >= card->qdio.no_out_queues)
+        return -EINVAL;
+
+    queue = card->qdio.out_qs[__queue];
+
+    coal->tx_coalesce_usecs = queue->coalesce_usecs;
+    coal->tx_max_coalesced_frames = queue->max_coalesced_frames;
+    return 0;
+}
+
+static int qeth_set_per_queue_coalesce(struct net_device *dev, u32 queue,
+                       struct ethtool_coalesce *coal)
+{
+    struct qeth_card *card = dev->ml_priv;
+
+    if (!IS_IQD(card))
+        return -EOPNOTSUPP;
+
+    if (queue >= card->qdio.no_out_queues)
+        return -EINVAL;
+
+    if (!coal->tx_coalesce_usecs && !coal->tx_max_coalesced_frames)
+        return -EINVAL;
+
+    __qeth_set_coalesce(dev, card->qdio.out_qs[queue], coal);
+    return 0;
+}
+
 /* Helper function to fill 'advertising' and 'supported' which are the same. */
 /* Autoneg and full-duplex are supported and advertised unconditionally. */
 /* Always advertise and support all speeds up to specified, and only one */
@@ -442,7 +513,10 @@ static int qeth_get_link_ksettings(struct net_device *netdev,
 }

 const struct ethtool_ops qeth_ethtool_ops = {
+    .supported_coalesce_params = ETHTOOL_COALESCE_TX_USECS |
+                     ETHTOOL_COALESCE_TX_MAX_FRAMES,
     .get_link = ethtool_op_get_link,
+    .set_coalesce = qeth_set_coalesce,
     .get_ringparam = qeth_get_ringparam,
     .get_strings = qeth_get_strings,
     .get_ethtool_stats = qeth_get_ethtool_stats,
@@ -453,6 +527,8 @@ const struct ethtool_ops qeth_ethtool_ops = {
     .get_ts_info = qeth_get_ts_info,
     .get_tunable = qeth_get_tunable,
     .set_tunable = qeth_set_tunable,
+    .get_per_queue_coalesce = qeth_get_per_queue_coalesce,
+    .set_per_queue_coalesce = qeth_set_per_queue_coalesce,
     .get_link_ksettings = qeth_get_link_ksettings,
 };

@@ -52,11 +52,11 @@ static int qeth_l2_setdelmac_makerc(struct qeth_card *card, u16 retcode)
         break;
     case IPA_RC_L2_DUP_MAC:
     case IPA_RC_L2_DUP_LAYER3_MAC:
-        rc = -EEXIST;
+        rc = -EADDRINUSE;
         break;
     case IPA_RC_L2_MAC_NOT_AUTH_BY_HYP:
     case IPA_RC_L2_MAC_NOT_AUTH_BY_ADP:
-        rc = -EPERM;
+        rc = -EADDRNOTAVAIL;
         break;
     case IPA_RC_L2_MAC_NOT_FOUND:
         rc = -ENOENT;
@@ -105,11 +105,11 @@ static int qeth_l2_send_setmac(struct qeth_card *card, __u8 *mac)
             "MAC address %pM successfully registered\n", mac);
     } else {
         switch (rc) {
-        case -EEXIST:
+        case -EADDRINUSE:
             dev_warn(&card->gdev->dev,
                 "MAC address %pM already exists\n", mac);
             break;
-        case -EPERM:
+        case -EADDRNOTAVAIL:
             dev_warn(&card->gdev->dev,
                 "MAC address %pM is not authorized\n", mac);
             break;
@@ -126,7 +126,7 @@ static int qeth_l2_write_mac(struct qeth_card *card, u8 *mac)

     QETH_CARD_TEXT(card, 2, "L2Wmac");
     rc = qeth_l2_send_setdelmac(card, mac, cmd);
-    if (rc == -EEXIST)
+    if (rc == -EADDRINUSE)
         QETH_DBF_MESSAGE(2, "MAC already registered on device %x\n",
                  CARD_DEVID(card));
     else if (rc)
@@ -291,7 +291,6 @@ static void qeth_l2_stop_card(struct qeth_card *card)
     qeth_qdio_clear_card(card, 0);
     qeth_clear_working_pool_list(card);
     flush_workqueue(card->event_wq);
-    card->info.mac_bits &= ~QETH_LAYER2_MAC_REGISTERED;
     card->info.promisc_mode = 0;
 }

@@ -337,14 +336,16 @@ static void qeth_l2_register_dev_addr(struct qeth_card *card)
         qeth_l2_request_initial_mac(card);

     if (!IS_OSN(card) && !qeth_l2_send_setmac(card, card->dev->dev_addr))
-        card->info.mac_bits |= QETH_LAYER2_MAC_REGISTERED;
+        card->info.dev_addr_is_registered = 1;
+    else
+        card->info.dev_addr_is_registered = 0;
 }

 static int qeth_l2_validate_addr(struct net_device *dev)
 {
     struct qeth_card *card = dev->ml_priv;

-    if (card->info.mac_bits & QETH_LAYER2_MAC_REGISTERED)
+    if (card->info.dev_addr_is_registered)
         return eth_validate_addr(dev);

     QETH_CARD_TEXT(card, 4, "nomacadr");
@@ -370,7 +371,7 @@ static int qeth_l2_set_mac_address(struct net_device *dev, void *p)

     /* don't register the same address twice */
     if (ether_addr_equal_64bits(dev->dev_addr, addr->sa_data) &&
-        (card->info.mac_bits & QETH_LAYER2_MAC_REGISTERED))
+        card->info.dev_addr_is_registered)
         return 0;

     /* add the new address, switch over, drop the old */
@@ -380,9 +381,9 @@ static int qeth_l2_set_mac_address(struct net_device *dev, void *p)
     ether_addr_copy(old_addr, dev->dev_addr);
     ether_addr_copy(dev->dev_addr, addr->sa_data);

-    if (card->info.mac_bits & QETH_LAYER2_MAC_REGISTERED)
+    if (card->info.dev_addr_is_registered)
         qeth_l2_remove_mac(card, old_addr);
-    card->info.mac_bits |= QETH_LAYER2_MAC_REGISTERED;
+    card->info.dev_addr_is_registered = 1;
     return 0;
 }

@@ -949,39 +949,36 @@ static int qeth_l3_get_unique_id_cb(struct qeth_card *card,
                   struct qeth_reply *reply, unsigned long data)
 {
     struct qeth_ipa_cmd *cmd = (struct qeth_ipa_cmd *) data;
+    u16 *uid = reply->param;

     if (cmd->hdr.return_code == 0) {
-        card->info.unique_id = cmd->data.create_destroy_addr.uid;
+        *uid = cmd->data.create_destroy_addr.uid;
         return 0;
     }

-    card->info.unique_id = UNIQUE_ID_IF_CREATE_ADDR_FAILED |
-                   UNIQUE_ID_NOT_BY_CARD;
     dev_warn(&card->gdev->dev, "The network adapter failed to generate a unique ID\n");
     return -EIO;
 }

-static int qeth_l3_get_unique_id(struct qeth_card *card)
+static u16 qeth_l3_get_unique_id(struct qeth_card *card, u16 uid)
 {
-    int rc = 0;
     struct qeth_cmd_buffer *iob;

     QETH_CARD_TEXT(card, 2, "guniqeid");

-    if (!qeth_is_supported(card, IPA_IPV6)) {
-        card->info.unique_id = UNIQUE_ID_IF_CREATE_ADDR_FAILED |
-                       UNIQUE_ID_NOT_BY_CARD;
-        return 0;
-    }
+    if (!qeth_is_supported(card, IPA_IPV6))
+        goto out;

     iob = qeth_ipa_alloc_cmd(card, IPA_CMD_CREATE_ADDR, QETH_PROT_IPV6,
                  IPA_DATA_SIZEOF(create_destroy_addr));
     if (!iob)
-        return -ENOMEM;
+        goto out;

-    __ipa_cmd(iob)->data.create_destroy_addr.uid = card->info.unique_id;
-    rc = qeth_send_ipa_cmd(card, iob, qeth_l3_get_unique_id_cb, NULL);
-    return rc;
+    __ipa_cmd(iob)->data.create_destroy_addr.uid = uid;
+    qeth_send_ipa_cmd(card, iob, qeth_l3_get_unique_id_cb, &uid);
+
+out:
+    return uid;
 }

 static int
@@ -1920,6 +1917,7 @@ static const struct net_device_ops qeth_l3_osa_netdev_ops = {

 static int qeth_l3_setup_netdev(struct qeth_card *card)
 {
+    struct net_device *dev = card->dev;
     unsigned int headroom;
     int rc;

@@ -1937,9 +1935,7 @@ static int qeth_l3_setup_netdev(struct qeth_card *card)
     card->dev->netdev_ops = &qeth_l3_osa_netdev_ops;

     /*IPv6 address autoconfiguration stuff*/
-    qeth_l3_get_unique_id(card);
-    if (!(card->info.unique_id & UNIQUE_ID_NOT_BY_CARD))
-        card->dev->dev_id = card->info.unique_id & 0xffff;
+    dev->dev_id = qeth_l3_get_unique_id(card, dev->dev_id);

     if (!IS_VM_NIC(card)) {
         card->dev->features |= NETIF_F_SG;