Merge 291009f656 ("Merge tag 'pm-5.11-rc8' of git://git.kernel.org/pub/scm/linux/kernel/git/rafael/linux-pm") into android-mainline

Steps to 5.11-final

Signed-off-by: Greg Kroah-Hartman <gregkh@google.com>
Change-Id: I95c90547740667473c99793f400521276cf61623
@@ -37,6 +37,7 @@ Andrew Murray <amurray@thegoodpenguin.co.uk> <amurray@embedded-bits.co.uk>
Andrew Murray <amurray@thegoodpenguin.co.uk> <andrew.murray@arm.com>
Andrew Vasquez <andrew.vasquez@qlogic.com>
Andrey Ryabinin <ryabinin.a.a@gmail.com> <a.ryabinin@samsung.com>
Andrey Ryabinin <ryabinin.a.a@gmail.com> <aryabinin@virtuozzo.com>
Andy Adamson <andros@citi.umich.edu>
Antoine Tenart <atenart@kernel.org> <antoine.tenart@bootlin.com>
Antoine Tenart <atenart@kernel.org> <antoine.tenart@free-electrons.com>

@@ -163,8 +163,7 @@ particular KASAN features.
- ``kasan=off`` or ``=on`` controls whether KASAN is enabled (default: ``on``).

- ``kasan.stacktrace=off`` or ``=on`` disables or enables alloc and free stack
  traces collection (default: ``on`` for ``CONFIG_DEBUG_KERNEL=y``, otherwise
  ``off``).
  traces collection (default: ``on``).

- ``kasan.fault=report`` or ``=panic`` controls whether to only print a KASAN
  report or also panic the kernel (default: ``report``).

@@ -9566,7 +9566,7 @@ F: Documentation/hwmon/k8temp.rst
F: drivers/hwmon/k8temp.c

KASAN
M: Andrey Ryabinin <aryabinin@virtuozzo.com>
M: Andrey Ryabinin <ryabinin.a.a@gmail.com>
R: Alexander Potapenko <glider@google.com>
R: Dmitry Vyukov <dvyukov@google.com>
L: kasan-dev@googlegroups.com

@@ -1833,6 +1833,7 @@ void arch_set_max_freq_ratio(bool turbo_disabled)
	arch_max_freq_ratio = turbo_disabled ? SCHED_CAPACITY_SCALE :
					       arch_turbo_freq_ratio;
}
EXPORT_SYMBOL_GPL(arch_set_max_freq_ratio);

static bool turbo_disabled(void)
{

@@ -495,8 +495,9 @@ acpi_ns_repair_HID(struct acpi_evaluate_info *info,
		  union acpi_operand_object **return_object_ptr)
{
	union acpi_operand_object *return_object = *return_object_ptr;
	char *dest;
	union acpi_operand_object *new_string;
	char *source;
	char *dest;

	ACPI_FUNCTION_NAME(ns_repair_HID);

@@ -517,6 +518,13 @@ acpi_ns_repair_HID(struct acpi_evaluate_info *info,
		return_ACPI_STATUS(AE_OK);
	}

	/* It is simplest to always create a new string object */

	new_string = acpi_ut_create_string_object(return_object->string.length);
	if (!new_string) {
		return_ACPI_STATUS(AE_NO_MEMORY);
	}

	/*
	 * Remove a leading asterisk if present. For some unknown reason, there
	 * are many machines in the field that contains IDs like this.
@@ -526,7 +534,7 @@ acpi_ns_repair_HID(struct acpi_evaluate_info *info,
	source = return_object->string.pointer;
	if (*source == '*') {
		source++;
		return_object->string.length--;
		new_string->string.length--;

		ACPI_DEBUG_PRINT((ACPI_DB_REPAIR,
				  "%s: Removed invalid leading asterisk\n",
@@ -541,11 +549,12 @@ acpi_ns_repair_HID(struct acpi_evaluate_info *info,
	 * "NNNN####" where N is an uppercase letter or decimal digit, and
	 * # is a hex digit.
	 */
	for (dest = return_object->string.pointer; *source; dest++, source++) {
	for (dest = new_string->string.pointer; *source; dest++, source++) {
		*dest = (char)toupper((int)*source);
	}
	return_object->string.pointer[return_object->string.length] = 0;

	acpi_ut_remove_reference(return_object);
	*return_object_ptr = new_string;
	return_ACPI_STATUS(AE_OK);
}

@ -26,6 +26,7 @@
|
|||
#include <linux/uaccess.h>
|
||||
|
||||
#include <acpi/processor.h>
|
||||
#include <acpi/cppc_acpi.h>
|
||||
|
||||
#include <asm/msr.h>
|
||||
#include <asm/processor.h>
|
||||
|
@ -53,6 +54,7 @@ struct acpi_cpufreq_data {
|
|||
unsigned int resume;
|
||||
unsigned int cpu_feature;
|
||||
unsigned int acpi_perf_cpu;
|
||||
unsigned int first_perf_state;
|
||||
cpumask_var_t freqdomain_cpus;
|
||||
void (*cpu_freq_write)(struct acpi_pct_register *reg, u32 val);
|
||||
u32 (*cpu_freq_read)(struct acpi_pct_register *reg);
|
||||
|
@ -221,10 +223,10 @@ static unsigned extract_msr(struct cpufreq_policy *policy, u32 msr)
|
|||
|
||||
perf = to_perf_data(data);
|
||||
|
||||
cpufreq_for_each_entry(pos, policy->freq_table)
|
||||
cpufreq_for_each_entry(pos, policy->freq_table + data->first_perf_state)
|
||||
if (msr == perf->states[pos->driver_data].status)
|
||||
return pos->frequency;
|
||||
return policy->freq_table[0].frequency;
|
||||
return policy->freq_table[data->first_perf_state].frequency;
|
||||
}
|
||||
|
||||
static unsigned extract_freq(struct cpufreq_policy *policy, u32 val)
|
||||
|
@ -363,6 +365,7 @@ static unsigned int get_cur_freq_on_cpu(unsigned int cpu)
|
|||
struct cpufreq_policy *policy;
|
||||
unsigned int freq;
|
||||
unsigned int cached_freq;
|
||||
unsigned int state;
|
||||
|
||||
pr_debug("%s (%d)\n", __func__, cpu);
|
||||
|
||||
|
@ -374,7 +377,11 @@ static unsigned int get_cur_freq_on_cpu(unsigned int cpu)
|
|||
if (unlikely(!data || !policy->freq_table))
|
||||
return 0;
|
||||
|
||||
cached_freq = policy->freq_table[to_perf_data(data)->state].frequency;
|
||||
state = to_perf_data(data)->state;
|
||||
if (state < data->first_perf_state)
|
||||
state = data->first_perf_state;
|
||||
|
||||
cached_freq = policy->freq_table[state].frequency;
|
||||
freq = extract_freq(policy, get_cur_val(cpumask_of(cpu), data));
|
||||
if (freq != cached_freq) {
|
||||
/*
|
||||
|
@ -628,16 +635,54 @@ static int acpi_cpufreq_blacklist(struct cpuinfo_x86 *c)
|
|||
}
|
||||
#endif
|
||||
|
||||
#ifdef CONFIG_ACPI_CPPC_LIB
|
||||
static u64 get_max_boost_ratio(unsigned int cpu)
|
||||
{
|
||||
struct cppc_perf_caps perf_caps;
|
||||
u64 highest_perf, nominal_perf;
|
||||
int ret;
|
||||
|
||||
if (acpi_pstate_strict)
|
||||
return 0;
|
||||
|
||||
ret = cppc_get_perf_caps(cpu, &perf_caps);
|
||||
if (ret) {
|
||||
pr_debug("CPU%d: Unable to get performance capabilities (%d)\n",
|
||||
cpu, ret);
|
||||
return 0;
|
||||
}
|
||||
|
||||
highest_perf = perf_caps.highest_perf;
|
||||
nominal_perf = perf_caps.nominal_perf;
|
||||
|
||||
if (!highest_perf || !nominal_perf) {
|
||||
pr_debug("CPU%d: highest or nominal performance missing\n", cpu);
|
||||
return 0;
|
||||
}
|
||||
|
||||
if (highest_perf < nominal_perf) {
|
||||
pr_debug("CPU%d: nominal performance above highest\n", cpu);
|
||||
return 0;
|
||||
}
|
||||
|
||||
return div_u64(highest_perf << SCHED_CAPACITY_SHIFT, nominal_perf);
|
||||
}
|
||||
#else
|
||||
static inline u64 get_max_boost_ratio(unsigned int cpu) { return 0; }
|
||||
#endif
|
||||
|
||||
static int acpi_cpufreq_cpu_init(struct cpufreq_policy *policy)
|
||||
{
|
||||
unsigned int i;
|
||||
unsigned int valid_states = 0;
|
||||
unsigned int cpu = policy->cpu;
|
||||
struct acpi_cpufreq_data *data;
|
||||
unsigned int result = 0;
|
||||
struct cpuinfo_x86 *c = &cpu_data(policy->cpu);
|
||||
struct acpi_processor_performance *perf;
|
||||
struct cpufreq_frequency_table *freq_table;
|
||||
struct acpi_processor_performance *perf;
|
||||
struct acpi_cpufreq_data *data;
|
||||
unsigned int cpu = policy->cpu;
|
||||
struct cpuinfo_x86 *c = &cpu_data(cpu);
|
||||
unsigned int valid_states = 0;
|
||||
unsigned int result = 0;
|
||||
unsigned int state_count;
|
||||
u64 max_boost_ratio;
|
||||
unsigned int i;
|
||||
#ifdef CONFIG_SMP
|
||||
static int blacklisted;
|
||||
#endif
|
||||
|
@ -750,8 +795,28 @@ static int acpi_cpufreq_cpu_init(struct cpufreq_policy *policy)
|
|||
goto err_unreg;
|
||||
}
|
||||
|
||||
freq_table = kcalloc(perf->state_count + 1, sizeof(*freq_table),
|
||||
GFP_KERNEL);
|
||||
state_count = perf->state_count + 1;
|
||||
|
||||
max_boost_ratio = get_max_boost_ratio(cpu);
|
||||
if (max_boost_ratio) {
|
||||
/*
|
||||
* Make a room for one more entry to represent the highest
|
||||
* available "boost" frequency.
|
||||
*/
|
||||
state_count++;
|
||||
valid_states++;
|
||||
data->first_perf_state = valid_states;
|
||||
} else {
|
||||
/*
|
||||
* If the maximum "boost" frequency is unknown, ask the arch
|
||||
* scale-invariance code to use the "nominal" performance for
|
||||
* CPU utilization scaling so as to prevent the schedutil
|
||||
* governor from selecting inadequate CPU frequencies.
|
||||
*/
|
||||
arch_set_max_freq_ratio(true);
|
||||
}
|
||||
|
||||
freq_table = kcalloc(state_count, sizeof(*freq_table), GFP_KERNEL);
|
||||
if (!freq_table) {
|
||||
result = -ENOMEM;
|
||||
goto err_unreg;
|
||||
|
@ -785,6 +850,30 @@ static int acpi_cpufreq_cpu_init(struct cpufreq_policy *policy)
|
|||
valid_states++;
|
||||
}
|
||||
freq_table[valid_states].frequency = CPUFREQ_TABLE_END;
|
||||
|
||||
if (max_boost_ratio) {
|
||||
unsigned int state = data->first_perf_state;
|
||||
unsigned int freq = freq_table[state].frequency;
|
||||
|
||||
/*
|
||||
* Because the loop above sorts the freq_table entries in the
|
||||
* descending order, freq is the maximum frequency in the table.
|
||||
* Assume that it corresponds to the CPPC nominal frequency and
|
||||
* use it to populate the frequency field of the extra "boost"
|
||||
* frequency entry.
|
||||
*/
|
||||
freq_table[0].frequency = freq * max_boost_ratio >> SCHED_CAPACITY_SHIFT;
|
||||
/*
|
||||
* The purpose of the extra "boost" frequency entry is to make
|
||||
* the rest of cpufreq aware of the real maximum frequency, but
|
||||
* the way to request it is the same as for the first_perf_state
|
||||
* entry that is expected to cover the entire range of "boost"
|
||||
* frequencies of the CPU, so copy the driver_data value from
|
||||
* that entry.
|
||||
*/
|
||||
freq_table[0].driver_data = freq_table[state].driver_data;
|
||||
}
|
||||
|
||||
policy->freq_table = freq_table;
|
||||
perf->state = 0;
|
||||
|
||||
|
@ -858,8 +947,10 @@ static void acpi_cpufreq_cpu_ready(struct cpufreq_policy *policy)
|
|||
{
|
||||
struct acpi_processor_performance *perf = per_cpu_ptr(acpi_perf_data,
|
||||
policy->cpu);
|
||||
struct acpi_cpufreq_data *data = policy->driver_data;
|
||||
unsigned int freq = policy->freq_table[data->first_perf_state].frequency;
|
||||
|
||||
if (perf->states[0].core_frequency * 1000 != policy->cpuinfo.max_freq)
|
||||
if (perf->states[0].core_frequency * 1000 != freq)
|
||||
pr_warn(FW_WARN "P-state 0 is not max freq\n");
|
||||
}
|
||||
|
||||
|
|
|
@@ -1110,7 +1110,6 @@ static void __dma_async_device_channel_unregister(struct dma_device *device,
		 "%s called while %d clients hold a reference\n",
		 __func__, chan->client_count);
	mutex_lock(&dma_list_mutex);
	list_del(&chan->device_node);
	device->chancnt--;
	chan->dev->chan = NULL;
	mutex_unlock(&dma_list_mutex);

@@ -982,11 +982,8 @@ static int dwc_alloc_chan_resources(struct dma_chan *chan)

	dev_vdbg(chan2dev(chan), "%s\n", __func__);

	pm_runtime_get_sync(dw->dma.dev);

	/* ASSERT: channel is idle */
	if (dma_readl(dw, CH_EN) & dwc->mask) {
		pm_runtime_put_sync_suspend(dw->dma.dev);
		dev_dbg(chan2dev(chan), "DMA channel not idle?\n");
		return -EIO;
	}

@@ -1003,7 +1000,6 @@ static int dwc_alloc_chan_resources(struct dma_chan *chan)
	 * We need controller-specific data to set up slave transfers.
	 */
	if (chan->private && !dw_dma_filter(chan, chan->private)) {
		pm_runtime_put_sync_suspend(dw->dma.dev);
		dev_warn(chan2dev(chan), "Wrong controller-specific data\n");
		return -EINVAL;
	}

@@ -1047,8 +1043,6 @@ static void dwc_free_chan_resources(struct dma_chan *chan)
	if (!dw->in_use)
		do_dw_dma_off(dw);

	pm_runtime_put_sync_suspend(dw->dma.dev);

	dev_vdbg(chan2dev(chan), "%s: done\n", __func__);
}

@ -398,17 +398,31 @@ static inline bool idxd_is_enabled(struct idxd_device *idxd)
|
|||
return false;
|
||||
}
|
||||
|
||||
static inline bool idxd_device_is_halted(struct idxd_device *idxd)
|
||||
{
|
||||
union gensts_reg gensts;
|
||||
|
||||
gensts.bits = ioread32(idxd->reg_base + IDXD_GENSTATS_OFFSET);
|
||||
|
||||
return (gensts.state == IDXD_DEVICE_STATE_HALT);
|
||||
}
|
||||
|
||||
/*
|
||||
* This is function is only used for reset during probe and will
|
||||
* poll for completion. Once the device is setup with interrupts,
|
||||
* all commands will be done via interrupt completion.
|
||||
*/
|
||||
void idxd_device_init_reset(struct idxd_device *idxd)
|
||||
int idxd_device_init_reset(struct idxd_device *idxd)
|
||||
{
|
||||
struct device *dev = &idxd->pdev->dev;
|
||||
union idxd_command_reg cmd;
|
||||
unsigned long flags;
|
||||
|
||||
if (idxd_device_is_halted(idxd)) {
|
||||
dev_warn(&idxd->pdev->dev, "Device is HALTED!\n");
|
||||
return -ENXIO;
|
||||
}
|
||||
|
||||
memset(&cmd, 0, sizeof(cmd));
|
||||
cmd.cmd = IDXD_CMD_RESET_DEVICE;
|
||||
dev_dbg(dev, "%s: sending reset for init.\n", __func__);
|
||||
|
@ -419,6 +433,7 @@ void idxd_device_init_reset(struct idxd_device *idxd)
|
|||
IDXD_CMDSTS_ACTIVE)
|
||||
cpu_relax();
|
||||
spin_unlock_irqrestore(&idxd->dev_lock, flags);
|
||||
return 0;
|
||||
}
|
||||
|
||||
static void idxd_cmd_exec(struct idxd_device *idxd, int cmd_code, u32 operand,
|
||||
|
@ -428,6 +443,12 @@ static void idxd_cmd_exec(struct idxd_device *idxd, int cmd_code, u32 operand,
|
|||
DECLARE_COMPLETION_ONSTACK(done);
|
||||
unsigned long flags;
|
||||
|
||||
if (idxd_device_is_halted(idxd)) {
|
||||
dev_warn(&idxd->pdev->dev, "Device is HALTED!\n");
|
||||
*status = IDXD_CMDSTS_HW_ERR;
|
||||
return;
|
||||
}
|
||||
|
||||
memset(&cmd, 0, sizeof(cmd));
|
||||
cmd.cmd = cmd_code;
|
||||
cmd.operand = operand;
|
||||
|
|
|
@@ -205,5 +205,8 @@ int idxd_register_dma_channel(struct idxd_wq *wq)

void idxd_unregister_dma_channel(struct idxd_wq *wq)
{
	dma_async_device_channel_unregister(&wq->idxd->dma_dev, &wq->dma_chan);
	struct dma_chan *chan = &wq->dma_chan;

	dma_async_device_channel_unregister(&wq->idxd->dma_dev, chan);
	list_del(&chan->device_node);
}

@@ -326,7 +326,7 @@ void idxd_mask_msix_vector(struct idxd_device *idxd, int vec_id);
void idxd_unmask_msix_vector(struct idxd_device *idxd, int vec_id);

/* device control */
void idxd_device_init_reset(struct idxd_device *idxd);
int idxd_device_init_reset(struct idxd_device *idxd);
int idxd_device_enable(struct idxd_device *idxd);
int idxd_device_disable(struct idxd_device *idxd);
void idxd_device_reset(struct idxd_device *idxd);

@@ -335,7 +335,10 @@ static int idxd_probe(struct idxd_device *idxd)
	int rc;

	dev_dbg(dev, "%s entered and resetting device\n", __func__);
	idxd_device_init_reset(idxd);
	rc = idxd_device_init_reset(idxd);
	if (rc < 0)
		return rc;

	dev_dbg(dev, "IDXD reset complete\n");

	if (IS_ENABLED(CONFIG_INTEL_IDXD_SVM)) {

@ -111,19 +111,14 @@ irqreturn_t idxd_irq_handler(int vec, void *data)
|
|||
return IRQ_WAKE_THREAD;
|
||||
}
|
||||
|
||||
irqreturn_t idxd_misc_thread(int vec, void *data)
|
||||
static int process_misc_interrupts(struct idxd_device *idxd, u32 cause)
|
||||
{
|
||||
struct idxd_irq_entry *irq_entry = data;
|
||||
struct idxd_device *idxd = irq_entry->idxd;
|
||||
struct device *dev = &idxd->pdev->dev;
|
||||
union gensts_reg gensts;
|
||||
u32 cause, val = 0;
|
||||
u32 val = 0;
|
||||
int i;
|
||||
bool err = false;
|
||||
|
||||
cause = ioread32(idxd->reg_base + IDXD_INTCAUSE_OFFSET);
|
||||
iowrite32(cause, idxd->reg_base + IDXD_INTCAUSE_OFFSET);
|
||||
|
||||
if (cause & IDXD_INTC_ERR) {
|
||||
spin_lock_bh(&idxd->dev_lock);
|
||||
for (i = 0; i < 4; i++)
|
||||
|
@ -181,7 +176,7 @@ irqreturn_t idxd_misc_thread(int vec, void *data)
|
|||
val);
|
||||
|
||||
if (!err)
|
||||
goto out;
|
||||
return 0;
|
||||
|
||||
/*
|
||||
* This case should rarely happen and typically is due to software
|
||||
|
@ -211,37 +206,58 @@ irqreturn_t idxd_misc_thread(int vec, void *data)
|
|||
gensts.reset_type == IDXD_DEVICE_RESET_FLR ?
|
||||
"FLR" : "system reset");
|
||||
spin_unlock_bh(&idxd->dev_lock);
|
||||
return -ENXIO;
|
||||
}
|
||||
}
|
||||
|
||||
out:
|
||||
return 0;
|
||||
}
|
||||
|
||||
irqreturn_t idxd_misc_thread(int vec, void *data)
|
||||
{
|
||||
struct idxd_irq_entry *irq_entry = data;
|
||||
struct idxd_device *idxd = irq_entry->idxd;
|
||||
int rc;
|
||||
u32 cause;
|
||||
|
||||
cause = ioread32(idxd->reg_base + IDXD_INTCAUSE_OFFSET);
|
||||
if (cause)
|
||||
iowrite32(cause, idxd->reg_base + IDXD_INTCAUSE_OFFSET);
|
||||
|
||||
while (cause) {
|
||||
rc = process_misc_interrupts(idxd, cause);
|
||||
if (rc < 0)
|
||||
break;
|
||||
cause = ioread32(idxd->reg_base + IDXD_INTCAUSE_OFFSET);
|
||||
if (cause)
|
||||
iowrite32(cause, idxd->reg_base + IDXD_INTCAUSE_OFFSET);
|
||||
}
|
||||
|
||||
idxd_unmask_msix_vector(idxd, irq_entry->id);
|
||||
return IRQ_HANDLED;
|
||||
}
|
||||
|
||||
static bool process_fault(struct idxd_desc *desc, u64 fault_addr)
|
||||
static inline bool match_fault(struct idxd_desc *desc, u64 fault_addr)
|
||||
{
|
||||
/*
|
||||
* Completion address can be bad as well. Check fault address match for descriptor
|
||||
* and completion address.
|
||||
*/
|
||||
if ((u64)desc->hw == fault_addr ||
|
||||
(u64)desc->completion == fault_addr) {
|
||||
idxd_dma_complete_txd(desc, IDXD_COMPLETE_DEV_FAIL);
|
||||
if ((u64)desc->hw == fault_addr || (u64)desc->completion == fault_addr) {
|
||||
struct idxd_device *idxd = desc->wq->idxd;
|
||||
struct device *dev = &idxd->pdev->dev;
|
||||
|
||||
dev_warn(dev, "desc with fault address: %#llx\n", fault_addr);
|
||||
return true;
|
||||
}
|
||||
|
||||
return false;
|
||||
}
|
||||
|
||||
static bool complete_desc(struct idxd_desc *desc)
|
||||
static inline void complete_desc(struct idxd_desc *desc, enum idxd_complete_type reason)
|
||||
{
|
||||
if (desc->completion->status) {
|
||||
idxd_dma_complete_txd(desc, IDXD_COMPLETE_NORMAL);
|
||||
return true;
|
||||
}
|
||||
|
||||
return false;
|
||||
idxd_dma_complete_txd(desc, reason);
|
||||
idxd_free_desc(desc->wq, desc);
|
||||
}
|
||||
|
||||
static int irq_process_pending_llist(struct idxd_irq_entry *irq_entry,
|
||||
|
@ -251,25 +267,25 @@ static int irq_process_pending_llist(struct idxd_irq_entry *irq_entry,
|
|||
struct idxd_desc *desc, *t;
|
||||
struct llist_node *head;
|
||||
int queued = 0;
|
||||
bool completed = false;
|
||||
unsigned long flags;
|
||||
enum idxd_complete_type reason;
|
||||
|
||||
*processed = 0;
|
||||
head = llist_del_all(&irq_entry->pending_llist);
|
||||
if (!head)
|
||||
goto out;
|
||||
|
||||
llist_for_each_entry_safe(desc, t, head, llnode) {
|
||||
if (wtype == IRQ_WORK_NORMAL)
|
||||
completed = complete_desc(desc);
|
||||
else if (wtype == IRQ_WORK_PROCESS_FAULT)
|
||||
completed = process_fault(desc, data);
|
||||
if (wtype == IRQ_WORK_NORMAL)
|
||||
reason = IDXD_COMPLETE_NORMAL;
|
||||
else
|
||||
reason = IDXD_COMPLETE_DEV_FAIL;
|
||||
|
||||
if (completed) {
|
||||
idxd_free_desc(desc->wq, desc);
|
||||
llist_for_each_entry_safe(desc, t, head, llnode) {
|
||||
if (desc->completion->status) {
|
||||
if ((desc->completion->status & DSA_COMP_STATUS_MASK) != DSA_COMP_SUCCESS)
|
||||
match_fault(desc, data);
|
||||
complete_desc(desc, reason);
|
||||
(*processed)++;
|
||||
if (wtype == IRQ_WORK_PROCESS_FAULT)
|
||||
break;
|
||||
} else {
|
||||
spin_lock_irqsave(&irq_entry->list_lock, flags);
|
||||
list_add_tail(&desc->list,
|
||||
|
@ -287,42 +303,46 @@ static int irq_process_work_list(struct idxd_irq_entry *irq_entry,
|
|||
enum irq_work_type wtype,
|
||||
int *processed, u64 data)
|
||||
{
|
||||
struct list_head *node, *next;
|
||||
int queued = 0;
|
||||
bool completed = false;
|
||||
unsigned long flags;
|
||||
LIST_HEAD(flist);
|
||||
struct idxd_desc *desc, *n;
|
||||
enum idxd_complete_type reason;
|
||||
|
||||
*processed = 0;
|
||||
if (wtype == IRQ_WORK_NORMAL)
|
||||
reason = IDXD_COMPLETE_NORMAL;
|
||||
else
|
||||
reason = IDXD_COMPLETE_DEV_FAIL;
|
||||
|
||||
/*
|
||||
* This lock protects list corruption from access of list outside of the irq handler
|
||||
* thread.
|
||||
*/
|
||||
spin_lock_irqsave(&irq_entry->list_lock, flags);
|
||||
if (list_empty(&irq_entry->work_list))
|
||||
goto out;
|
||||
|
||||
list_for_each_safe(node, next, &irq_entry->work_list) {
|
||||
struct idxd_desc *desc =
|
||||
container_of(node, struct idxd_desc, list);
|
||||
|
||||
if (list_empty(&irq_entry->work_list)) {
|
||||
spin_unlock_irqrestore(&irq_entry->list_lock, flags);
|
||||
if (wtype == IRQ_WORK_NORMAL)
|
||||
completed = complete_desc(desc);
|
||||
else if (wtype == IRQ_WORK_PROCESS_FAULT)
|
||||
completed = process_fault(desc, data);
|
||||
return 0;
|
||||
}
|
||||
|
||||
if (completed) {
|
||||
spin_lock_irqsave(&irq_entry->list_lock, flags);
|
||||
list_for_each_entry_safe(desc, n, &irq_entry->work_list, list) {
|
||||
if (desc->completion->status) {
|
||||
list_del(&desc->list);
|
||||
spin_unlock_irqrestore(&irq_entry->list_lock, flags);
|
||||
idxd_free_desc(desc->wq, desc);
|
||||
(*processed)++;
|
||||
if (wtype == IRQ_WORK_PROCESS_FAULT)
|
||||
return queued;
|
||||
list_add_tail(&desc->list, &flist);
|
||||
} else {
|
||||
queued++;
|
||||
}
|
||||
spin_lock_irqsave(&irq_entry->list_lock, flags);
|
||||
}
|
||||
|
||||
out:
|
||||
spin_unlock_irqrestore(&irq_entry->list_lock, flags);
|
||||
|
||||
list_for_each_entry(desc, &flist, list) {
|
||||
if ((desc->completion->status & DSA_COMP_STATUS_MASK) != DSA_COMP_SUCCESS)
|
||||
match_fault(desc, data);
|
||||
complete_desc(desc, reason);
|
||||
}
|
||||
|
||||
return queued;
|
||||
}
|
||||
|
||||
|
|
|
@@ -2401,7 +2401,8 @@ static int bcdma_alloc_chan_resources(struct dma_chan *chan)
			dev_err(ud->ddev.dev,
				"Descriptor pool allocation failed\n");
			uc->use_dma_pool = false;
			return -ENOMEM;
			ret = -ENOMEM;
			goto err_res_free;
		}

		uc->use_dma_pool = true;

@@ -777,7 +777,7 @@ static int i3c_hci_remove(struct platform_device *pdev)
	return 0;
}

static const struct __maybe_unused of_device_id i3c_hci_of_match[] = {
static const __maybe_unused struct of_device_id i3c_hci_of_match[] = {
	{ .compatible = "mipi-i3c-hci", },
	{},
};

@@ -233,9 +233,24 @@ static void felix_phylink_mac_link_down(struct dsa_switch *ds, int port,
{
	struct ocelot *ocelot = ds->priv;
	struct ocelot_port *ocelot_port = ocelot->ports[port];
	int err;

	ocelot_port_rmwl(ocelot_port, 0, DEV_MAC_ENA_CFG_RX_ENA,
			 DEV_MAC_ENA_CFG);

	ocelot_port_writel(ocelot_port, 0, DEV_MAC_ENA_CFG);
	ocelot_fields_write(ocelot, port, QSYS_SWITCH_PORT_MODE_PORT_ENA, 0);

	err = ocelot_port_flush(ocelot, port);
	if (err)
		dev_err(ocelot->dev, "failed to flush port %d: %d\n",
			port, err);

	/* Put the port in reset. */
	ocelot_port_writel(ocelot_port,
			   DEV_CLOCK_CFG_MAC_TX_RST |
			   DEV_CLOCK_CFG_MAC_RX_RST |
			   DEV_CLOCK_CFG_LINK_SPEED(OCELOT_SPEED_1000),
			   DEV_CLOCK_CFG);
}

static void felix_phylink_mac_link_up(struct dsa_switch *ds, int port,

@@ -404,6 +404,7 @@ static int ena_xdp_execute(struct ena_ring *rx_ring, struct xdp_buff *xdp)
		if (unlikely(!xdpf)) {
			trace_xdp_exception(rx_ring->netdev, xdp_prog, verdict);
			xdp_stat = &rx_ring->rx_stats.xdp_aborted;
			verdict = XDP_ABORTED;
			break;
		}

@@ -424,7 +425,10 @@ static int ena_xdp_execute(struct ena_ring *rx_ring, struct xdp_buff *xdp)
			xdp_stat = &rx_ring->rx_stats.xdp_redirect;
			break;
		}
		fallthrough;
		trace_xdp_exception(rx_ring->netdev, xdp_prog, verdict);
		xdp_stat = &rx_ring->rx_stats.xdp_aborted;
		verdict = XDP_ABORTED;
		break;
	case XDP_ABORTED:
		trace_xdp_exception(rx_ring->netdev, xdp_prog, verdict);
		xdp_stat = &rx_ring->rx_stats.xdp_aborted;

@@ -219,6 +219,7 @@ CH_PCI_DEVICE_ID_TABLE_DEFINE_BEGIN
	CH_PCI_ID_TABLE_FENTRY(0x6089), /* Custom T62100-KR */
	CH_PCI_ID_TABLE_FENTRY(0x608a), /* Custom T62100-CR */
	CH_PCI_ID_TABLE_FENTRY(0x608b), /* Custom T6225-CR */
	CH_PCI_ID_TABLE_FENTRY(0x6092), /* Custom T62100-CR-LOM */
CH_PCI_DEVICE_ID_TABLE_DEFINE_END;

#endif /* __T4_PCI_ID_TBL_H__ */

@ -2180,8 +2180,10 @@ static int dpaa_a050385_wa_xdpf(struct dpaa_priv *priv,
|
|||
struct xdp_frame **init_xdpf)
|
||||
{
|
||||
struct xdp_frame *new_xdpf, *xdpf = *init_xdpf;
|
||||
void *new_buff;
|
||||
void *new_buff, *aligned_data;
|
||||
struct page *p;
|
||||
u32 data_shift;
|
||||
int headroom;
|
||||
|
||||
/* Check the data alignment and make sure the headroom is large
|
||||
* enough to store the xdpf backpointer. Use an aligned headroom
|
||||
|
@ -2191,25 +2193,57 @@ static int dpaa_a050385_wa_xdpf(struct dpaa_priv *priv,
|
|||
* byte frame headroom. If the XDP program uses all of it, copy the
|
||||
* data to a new buffer and make room for storing the backpointer.
|
||||
*/
|
||||
if (PTR_IS_ALIGNED(xdpf->data, DPAA_A050385_ALIGN) &&
|
||||
if (PTR_IS_ALIGNED(xdpf->data, DPAA_FD_DATA_ALIGNMENT) &&
|
||||
xdpf->headroom >= priv->tx_headroom) {
|
||||
xdpf->headroom = priv->tx_headroom;
|
||||
return 0;
|
||||
}
|
||||
|
||||
/* Try to move the data inside the buffer just enough to align it and
|
||||
* store the xdpf backpointer. If the available headroom isn't large
|
||||
* enough, resort to allocating a new buffer and copying the data.
|
||||
*/
|
||||
aligned_data = PTR_ALIGN_DOWN(xdpf->data, DPAA_FD_DATA_ALIGNMENT);
|
||||
data_shift = xdpf->data - aligned_data;
|
||||
|
||||
/* The XDP frame's headroom needs to be large enough to accommodate
|
||||
* shifting the data as well as storing the xdpf backpointer.
|
||||
*/
|
||||
if (xdpf->headroom >= data_shift + priv->tx_headroom) {
|
||||
memmove(aligned_data, xdpf->data, xdpf->len);
|
||||
xdpf->data = aligned_data;
|
||||
xdpf->headroom = priv->tx_headroom;
|
||||
return 0;
|
||||
}
|
||||
|
||||
/* The new xdp_frame is stored in the new buffer. Reserve enough space
|
||||
* in the headroom for storing it along with the driver's private
|
||||
* info. The headroom needs to be aligned to DPAA_FD_DATA_ALIGNMENT to
|
||||
* guarantee the data's alignment in the buffer.
|
||||
*/
|
||||
headroom = ALIGN(sizeof(*new_xdpf) + priv->tx_headroom,
|
||||
DPAA_FD_DATA_ALIGNMENT);
|
||||
|
||||
/* Assure the extended headroom and data don't overflow the buffer,
|
||||
* while maintaining the mandatory tailroom.
|
||||
*/
|
||||
if (headroom + xdpf->len > DPAA_BP_RAW_SIZE -
|
||||
SKB_DATA_ALIGN(sizeof(struct skb_shared_info)))
|
||||
return -ENOMEM;
|
||||
|
||||
p = dev_alloc_pages(0);
|
||||
if (unlikely(!p))
|
||||
return -ENOMEM;
|
||||
|
||||
/* Copy the data to the new buffer at a properly aligned offset */
|
||||
new_buff = page_address(p);
|
||||
memcpy(new_buff + priv->tx_headroom, xdpf->data, xdpf->len);
|
||||
memcpy(new_buff + headroom, xdpf->data, xdpf->len);
|
||||
|
||||
/* Create an XDP frame around the new buffer in a similar fashion
|
||||
* to xdp_convert_buff_to_frame.
|
||||
*/
|
||||
new_xdpf = new_buff;
|
||||
new_xdpf->data = new_buff + priv->tx_headroom;
|
||||
new_xdpf->data = new_buff + headroom;
|
||||
new_xdpf->len = xdpf->len;
|
||||
new_xdpf->headroom = priv->tx_headroom;
|
||||
new_xdpf->frame_sz = DPAA_BP_RAW_SIZE;
|
||||
|
|
|
@@ -196,6 +196,8 @@ enum enetc_bdr_type {TX, RX};
#define ENETC_CBS_BW_MASK GENMASK(6, 0)
#define ENETC_PTCCBSR1(n) (0x1114 + (n) * 8) /* n = 0 to 7*/
#define ENETC_RSSHASH_KEY_SIZE 40
#define ENETC_PRSSCAPR 0x1404
#define ENETC_PRSSCAPR_GET_NUM_RSS(val) (BIT((val) & 0xf) * 32)
#define ENETC_PRSSK(n) (0x1410 + (n) * 4) /* n = [0..9] */
#define ENETC_PSIVLANFMR 0x1700
#define ENETC_PSIVLANFMR_VS BIT(0)

@ -996,6 +996,51 @@ static void enetc_phylink_destroy(struct enetc_ndev_priv *priv)
|
|||
phylink_destroy(priv->phylink);
|
||||
}
|
||||
|
||||
/* Initialize the entire shared memory for the flow steering entries
|
||||
* of this port (PF + VFs)
|
||||
*/
|
||||
static int enetc_init_port_rfs_memory(struct enetc_si *si)
|
||||
{
|
||||
struct enetc_cmd_rfse rfse = {0};
|
||||
struct enetc_hw *hw = &si->hw;
|
||||
int num_rfs, i, err = 0;
|
||||
u32 val;
|
||||
|
||||
val = enetc_port_rd(hw, ENETC_PRFSCAPR);
|
||||
num_rfs = ENETC_PRFSCAPR_GET_NUM_RFS(val);
|
||||
|
||||
for (i = 0; i < num_rfs; i++) {
|
||||
err = enetc_set_fs_entry(si, &rfse, i);
|
||||
if (err)
|
||||
break;
|
||||
}
|
||||
|
||||
return err;
|
||||
}
|
||||
|
||||
static int enetc_init_port_rss_memory(struct enetc_si *si)
|
||||
{
|
||||
struct enetc_hw *hw = &si->hw;
|
||||
int num_rss, err;
|
||||
int *rss_table;
|
||||
u32 val;
|
||||
|
||||
val = enetc_port_rd(hw, ENETC_PRSSCAPR);
|
||||
num_rss = ENETC_PRSSCAPR_GET_NUM_RSS(val);
|
||||
if (!num_rss)
|
||||
return 0;
|
||||
|
||||
rss_table = kcalloc(num_rss, sizeof(*rss_table), GFP_KERNEL);
|
||||
if (!rss_table)
|
||||
return -ENOMEM;
|
||||
|
||||
err = enetc_set_rss_table(si, rss_table, num_rss);
|
||||
|
||||
kfree(rss_table);
|
||||
|
||||
return err;
|
||||
}
|
||||
|
||||
static int enetc_pf_probe(struct pci_dev *pdev,
|
||||
const struct pci_device_id *ent)
|
||||
{
|
||||
|
@ -1051,6 +1096,18 @@ static int enetc_pf_probe(struct pci_dev *pdev,
|
|||
goto err_alloc_si_res;
|
||||
}
|
||||
|
||||
err = enetc_init_port_rfs_memory(si);
|
||||
if (err) {
|
||||
dev_err(&pdev->dev, "Failed to initialize RFS memory\n");
|
||||
goto err_init_port_rfs;
|
||||
}
|
||||
|
||||
err = enetc_init_port_rss_memory(si);
|
||||
if (err) {
|
||||
dev_err(&pdev->dev, "Failed to initialize RSS memory\n");
|
||||
goto err_init_port_rss;
|
||||
}
|
||||
|
||||
err = enetc_alloc_msix(priv);
|
||||
if (err) {
|
||||
dev_err(&pdev->dev, "MSIX alloc failed\n");
|
||||
|
@ -1079,6 +1136,8 @@ static int enetc_pf_probe(struct pci_dev *pdev,
|
|||
enetc_mdiobus_destroy(pf);
|
||||
err_mdiobus_create:
|
||||
enetc_free_msix(priv);
|
||||
err_init_port_rss:
|
||||
err_init_port_rfs:
|
||||
err_alloc_msix:
|
||||
enetc_free_si_resources(priv);
|
||||
err_alloc_si_res:
|
||||
|
|
|
@@ -9813,12 +9813,19 @@ int hclge_reset_tqp(struct hnae3_handle *handle, u16 queue_id)

void hclge_reset_vf_queue(struct hclge_vport *vport, u16 queue_id)
{
	struct hnae3_handle *handle = &vport->nic;
	struct hclge_dev *hdev = vport->back;
	int reset_try_times = 0;
	int reset_status;
	u16 queue_gid;
	int ret;

	if (queue_id >= handle->kinfo.num_tqps) {
		dev_warn(&hdev->pdev->dev, "Invalid vf queue id(%u)\n",
			 queue_id);
		return;
	}

	queue_gid = hclge_covert_handle_qid_global(&vport->nic, queue_id);

	ret = hclge_send_reset_tqp_cmd(hdev, queue_gid, true);

@ -158,21 +158,31 @@ static int hclge_get_ring_chain_from_mbx(
|
|||
struct hclge_vport *vport)
|
||||
{
|
||||
struct hnae3_ring_chain_node *cur_chain, *new_chain;
|
||||
struct hclge_dev *hdev = vport->back;
|
||||
int ring_num;
|
||||
int i = 0;
|
||||
int i;
|
||||
|
||||
ring_num = req->msg.ring_num;
|
||||
|
||||
if (ring_num > HCLGE_MBX_MAX_RING_CHAIN_PARAM_NUM)
|
||||
return -ENOMEM;
|
||||
|
||||
for (i = 0; i < ring_num; i++) {
|
||||
if (req->msg.param[i].tqp_index >= vport->nic.kinfo.rss_size) {
|
||||
dev_err(&hdev->pdev->dev, "tqp index(%u) is out of range(0-%u)\n",
|
||||
req->msg.param[i].tqp_index,
|
||||
vport->nic.kinfo.rss_size - 1);
|
||||
return -EINVAL;
|
||||
}
|
||||
}
|
||||
|
||||
hnae3_set_bit(ring_chain->flag, HNAE3_RING_TYPE_B,
|
||||
req->msg.param[i].ring_type);
|
||||
req->msg.param[0].ring_type);
|
||||
ring_chain->tqp_index =
|
||||
hclge_get_queue_id(vport->nic.kinfo.tqp
|
||||
[req->msg.param[i].tqp_index]);
|
||||
[req->msg.param[0].tqp_index]);
|
||||
hnae3_set_field(ring_chain->int_gl_idx, HNAE3_RING_GL_IDX_M,
|
||||
HNAE3_RING_GL_IDX_S, req->msg.param[i].int_gl_index);
|
||||
HNAE3_RING_GL_IDX_S, req->msg.param[0].int_gl_index);
|
||||
|
||||
cur_chain = ring_chain;
|
||||
|
||||
|
@ -597,6 +607,17 @@ static void hclge_get_rss_key(struct hclge_vport *vport,
|
|||
|
||||
index = mbx_req->msg.data[0];
|
||||
|
||||
/* Check the query index of rss_hash_key from VF, make sure no
|
||||
* more than the size of rss_hash_key.
|
||||
*/
|
||||
if (((index + 1) * HCLGE_RSS_MBX_RESP_LEN) >
|
||||
sizeof(vport[0].rss_hash_key)) {
|
||||
dev_warn(&hdev->pdev->dev,
|
||||
"failed to get the rss hash key, the index(%u) invalid !\n",
|
||||
index);
|
||||
return;
|
||||
}
|
||||
|
||||
memcpy(resp_msg->data,
|
||||
&hdev->vport[0].rss_hash_key[index * HCLGE_RSS_MBX_RESP_LEN],
|
||||
HCLGE_RSS_MBX_RESP_LEN);
|
||||
|
|
|
@ -4918,7 +4918,22 @@ static void ibmvnic_handle_crq(union ibmvnic_crq *crq,
|
|||
complete(&adapter->init_done);
|
||||
adapter->init_done_rc = -EIO;
|
||||
}
|
||||
ibmvnic_reset(adapter, VNIC_RESET_FAILOVER);
|
||||
rc = ibmvnic_reset(adapter, VNIC_RESET_FAILOVER);
|
||||
if (rc && rc != -EBUSY) {
|
||||
/* We were unable to schedule the failover
|
||||
* reset either because the adapter was still
|
||||
* probing (eg: during kexec) or we could not
|
||||
* allocate memory. Clear the failover_pending
|
||||
* flag since no one else will. We ignore
|
||||
* EBUSY because it means either FAILOVER reset
|
||||
* is already scheduled or the adapter is
|
||||
* being removed.
|
||||
*/
|
||||
netdev_err(netdev,
|
||||
"Error %ld scheduling failover reset\n",
|
||||
rc);
|
||||
adapter->failover_pending = false;
|
||||
}
|
||||
break;
|
||||
case IBMVNIC_CRQ_INIT_COMPLETE:
|
||||
dev_info(dev, "Partner initialization complete\n");
|
||||
|
|
|
@ -375,6 +375,60 @@ static void ocelot_vlan_init(struct ocelot *ocelot)
|
|||
}
|
||||
}
|
||||
|
||||
static u32 ocelot_read_eq_avail(struct ocelot *ocelot, int port)
|
||||
{
|
||||
return ocelot_read_rix(ocelot, QSYS_SW_STATUS, port);
|
||||
}
|
||||
|
||||
int ocelot_port_flush(struct ocelot *ocelot, int port)
|
||||
{
|
||||
int err, val;
|
||||
|
||||
/* Disable dequeuing from the egress queues */
|
||||
ocelot_rmw_rix(ocelot, QSYS_PORT_MODE_DEQUEUE_DIS,
|
||||
QSYS_PORT_MODE_DEQUEUE_DIS,
|
||||
QSYS_PORT_MODE, port);
|
||||
|
||||
/* Disable flow control */
|
||||
ocelot_fields_write(ocelot, port, SYS_PAUSE_CFG_PAUSE_ENA, 0);
|
||||
|
||||
/* Disable priority flow control */
|
||||
ocelot_fields_write(ocelot, port,
|
||||
QSYS_SWITCH_PORT_MODE_TX_PFC_ENA, 0);
|
||||
|
||||
/* Wait at least the time it takes to receive a frame of maximum length
|
||||
* at the port.
|
||||
* Worst-case delays for 10 kilobyte jumbo frames are:
|
||||
* 8 ms on a 10M port
|
||||
* 800 μs on a 100M port
|
||||
* 80 μs on a 1G port
|
||||
* 32 μs on a 2.5G port
|
||||
*/
|
||||
usleep_range(8000, 10000);
|
||||
|
||||
/* Disable half duplex backpressure. */
|
||||
ocelot_rmw_rix(ocelot, 0, SYS_FRONT_PORT_MODE_HDX_MODE,
|
||||
SYS_FRONT_PORT_MODE, port);
|
||||
|
||||
/* Flush the queues associated with the port. */
|
||||
ocelot_rmw_gix(ocelot, REW_PORT_CFG_FLUSH_ENA, REW_PORT_CFG_FLUSH_ENA,
|
||||
REW_PORT_CFG, port);
|
||||
|
||||
/* Enable dequeuing from the egress queues. */
|
||||
ocelot_rmw_rix(ocelot, 0, QSYS_PORT_MODE_DEQUEUE_DIS, QSYS_PORT_MODE,
|
||||
port);
|
||||
|
||||
/* Wait until flushing is complete. */
|
||||
err = read_poll_timeout(ocelot_read_eq_avail, val, !val,
|
||||
100, 2000000, false, ocelot, port);
|
||||
|
||||
/* Clear flushing again. */
|
||||
ocelot_rmw_gix(ocelot, 0, REW_PORT_CFG_FLUSH_ENA, REW_PORT_CFG, port);
|
||||
|
||||
return err;
|
||||
}
|
||||
EXPORT_SYMBOL(ocelot_port_flush);
|
||||
|
||||
void ocelot_adjust_link(struct ocelot *ocelot, int port,
|
||||
struct phy_device *phydev)
|
||||
{
|
||||
|
|
|
@@ -71,6 +71,14 @@ void ocelot_port_writel(struct ocelot_port *port, u32 val, u32 reg)
}
EXPORT_SYMBOL(ocelot_port_writel);

void ocelot_port_rmwl(struct ocelot_port *port, u32 val, u32 mask, u32 reg)
{
	u32 cur = ocelot_port_readl(port, reg);

	ocelot_port_writel(port, (cur & (~mask)) | val, reg);
}
EXPORT_SYMBOL(ocelot_port_rmwl);

u32 __ocelot_target_read_ix(struct ocelot *ocelot, enum ocelot_target target,
			    u32 reg, u32 offset)
{

@@ -324,7 +324,12 @@ static int tc_setup_cbs(struct stmmac_priv *priv,

		priv->plat->tx_queues_cfg[queue].mode_to_use = MTL_QUEUE_AVB;
	} else if (!qopt->enable) {
		return stmmac_dma_qmode(priv, priv->ioaddr, queue, MTL_QUEUE_DCB);
		ret = stmmac_dma_qmode(priv, priv->ioaddr, queue,
				       MTL_QUEUE_DCB);
		if (ret)
			return ret;

		priv->plat->tx_queues_cfg[queue].mode_to_use = MTL_QUEUE_DCB;
	}

	/* Port Transmit Rate and Speed Divider */

@@ -1262,8 +1262,11 @@ static int netvsc_receive(struct net_device *ndev,
		ret = rndis_filter_receive(ndev, net_device,
					   nvchan, data, buflen);

		if (unlikely(ret != NVSP_STAT_SUCCESS))
		if (unlikely(ret != NVSP_STAT_SUCCESS)) {
			/* Drop incomplete packet */
			nvchan->rsc.cnt = 0;
			status = NVSP_STAT_FAIL;
		}
	}

	enq_receive_complete(ndev, net_device, q_idx,

@@ -509,8 +509,6 @@ static int rndis_filter_receive_data(struct net_device *ndev,
	return ret;

drop:
	/* Drop incomplete packet */
	nvchan->rsc.cnt = 0;
	return NVSP_STAT_FAIL;
}

@@ -1710,6 +1710,7 @@ static int gsi_channel_setup(struct gsi *gsi)
		if (!channel->gsi)
			continue; /* Ignore uninitialized channels */

		ret = -EINVAL;
		dev_err(gsi->dev, "channel %u not supported by hardware\n",
			channel_id - 1);
		channel_id = gsi->channel_count;

@@ -1309,6 +1309,7 @@ static const struct usb_device_id products[] = {
	{QMI_FIXED_INTF(0x1e2d, 0x0082, 5)}, /* Cinterion PHxx,PXxx (2 RmNet) */
	{QMI_FIXED_INTF(0x1e2d, 0x0083, 4)}, /* Cinterion PHxx,PXxx (1 RmNet + USB Audio)*/
	{QMI_QUIRK_SET_DTR(0x1e2d, 0x00b0, 4)}, /* Cinterion CLS8 */
	{QMI_FIXED_INTF(0x1e2d, 0x00b7, 0)}, /* Cinterion MV31 RmNet */
	{QMI_FIXED_INTF(0x413c, 0x81a2, 8)}, /* Dell Wireless 5806 Gobi(TM) 4G LTE Mobile Broadband Card */
	{QMI_FIXED_INTF(0x413c, 0x81a3, 8)}, /* Dell Wireless 5570 HSPA+ (42Mbps) Mobile Broadband Card */
	{QMI_FIXED_INTF(0x413c, 0x81a4, 8)}, /* Dell Wireless 5570e HSPA+ (42Mbps) Mobile Broadband Card */

@@ -169,11 +169,11 @@ static int x25_open(struct net_device *dev)

	result = lapb_register(dev, &cb);
	if (result != LAPB_OK)
		return result;
		return -ENOMEM;

	result = lapb_getparms(dev, &params);
	if (result != LAPB_OK)
		return result;
		return -EINVAL;

	if (state(hdlc)->settings.dce)
		params.mode = params.mode | LAPB_DCE;

@@ -188,7 +188,7 @@ static int x25_open(struct net_device *dev)

	result = lapb_setparms(dev, &params);
	if (result != LAPB_OK)
		return result;
		return -EINVAL;

	return 0;
}

@@ -21,11 +21,9 @@ config ATH9K_BTCOEX_SUPPORT
config ATH9K
	tristate "Atheros 802.11n wireless cards support"
	depends on MAC80211 && HAS_DMA
	select MAC80211_LEDS if LEDS_CLASS=y || LEDS_CLASS=MAC80211
	select ATH9K_HW
	select ATH9K_COMMON
	imply NEW_LEDS
	imply LEDS_CLASS
	imply MAC80211_LEDS
	help
	  This module adds support for wireless adapters based on
	  Atheros IEEE 802.11n AR5008, AR9001 and AR9002 family

@@ -176,11 +174,9 @@ config ATH9K_PCI_NO_EEPROM
config ATH9K_HTC
	tristate "Atheros HTC based wireless cards support"
	depends on USB && MAC80211
	select MAC80211_LEDS if LEDS_CLASS=y || LEDS_CLASS=MAC80211
	select ATH9K_HW
	select ATH9K_COMMON
	imply NEW_LEDS
	imply LEDS_CLASS
	imply MAC80211_LEDS
	help
	  Support for Atheros HTC based cards.
	  Chipsets supported: AR9271

@@ -509,15 +509,17 @@ static void
mt76_add_fragment(struct mt76_dev *dev, struct mt76_queue *q, void *data,
		  int len, bool more)
{
	struct page *page = virt_to_head_page(data);
	int offset = data - page_address(page);
	struct sk_buff *skb = q->rx_head;
	struct skb_shared_info *shinfo = skb_shinfo(skb);

	if (shinfo->nr_frags < ARRAY_SIZE(shinfo->frags)) {
		offset += q->buf_offset;
		struct page *page = virt_to_head_page(data);
		int offset = data - page_address(page) + q->buf_offset;

		skb_add_rx_frag(skb, shinfo->nr_frags, page, offset, len,
				q->buf_size);
	} else {
		skb_free_frag(data);
	}

	if (more)

@@ -38,10 +38,15 @@ static bool xenvif_rx_ring_slots_available(struct xenvif_queue *queue)
	RING_IDX prod, cons;
	struct sk_buff *skb;
	int needed;
	unsigned long flags;

	spin_lock_irqsave(&queue->rx_queue.lock, flags);

	skb = skb_peek(&queue->rx_queue);
	if (!skb)
	if (!skb) {
		spin_unlock_irqrestore(&queue->rx_queue.lock, flags);
		return false;
	}

	needed = DIV_ROUND_UP(skb->len, XEN_PAGE_SIZE);
	if (skb_is_gso(skb))

@@ -49,6 +54,8 @@ static bool xenvif_rx_ring_slots_available(struct xenvif_queue *queue)
	if (skb->sw_hash)
		needed++;

	spin_unlock_irqrestore(&queue->rx_queue.lock, flags);

	do {
		prod = queue->rx.sring->req_prod;
		cons = queue->rx.req_cons;

@@ -204,7 +204,7 @@ config TMPFS_XATTR

config TMPFS_INODE64
	bool "Use 64-bit ino_t by default in tmpfs"
	depends on TMPFS && 64BIT
	depends on TMPFS && 64BIT && !(S390 || ALPHA)
	default n
	help
	  tmpfs has historically used only inode numbers as wide as an unsigned

@@ -141,6 +141,7 @@ const struct file_operations nilfs_file_operations = {
	/* .release = nilfs_release_file, */
	.fsync = nilfs_sync_file,
	.splice_read = generic_file_splice_read,
	.splice_write = iter_file_splice_write,
};

const struct inode_operations nilfs_file_inode_operations = {

@@ -196,9 +196,15 @@ int squashfs_read_data(struct super_block *sb, u64 index, int length,
		length = SQUASHFS_COMPRESSED_SIZE(length);
		index += 2;

		TRACE("Block @ 0x%llx, %scompressed size %d\n", index,
		TRACE("Block @ 0x%llx, %scompressed size %d\n", index - 2,
		      compressed ? "" : "un", length);
	}
	if (length < 0 || length > output->length ||
	    (index + length) > msblk->bytes_used) {
		res = -EIO;
		goto out;
	}

	if (next_index)
		*next_index = index + length;

@ -41,12 +41,17 @@ static long long squashfs_inode_lookup(struct super_block *sb, int ino_num)
|
|||
struct squashfs_sb_info *msblk = sb->s_fs_info;
|
||||
int blk = SQUASHFS_LOOKUP_BLOCK(ino_num - 1);
|
||||
int offset = SQUASHFS_LOOKUP_BLOCK_OFFSET(ino_num - 1);
|
||||
u64 start = le64_to_cpu(msblk->inode_lookup_table[blk]);
|
||||
u64 start;
|
||||
__le64 ino;
|
||||
int err;
|
||||
|
||||
TRACE("Entered squashfs_inode_lookup, inode_number = %d\n", ino_num);
|
||||
|
||||
if (ino_num == 0 || (ino_num - 1) >= msblk->inodes)
|
||||
return -EINVAL;
|
||||
|
||||
start = le64_to_cpu(msblk->inode_lookup_table[blk]);
|
||||
|
||||
err = squashfs_read_metadata(sb, &ino, &start, &offset, sizeof(ino));
|
||||
if (err < 0)
|
||||
return err;
|
||||
|
@ -111,7 +116,10 @@ __le64 *squashfs_read_inode_lookup_table(struct super_block *sb,
|
|||
u64 lookup_table_start, u64 next_table, unsigned int inodes)
|
||||
{
|
||||
unsigned int length = SQUASHFS_LOOKUP_BLOCK_BYTES(inodes);
|
||||
unsigned int indexes = SQUASHFS_LOOKUP_BLOCKS(inodes);
|
||||
int n;
|
||||
__le64 *table;
|
||||
u64 start, end;
|
||||
|
||||
TRACE("In read_inode_lookup_table, length %d\n", length);
|
||||
|
||||
|
@ -121,20 +129,37 @@ __le64 *squashfs_read_inode_lookup_table(struct super_block *sb,
|
|||
if (inodes == 0)
|
||||
return ERR_PTR(-EINVAL);
|
||||
|
||||
/* length bytes should not extend into the next table - this check
|
||||
* also traps instances where lookup_table_start is incorrectly larger
|
||||
* than the next table start
|
||||
/*
|
||||
* The computed size of the lookup table (length bytes) should exactly
|
||||
* match the table start and end points
|
||||
*/
|
||||
if (lookup_table_start + length > next_table)
|
||||
if (length != (next_table - lookup_table_start))
|
||||
return ERR_PTR(-EINVAL);
|
||||
|
||||
table = squashfs_read_table(sb, lookup_table_start, length);
|
||||
if (IS_ERR(table))
|
||||
return table;
|
||||
|
||||
/*
|
||||
* table[0] points to the first inode lookup table metadata block,
|
||||
* this should be less than lookup_table_start
|
||||
* table0], table[1], ... table[indexes - 1] store the locations
|
||||
* of the compressed inode lookup blocks. Each entry should be
|
||||
* less than the next (i.e. table[0] < table[1]), and the difference
|
||||
* between them should be SQUASHFS_METADATA_SIZE or less.
|
||||
* table[indexes - 1] should be less than lookup_table_start, and
|
||||
* again the difference should be SQUASHFS_METADATA_SIZE or less
|
||||
*/
|
||||
if (!IS_ERR(table) && le64_to_cpu(table[0]) >= lookup_table_start) {
|
||||
for (n = 0; n < (indexes - 1); n++) {
|
||||
start = le64_to_cpu(table[n]);
|
||||
end = le64_to_cpu(table[n + 1]);
|
||||
|
||||
if (start >= end || (end - start) > SQUASHFS_METADATA_SIZE) {
|
||||
kfree(table);
|
||||
return ERR_PTR(-EINVAL);
|
||||
}
|
||||
}
|
||||
|
||||
start = le64_to_cpu(table[indexes - 1]);
|
||||
if (start >= lookup_table_start || (lookup_table_start - start) > SQUASHFS_METADATA_SIZE) {
|
||||
kfree(table);
|
||||
return ERR_PTR(-EINVAL);
|
||||
}
|
||||
|
|
|
@ -35,10 +35,15 @@ int squashfs_get_id(struct super_block *sb, unsigned int index,
|
|||
struct squashfs_sb_info *msblk = sb->s_fs_info;
|
||||
int block = SQUASHFS_ID_BLOCK(index);
|
||||
int offset = SQUASHFS_ID_BLOCK_OFFSET(index);
|
||||
u64 start_block = le64_to_cpu(msblk->id_table[block]);
|
||||
u64 start_block;
|
||||
__le32 disk_id;
|
||||
int err;
|
||||
|
||||
if (index >= msblk->ids)
|
||||
return -EINVAL;
|
||||
|
||||
start_block = le64_to_cpu(msblk->id_table[block]);
|
||||
|
||||
err = squashfs_read_metadata(sb, &disk_id, &start_block, &offset,
|
||||
sizeof(disk_id));
|
||||
if (err < 0)
|
||||
|
@ -56,7 +61,10 @@ __le64 *squashfs_read_id_index_table(struct super_block *sb,
|
|||
u64 id_table_start, u64 next_table, unsigned short no_ids)
|
||||
{
|
||||
unsigned int length = SQUASHFS_ID_BLOCK_BYTES(no_ids);
|
||||
unsigned int indexes = SQUASHFS_ID_BLOCKS(no_ids);
|
||||
int n;
|
||||
__le64 *table;
|
||||
u64 start, end;
|
||||
|
||||
TRACE("In read_id_index_table, length %d\n", length);
|
||||
|
||||
|
@ -67,20 +75,36 @@ __le64 *squashfs_read_id_index_table(struct super_block *sb,
|
|||
return ERR_PTR(-EINVAL);
|
||||
|
||||
/*
|
||||
* length bytes should not extend into the next table - this check
|
||||
* also traps instances where id_table_start is incorrectly larger
|
||||
* than the next table start
|
||||
* The computed size of the index table (length bytes) should exactly
|
||||
* match the table start and end points
|
||||
*/
|
||||
if (id_table_start + length > next_table)
|
||||
if (length != (next_table - id_table_start))
|
||||
return ERR_PTR(-EINVAL);
|
||||
|
||||
table = squashfs_read_table(sb, id_table_start, length);
|
||||
if (IS_ERR(table))
|
||||
return table;
|
||||
|
||||
/*
|
||||
* table[0] points to the first id lookup table metadata block, this
|
||||
* should be less than id_table_start
|
||||
* table[0], table[1], ... table[indexes - 1] store the locations
|
||||
* of the compressed id blocks. Each entry should be less than
|
||||
* the next (i.e. table[0] < table[1]), and the difference between them
|
||||
* should be SQUASHFS_METADATA_SIZE or less. table[indexes - 1]
|
||||
* should be less than id_table_start, and again the difference
|
||||
* should be SQUASHFS_METADATA_SIZE or less
|
||||
*/
|
||||
if (!IS_ERR(table) && le64_to_cpu(table[0]) >= id_table_start) {
|
||||
for (n = 0; n < (indexes - 1); n++) {
|
||||
start = le64_to_cpu(table[n]);
|
||||
end = le64_to_cpu(table[n + 1]);
|
||||
|
||||
if (start >= end || (end - start) > SQUASHFS_METADATA_SIZE) {
|
||||
kfree(table);
|
||||
return ERR_PTR(-EINVAL);
|
||||
}
|
||||
}
|
||||
|
||||
start = le64_to_cpu(table[indexes - 1]);
|
||||
if (start >= id_table_start || (id_table_start - start) > SQUASHFS_METADATA_SIZE) {
|
||||
kfree(table);
|
||||
return ERR_PTR(-EINVAL);
|
||||
}
|
||||
|
|
|
@@ -64,5 +64,6 @@ struct squashfs_sb_info {
	unsigned int inodes;
	unsigned int fragments;
	int xattr_ids;
	unsigned int ids;
};
#endif

@ -166,6 +166,7 @@ static int squashfs_fill_super(struct super_block *sb, struct fs_context *fc)
|
|||
msblk->directory_table = le64_to_cpu(sblk->directory_table_start);
|
||||
msblk->inodes = le32_to_cpu(sblk->inodes);
|
||||
msblk->fragments = le32_to_cpu(sblk->fragments);
|
||||
msblk->ids = le16_to_cpu(sblk->no_ids);
|
||||
flags = le16_to_cpu(sblk->flags);
|
||||
|
||||
TRACE("Found valid superblock on %pg\n", sb->s_bdev);
|
||||
|
@ -177,7 +178,7 @@ static int squashfs_fill_super(struct super_block *sb, struct fs_context *fc)
|
|||
TRACE("Block size %d\n", msblk->block_size);
|
||||
TRACE("Number of inodes %d\n", msblk->inodes);
|
||||
TRACE("Number of fragments %d\n", msblk->fragments);
|
||||
TRACE("Number of ids %d\n", le16_to_cpu(sblk->no_ids));
|
||||
TRACE("Number of ids %d\n", msblk->ids);
|
||||
TRACE("sblk->inode_table_start %llx\n", msblk->inode_table);
|
||||
TRACE("sblk->directory_table_start %llx\n", msblk->directory_table);
|
||||
TRACE("sblk->fragment_table_start %llx\n",
|
||||
|
@ -236,8 +237,7 @@ static int squashfs_fill_super(struct super_block *sb, struct fs_context *fc)
|
|||
allocate_id_index_table:
|
||||
/* Allocate and read id index table */
|
||||
msblk->id_table = squashfs_read_id_index_table(sb,
|
||||
le64_to_cpu(sblk->id_table_start), next_table,
|
||||
le16_to_cpu(sblk->no_ids));
|
||||
le64_to_cpu(sblk->id_table_start), next_table, msblk->ids);
|
||||
if (IS_ERR(msblk->id_table)) {
|
||||
errorf(fc, "unable to read id index table");
|
||||
err = PTR_ERR(msblk->id_table);
|
||||
|
|
|
@ -17,8 +17,16 @@ extern int squashfs_xattr_lookup(struct super_block *, unsigned int, int *,
|
|||
static inline __le64 *squashfs_read_xattr_id_table(struct super_block *sb,
|
||||
u64 start, u64 *xattr_table_start, int *xattr_ids)
|
||||
{
|
||||
struct squashfs_xattr_id_table *id_table;
|
||||
|
||||
id_table = squashfs_read_table(sb, start, sizeof(*id_table));
|
||||
if (IS_ERR(id_table))
|
||||
return (__le64 *) id_table;
|
||||
|
||||
*xattr_table_start = le64_to_cpu(id_table->xattr_table_start);
|
||||
kfree(id_table);
|
||||
|
||||
ERROR("Xattrs in filesystem, these will be ignored\n");
|
||||
*xattr_table_start = start;
|
||||
return ERR_PTR(-ENOTSUPP);
|
||||
}
|
||||
|
||||
|
|
|
@ -31,10 +31,15 @@ int squashfs_xattr_lookup(struct super_block *sb, unsigned int index,
|
|||
struct squashfs_sb_info *msblk = sb->s_fs_info;
|
||||
int block = SQUASHFS_XATTR_BLOCK(index);
|
||||
int offset = SQUASHFS_XATTR_BLOCK_OFFSET(index);
|
||||
u64 start_block = le64_to_cpu(msblk->xattr_id_table[block]);
|
||||
u64 start_block;
|
||||
struct squashfs_xattr_id id;
|
||||
int err;
|
||||
|
||||
if (index >= msblk->xattr_ids)
|
||||
return -EINVAL;
|
||||
|
||||
start_block = le64_to_cpu(msblk->xattr_id_table[block]);
|
||||
|
||||
err = squashfs_read_metadata(sb, &id, &start_block, &offset,
|
||||
sizeof(id));
|
||||
if (err < 0)
|
||||
|
@ -50,13 +55,17 @@ int squashfs_xattr_lookup(struct super_block *sb, unsigned int index,
|
|||
/*
|
||||
* Read uncompressed xattr id lookup table indexes from disk into memory
|
||||
*/
|
||||
__le64 *squashfs_read_xattr_id_table(struct super_block *sb, u64 start,
|
||||
__le64 *squashfs_read_xattr_id_table(struct super_block *sb, u64 table_start,
|
||||
u64 *xattr_table_start, int *xattr_ids)
|
||||
{
|
||||
unsigned int len;
|
||||
struct squashfs_sb_info *msblk = sb->s_fs_info;
|
||||
unsigned int len, indexes;
|
||||
struct squashfs_xattr_id_table *id_table;
|
||||
__le64 *table;
|
||||
u64 start, end;
|
||||
int n;
|
||||
|
||||
id_table = squashfs_read_table(sb, start, sizeof(*id_table));
|
||||
id_table = squashfs_read_table(sb, table_start, sizeof(*id_table));
|
||||
if (IS_ERR(id_table))
|
||||
return (__le64 *) id_table;
|
||||
|
||||
|
@ -70,13 +79,52 @@ __le64 *squashfs_read_xattr_id_table(struct super_block *sb, u64 start,
|
|||
if (*xattr_ids == 0)
|
||||
return ERR_PTR(-EINVAL);
|
||||
|
||||
/* xattr_table should be less than start */
|
||||
if (*xattr_table_start >= start)
|
||||
len = SQUASHFS_XATTR_BLOCK_BYTES(*xattr_ids);
|
||||
indexes = SQUASHFS_XATTR_BLOCKS(*xattr_ids);
|
||||
|
||||
/*
|
||||
* The computed size of the index table (len bytes) should exactly
|
||||
* match the table start and end points
|
||||
*/
|
||||
start = table_start + sizeof(*id_table);
|
||||
end = msblk->bytes_used;
|
||||
|
||||
if (len != (end - start))
|
||||
return ERR_PTR(-EINVAL);
|
||||
|
||||
len = SQUASHFS_XATTR_BLOCK_BYTES(*xattr_ids);
|
||||
table = squashfs_read_table(sb, start, len);
|
||||
if (IS_ERR(table))
|
||||
return table;
|
||||
|
||||
TRACE("In read_xattr_index_table, length %d\n", len);
|
||||
/* table[0], table[1], ... table[indexes - 1] store the locations
|
||||
* of the compressed xattr id blocks. Each entry should be less than
|
||||
* the next (i.e. table[0] < table[1]), and the difference between them
|
||||
* should be SQUASHFS_METADATA_SIZE or less. table[indexes - 1]
|
||||
* should be less than table_start, and again the difference
|
||||
* shouls be SQUASHFS_METADATA_SIZE or less.
|
||||
*
|
||||
* Finally xattr_table_start should be less than table[0].
|
||||
*/
|
||||
for (n = 0; n < (indexes - 1); n++) {
|
||||
start = le64_to_cpu(table[n]);
|
||||
end = le64_to_cpu(table[n + 1]);
|
||||
|
||||
return squashfs_read_table(sb, start + sizeof(*id_table), len);
|
||||
if (start >= end || (end - start) > SQUASHFS_METADATA_SIZE) {
|
||||
kfree(table);
|
||||
return ERR_PTR(-EINVAL);
|
||||
}
|
||||
}
|
||||
|
||||
start = le64_to_cpu(table[indexes - 1]);
|
||||
if (start >= table_start || (table_start - start) > SQUASHFS_METADATA_SIZE) {
|
||||
kfree(table);
|
||||
return ERR_PTR(-EINVAL);
|
||||
}
|
||||
|
||||
if (*xattr_table_start >= le64_to_cpu(table[0])) {
|
||||
kfree(table);
|
||||
return ERR_PTR(-EINVAL);
|
||||
}
|
||||
|
||||
return table;
|
||||
}
|
||||
|
|
|
@@ -459,7 +459,7 @@
	} \
	\
	/* Built-in firmware blobs */ \
	.builtin_fw : AT(ADDR(.builtin_fw) - LOAD_OFFSET) { \
	.builtin_fw : AT(ADDR(.builtin_fw) - LOAD_OFFSET) ALIGN(8) { \
		__start_builtin_fw = .; \
		KEEP(*(.builtin_fw)) \
		__end_builtin_fw = .; \

@@ -4352,6 +4352,7 @@ static inline void netif_tx_disable(struct net_device *dev)

	local_bh_disable();
	cpu = smp_processor_id();
	spin_lock(&dev->tx_global_lock);
	for (i = 0; i < dev->num_tx_queues; i++) {
		struct netdev_queue *txq = netdev_get_tx_queue(dev, i);

@@ -4359,6 +4360,7 @@ static inline void netif_tx_disable(struct net_device *dev)
		netif_tx_stop_queue(txq);
		__netif_tx_unlock(txq);
	}
	spin_unlock(&dev->tx_global_lock);
	local_bh_enable();
}

@@ -260,7 +260,13 @@ static inline void iov_iter_reexpand(struct iov_iter *i, size_t count)
{
	i->count = count;
}
size_t csum_and_copy_to_iter(const void *addr, size_t bytes, void *csump, struct iov_iter *i);

struct csum_state {
	__wsum csum;
	size_t off;
};

size_t csum_and_copy_to_iter(const void *addr, size_t bytes, void *csstate, struct iov_iter *i);
size_t csum_and_copy_from_iter(void *addr, size_t bytes, __wsum *csum, struct iov_iter *i);
bool csum_and_copy_from_iter_full(void *addr, size_t bytes, __wsum *csum, struct iov_iter *i);
size_t hash_and_copy_to_iter(const void *addr, size_t bytes, void *hashp,

@ -42,7 +42,6 @@ enum switchdev_attr_id {
SWITCHDEV_ATTR_ID_BRIDGE_MC_DISABLED,
SWITCHDEV_ATTR_ID_BRIDGE_MROUTER,
#if IS_ENABLED(CONFIG_BRIDGE_MRP)
SWITCHDEV_ATTR_ID_MRP_PORT_STATE,
SWITCHDEV_ATTR_ID_MRP_PORT_ROLE,
#endif
};

@ -62,7 +61,6 @@ struct switchdev_attr {
u16 vlan_protocol; /* BRIDGE_VLAN_PROTOCOL */
bool mc_disabled; /* MC_DISABLED */
#if IS_ENABLED(CONFIG_BRIDGE_MRP)
u8 mrp_port_state; /* MRP_PORT_STATE */
u8 mrp_port_role; /* MRP_PORT_ROLE */
#endif
} u;

@ -709,6 +709,7 @@ struct ocelot_policer {
/* I/O */
u32 ocelot_port_readl(struct ocelot_port *port, u32 reg);
void ocelot_port_writel(struct ocelot_port *port, u32 val, u32 reg);
void ocelot_port_rmwl(struct ocelot_port *port, u32 val, u32 mask, u32 reg);
u32 __ocelot_read_ix(struct ocelot *ocelot, u32 reg, u32 offset);
void __ocelot_write_ix(struct ocelot *ocelot, u32 val, u32 reg, u32 offset);
void __ocelot_rmw_ix(struct ocelot *ocelot, u32 val, u32 mask, u32 reg,

@ -737,6 +738,7 @@ int ocelot_get_sset_count(struct ocelot *ocelot, int port, int sset);
int ocelot_get_ts_info(struct ocelot *ocelot, int port,
struct ethtool_ts_info *info);
void ocelot_set_ageing_time(struct ocelot *ocelot, unsigned int msecs);
int ocelot_port_flush(struct ocelot *ocelot, int port);
void ocelot_adjust_link(struct ocelot *ocelot, int port,
struct phy_device *phydev);
int ocelot_port_vlan_filtering(struct ocelot *ocelot, int port, bool enabled,

@ -115,6 +115,8 @@ static struct bpf_map *stack_map_alloc(union bpf_attr *attr)

/* hash table size must be power of 2 */
n_buckets = roundup_pow_of_two(attr->max_entries);
if (!n_buckets)
return ERR_PTR(-E2BIG);

cost = n_buckets * sizeof(struct stack_map_bucket *) + sizeof(*smap);
cost += n_buckets * (value_size + sizeof(struct stack_map_bucket));

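The new guard above rejects a bucket count whose power-of-two round-up overflows to zero. A hedged userspace sketch of that bit math (this is not the kernel's roundup_pow_of_two(), just an equivalent 32-bit illustration):

/* Sketch: round a bucket count up to a power of two and detect overflow,
 * mirroring the n_buckets check above.
 */
#include <stdint.h>
#include <stdio.h>

static uint32_t roundup_pow2_u32(uint32_t x)
{
	x--;
	x |= x >> 1;
	x |= x >> 2;
	x |= x >> 4;
	x |= x >> 8;
	x |= x >> 16;
	return x + 1;	/* wraps to 0 when x started above 1u << 31 */
}

int main(void)
{
	uint32_t max_entries[] = { 1000, 1u << 31, (1u << 31) + 1 };

	for (int i = 0; i < 3; i++) {
		uint32_t n = roundup_pow2_u32(max_entries[i]);

		if (!n)	/* overflow: reject, like the ERR_PTR(-E2BIG) above */
			printf("%u -> too big\n", max_entries[i]);
		else
			printf("%u -> %u buckets\n", max_entries[i], n);
	}
	return 0;
}
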
@ -6877,7 +6877,7 @@ static int is_branch32_taken(struct bpf_reg_state *reg, u32 val, u8 opcode)
case BPF_JSGT:
if (reg->s32_min_value > sval)
return 1;
else if (reg->s32_max_value < sval)
else if (reg->s32_max_value <= sval)
return 0;
break;
case BPF_JLT:

@ -6950,7 +6950,7 @@ static int is_branch64_taken(struct bpf_reg_state *reg, u64 val, u8 opcode)
case BPF_JSGT:
if (reg->smin_value > sval)
return 1;
else if (reg->smax_value < sval)
else if (reg->smax_value <= sval)
return 0;
break;
case BPF_JLT:

@ -8590,7 +8590,11 @@ static bool range_within(struct bpf_reg_state *old,
return old->umin_value <= cur->umin_value &&
old->umax_value >= cur->umax_value &&
old->smin_value <= cur->smin_value &&
old->smax_value >= cur->smax_value;
old->smax_value >= cur->smax_value &&
old->u32_min_value <= cur->u32_min_value &&
old->u32_max_value >= cur->u32_max_value &&
old->s32_min_value <= cur->s32_min_value &&
old->s32_max_value >= cur->s32_max_value;
}

/* Maximum number of register states that can exist at once */

@ -10999,30 +11003,28 @@ static int fixup_bpf_calls(struct bpf_verifier_env *env)
insn->code == (BPF_ALU | BPF_MOD | BPF_X) ||
insn->code == (BPF_ALU | BPF_DIV | BPF_X)) {
bool is64 = BPF_CLASS(insn->code) == BPF_ALU64;
struct bpf_insn mask_and_div[] = {
BPF_MOV32_REG(insn->src_reg, insn->src_reg),
bool isdiv = BPF_OP(insn->code) == BPF_DIV;
struct bpf_insn *patchlet;
struct bpf_insn chk_and_div[] = {
/* Rx div 0 -> 0 */
BPF_JMP_IMM(BPF_JNE, insn->src_reg, 0, 2),
BPF_RAW_INSN((is64 ? BPF_JMP : BPF_JMP32) |
BPF_JNE | BPF_K, insn->src_reg,
0, 2, 0),
BPF_ALU32_REG(BPF_XOR, insn->dst_reg, insn->dst_reg),
BPF_JMP_IMM(BPF_JA, 0, 0, 1),
*insn,
};
struct bpf_insn mask_and_mod[] = {
BPF_MOV32_REG(insn->src_reg, insn->src_reg),
struct bpf_insn chk_and_mod[] = {
/* Rx mod 0 -> Rx */
BPF_JMP_IMM(BPF_JEQ, insn->src_reg, 0, 1),
BPF_RAW_INSN((is64 ? BPF_JMP : BPF_JMP32) |
BPF_JEQ | BPF_K, insn->src_reg,
0, 1, 0),
*insn,
};
struct bpf_insn *patchlet;

if (insn->code == (BPF_ALU64 | BPF_DIV | BPF_X) ||
insn->code == (BPF_ALU | BPF_DIV | BPF_X)) {
patchlet = mask_and_div + (is64 ? 1 : 0);
cnt = ARRAY_SIZE(mask_and_div) - (is64 ? 1 : 0);
} else {
patchlet = mask_and_mod + (is64 ? 1 : 0);
cnt = ARRAY_SIZE(mask_and_mod) - (is64 ? 1 : 0);
}
patchlet = isdiv ? chk_and_div : chk_and_mod;
cnt = isdiv ? ARRAY_SIZE(chk_and_div) :
ARRAY_SIZE(chk_and_mod);

new_prog = bpf_patch_insn_data(env, i + delta, patchlet, cnt);
if (!new_prog)

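The one-character change in is_branch32_taken()/is_branch64_taken() above is about the boundary case where the register's known maximum equals the immediate. A small standalone sketch of that branch-pruning rule, using my own simplified signed range rather than the verifier's state:

/* Sketch: decide whether "jump if reg > val (signed)" is always or never
 * taken given a known [smin, smax] range for reg.
 * Returns 1 = always taken, 0 = never taken, -1 = unknown.
 */
#include <stdio.h>

static int jsgt_taken(long long smin, long long smax, long long val)
{
	if (smin > val)
		return 1;
	if (smax <= val)	/* the fixed check; '<' would miss smax == val */
		return 0;
	return -1;
}

int main(void)
{
	/* reg in [0, 5] compared against 5: reg > 5 can never be true */
	printf("[0,5] > 5 : %d\n", jsgt_taken(0, 5, 5));	/* 0 with <=, -1 with < */
	printf("[6,9] > 5 : %d\n", jsgt_taken(6, 9, 5));	/* always taken */
	printf("[0,9] > 5 : %d\n", jsgt_taken(0, 9, 5));	/* unknown */
	return 0;
}
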
@ -96,9 +96,6 @@ unsigned int trace_call_bpf(struct trace_event_call *call, void *ctx)
{
unsigned int ret;

if (in_nmi()) /* not supported yet */
return 1;

cant_sleep();

if (unlikely(__this_cpu_inc_return(bpf_prog_active) != 1)) {

@ -592,14 +592,15 @@ static __wsum csum_and_memcpy(void *to, const void *from, size_t len,
}

static size_t csum_and_copy_to_pipe_iter(const void *addr, size_t bytes,
__wsum *csum, struct iov_iter *i)
struct csum_state *csstate,
struct iov_iter *i)
{
struct pipe_inode_info *pipe = i->pipe;
unsigned int p_mask = pipe->ring_size - 1;
__wsum sum = csstate->csum;
size_t off = csstate->off;
unsigned int i_head;
size_t n, r;
size_t off = 0;
__wsum sum = *csum;

if (!sanity(i))
return 0;

@ -621,7 +622,8 @@ static size_t csum_and_copy_to_pipe_iter(const void *addr, size_t bytes,
i_head++;
} while (n);
i->count -= bytes;
*csum = sum;
csstate->csum = sum;
csstate->off = off;
return bytes;
}

@ -1522,18 +1524,19 @@ bool csum_and_copy_from_iter_full(void *addr, size_t bytes, __wsum *csum,
}
EXPORT_SYMBOL(csum_and_copy_from_iter_full);

size_t csum_and_copy_to_iter(const void *addr, size_t bytes, void *csump,
size_t csum_and_copy_to_iter(const void *addr, size_t bytes, void *_csstate,
struct iov_iter *i)
{
struct csum_state *csstate = _csstate;
const char *from = addr;
__wsum *csum = csump;
__wsum sum, next;
size_t off = 0;
size_t off;

if (unlikely(iov_iter_is_pipe(i)))
return csum_and_copy_to_pipe_iter(addr, bytes, csum, i);
return csum_and_copy_to_pipe_iter(addr, bytes, _csstate, i);

sum = *csum;
sum = csstate->csum;
off = csstate->off;
if (unlikely(iov_iter_is_discard(i))) {
WARN_ON(1); /* for now */
return 0;

@ -1561,7 +1564,8 @@ size_t csum_and_copy_to_iter(const void *addr, size_t bytes, void *csump,
off += v.iov_len;
})
)
*csum = sum;
csstate->csum = sum;
csstate->off = off;
return bytes;
}
EXPORT_SYMBOL(csum_and_copy_to_iter);

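The csum_state rework above carries the running byte offset alongside the checksum because an RFC 1071 style ones'-complement sum resumed at an odd offset needs its next partial sum byte-swapped before being folded in. A userspace sketch of just that arithmetic (my own illustration, not the kernel's csum helpers):

/* Sketch: resuming a ones'-complement checksum chunk by chunk requires the
 * running offset: a chunk that starts at an odd offset contributes its
 * partial sum byte-swapped.
 */
#include <stdint.h>
#include <stdio.h>

static uint32_t fold(uint32_t sum)
{
	while (sum >> 16)
		sum = (sum & 0xffff) + (sum >> 16);
	return sum;
}

/* sum a chunk as if it started at offset 0 */
static uint32_t csum_chunk(const uint8_t *p, size_t len)
{
	uint32_t sum = 0;

	for (size_t i = 0; i < len; i++)
		sum += (i & 1) ? p[i] : (uint32_t)p[i] << 8;
	return fold(sum);
}

/* add a chunk's sum into a running sum, honouring the global offset */
static uint32_t csum_add_at(uint32_t sum, uint32_t part, size_t off)
{
	if (off & 1)	/* odd start: swap the partial sum's bytes */
		part = ((part & 0xff) << 8) | (part >> 8);
	return fold(sum + part);
}

int main(void)
{
	uint8_t buf[7] = { 0x12, 0x34, 0x56, 0x78, 0x9a, 0xbc, 0xde };
	uint32_t whole = csum_chunk(buf, 7);
	uint32_t sum = 0;
	size_t off = 0;

	sum = csum_add_at(sum, csum_chunk(buf, 3), off);      off += 3;
	sum = csum_add_at(sum, csum_chunk(buf + 3, 4), off);  off += 4;

	printf("whole=%04x chunked=%04x\n", whole, sum);	/* both e169 */
	return 0;
}
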
@ -134,12 +134,8 @@ void __init kasan_init_hw_tags(void)

switch (kasan_arg_stacktrace) {
case KASAN_ARG_STACKTRACE_DEFAULT:
/*
* Default to enabling stack trace collection for
* debug kernels.
*/
if (IS_ENABLED(CONFIG_DEBUG_KERNEL))
static_branch_enable(&kasan_flag_stacktrace);
/* Default to enabling stack trace collection. */
static_branch_enable(&kasan_flag_stacktrace);
break;
case KASAN_ARG_STACKTRACE_OFF:
/* Do nothing, kasan_flag_stacktrace keeps its default value. */

@ -6271,6 +6271,8 @@ static ssize_t memory_high_write(struct kernfs_open_file *of,
if (err)
return err;

page_counter_set_high(&memcg->memory, high);

for (;;) {
unsigned long nr_pages = page_counter_read(&memcg->memory);
unsigned long reclaimed;

@ -6294,10 +6296,7 @@ static ssize_t memory_high_write(struct kernfs_open_file *of,
break;
}

page_counter_set_high(&memcg->memory, high);

memcg_wb_domain_size_changed(memcg);

return nbytes;
}

@ -336,8 +336,9 @@ enum pgt_entry {
* valid. Else returns a smaller extent bounded by the end of the source and
* destination pgt_entry.
*/
static unsigned long get_extent(enum pgt_entry entry, unsigned long old_addr,
unsigned long old_end, unsigned long new_addr)
static __always_inline unsigned long get_extent(enum pgt_entry entry,
unsigned long old_addr, unsigned long old_end,
unsigned long new_addr)
{
unsigned long next, extent, mask, size;

mm/slub.c

@ -3423,6 +3423,7 @@ static inline int calculate_order(unsigned int size)
unsigned int order;
unsigned int min_objects;
unsigned int max_objects;
unsigned int nr_cpus;

/*
* Attempt to find best configuration for a slab. This

@ -3433,8 +3434,21 @@ static inline int calculate_order(unsigned int size)
* we reduce the minimum objects required in a slab.
*/
min_objects = slub_min_objects;
if (!min_objects)
min_objects = 4 * (fls(num_online_cpus()) + 1);
if (!min_objects) {
/*
* Some architectures will only update present cpus when
* onlining them, so don't trust the number if it's just 1. But
* we also don't want to use nr_cpu_ids always, as on some other
* architectures, there can be many possible cpus, but never
* onlined. Here we compromise between trying to avoid too high
* order on systems that appear larger than they are, and too
* low order on systems that appear smaller than they are.
*/
nr_cpus = num_present_cpus();
if (nr_cpus <= 1)
nr_cpus = nr_cpu_ids;
min_objects = 4 * (fls(nr_cpus) + 1);
}
max_objects = order_objects(slub_max_order, size);
min_objects = min(min_objects, max_objects);

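The fallback above only changes which CPU count feeds the existing heuristic. A small sketch of that heuristic with fls() reimplemented for userspace; the present/possible CPU counts are plain parameters standing in for num_present_cpus() and nr_cpu_ids:

/* Sketch: the min_objects heuristic from calculate_order(), fed by a CPU
 * count that falls back from "present" to "possible" when present <= 1.
 */
#include <stdio.h>

static int fls_u32(unsigned int x)	/* 1-based index of the highest set bit */
{
	int r = 0;

	while (x) {
		r++;
		x >>= 1;
	}
	return r;
}

static unsigned int min_objects_for(unsigned int present_cpus,
				    unsigned int possible_cpus)
{
	unsigned int nr_cpus = present_cpus;

	if (nr_cpus <= 1)		/* arch may not have filled in present cpus yet */
		nr_cpus = possible_cpus;
	return 4 * (fls_u32(nr_cpus) + 1);
}

int main(void)
{
	printf("present=1,  possible=64 -> min_objects=%u\n", min_objects_for(1, 64));
	printf("present=16, possible=64 -> min_objects=%u\n", min_objects_for(16, 64));
	return 0;
}
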
@ -557,19 +557,22 @@ int br_mrp_del(struct net_bridge *br, struct br_mrp_instance *instance)
int br_mrp_set_port_state(struct net_bridge_port *p,
enum br_mrp_port_state_type state)
{
u32 port_state;

if (!p || !(p->flags & BR_MRP_AWARE))
return -EINVAL;

spin_lock_bh(&p->br->lock);

if (state == BR_MRP_PORT_STATE_FORWARDING)
p->state = BR_STATE_FORWARDING;
port_state = BR_STATE_FORWARDING;
else
p->state = BR_STATE_BLOCKING;
port_state = BR_STATE_BLOCKING;

p->state = port_state;
spin_unlock_bh(&p->br->lock);

br_mrp_port_switchdev_set_state(p, state);
br_mrp_port_switchdev_set_state(p, port_state);

return 0;
}

@ -169,13 +169,12 @@ int br_mrp_switchdev_send_in_test(struct net_bridge *br, struct br_mrp *mrp,
return err;
}

int br_mrp_port_switchdev_set_state(struct net_bridge_port *p,
enum br_mrp_port_state_type state)
int br_mrp_port_switchdev_set_state(struct net_bridge_port *p, u32 state)
{
struct switchdev_attr attr = {
.orig_dev = p->dev,
.id = SWITCHDEV_ATTR_ID_MRP_PORT_STATE,
.u.mrp_port_state = state,
.id = SWITCHDEV_ATTR_ID_PORT_STP_STATE,
.u.stp_state = state,
};
int err;

@ -72,8 +72,7 @@ int br_mrp_switchdev_set_ring_state(struct net_bridge *br, struct br_mrp *mrp,
int br_mrp_switchdev_send_ring_test(struct net_bridge *br, struct br_mrp *mrp,
u32 interval, u8 max_miss, u32 period,
bool monitor);
int br_mrp_port_switchdev_set_state(struct net_bridge_port *p,
enum br_mrp_port_state_type state);
int br_mrp_port_switchdev_set_state(struct net_bridge_port *p, u32 state);
int br_mrp_port_switchdev_set_role(struct net_bridge_port *p,
enum br_mrp_port_role_type role);
int br_mrp_switchdev_set_in_role(struct net_bridge *br, struct br_mrp *mrp,

@ -721,8 +721,16 @@ static int skb_copy_and_csum_datagram(const struct sk_buff *skb, int offset,
struct iov_iter *to, int len,
__wsum *csump)
{
return __skb_datagram_iter(skb, offset, to, len, true,
csum_and_copy_to_iter, csump);
struct csum_state csdata = { .csum = *csump };
int ret;

ret = __skb_datagram_iter(skb, offset, to, len, true,
csum_and_copy_to_iter, &csdata);
if (ret)
return ret;

*csump = csdata.csum;
return 0;
}

/**

@ -5735,10 +5735,11 @@ static void gro_normal_list(struct napi_struct *napi)
/* Queue one GRO_NORMAL SKB up for list processing. If batch size exceeded,
* pass the whole batch up to the stack.
*/
static void gro_normal_one(struct napi_struct *napi, struct sk_buff *skb)
static void gro_normal_one(struct napi_struct *napi, struct sk_buff *skb, int segs)
{
list_add_tail(&skb->list, &napi->rx_list);
if (++napi->rx_count >= gro_normal_batch)
napi->rx_count += segs;
if (napi->rx_count >= gro_normal_batch)
gro_normal_list(napi);
}

@ -5777,7 +5778,7 @@ static int napi_gro_complete(struct napi_struct *napi, struct sk_buff *skb)
}

out:
gro_normal_one(napi, skb);
gro_normal_one(napi, skb, NAPI_GRO_CB(skb)->count);
return NET_RX_SUCCESS;
}

@ -6067,7 +6068,7 @@ static gro_result_t napi_skb_finish(struct napi_struct *napi,
{
switch (ret) {
case GRO_NORMAL:
gro_normal_one(napi, skb);
gro_normal_one(napi, skb, 1);
break;

case GRO_DROP:

@ -6155,7 +6156,7 @@ static gro_result_t napi_frags_finish(struct napi_struct *napi,
__skb_push(skb, ETH_HLEN);
skb->protocol = eth_type_trans(skb, skb->dev);
if (ret == GRO_NORMAL)
gro_normal_one(napi, skb);
gro_normal_one(napi, skb, 1);
break;

case GRO_DROP:

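The change above makes the flush threshold count GSO segments instead of list entries, so one coalesced super-packet advances the batch as far as the packets it contains. A toy model of that counter; the batch limit and flush function are stand-ins, not the kernel structures:

/* Sketch: flush a receive batch once the number of queued segments,
 * not the number of queued SKBs, reaches the batch limit.
 */
#include <stdio.h>

#define BATCH_LIMIT 8	/* stand-in for gro_normal_batch */

static int rx_count;

static void flush_batch(void)
{
	printf("flush %d segments\n", rx_count);
	rx_count = 0;
}

static void queue_one(int segs)	/* segs = NAPI_GRO_CB(skb)->count, or 1 */
{
	rx_count += segs;
	if (rx_count >= BATCH_LIMIT)
		flush_batch();
}

int main(void)
{
	queue_one(1);	/* plain packet */
	queue_one(5);	/* coalesced super-packet counts as 5 segments */
	queue_one(3);	/* 1 + 5 + 3 >= 8: flush happens here */
	queue_one(1);
	return 0;
}
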
@ -462,20 +462,23 @@ static int dsa_switch_setup(struct dsa_switch *ds)
ds->slave_mii_bus = devm_mdiobus_alloc(ds->dev);
if (!ds->slave_mii_bus) {
err = -ENOMEM;
goto unregister_notifier;
goto teardown;
}

dsa_slave_mii_bus_init(ds);

err = mdiobus_register(ds->slave_mii_bus);
if (err < 0)
goto unregister_notifier;
goto teardown;
}

ds->setup = true;

return 0;

teardown:
if (ds->ops->teardown)
ds->ops->teardown(ds);
unregister_notifier:
dsa_switch_unregister_notifier(ds);
unregister_devlink_ports:

@ -69,7 +69,7 @@ config MAC80211_MESH
config MAC80211_LEDS
bool "Enable LED triggers"
depends on MAC80211
depends on LEDS_CLASS
depends on LEDS_CLASS=y || LEDS_CLASS=MAC80211
select LEDS_TRIGGERS
help
This option enables a few LED triggers for different

@ -1229,7 +1229,8 @@ nf_conntrack_tuple_taken(const struct nf_conntrack_tuple *tuple,
* Let nf_ct_resolve_clash() deal with this later.
*/
if (nf_ct_tuple_equal(&ignored_conntrack->tuplehash[IP_CT_DIR_ORIGINAL].tuple,
&ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple))
&ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple) &&
nf_ct_zone_equal(ct, zone, IP_CT_DIR_ORIGINAL))
continue;

NF_CT_STAT_INC_ATOMIC(net, found);

@ -399,7 +399,7 @@ static int nf_flow_nat_port_tcp(struct sk_buff *skb, unsigned int thoff,
return -1;

tcph = (void *)(skb_network_header(skb) + thoff);
inet_proto_csum_replace2(&tcph->check, skb, port, new_port, true);
inet_proto_csum_replace2(&tcph->check, skb, port, new_port, false);

return 0;
}

@ -415,7 +415,7 @@ static int nf_flow_nat_port_udp(struct sk_buff *skb, unsigned int thoff,
udph = (void *)(skb_network_header(skb) + thoff);
if (udph->check || skb->ip_summed == CHECKSUM_PARTIAL) {
inet_proto_csum_replace2(&udph->check, skb, port,
new_port, true);
new_port, false);
if (!udph->check)
udph->check = CSUM_MANGLED_0;
}

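The last argument flipped above tells inet_proto_csum_replace2() whether the rewritten 16-bit word belongs to the pseudo-header; ports do not, so the flag becomes false and only the transport checksum is adjusted. The adjustment itself is the usual incremental update. A hedged sketch of that arithmetic alone (RFC 1624 style, not the kernel helper, and with an arbitrary starting checksum):

/* Sketch: incrementally update a 16-bit ones'-complement checksum after
 * rewriting one 16-bit field: HC' = ~(~HC + ~m + m').
 */
#include <stdint.h>
#include <stdio.h>

static uint16_t csum_update16(uint16_t check, uint16_t old_val, uint16_t new_val)
{
	uint32_t sum = (uint16_t)~check;

	sum += (uint16_t)~old_val;
	sum += new_val;
	while (sum >> 16)
		sum = (sum & 0xffff) + (sum >> 16);
	return ~sum;
}

int main(void)
{
	uint16_t check = 0xb1e6;	/* arbitrary prior checksum */
	uint16_t old_port = 5000, new_port = 6000;

	printf("old check 0x%04x -> new check 0x%04x\n",
	       check, csum_update16(check, old_port, new_port));
	return 0;
}
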
@ -5281,6 +5281,7 @@ static int nft_add_set_elem(struct nft_ctx *ctx, struct nft_set *set,
struct nft_expr *expr_array[NFT_SET_EXPR_MAX] = {};
struct nlattr *nla[NFTA_SET_ELEM_MAX + 1];
u8 genmask = nft_genmask_next(ctx->net);
u32 flags = 0, size = 0, num_exprs = 0;
struct nft_set_ext_tmpl tmpl;
struct nft_set_ext *ext, *ext2;
struct nft_set_elem elem;

@ -5290,7 +5291,6 @@ static int nft_add_set_elem(struct nft_ctx *ctx, struct nft_set *set,
struct nft_data_desc desc;
enum nft_registers dreg;
struct nft_trans *trans;
u32 flags = 0, size = 0;
u64 timeout;
u64 expiration;
int err, i;

@ -5356,7 +5356,7 @@ static int nft_add_set_elem(struct nft_ctx *ctx, struct nft_set *set,
if (nla[NFTA_SET_ELEM_EXPR]) {
struct nft_expr *expr;

if (set->num_exprs != 1)
if (set->num_exprs && set->num_exprs != 1)
return -EOPNOTSUPP;

expr = nft_set_elem_expr_alloc(ctx, set,

@ -5365,8 +5365,9 @@ static int nft_add_set_elem(struct nft_ctx *ctx, struct nft_set *set,
return PTR_ERR(expr);

expr_array[0] = expr;
num_exprs = 1;

if (set->exprs[0] && set->exprs[0]->ops != expr->ops) {
if (set->num_exprs && set->exprs[0]->ops != expr->ops) {
err = -EOPNOTSUPP;
goto err_set_elem_expr;
}

@ -5375,12 +5376,10 @@ static int nft_add_set_elem(struct nft_ctx *ctx, struct nft_set *set,
struct nlattr *tmp;
int left;

if (set->num_exprs == 0)
return -EOPNOTSUPP;

i = 0;
nla_for_each_nested(tmp, nla[NFTA_SET_ELEM_EXPRESSIONS], left) {
if (i == set->num_exprs) {
if (i == NFT_SET_EXPR_MAX ||
(set->num_exprs && set->num_exprs == i)) {
err = -E2BIG;
goto err_set_elem_expr;
}

@ -5394,14 +5393,15 @@ static int nft_add_set_elem(struct nft_ctx *ctx, struct nft_set *set,
goto err_set_elem_expr;
}
expr_array[i] = expr;
num_exprs++;

if (expr->ops != set->exprs[i]->ops) {
if (set->num_exprs && expr->ops != set->exprs[i]->ops) {
err = -EOPNOTSUPP;
goto err_set_elem_expr;
}
i++;
}
if (set->num_exprs != i) {
if (set->num_exprs && set->num_exprs != i) {
err = -EOPNOTSUPP;
goto err_set_elem_expr;
}

@ -5409,6 +5409,8 @@ static int nft_add_set_elem(struct nft_ctx *ctx, struct nft_set *set,
err = nft_set_elem_expr_clone(ctx, set, expr_array);
if (err < 0)
goto err_set_elem_expr_clone;

num_exprs = set->num_exprs;
}

err = nft_setelem_parse_key(ctx, set, &elem.key.val,

@ -5433,8 +5435,8 @@ static int nft_add_set_elem(struct nft_ctx *ctx, struct nft_set *set,
nft_set_ext_add(&tmpl, NFT_SET_EXT_TIMEOUT);
}

if (set->num_exprs) {
for (i = 0; i < set->num_exprs; i++)
if (num_exprs) {
for (i = 0; i < num_exprs; i++)
size += expr_array[i]->ops->size;

nft_set_ext_add_length(&tmpl, NFT_SET_EXT_EXPRESSIONS,

@ -5522,7 +5524,7 @@ static int nft_add_set_elem(struct nft_ctx *ctx, struct nft_set *set,
*nft_set_ext_obj(ext) = obj;
obj->use++;
}
for (i = 0; i < set->num_exprs; i++)
for (i = 0; i < num_exprs; i++)
nft_set_elem_expr_setup(ext, i, expr_array);

trans = nft_trans_elem_alloc(ctx, NFT_MSG_NEWSETELEM, set);

@ -5584,7 +5586,7 @@ static int nft_add_set_elem(struct nft_ctx *ctx, struct nft_set *set,
err_parse_key:
nft_data_release(&elem.key.val, NFT_DATA_VALUE);
err_set_elem_expr:
for (i = 0; i < set->num_exprs && expr_array[i]; i++)
for (i = 0; i < num_exprs && expr_array[i]; i++)
nft_expr_destroy(ctx, expr_array[i]);
err_set_elem_expr_clone:
return err;

@ -8949,6 +8951,17 @@ int __nft_release_basechain(struct nft_ctx *ctx)
}
EXPORT_SYMBOL_GPL(__nft_release_basechain);

static void __nft_release_hooks(struct net *net)
{
struct nft_table *table;
struct nft_chain *chain;

list_for_each_entry(table, &net->nft.tables, list) {
list_for_each_entry(chain, &table->chains, list)
nf_tables_unregister_hook(net, table, chain);
}
}

static void __nft_release_tables(struct net *net)
{
struct nft_flowtable *flowtable, *nf;

@ -8964,10 +8977,6 @@ static void __nft_release_tables(struct net *net)

list_for_each_entry_safe(table, nt, &net->nft.tables, list) {
ctx.family = table->family;

list_for_each_entry(chain, &table->chains, list)
nf_tables_unregister_hook(net, table, chain);
/* No packets are walking on these chains anymore. */
ctx.table = table;
list_for_each_entry(chain, &table->chains, list) {
ctx.chain = chain;

@ -9016,6 +9025,11 @@ static int __net_init nf_tables_init_net(struct net *net)
return 0;
}

static void __net_exit nf_tables_pre_exit_net(struct net *net)
{
__nft_release_hooks(net);
}

static void __net_exit nf_tables_exit_net(struct net *net)
{
mutex_lock(&net->nft.commit_mutex);

@ -9029,8 +9043,9 @@ static void __net_exit nf_tables_exit_net(struct net *net)
}

static struct pernet_operations nf_tables_net_ops = {
.init = nf_tables_init_net,
.exit = nf_tables_exit_net,
.init = nf_tables_init_net,
.pre_exit = nf_tables_pre_exit_net,
.exit = nf_tables_exit_net,
};

static int __init nf_tables_module_init(void)

@ -152,7 +152,8 @@ static void recent_entry_remove(struct recent_table *t, struct recent_entry *e)
/*
* Drop entries with timestamps older than 'time'.
*/
static void recent_entry_reap(struct recent_table *t, unsigned long time)
static void recent_entry_reap(struct recent_table *t, unsigned long time,
struct recent_entry *working, bool update)
{
struct recent_entry *e;

@ -161,6 +162,12 @@ static void recent_entry_reap(struct recent_table *t, unsigned long time)
*/
e = list_entry(t->lru_list.next, struct recent_entry, lru_list);

/*
* Do not reap the entry which is going to be updated.
*/
if (e == working && update)
return;

/*
* The last time stamp is the most recent.
*/

@ -303,7 +310,8 @@ recent_mt(const struct sk_buff *skb, struct xt_action_param *par)

/* info->seconds must be non-zero */
if (info->check_set & XT_RECENT_REAP)
recent_entry_reap(t, time);
recent_entry_reap(t, time, e,
info->check_set & XT_RECENT_UPDATE && ret);
}

if (info->check_set & XT_RECENT_SET ||

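The guard added above skips reaping when the oldest LRU entry is the very entry this packet is about to refresh, so the table never frees memory it is still using. A toy sketch of that rule with a plain array standing in for the LRU list:

/* Sketch: reap the oldest entry unless it is the one being refreshed,
 * mirroring the (e == working && update) check above.
 */
#include <stdio.h>

struct entry {
	const char *name;
	unsigned long stamp;
};

static void reap_oldest(struct entry **lru, unsigned long cutoff,
			struct entry *working, int update)
{
	struct entry *e = lru[0];	/* head of the LRU = oldest entry */

	if (e == working && update)
		return;			/* about to be refreshed, keep it */
	if (e->stamp < cutoff)
		printf("reaped %s\n", e->name);
}

int main(void)
{
	struct entry a = { "a", 10 }, b = { "b", 50 };
	struct entry *lru[] = { &a, &b };

	reap_oldest(lru, 40, &a, 1);	/* 'a' is stale but being updated: kept */
	reap_oldest(lru, 40, &b, 1);	/* 'a' is stale and not the target: reaped */
	return 0;
}
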
@ -80,6 +80,12 @@ static ssize_t qrtr_tun_write_iter(struct kiocb *iocb, struct iov_iter *from)
ssize_t ret;
void *kbuf;

if (!len)
return -EINVAL;

if (len > KMALLOC_MAX_SIZE)
return -ENOMEM;

kbuf = kzalloc(len, GFP_KERNEL);
if (!kbuf)
return -ENOMEM;

@ -548,8 +548,6 @@ void rxrpc_release_call(struct rxrpc_sock *rx, struct rxrpc_call *call)
rxrpc_disconnect_call(call);
if (call->security)
call->security->free_call_crypto(call);

rxrpc_cleanup_ring(call);
_leave("");
}

@ -215,6 +215,12 @@ static void sctp_transport_seq_stop(struct seq_file *seq, void *v)
{
struct sctp_ht_iter *iter = seq->private;

if (v && v != SEQ_START_TOKEN) {
struct sctp_transport *transport = v;

sctp_transport_put(transport);
}

sctp_transport_walk_stop(&iter->hti);
}

@ -222,6 +228,12 @@ static void *sctp_transport_seq_next(struct seq_file *seq, void *v, loff_t *pos)
{
struct sctp_ht_iter *iter = seq->private;

if (v && v != SEQ_START_TOKEN) {
struct sctp_transport *transport = v;

sctp_transport_put(transport);
}

++*pos;

return sctp_transport_get_next(seq_file_net(seq), &iter->hti);

@ -277,8 +289,6 @@ static int sctp_assocs_seq_show(struct seq_file *seq, void *v)
sk->sk_rcvbuf);
seq_printf(seq, "\n");

sctp_transport_put(transport);

return 0;
}

@ -354,8 +364,6 @@ static int sctp_remaddr_seq_show(struct seq_file *seq, void *v)
seq_printf(seq, "\n");
}

sctp_transport_put(transport);

return 0;
}

@ -943,10 +943,12 @@ static int vsock_shutdown(struct socket *sock, int mode)
*/

sk = sock->sk;

lock_sock(sk);
if (sock->state == SS_UNCONNECTED) {
err = -ENOTCONN;
if (sk->sk_type == SOCK_STREAM)
return err;
goto out;
} else {
sock->state = SS_DISCONNECTING;
err = 0;

@ -955,10 +957,8 @@ static int vsock_shutdown(struct socket *sock, int mode)
/* Receive and send shutdowns are treated alike. */
mode = mode & (RCV_SHUTDOWN | SEND_SHUTDOWN);
if (mode) {
lock_sock(sk);
sk->sk_shutdown |= mode;
sk->sk_state_change(sk);
release_sock(sk);

if (sk->sk_type == SOCK_STREAM) {
sock_reset_flag(sk, SOCK_DONE);

@ -966,6 +966,8 @@ static int vsock_shutdown(struct socket *sock, int mode)
}
}

out:
release_sock(sk);
return err;
}

@ -1233,7 +1235,7 @@ static int vsock_transport_cancel_pkt(struct vsock_sock *vsk)
{
const struct vsock_transport *transport = vsk->transport;

if (!transport->cancel_pkt)
if (!transport || !transport->cancel_pkt)
return -EOPNOTSUPP;

return transport->cancel_pkt(vsk);

@ -1243,7 +1245,6 @@ static void vsock_connect_timeout(struct work_struct *work)
{
struct sock *sk;
struct vsock_sock *vsk;
int cancel = 0;

vsk = container_of(work, struct vsock_sock, connect_work.work);
sk = sk_vsock(vsk);

@ -1254,11 +1255,9 @@ static void vsock_connect_timeout(struct work_struct *work)
sk->sk_state = TCP_CLOSE;
sk->sk_err = ETIMEDOUT;
sk->sk_error_report(sk);
cancel = 1;
vsock_transport_cancel_pkt(vsk);
}
release_sock(sk);
if (cancel)
vsock_transport_cancel_pkt(vsk);

sock_put(sk);
}

@ -474,14 +474,10 @@ static void hvs_shutdown_lock_held(struct hvsock *hvs, int mode)

static int hvs_shutdown(struct vsock_sock *vsk, int mode)
{
struct sock *sk = sk_vsock(vsk);

if (!(mode & SEND_SHUTDOWN))
return 0;

lock_sock(sk);
hvs_shutdown_lock_held(vsk->trans, mode);
release_sock(sk);
return 0;
}

@ -1130,8 +1130,6 @@ void virtio_transport_recv_pkt(struct virtio_transport *t,

vsk = vsock_sk(sk);

space_available = virtio_transport_space_update(sk, pkt);

lock_sock(sk);

/* Check if sk has been closed before lock_sock */

@ -1142,6 +1140,8 @@ void virtio_transport_recv_pkt(struct virtio_transport *t,
goto free_pkt;
}

space_available = virtio_transport_space_update(sk, pkt);

/* Update CID in case it has changed after a transport reset event */
vsk->local_addr.svm_cid = dst.svm_cid;

@ -133,7 +133,10 @@ FIXTURE_VARIANT_ADD(tls, 13_chacha)

FIXTURE_SETUP(tls)
{
union tls_crypto_context tls12;
union {
struct tls12_crypto_info_aes_gcm_128 aes128;
struct tls12_crypto_info_chacha20_poly1305 chacha20;
} tls12;
struct sockaddr_in addr;
socklen_t len;
int sfd, ret;

@ -143,14 +146,16 @@ FIXTURE_SETUP(tls)
len = sizeof(addr);

memset(&tls12, 0, sizeof(tls12));
tls12.info.version = variant->tls_version;
tls12.info.cipher_type = variant->cipher_type;
switch (variant->cipher_type) {
case TLS_CIPHER_CHACHA20_POLY1305:
tls12_sz = sizeof(tls12_crypto_info_chacha20_poly1305);
tls12_sz = sizeof(struct tls12_crypto_info_chacha20_poly1305);
tls12.chacha20.info.version = variant->tls_version;
tls12.chacha20.info.cipher_type = variant->cipher_type;
break;
case TLS_CIPHER_AES_GCM_128:
tls12_sz = sizeof(tls12_crypto_info_aes_gcm_128);
tls12_sz = sizeof(struct tls12_crypto_info_aes_gcm_128);
tls12.aes128.info.version = variant->tls_version;
tls12.aes128.info.cipher_type = variant->cipher_type;
break;
default:
tls12_sz = 0;

@ -26,6 +26,7 @@
#include <inttypes.h>
#include <linux/errqueue.h>
#include <linux/if_ether.h>
#include <linux/if_packet.h>
#include <linux/ipv6.h>
#include <linux/net_tstamp.h>
#include <netdb.h>

@ -34,7 +35,6 @@
#include <netinet/ip.h>
#include <netinet/udp.h>
#include <netinet/tcp.h>
#include <netpacket/packet.h>
#include <poll.h>
#include <stdarg.h>
#include <stdbool.h>

@ -495,12 +495,12 @@ static void do_test(int family, unsigned int report_opt)
total_len = cfg_payload_len;
if (cfg_use_pf_packet || cfg_proto == SOCK_RAW) {
total_len += sizeof(struct udphdr);
if (cfg_use_pf_packet || cfg_ipproto == IPPROTO_RAW)
if (cfg_use_pf_packet || cfg_ipproto == IPPROTO_RAW) {
if (family == PF_INET)
total_len += sizeof(struct iphdr);
else
total_len += sizeof(struct ipv6hdr);

}
/* special case, only rawv6_sendmsg:
* pass proto in sin6_port if not connected
* also see ANK comment in net/ipv4/raw.c

@ -23,7 +23,7 @@ ip -net "$ns0" addr add 127.0.0.1 dev lo

trap cleanup EXIT

currentyear=$(date +%G)
currentyear=$(date +%Y)
lastyear=$((currentyear-1))
ip netns exec "$ns0" nft -f /dev/stdin <<EOF
table inet filter {