mirror of https://gitee.com/openkylin/linux.git
ASoC: More updates for v3.10
The main additional change here is Lars-Peter's DMA work plus the platform conversions which have been tested - getting this in mainline will make life easier for development after the merge window. These factor a large chunk of code out of the drivers for the platforms using dmaengine, greatly simplifying development. -----BEGIN PGP SIGNATURE----- Version: GnuPG v1.4.12 (GNU/Linux) iQIcBAABAgAGBQJRb/60AAoJELSic+t+oim9vL8P+wfaXRBGDtxhzMnTCf/cR2sc vlRnmSjA4s14vCoPffQJI0zaGLwDT5FnQtg6DAkP/8vXRoUz4Hgb0UwwDPiQNGED 6Wmqm7mU+XGWgo4bPBA1e3Bt/9phVVO62rNPMNEnNqcp/Fa3RrdFAfxy4EUz9sKa lX4KJETCbIvLpOJmgq3H/WGtgYEnULHSCCNUQQ+fEY/VRQLsMtY5+tnZIJilMez0 Ff6B084kE5oQpMsxdf89q8O5Uqc8lB0Xleluh0yQ1YZK3lxELMgr1Z7BkitysaJh uid+Ze8Vj2n5duI87OZcHN1Z2SibgTzqUwsd6YGCUKK3D3KVcSYgaYNn3zY09KNG tYlckAOJgVXqe1jedsfyuKTraz2JBY+jWYcIf8cRbwxxZpItG4Oj3idIBAKw+FrE /0DGqW7U9wXKx8pg7BH3dE6J6WVZ5uryaQX9d+nC8CGGjpcCla5L5jl/8stgGniW StTk4ETB6PP6iApv11p/7CXaTqXi+9UHmlcHFo11oQKiJFx4kG21DKQCXS0ycocM j0/gRGesWrVawYwJ86dhciUJjWlTHwproE/75i1JsTd3eRX6ybjBeNTTAI2ll/BJ BFDTS7tbX7GVcNbwXCvxW6pKOPpqV9jh0yMgpaB4jtkXOTKV/Z73ThPEql5w27c5 OTBtONmiYeBcZGvgKQ3r =MXrq -----END PGP SIGNATURE----- Merge tag 'asoc-v3.10-2' of git://git.kernel.org/pub/scm/linux/kernel/git/broonie/sound into for-next ASoC: More updates for v3.10 The main additional change here is Lars-Peter's DMA work plus the platform conversions which have been tested - getting this in mainline will make life easier for development after the merge window. These factor a large chunk of code out of the drivers for the platforms using dmaengine, greatly simplifying development.
This commit is contained in:
commit 8dd2b66d1a
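The "DMA work" above is ASoC's generic dmaengine PCM layer: platform drivers stop open-coding their PCM/DMA glue and register a shared helper instead. A minimal sketch of the registration pattern, assuming the snd_dmaengine_pcm_register()/snd_dmaengine_pcm_config API this series introduces (the example_* names are invented for illustration):

    #include <linux/platform_device.h>
    #include <sound/dmaengine_pcm.h>

    /* Sketch only: the PCM code that used to be duplicated in each
     * platform driver is replaced by one call into the shared layer. */
    static const struct snd_dmaengine_pcm_config example_pcm_config = {
            /* Generic hw_params -> dma_slave_config translation helper. */
            .prepare_slave_config = snd_dmaengine_pcm_prepare_slave_config,
            .prealloc_buffer_size = 64 * 1024,
    };

    static int example_platform_probe(struct platform_device *pdev)
    {
            /* DMA channels come from dmaengine (e.g. DT "dmas" bindings). */
            return snd_dmaengine_pcm_register(&pdev->dev,
                                              &example_pcm_config, 0);
    }

This is why the platform conversions in this pull are largely code removals.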
@@ -227,7 +227,7 @@ X!Isound/sound_firmware.c
 <chapter id="uart16x50">
 <title>16x50 UART Driver</title>
 !Edrivers/tty/serial/serial_core.c
-!Edrivers/tty/serial/8250/8250.c
+!Edrivers/tty/serial/8250/8250_core.c
 </chapter>

 <chapter id="fbdev">

@@ -1,4 +1,4 @@
-Copyright (c) 2003-2012 QLogic Corporation
+Copyright (c) 2003-2013 QLogic Corporation
 QLogic Linux FC-FCoE Driver

 This program includes a device driver for Linux 3.x.

@@ -4941,6 +4941,12 @@ W: logfs.org
 S: Maintained
 F: fs/logfs/

+LPC32XX MACHINE SUPPORT
+M: Roland Stigge <stigge@antcom.de>
+L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
+S: Maintained
+F: arch/arm/mach-lpc32xx/
+
 LSILOGIC MPT FUSION DRIVERS (FC/SAS/SPI)
 M: Nagalakshmi Nandigama <Nagalakshmi.Nandigama@lsi.com>
 M: Sreekanth Reddy <Sreekanth.Reddy@lsi.com>

@@ -6951,7 +6957,6 @@ F: drivers/scsi/st*

 SCTP PROTOCOL
 M: Vlad Yasevich <vyasevich@gmail.com>
-M: Sridhar Samudrala <sri@us.ibm.com>
 M: Neil Horman <nhorman@tuxdriver.com>
 L: linux-sctp@vger.kernel.org
 W: http://lksctp.sourceforge.net

Makefile (2 lines changed)

@@ -1,7 +1,7 @@
 VERSION = 3
 PATCHLEVEL = 9
 SUBLEVEL = 0
-EXTRAVERSION = -rc6
+EXTRAVERSION = -rc7
 NAME = Unicycling Gorilla

 # *DOCUMENTATION*

@@ -39,7 +39,7 @@ static inline long arch_local_irq_save(void)
 " flag.nz %0 \n"
 : "=r"(temp), "=r"(flags)
 : "n"((STATUS_E1_MASK | STATUS_E2_MASK))
-: "cc");
+: "memory", "cc");

 return flags;
 }

@@ -53,7 +53,8 @@ static inline void arch_local_irq_restore(unsigned long flags)
 __asm__ __volatile__(
 " flag %0 \n"
 :
-: "r"(flags));
+: "r"(flags)
+: "memory");
 }

 /*

@@ -73,7 +74,8 @@ static inline void arch_local_irq_disable(void)
 " and %0, %0, %1 \n"
 " flag %0 \n"
 : "=&r"(temp)
-: "n"(~(STATUS_E1_MASK | STATUS_E2_MASK)));
+: "n"(~(STATUS_E1_MASK | STATUS_E2_MASK))
+: "memory");
 }

 /*

@@ -85,7 +87,9 @@ static inline long arch_local_save_flags(void)

 __asm__ __volatile__(
 " lr %0, [status32] \n"
-: "=&r"(temp));
+: "=&r"(temp)
+:
+: "memory");

 return temp;
 }

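All four ARC hunks above make the same fix: the inline asm that toggles the STATUS32 interrupt-enable bits now declares a "memory" clobber, so the compiler treats it as a barrier. A small illustrative sketch, not from the patch, of the property being fixed:

    static int shared_counter;

    /* Without the "memory" clobber, the compiler could legally move the
     * increment outside the IRQ-disabled region; the clobber pins all
     * memory accesses between save and restore. */
    static void example_increment(void)
    {
            unsigned long flags;

            flags = arch_local_irq_save();
            shared_counter++;
            arch_local_irq_restore(flags);
    }
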
@@ -152,7 +152,6 @@ saif1: saif@80046000 {
 i2c0: i2c@80058000 {
 pinctrl-names = "default";
 pinctrl-0 = <&i2c0_pins_a>;
-clock-frequency = <400000>;
 status = "okay";

 sgtl5000: codec@0a {

@@ -70,7 +70,6 @@ apbx@80040000 {
 i2c0: i2c@80058000 {
 pinctrl-names = "default";
 pinctrl-0 = <&i2c0_pins_a>;
-clock-frequency = <400000>;
 status = "okay";

 rtc: rtc@51 {

@@ -91,6 +91,7 @@ timer@00a00600 {
 compatible = "arm,cortex-a9-twd-timer";
 reg = <0x00a00600 0x20>;
 interrupts = <1 13 0xf01>;
+clocks = <&clks 15>;
 };

 L2: l2-cache@00a02000 {

@@ -96,11 +96,11 @@ pmx_led_health_brt_ctrl_2: pmx-led-health-brt-ctrl-2 {
 marvell,function = "gpio";
 };
 pmx_led_rebuild_brt_ctrl_1: pmx-led-rebuild-brt-ctrl-1 {
-marvell,pins = "mpp44";
+marvell,pins = "mpp46";
 marvell,function = "gpio";
 };
 pmx_led_rebuild_brt_ctrl_2: pmx-led-rebuild-brt-ctrl-2 {
-marvell,pins = "mpp45";
+marvell,pins = "mpp47";
 marvell,function = "gpio";
 };

@@ -157,14 +157,14 @@ power_led {
 gpios = <&gpio0 16 0>;
 linux,default-trigger = "default-on";
 };
-health_led1 {
+rebuild_led {
+label = "status:white:rebuild_led";
+gpios = <&gpio1 4 0>;
+};
+health_led {
 label = "status:red:health_led";
 gpios = <&gpio1 5 0>;
 };
-health_led2 {
-label = "status:white:health_led";
-gpios = <&gpio1 4 0>;
-};
 backup_led {
 label = "status:blue:backup_led";
 gpios = <&gpio0 15 0>;

@@ -257,6 +257,7 @@ int __init mx35_clocks_init(void)
 clk_register_clkdev(clk[wdog_gate], NULL, "imx2-wdt.0");
 clk_register_clkdev(clk[nfc_div], NULL, "imx25-nand.0");
 clk_register_clkdev(clk[csi_gate], NULL, "mx3-camera.0");
+clk_register_clkdev(clk[admux_gate], "audmux", NULL);

 clk_prepare_enable(clk[spba_gate]);
 clk_prepare_enable(clk[gpio1_gate]);

@@ -265,6 +266,7 @@ int __init mx35_clocks_init(void)
 clk_prepare_enable(clk[iim_gate]);
 clk_prepare_enable(clk[emi_gate]);
 clk_prepare_enable(clk[max_gate]);
+clk_prepare_enable(clk[iomuxc_gate]);

 /*
  * SCC is needed to boot via mmc after a watchdog reset. The clock code

@@ -115,7 +115,7 @@ static const char *gpu2d_core_sels[] = { "axi", "pll3_usb_otg", "pll2_pfd0_352m"
 static const char *gpu3d_core_sels[] = { "mmdc_ch0_axi", "pll3_usb_otg", "pll2_pfd1_594m", "pll2_pfd2_396m", };
 static const char *gpu3d_shader_sels[] = { "mmdc_ch0_axi", "pll3_usb_otg", "pll2_pfd1_594m", "pll2_pfd9_720m", };
 static const char *ipu_sels[] = { "mmdc_ch0_axi", "pll2_pfd2_396m", "pll3_120m", "pll3_pfd1_540m", };
-static const char *ldb_di_sels[] = { "pll5_video", "pll2_pfd0_352m", "pll2_pfd2_396m", "mmdc_ch1_axi", "pll3_pfd1_540m", };
+static const char *ldb_di_sels[] = { "pll5_video", "pll2_pfd0_352m", "pll2_pfd2_396m", "mmdc_ch1_axi", "pll3_usb_otg", };
 static const char *ipu_di_pre_sels[] = { "mmdc_ch0_axi", "pll3_usb_otg", "pll5_video", "pll2_pfd0_352m", "pll2_pfd2_396m", "pll3_pfd1_540m", };
 static const char *ipu1_di0_sels[] = { "ipu1_di0_pre", "dummy", "dummy", "ldb_di0", "ldb_di1", };
 static const char *ipu1_di1_sels[] = { "ipu1_di1_pre", "dummy", "dummy", "ldb_di0", "ldb_di1", };

@@ -443,7 +443,6 @@ int __init mx6q_clocks_init(void)

 clk_register_clkdev(clk[gpt_ipg], "ipg", "imx-gpt.0");
 clk_register_clkdev(clk[gpt_ipg_per], "per", "imx-gpt.0");
-clk_register_clkdev(clk[twd], NULL, "smp_twd");
 clk_register_clkdev(clk[cko1_sel], "cko1_sel", NULL);
 clk_register_clkdev(clk[ahb], "ahb", NULL);
 clk_register_clkdev(clk[cko1], "cko1", NULL);

@@ -20,10 +20,15 @@ static struct mv643xx_eth_platform_data iomega_ix2_200_ge00_data = {
 .duplex = DUPLEX_FULL,
 };

+static struct mv643xx_eth_platform_data iomega_ix2_200_ge01_data = {
+	.phy_addr = MV643XX_ETH_PHY_ADDR(11),
+};
+
 void __init iomega_ix2_200_init(void)
 {
 /*
  * Basic setup. Needs to be called early.
  */
-kirkwood_ge01_init(&iomega_ix2_200_ge00_data);
+kirkwood_ge00_init(&iomega_ix2_200_ge00_data);
+kirkwood_ge01_init(&iomega_ix2_200_ge01_data);
 }

@@ -61,7 +61,6 @@ static struct irq_domain *armada_370_xp_mpic_domain;
 */
 static void armada_370_xp_irq_mask(struct irq_data *d)
 {
-#ifdef CONFIG_SMP
 irq_hw_number_t hwirq = irqd_to_hwirq(d);

 if (hwirq != ARMADA_370_XP_TIMER0_PER_CPU_IRQ)

@@ -70,15 +69,10 @@ static void armada_370_xp_irq_mask(struct irq_data *d)
 else
 writel(hwirq, per_cpu_int_base +
 ARMADA_370_XP_INT_SET_MASK_OFFS);
-#else
-writel(irqd_to_hwirq(d),
-per_cpu_int_base + ARMADA_370_XP_INT_SET_MASK_OFFS);
-#endif
 }

 static void armada_370_xp_irq_unmask(struct irq_data *d)
 {
-#ifdef CONFIG_SMP
 irq_hw_number_t hwirq = irqd_to_hwirq(d);

 if (hwirq != ARMADA_370_XP_TIMER0_PER_CPU_IRQ)

@@ -87,10 +81,6 @@ static void armada_370_xp_irq_unmask(struct irq_data *d)
 else
 writel(hwirq, per_cpu_int_base +
 ARMADA_370_XP_INT_CLEAR_MASK_OFFS);
-#else
-writel(irqd_to_hwirq(d),
-per_cpu_int_base + ARMADA_370_XP_INT_CLEAR_MASK_OFFS);
-#endif
 }

 #ifdef CONFIG_SMP

@@ -146,7 +136,11 @@ static int armada_370_xp_mpic_irq_map(struct irq_domain *h,
 unsigned int virq, irq_hw_number_t hw)
 {
 armada_370_xp_irq_mask(irq_get_irq_data(virq));
-writel(hw, main_int_base + ARMADA_370_XP_INT_SET_ENABLE_OFFS);
+if (hw != ARMADA_370_XP_TIMER0_PER_CPU_IRQ)
+	writel(hw, per_cpu_int_base +
+		ARMADA_370_XP_INT_CLEAR_MASK_OFFS);
+else
+	writel(hw, main_int_base + ARMADA_370_XP_INT_SET_ENABLE_OFFS);
 irq_set_status_flags(virq, IRQ_LEVEL);

 if (hw == ARMADA_370_XP_TIMER0_PER_CPU_IRQ) {

@@ -188,10 +188,8 @@

 #if defined(CONFIG_CPU_S3C2416)
 #define NR_IRQS (IRQ_S3C2416_I2S1 + 1)
-#elif defined(CONFIG_CPU_S3C2443)
-#define NR_IRQS (IRQ_S3C2443_AC97+1)
 #else
-#define NR_IRQS (IRQ_S3C2440_AC97+1)
+#define NR_IRQS (IRQ_S3C2443_AC97 + 1)
 #endif

 /* compatibility define. */

@@ -500,7 +500,7 @@ struct s3c_irq_intc *s3c24xx_init_intc(struct device_node *np,
 base = (void *)0xfd000000;

 intc->reg_mask = base + 0xa4;
-intc->reg_pending = base + 0x08;
+intc->reg_pending = base + 0xa8;
 irq_num = 20;
 irq_start = S3C2410_IRQ(32);
 irq_offset = 4;

@@ -27,7 +27,7 @@ static inline unsigned long arch_local_save_flags(void)
 /* set interrupt enabled status */
 static inline void arch_local_irq_restore(unsigned long flags)
 {
-asm volatile (" mvc .s2 %0,CSR\n" : : "b"(flags));
+asm volatile (" mvc .s2 %0,CSR\n" : : "b"(flags) : "memory");
 }

 /* unconditionally enable interrupts */

@@ -849,17 +849,6 @@ static palinfo_entry_t palinfo_entries[]={

 #define NR_PALINFO_ENTRIES (int) ARRAY_SIZE(palinfo_entries)

-/*
- * this array is used to keep track of the proc entries we create. This is
- * required in the module mode when we need to remove all entries. The procfs code
- * does not do recursion of deletion
- *
- * Notes:
- * - +1 accounts for the cpuN directory entry in /proc/pal
- */
-#define NR_PALINFO_PROC_ENTRIES (NR_CPUS*(NR_PALINFO_ENTRIES+1))
-
-static struct proc_dir_entry *palinfo_proc_entries[NR_PALINFO_PROC_ENTRIES];
 static struct proc_dir_entry *palinfo_dir;

 /*

@@ -971,60 +960,32 @@ palinfo_read_entry(char *page, char **start, off_t off, int count, int *eof, voi
 static void __cpuinit
 create_palinfo_proc_entries(unsigned int cpu)
 {
-# define CPUSTR "cpu%d"
-
 	pal_func_cpu_u_t f;
-	struct proc_dir_entry **pdir;
 	struct proc_dir_entry *cpu_dir;
 	int j;
-	char cpustr[sizeof(CPUSTR)];
-
-
-	/*
-	 * we keep track of created entries in a depth-first order for
-	 * cleanup purposes. Each entry is stored into palinfo_proc_entries
-	 */
-	sprintf(cpustr,CPUSTR, cpu);
+	char cpustr[3+4+1]; /* cpu numbers are up to 4095 on itanic */
+
+	sprintf(cpustr, "cpu%d", cpu);

 	cpu_dir = proc_mkdir(cpustr, palinfo_dir);
+	if (!cpu_dir)
+		return;

 	f.req_cpu = cpu;

-	/*
-	 * Compute the location to store per cpu entries
-	 * We dont store the top level entry in this list, but
-	 * remove it finally after removing all cpu entries.
-	 */
-	pdir = &palinfo_proc_entries[cpu*(NR_PALINFO_ENTRIES+1)];
-	*pdir++ = cpu_dir;
 	for (j=0; j < NR_PALINFO_ENTRIES; j++) {
 		f.func_id = j;
-		*pdir = create_proc_read_entry(
-				palinfo_entries[j].name, 0, cpu_dir,
-				palinfo_read_entry, (void *)f.value);
-		pdir++;
+		create_proc_read_entry(
+				palinfo_entries[j].name, 0, cpu_dir,
+				palinfo_read_entry, (void *)f.value);
 	}
 }

 static void
 remove_palinfo_proc_entries(unsigned int hcpu)
 {
-	int j;
-	struct proc_dir_entry *cpu_dir, **pdir;
-
-	pdir = &palinfo_proc_entries[hcpu*(NR_PALINFO_ENTRIES+1)];
-	cpu_dir = *pdir;
-	*pdir++=NULL;
-	for (j=0; j < (NR_PALINFO_ENTRIES); j++) {
-		if ((*pdir)) {
-			remove_proc_entry ((*pdir)->name, cpu_dir);
-			*pdir ++= NULL;
-		}
-	}
-
-	if (cpu_dir) {
-		remove_proc_entry(cpu_dir->name, palinfo_dir);
-	}
+	char cpustr[3+4+1]; /* cpu numbers are up to 4095 on itanic */
+	sprintf(cpustr, "cpu%d", hcpu);
+	remove_proc_subtree(cpustr, palinfo_dir);
 }

 static int __cpuinit palinfo_cpu_callback(struct notifier_block *nfb,

@@ -1058,6 +1019,8 @@ palinfo_init(void)

 printk(KERN_INFO "PAL Information Facility v%s\n", PALINFO_VERSION);
 palinfo_dir = proc_mkdir("pal", NULL);
+if (!palinfo_dir)
+	return -ENOMEM;

 /* Create palinfo dirs in /proc for all online cpus */
 for_each_online_cpu(i) {

@@ -1073,22 +1036,8 @@ palinfo_init(void)
 static void __exit
 palinfo_exit(void)
 {
-	int i = 0;
-
-	/* remove all nodes: depth first pass. Could optimize this */
-	for_each_online_cpu(i) {
-		remove_palinfo_proc_entries(i);
-	}
-
-	/*
-	 * Remove the top level entry finally
-	 */
-	remove_proc_entry(palinfo_dir->name, NULL);
-
-	/*
-	 * Unregister from cpu notifier callbacks
-	 */
 	unregister_hotcpu_notifier(&palinfo_cpu_notifier);
+	remove_proc_subtree("pal", NULL);
 }

 module_init(palinfo_init);

@@ -86,4 +86,24 @@ static inline int gpio_cansleep(unsigned gpio)
 return gpio < MCFGPIO_PIN_MAX ? 0 : __gpio_cansleep(gpio);
 }

+static inline int gpio_request_one(unsigned gpio, unsigned long flags, const char *label)
+{
+	int err;
+
+	err = gpio_request(gpio, label);
+	if (err)
+		return err;
+
+	if (flags & GPIOF_DIR_IN)
+		err = gpio_direction_input(gpio);
+	else
+		err = gpio_direction_output(gpio,
+			(flags & GPIOF_INIT_HIGH) ? 1 : 0);
+
+	if (err)
+		gpio_free(gpio);
+
+	return err;
+}
+
 #endif

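The hunk above adds an m68k-local gpio_request_one(), mirroring the generic gpiolib helper: request, apply the direction flags, and free on failure. A hedged usage sketch (GPIO number and label invented for illustration):

    /* Request GPIO 42 as an output driven high; on failure the helper
     * has already released the GPIO, so only the error needs handling. */
    int err = gpio_request_one(42, GPIOF_DIR_OUT | GPIOF_INIT_HIGH,
                               "example-led");
    if (err)
            pr_err("example-led gpio unavailable: %d\n", err);
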
@@ -186,7 +186,13 @@ static long pSeries_lpar_hpte_remove(unsigned long hpte_group)
 (0x1UL << 4), &dummy1, &dummy2);
 if (lpar_rc == H_SUCCESS)
 return i;
-BUG_ON(lpar_rc != H_NOT_FOUND);
+
+/*
+ * The test for adjunct partition is performed before the
+ * ANDCOND test. H_RESOURCE may be returned, so we need to
+ * check for that as well.
+ */
+BUG_ON(lpar_rc != H_NOT_FOUND && lpar_rc != H_RESOURCE);

 slot_offset++;
 slot_offset &= 0x7;

@@ -40,7 +40,15 @@
 #include <asm/percpu.h>
 #include <arch/spr_def.h>

-/* Set and clear kernel interrupt masks. */
+/*
+ * Set and clear kernel interrupt masks.
+ *
+ * NOTE: __insn_mtspr() is a compiler builtin marked as a memory
+ * clobber. We rely on it being equivalent to a compiler barrier in
+ * this code since arch_local_irq_save() and friends must act as
+ * compiler barriers. This compiler semantic is baked into enough
+ * places that the compiler will maintain it going forward.
+ */
 #if CHIP_HAS_SPLIT_INTR_MASK()
 #if INT_PERF_COUNT < 32 || INT_AUX_PERF_COUNT < 32 || INT_MEM_ERROR >= 32
 # error Fix assumptions about which word various interrupts are in

@@ -703,7 +703,10 @@ static inline void arch_leave_lazy_mmu_mode(void)
 PVOP_VCALL0(pv_mmu_ops.lazy_mode.leave);
 }

-void arch_flush_lazy_mmu_mode(void);
+static inline void arch_flush_lazy_mmu_mode(void)
+{
+	PVOP_VCALL0(pv_mmu_ops.lazy_mode.flush);
+}

 static inline void __set_fixmap(unsigned /* enum fixed_addresses */ idx,
 phys_addr_t phys, pgprot_t flags)

@@ -91,6 +91,7 @@ struct pv_lazy_ops {
 /* Set deferred update mode, used for batching operations. */
 void (*enter)(void);
 void (*leave)(void);
+void (*flush)(void);
 };

 struct pv_time_ops {

@@ -679,6 +680,7 @@ void paravirt_end_context_switch(struct task_struct *next);

 void paravirt_enter_lazy_mmu(void);
 void paravirt_leave_lazy_mmu(void);
+void paravirt_flush_lazy_mmu(void);

 void _paravirt_nop(void);
 u32 _paravirt_ident_32(u32);

@@ -7,7 +7,7 @@

 #define tlb_flush(tlb) \
 { \
-if (tlb->fullmm == 0) \
+if (!tlb->fullmm && !tlb->need_flush_all) \
 flush_tlb_mm_range(tlb->mm, tlb->start, tlb->end, 0UL); \
 else \
 flush_tlb_mm_range(tlb->mm, 0UL, TLB_FLUSH_ALL, 0UL); \

@@ -314,10 +314,11 @@ int intel_pmu_drain_bts_buffer(void)
 if (top <= at)
 return 0;

+memset(&regs, 0, sizeof(regs));
+
 ds->bts_index = ds->bts_buffer_base;

 perf_sample_data_init(&data, 0, event->hw.last_period);
-regs.ip = 0;

 /*
  * Prepare a generic sample, i.e. fill in the invariant fields.

@@ -263,6 +263,18 @@ void paravirt_leave_lazy_mmu(void)
 leave_lazy(PARAVIRT_LAZY_MMU);
 }

+void paravirt_flush_lazy_mmu(void)
+{
+	preempt_disable();
+
+	if (paravirt_get_lazy_mode() == PARAVIRT_LAZY_MMU) {
+		arch_leave_lazy_mmu_mode();
+		arch_enter_lazy_mmu_mode();
+	}
+
+	preempt_enable();
+}
+
 void paravirt_start_context_switch(struct task_struct *prev)
 {
 BUG_ON(preemptible());

@@ -292,18 +304,6 @@ enum paravirt_lazy_mode paravirt_get_lazy_mode(void)
 return this_cpu_read(paravirt_lazy_mode);
 }

-void arch_flush_lazy_mmu_mode(void)
-{
-	preempt_disable();
-
-	if (paravirt_get_lazy_mode() == PARAVIRT_LAZY_MMU) {
-		arch_leave_lazy_mmu_mode();
-		arch_enter_lazy_mmu_mode();
-	}
-
-	preempt_enable();
-}
-
 struct pv_info pv_info = {
 .name = "bare hardware",
 .paravirt_enabled = 0,

@@ -475,6 +475,7 @@ struct pv_mmu_ops pv_mmu_ops = {
 .lazy_mode = {
 .enter = paravirt_nop,
 .leave = paravirt_nop,
+.flush = paravirt_nop,
 },

 .set_fixmap = native_set_fixmap,

@@ -1334,6 +1334,7 @@ __init void lguest_init(void)
 pv_mmu_ops.read_cr3 = lguest_read_cr3;
 pv_mmu_ops.lazy_mode.enter = paravirt_enter_lazy_mmu;
 pv_mmu_ops.lazy_mode.leave = lguest_leave_lazy_mmu_mode;
+pv_mmu_ops.lazy_mode.flush = paravirt_flush_lazy_mmu;
 pv_mmu_ops.pte_update = lguest_pte_update;
 pv_mmu_ops.pte_update_defer = lguest_pte_update;

@@ -378,10 +378,12 @@ static noinline __kprobes int vmalloc_fault(unsigned long address)
 if (pgd_none(*pgd_ref))
 return -1;

-if (pgd_none(*pgd))
+if (pgd_none(*pgd)) {
 set_pgd(pgd, *pgd_ref);
-else
+arch_flush_lazy_mmu_mode();
+} else {
 BUG_ON(pgd_page_vaddr(*pgd) != pgd_page_vaddr(*pgd_ref));
+}

 /*
  * Below here mismatches are bugs because these lower tables

@@ -68,7 +68,7 @@ static int print_split(struct split_state *s)
 s->gpg++;
 i += GPS/PAGE_SIZE;
 } else if (level == PG_LEVEL_2M) {
-if (!(pte_val(*pte) & _PAGE_PSE)) {
+if ((pte_val(*pte) & _PAGE_PRESENT) && !(pte_val(*pte) & _PAGE_PSE)) {
 printk(KERN_ERR
 "%lx level %d but not PSE %Lx\n",
 addr, level, (u64)pte_val(*pte));

@@ -467,7 +467,7 @@ try_preserve_large_page(pte_t *kpte, unsigned long address,
 * We are safe now. Check whether the new pgprot is the same:
 */
 old_pte = *kpte;
-old_prot = new_prot = req_prot = pte_pgprot(old_pte);
+old_prot = req_prot = pte_pgprot(old_pte);

 pgprot_val(req_prot) &= ~pgprot_val(cpa->mask_clr);
 pgprot_val(req_prot) |= pgprot_val(cpa->mask_set);

@@ -478,12 +478,12 @@ try_preserve_large_page(pte_t *kpte, unsigned long address,
 * a non present pmd. The canon_pgprot will clear _PAGE_GLOBAL
 * for the ancient hardware that doesn't support it.
 */
-if (pgprot_val(new_prot) & _PAGE_PRESENT)
-pgprot_val(new_prot) |= _PAGE_PSE | _PAGE_GLOBAL;
+if (pgprot_val(req_prot) & _PAGE_PRESENT)
+pgprot_val(req_prot) |= _PAGE_PSE | _PAGE_GLOBAL;
 else
-pgprot_val(new_prot) &= ~(_PAGE_PSE | _PAGE_GLOBAL);
+pgprot_val(req_prot) &= ~(_PAGE_PSE | _PAGE_GLOBAL);

-new_prot = canon_pgprot(new_prot);
+req_prot = canon_pgprot(req_prot);

 /*
  * old_pte points to the large page base address. So we need

@@ -1413,6 +1413,8 @@ void kernel_map_pages(struct page *page, int numpages, int enable)
 * but that can deadlock->flush only current cpu:
 */
 __flush_tlb_all();
+
+arch_flush_lazy_mmu_mode();
 }

 #ifdef CONFIG_HIBERNATION

@@ -58,6 +58,13 @@ void ___pte_free_tlb(struct mmu_gather *tlb, struct page *pte)
 void ___pmd_free_tlb(struct mmu_gather *tlb, pmd_t *pmd)
 {
 paravirt_release_pmd(__pa(pmd) >> PAGE_SHIFT);
+/*
+ * NOTE! For PAE, any changes to the top page-directory-pointer-table
+ * entries need a full cr3 reload to flush.
+ */
+#ifdef CONFIG_X86_PAE
+tlb->need_flush_all = 1;
+#endif
 tlb_remove_page(tlb, virt_to_page(pmd));
 }

@@ -1748,14 +1748,18 @@ static void *m2v(phys_addr_t maddr)
 }

 /* Set the page permissions on an identity-mapped pages */
-static void set_page_prot(void *addr, pgprot_t prot)
+static void set_page_prot_flags(void *addr, pgprot_t prot, unsigned long flags)
 {
 unsigned long pfn = __pa(addr) >> PAGE_SHIFT;
 pte_t pte = pfn_pte(pfn, prot);

-if (HYPERVISOR_update_va_mapping((unsigned long)addr, pte, 0))
+if (HYPERVISOR_update_va_mapping((unsigned long)addr, pte, flags))
 BUG();
 }
+static void set_page_prot(void *addr, pgprot_t prot)
+{
+	return set_page_prot_flags(addr, prot, UVMF_NONE);
+}
 #ifdef CONFIG_X86_32
 static void __init xen_map_identity_early(pmd_t *pmd, unsigned long max_pfn)
 {

@@ -1839,12 +1843,12 @@ static void __init check_pt_base(unsigned long *pt_base, unsigned long *pt_end,
 unsigned long addr)
 {
 if (*pt_base == PFN_DOWN(__pa(addr))) {
-set_page_prot((void *)addr, PAGE_KERNEL);
+set_page_prot_flags((void *)addr, PAGE_KERNEL, UVMF_INVLPG);
 clear_page((void *)addr);
 (*pt_base)++;
 }
 if (*pt_end == PFN_DOWN(__pa(addr))) {
-set_page_prot((void *)addr, PAGE_KERNEL);
+set_page_prot_flags((void *)addr, PAGE_KERNEL, UVMF_INVLPG);
 clear_page((void *)addr);
 (*pt_end)--;
 }

@@ -2196,6 +2200,7 @@ static const struct pv_mmu_ops xen_mmu_ops __initconst = {
 .lazy_mode = {
 .enter = paravirt_enter_lazy_mmu,
 .leave = xen_leave_lazy_mmu,
+.flush = paravirt_flush_lazy_mmu,
 },

 .set_fixmap = xen_set_fixmap,

@@ -229,6 +229,8 @@ queue_store_##name(struct request_queue *q, const char *page, size_t count) \
 unsigned long val; \
 ssize_t ret; \
 ret = queue_var_store(&val, page, count); \
+if (ret < 0) \
+return ret; \
 if (neg) \
 val = !val; \
 \

@@ -257,7 +257,6 @@ void delete_partition(struct gendisk *disk, int partno)

 hd_struct_put(part);
 }
-EXPORT_SYMBOL(delete_partition);

 static ssize_t whole_disk_show(struct device *dev,
 struct device_attribute *attr, char *buf)

crypto/gcm.c (17 lines changed)

@@ -44,6 +44,7 @@ struct crypto_rfc4543_ctx {

 struct crypto_rfc4543_req_ctx {
 u8 auth_tag[16];
+u8 assocbuf[32];
 struct scatterlist cipher[1];
 struct scatterlist payload[2];
 struct scatterlist assoc[2];

@@ -1133,9 +1134,19 @@ static struct aead_request *crypto_rfc4543_crypt(struct aead_request *req,
 scatterwalk_crypto_chain(payload, dst, vdst == req->iv + 8, 2);
 assoclen += 8 + req->cryptlen - (enc ? 0 : authsize);

-sg_init_table(assoc, 2);
-sg_set_page(assoc, sg_page(req->assoc), req->assoc->length,
-req->assoc->offset);
+if (req->assoc->length == req->assoclen) {
+	sg_init_table(assoc, 2);
+	sg_set_page(assoc, sg_page(req->assoc), req->assoc->length,
+		    req->assoc->offset);
+} else {
+	BUG_ON(req->assoclen > sizeof(rctx->assocbuf));
+
+	scatterwalk_map_and_copy(rctx->assocbuf, req->assoc, 0,
+				 req->assoclen, 0);
+
+	sg_init_table(assoc, 2);
+	sg_set_buf(assoc, rctx->assocbuf, req->assoclen);
+}
 scatterwalk_crypto_chain(assoc, payload, 0, 2);

 aead_request_set_tfm(subreq, ctx->child);

@@ -150,6 +150,7 @@ enum piix_controller_ids {
 tolapai_sata,
 piix_pata_vmw, /* PIIX4 for VMware, spurious DMA_ERR */
 ich8_sata_snb,
+ich8_2port_sata_snb,
 };

 struct piix_map_db {

@@ -304,7 +305,7 @@ static const struct pci_device_id piix_pci_tbl[] = {
 /* SATA Controller IDE (Lynx Point) */
 { 0x8086, 0x8c01, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich8_sata_snb },
 /* SATA Controller IDE (Lynx Point) */
-{ 0x8086, 0x8c08, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich8_2port_sata },
+{ 0x8086, 0x8c08, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich8_2port_sata_snb },
 /* SATA Controller IDE (Lynx Point) */
 { 0x8086, 0x8c09, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich8_2port_sata },
 /* SATA Controller IDE (Lynx Point-LP) */

@@ -439,6 +440,7 @@ static const struct piix_map_db *piix_map_db_table[] = {
 [ich8m_apple_sata] = &ich8m_apple_map_db,
 [tolapai_sata] = &tolapai_map_db,
 [ich8_sata_snb] = &ich8_map_db,
+[ich8_2port_sata_snb] = &ich8_2port_map_db,
 };

 static struct pci_bits piix_enable_bits[] = {

@@ -1242,6 +1244,16 @@ static struct ata_port_info piix_port_info[] = {
 .udma_mask = ATA_UDMA6,
 .port_ops = &piix_sata_ops,
 },
+
+[ich8_2port_sata_snb] =
+{
+	.flags = PIIX_SATA_FLAGS | PIIX_FLAG_SIDPR
+		| PIIX_FLAG_PIO16,
+	.pio_mask = ATA_PIO4,
+	.mwdma_mask = ATA_MWDMA2,
+	.udma_mask = ATA_UDMA6,
+	.port_ops = &piix_sata_ops,
+},
 };

 #define AHCI_PCI_BAR 5

@@ -2329,7 +2329,7 @@ int ata_dev_configure(struct ata_device *dev)
 * from SATA Settings page of Identify Device Data Log.
 */
 if (ata_id_has_devslp(dev->id)) {
-u8 sata_setting[ATA_SECT_SIZE];
+u8 *sata_setting = ap->sector_buf;
 int i, j;

 dev->flags |= ATA_DFLAG_DEVSLP;

@@ -2439,6 +2439,9 @@ int ata_dev_configure(struct ata_device *dev)
 dev->max_sectors = min_t(unsigned int, ATA_MAX_SECTORS_128,
 dev->max_sectors);

+if (dev->horkage & ATA_HORKAGE_MAX_SEC_LBA48)
+	dev->max_sectors = ATA_MAX_SECTORS_LBA48;
+
 if (ap->ops->dev_config)
 ap->ops->dev_config(dev);

@@ -4100,6 +4103,7 @@ static const struct ata_blacklist_entry ata_device_blacklist [] = {
 /* Weird ATAPI devices */
 { "TORiSAN DVD-ROM DRD-N216", NULL, ATA_HORKAGE_MAX_SEC_128 },
 { "QUANTUM DAT DAT72-000", NULL, ATA_HORKAGE_ATAPI_MOD16_DMA },
+{ "Slimtype DVD A DS8A8SH", NULL, ATA_HORKAGE_MAX_SEC_LBA48 },

 /* Devices we expect to fail diagnostics */

@@ -532,8 +532,8 @@ int ata_cmd_ioctl(struct scsi_device *scsidev, void __user *arg)
 struct scsi_sense_hdr sshdr;
 scsi_normalize_sense(sensebuf, SCSI_SENSE_BUFFERSIZE,
 &sshdr);
-if (sshdr.sense_key == 0 &&
-sshdr.asc == 0 && sshdr.ascq == 0)
+if (sshdr.sense_key == RECOVERED_ERROR &&
+sshdr.asc == 0 && sshdr.ascq == 0x1d)
 cmd_result &= ~SAM_STAT_CHECK_CONDITION;
 }

@@ -618,8 +618,8 @@ int ata_task_ioctl(struct scsi_device *scsidev, void __user *arg)
 struct scsi_sense_hdr sshdr;
 scsi_normalize_sense(sensebuf, SCSI_SENSE_BUFFERSIZE,
 &sshdr);
-if (sshdr.sense_key == 0 &&
-sshdr.asc == 0 && sshdr.ascq == 0)
+if (sshdr.sense_key == RECOVERED_ERROR &&
+sshdr.asc == 0 && sshdr.ascq == 0x1d)
 cmd_result &= ~SAM_STAT_CHECK_CONDITION;
 }

@@ -943,7 +943,8 @@ static int _regmap_raw_write(struct regmap *map, unsigned int reg,
 unsigned int ival;
 int val_bytes = map->format.val_bytes;
 for (i = 0; i < val_len / val_bytes; i++) {
-ival = map->format.parse_val(val + (i * val_bytes));
+memcpy(map->work_buf, val + (i * val_bytes), val_bytes);
+ival = map->format.parse_val(map->work_buf);
 ret = regcache_write(map, reg + (i * map->reg_stride),
 ival);
 if (ret) {

@@ -1051,29 +1051,12 @@ static int loop_clr_fd(struct loop_device *lo)
 lo->lo_state = Lo_unbound;
 /* This is safe: open() is still holding a reference. */
 module_put(THIS_MODULE);
+if (lo->lo_flags & LO_FLAGS_PARTSCAN && bdev)
+	ioctl_by_bdev(bdev, BLKRRPART, 0);
 lo->lo_flags = 0;
 if (!part_shift)
 lo->lo_disk->flags |= GENHD_FL_NO_PART_SCAN;
 mutex_unlock(&lo->lo_ctl_mutex);
-
-/*
- * Remove all partitions, since BLKRRPART won't remove user
- * added partitions when max_part=0
- */
-if (bdev) {
-	struct disk_part_iter piter;
-	struct hd_struct *part;
-
-	mutex_lock_nested(&bdev->bd_mutex, 1);
-	invalidate_partition(bdev->bd_disk, 0);
-	disk_part_iter_init(&piter, bdev->bd_disk,
-			DISK_PITER_INCL_EMPTY);
-	while ((part = disk_part_iter_next(&piter)))
-		delete_partition(bdev->bd_disk, part->partno);
-	disk_part_iter_exit(&piter);
-	mutex_unlock(&bdev->bd_mutex);
-}
 /*
  * Need not hold lo_ctl_mutex to fput backing file.
  * Calling fput holding lo_ctl_mutex triggers a circular

@@ -81,12 +81,17 @@
 /* Device instance number, incremented each time a device is probed. */
 static int instance;

+struct list_head online_list;
+struct list_head removing_list;
+spinlock_t dev_lock;
+
 /*
  * Global variable used to hold the major block device number
  * allocated in mtip_init().
  */
 static int mtip_major;
 static struct dentry *dfs_parent;
+static struct dentry *dfs_device_status;

 static u32 cpu_use[NR_CPUS];

@@ -243,40 +248,31 @@ static inline void release_slot(struct mtip_port *port, int tag)
 /*
  * Reset the HBA (without sleeping)
  *
- * Just like hba_reset, except does not call sleep, so can be
- * run from interrupt/tasklet context.
- *
  * @dd Pointer to the driver data structure.
  *
  * return value
  * 0 The reset was successful.
  * -1 The HBA Reset bit did not clear.
  */
-static int hba_reset_nosleep(struct driver_data *dd)
+static int mtip_hba_reset(struct driver_data *dd)
 {
 unsigned long timeout;

-/* Chip quirk: quiesce any chip function */
-mdelay(10);
-
 /* Set the reset bit */
 writel(HOST_RESET, dd->mmio + HOST_CTL);

 /* Flush */
 readl(dd->mmio + HOST_CTL);

-/*
- * Wait 10ms then spin for up to 1 second
- * waiting for reset acknowledgement
- */
-timeout = jiffies + msecs_to_jiffies(1000);
-mdelay(10);
-while ((readl(dd->mmio + HOST_CTL) & HOST_RESET)
-	&& time_before(jiffies, timeout))
-	mdelay(1);
+/* Spin for up to 2 seconds, waiting for reset acknowledgement */
+timeout = jiffies + msecs_to_jiffies(2000);
+do {
+	mdelay(10);
+	if (test_bit(MTIP_DDF_REMOVE_PENDING_BIT, &dd->dd_flag))
+		return -1;
+
+} while ((readl(dd->mmio + HOST_CTL) & HOST_RESET)
+	&& time_before(jiffies, timeout));

 if (readl(dd->mmio + HOST_CTL) & HOST_RESET)
 	return -1;

@@ -481,7 +477,7 @@ static void mtip_restart_port(struct mtip_port *port)
 dev_warn(&port->dd->pdev->dev,
 "PxCMD.CR not clear, escalating reset\n");

-if (hba_reset_nosleep(port->dd))
+if (mtip_hba_reset(port->dd))
 dev_err(&port->dd->pdev->dev,
 "HBA reset escalation failed.\n");

@@ -527,6 +523,26 @@ static void mtip_restart_port(struct mtip_port *port)

 }

+static int mtip_device_reset(struct driver_data *dd)
+{
+	int rv = 0;
+
+	if (mtip_check_surprise_removal(dd->pdev))
+		return 0;
+
+	if (mtip_hba_reset(dd) < 0)
+		rv = -EFAULT;
+
+	mdelay(1);
+	mtip_init_port(dd->port);
+	mtip_start_port(dd->port);
+
+	/* Enable interrupts on the HBA. */
+	writel(readl(dd->mmio + HOST_CTL) | HOST_IRQ_EN,
+			dd->mmio + HOST_CTL);
+	return rv;
+}
+
 /*
  * Helper function for tag logging
  */

@@ -632,7 +648,7 @@ static void mtip_timeout_function(unsigned long int data)
 if (cmdto_cnt) {
 print_tags(port->dd, "timed out", tagaccum, cmdto_cnt);
 if (!test_bit(MTIP_PF_IC_ACTIVE_BIT, &port->flags)) {
-mtip_restart_port(port);
+mtip_device_reset(port->dd);
 wake_up_interruptible(&port->svc_wait);
 }
 clear_bit(MTIP_PF_EH_ACTIVE_BIT, &port->flags);

@@ -1283,11 +1299,11 @@ static int mtip_exec_internal_command(struct mtip_port *port,
 int rv = 0, ready2go = 1;
 struct mtip_cmd *int_cmd = &port->commands[MTIP_TAG_INTERNAL];
 unsigned long to;
+struct driver_data *dd = port->dd;

 /* Make sure the buffer is 8 byte aligned. This is asic specific. */
 if (buffer & 0x00000007) {
-dev_err(&port->dd->pdev->dev,
-"SG buffer is not 8 byte aligned\n");
+dev_err(&dd->pdev->dev, "SG buffer is not 8 byte aligned\n");
 return -EFAULT;
 }

@@ -1300,23 +1316,21 @@ static int mtip_exec_internal_command(struct mtip_port *port,
 mdelay(100);
 } while (time_before(jiffies, to));
 if (!ready2go) {
-dev_warn(&port->dd->pdev->dev,
+dev_warn(&dd->pdev->dev,
 "Internal cmd active. new cmd [%02X]\n", fis->command);
 return -EBUSY;
 }
 set_bit(MTIP_PF_IC_ACTIVE_BIT, &port->flags);
 port->ic_pause_timer = 0;

-if (fis->command == ATA_CMD_SEC_ERASE_UNIT)
-clear_bit(MTIP_PF_SE_ACTIVE_BIT, &port->flags);
-else if (fis->command == ATA_CMD_DOWNLOAD_MICRO)
-clear_bit(MTIP_PF_DM_ACTIVE_BIT, &port->flags);
+clear_bit(MTIP_PF_SE_ACTIVE_BIT, &port->flags);
+clear_bit(MTIP_PF_DM_ACTIVE_BIT, &port->flags);

 if (atomic == GFP_KERNEL) {
+if (fis->command != ATA_CMD_STANDBYNOW1) {
 /* wait for io to complete if non atomic */
 if (mtip_quiesce_io(port, 5000) < 0) {
-dev_warn(&port->dd->pdev->dev,
+dev_warn(&dd->pdev->dev,
 "Failed to quiesce IO\n");
 release_slot(port, MTIP_TAG_INTERNAL);
 clear_bit(MTIP_PF_IC_ACTIVE_BIT, &port->flags);

@@ -1361,58 +1375,84 @@ static int mtip_exec_internal_command(struct mtip_port *port,
 /* Issue the command to the hardware */
 mtip_issue_non_ncq_command(port, MTIP_TAG_INTERNAL);

 /* Poll if atomic, wait_for_completion otherwise */
 if (atomic == GFP_KERNEL) {
 /* Wait for the command to complete or timeout. */
-if (wait_for_completion_timeout(
+if (wait_for_completion_interruptible_timeout(
 		&wait,
-		msecs_to_jiffies(timeout)) == 0) {
-	dev_err(&port->dd->pdev->dev,
-		"Internal command did not complete [%d] "
-		"within timeout of %lu ms\n",
-		atomic, timeout);
-	if (mtip_check_surprise_removal(port->dd->pdev) ||
+		msecs_to_jiffies(timeout)) <= 0) {
+	if (rv == -ERESTARTSYS) { /* interrupted */
+		dev_err(&dd->pdev->dev,
+			"Internal command [%02X] was interrupted after %lu ms\n",
+			fis->command, timeout);
+		rv = -EINTR;
+		goto exec_ic_exit;
+	} else if (rv == 0) /* timeout */
+		dev_err(&dd->pdev->dev,
+			"Internal command did not complete [%02X] within timeout of %lu ms\n",
+			fis->command, timeout);
+	else
+		dev_err(&dd->pdev->dev,
+			"Internal command [%02X] wait returned code [%d] after %lu ms - unhandled\n",
+			fis->command, rv, timeout);
+
+	if (mtip_check_surprise_removal(dd->pdev) ||
 		test_bit(MTIP_DDF_REMOVE_PENDING_BIT,
-				&port->dd->dd_flag)) {
+				&dd->dd_flag)) {
+		dev_err(&dd->pdev->dev,
+			"Internal command [%02X] wait returned due to SR\n",
+			fis->command);
 		rv = -ENXIO;
 		goto exec_ic_exit;
 	}
+	mtip_device_reset(dd); /* recover from timeout issue */
 	rv = -EAGAIN;
+	goto exec_ic_exit;
 }
 } else {
+u32 hba_stat, port_stat;
+
 /* Spin for <timeout> checking if command still outstanding */
 timeout = jiffies + msecs_to_jiffies(timeout);
 while ((readl(port->cmd_issue[MTIP_TAG_INTERNAL])
 		& (1 << MTIP_TAG_INTERNAL))
 		&& time_before(jiffies, timeout)) {
-	if (mtip_check_surprise_removal(port->dd->pdev)) {
+	if (mtip_check_surprise_removal(dd->pdev)) {
 		rv = -ENXIO;
 		goto exec_ic_exit;
 	}
 	if ((fis->command != ATA_CMD_STANDBYNOW1) &&
 		test_bit(MTIP_DDF_REMOVE_PENDING_BIT,
-				&port->dd->dd_flag)) {
+				&dd->dd_flag)) {
 		rv = -ENXIO;
 		goto exec_ic_exit;
 	}
-	if (readl(port->mmio + PORT_IRQ_STAT) & PORT_IRQ_ERR) {
-		atomic_inc(&int_cmd->active); /* error */
-		break;
+	port_stat = readl(port->mmio + PORT_IRQ_STAT);
+	if (!port_stat)
+		continue;
+
+	if (port_stat & PORT_IRQ_ERR) {
+		dev_err(&dd->pdev->dev,
+			"Internal command [%02X] failed\n",
+			fis->command);
+		mtip_device_reset(dd);
+		rv = -EIO;
+		goto exec_ic_exit;
+	} else {
+		writel(port_stat, port->mmio + PORT_IRQ_STAT);
+		hba_stat = readl(dd->mmio + HOST_IRQ_STAT);
+		if (hba_stat)
+			writel(hba_stat,
+				dd->mmio + HOST_IRQ_STAT);
 	}
+	break;
 }
 }

-if (atomic_read(&int_cmd->active) > 1) {
-	dev_err(&port->dd->pdev->dev,
-		"Internal command [%02X] failed\n", fis->command);
-	rv = -EIO;
-}
 if (readl(port->cmd_issue[MTIP_TAG_INTERNAL])
 		& (1 << MTIP_TAG_INTERNAL)) {
 	rv = -ENXIO;
-	if (!test_bit(MTIP_DDF_REMOVE_PENDING_BIT,
-			&port->dd->dd_flag)) {
-		mtip_restart_port(port);
+	if (!test_bit(MTIP_DDF_REMOVE_PENDING_BIT, &dd->dd_flag)) {
+		mtip_device_reset(dd);
 		rv = -EAGAIN;
 	}
 }

@@ -1724,7 +1764,8 @@ static int mtip_get_smart_attr(struct mtip_port *port, unsigned int id,
 * -EINVAL Invalid parameters passed in, trim not supported
 * -EIO Error submitting trim request to hw
 */
-static int mtip_send_trim(struct driver_data *dd, unsigned int lba, unsigned int len)
+static int mtip_send_trim(struct driver_data *dd, unsigned int lba,
+		unsigned int len)
 {
 int i, rv = 0;
 u64 tlba, tlen, sect_left;

@@ -1810,45 +1851,6 @@ static bool mtip_hw_get_capacity(struct driver_data *dd, sector_t *sectors)
 return (bool) !!port->identify_valid;
 }

-/*
- * Reset the HBA.
- *
- * Resets the HBA by setting the HBA Reset bit in the Global
- * HBA Control register. After setting the HBA Reset bit the
- * function waits for 1 second before reading the HBA Reset
- * bit to make sure it has cleared. If HBA Reset is not clear
- * an error is returned. Cannot be used in non-blockable
- * context.
- *
- * @dd Pointer to the driver data structure.
- *
- * return value
- * 0 The reset was successful.
- * -1 The HBA Reset bit did not clear.
- */
-static int mtip_hba_reset(struct driver_data *dd)
-{
-	mtip_deinit_port(dd->port);
-
-	/* Set the reset bit */
-	writel(HOST_RESET, dd->mmio + HOST_CTL);
-
-	/* Flush */
-	readl(dd->mmio + HOST_CTL);
-
-	/* Wait for reset to clear */
-	ssleep(1);
-
-	/* Check the bit has cleared */
-	if (readl(dd->mmio + HOST_CTL) & HOST_RESET) {
-		dev_err(&dd->pdev->dev,
-			"Reset bit did not clear.\n");
-		return -1;
-	}
-
-	return 0;
-}
-
 /*
  * Display the identify command data.
  *

@@ -2710,6 +2712,100 @@ static ssize_t mtip_hw_show_status(struct device *dev,

 static DEVICE_ATTR(status, S_IRUGO, mtip_hw_show_status, NULL);

+/* debugsfs entries */
+
+static ssize_t show_device_status(struct device_driver *drv, char *buf)
+{
+	int size = 0;
+	struct driver_data *dd, *tmp;
+	unsigned long flags;
+	char id_buf[42];
+	u16 status = 0;
+
+	spin_lock_irqsave(&dev_lock, flags);
+	size += sprintf(&buf[size], "Devices Present:\n");
+	list_for_each_entry_safe(dd, tmp, &online_list, online_list) {
+		if (dd->pdev) {
+			if (dd->port &&
+			    dd->port->identify &&
+			    dd->port->identify_valid) {
+				strlcpy(id_buf,
+					(char *) (dd->port->identify + 10), 21);
+				status = *(dd->port->identify + 141);
+			} else {
+				memset(id_buf, 0, 42);
+				status = 0;
+			}
+
+			if (dd->port &&
+			    test_bit(MTIP_PF_REBUILD_BIT, &dd->port->flags)) {
+				size += sprintf(&buf[size],
+					" device %s %s (ftl rebuild %d %%)\n",
+					dev_name(&dd->pdev->dev),
+					id_buf,
+					status);
+			} else {
+				size += sprintf(&buf[size],
+					" device %s %s\n",
+					dev_name(&dd->pdev->dev),
+					id_buf);
+			}
+		}
+	}
+
+	size += sprintf(&buf[size], "Devices Being Removed:\n");
+	list_for_each_entry_safe(dd, tmp, &removing_list, remove_list) {
+		if (dd->pdev) {
+			if (dd->port &&
+			    dd->port->identify &&
+			    dd->port->identify_valid) {
+				strlcpy(id_buf,
+					(char *) (dd->port->identify+10), 21);
+				status = *(dd->port->identify + 141);
+			} else {
+				memset(id_buf, 0, 42);
+				status = 0;
+			}
+
+			if (dd->port &&
+			    test_bit(MTIP_PF_REBUILD_BIT, &dd->port->flags)) {
+				size += sprintf(&buf[size],
+					" device %s %s (ftl rebuild %d %%)\n",
+					dev_name(&dd->pdev->dev),
+					id_buf,
+					status);
+			} else {
+				size += sprintf(&buf[size],
+					" device %s %s\n",
+					dev_name(&dd->pdev->dev),
+					id_buf);
+			}
+		}
+	}
+	spin_unlock_irqrestore(&dev_lock, flags);
+
+	return size;
+}
+
+static ssize_t mtip_hw_read_device_status(struct file *f, char __user *ubuf,
+						size_t len, loff_t *offset)
+{
+	int size = *offset;
+	char buf[MTIP_DFS_MAX_BUF_SIZE];
+
+	if (!len || *offset)
+		return 0;
+
+	size += show_device_status(NULL, buf);
+
+	*offset = size <= len ? size : len;
+	size = copy_to_user(ubuf, buf, *offset);
+	if (size)
+		return -EFAULT;
+
+	return *offset;
+}
+
 static ssize_t mtip_hw_read_registers(struct file *f, char __user *ubuf,
 size_t len, loff_t *offset)
 {

@@ -2804,6 +2900,13 @@ static ssize_t mtip_hw_read_flags(struct file *f, char __user *ubuf,
 return *offset;
 }

+static const struct file_operations mtip_device_status_fops = {
+	.owner = THIS_MODULE,
+	.open = simple_open,
+	.read = mtip_hw_read_device_status,
+	.llseek = no_llseek,
+};
+
 static const struct file_operations mtip_regs_fops = {
 .owner = THIS_MODULE,
 .open = simple_open,

@@ -4161,6 +4264,7 @@ static int mtip_pci_probe(struct pci_dev *pdev,
 const struct cpumask *node_mask;
 int cpu, i = 0, j = 0;
 int my_node = NUMA_NO_NODE;
+unsigned long flags;

 /* Allocate memory for this devices private data. */
 my_node = pcibus_to_node(pdev->bus);

@@ -4218,6 +4322,9 @@ static int mtip_pci_probe(struct pci_dev *pdev,
 dd->pdev = pdev;
 dd->numa_node = my_node;

+INIT_LIST_HEAD(&dd->online_list);
+INIT_LIST_HEAD(&dd->remove_list);
+
 memset(dd->workq_name, 0, 32);
 snprintf(dd->workq_name, 31, "mtipq%d", dd->instance);

@@ -4305,6 +4412,14 @@ static int mtip_pci_probe(struct pci_dev *pdev,
 instance++;
 if (rv != MTIP_FTL_REBUILD_MAGIC)
 set_bit(MTIP_DDF_INIT_DONE_BIT, &dd->dd_flag);
+else
+	rv = 0; /* device in rebuild state, return 0 from probe */
+
+/* Add to online list even if in ftl rebuild */
+spin_lock_irqsave(&dev_lock, flags);
+list_add(&dd->online_list, &online_list);
+spin_unlock_irqrestore(&dev_lock, flags);
+
 goto done;

 block_initialize_err:

@@ -4338,9 +4453,15 @@ static void mtip_pci_remove(struct pci_dev *pdev)
 {
 struct driver_data *dd = pci_get_drvdata(pdev);
 int counter = 0;
+unsigned long flags;

 set_bit(MTIP_DDF_REMOVE_PENDING_BIT, &dd->dd_flag);

+spin_lock_irqsave(&dev_lock, flags);
+list_del_init(&dd->online_list);
+list_add(&dd->remove_list, &removing_list);
+spin_unlock_irqrestore(&dev_lock, flags);
+
 if (mtip_check_surprise_removal(pdev)) {
 while (!test_bit(MTIP_DDF_CLEANUP_BIT, &dd->dd_flag)) {
 counter++;

@@ -4366,6 +4487,10 @@ static void mtip_pci_remove(struct pci_dev *pdev)

 pci_disable_msi(pdev);

+spin_lock_irqsave(&dev_lock, flags);
+list_del_init(&dd->remove_list);
+spin_unlock_irqrestore(&dev_lock, flags);
+
 kfree(dd);
 pcim_iounmap_regions(pdev, 1 << MTIP_ABAR);
 }

@@ -4513,6 +4638,11 @@ static int __init mtip_init(void)

 pr_info(MTIP_DRV_NAME " Version " MTIP_DRV_VERSION "\n");

+spin_lock_init(&dev_lock);
+
+INIT_LIST_HEAD(&online_list);
+INIT_LIST_HEAD(&removing_list);
+
 /* Allocate a major block device number to use with this driver. */
 error = register_blkdev(0, MTIP_DRV_NAME);
 if (error <= 0) {

@@ -4522,11 +4652,18 @@ static int __init mtip_init(void)
 }
 mtip_major = error;

-if (!dfs_parent) {
-	dfs_parent = debugfs_create_dir("rssd", NULL);
-	if (IS_ERR_OR_NULL(dfs_parent)) {
-		pr_warn("Error creating debugfs parent\n");
-		dfs_parent = NULL;
+dfs_parent = debugfs_create_dir("rssd", NULL);
+if (IS_ERR_OR_NULL(dfs_parent)) {
+	pr_warn("Error creating debugfs parent\n");
+	dfs_parent = NULL;
+}
+if (dfs_parent) {
+	dfs_device_status = debugfs_create_file("device_status",
+				S_IRUGO, dfs_parent, NULL,
+				&mtip_device_status_fops);
+	if (IS_ERR_OR_NULL(dfs_device_status)) {
+		pr_err("Error creating device_status node\n");
+		dfs_device_status = NULL;
 	}
 }

@@ -129,9 +129,9 @@ enum {
 MTIP_PF_EH_ACTIVE_BIT = 1, /* error handling */
 MTIP_PF_SE_ACTIVE_BIT = 2, /* secure erase */
 MTIP_PF_DM_ACTIVE_BIT = 3, /* download microcde */
-MTIP_PF_PAUSE_IO = ((1 << MTIP_PF_IC_ACTIVE_BIT) | \
-(1 << MTIP_PF_EH_ACTIVE_BIT) | \
-(1 << MTIP_PF_SE_ACTIVE_BIT) | \
+MTIP_PF_PAUSE_IO = ((1 << MTIP_PF_IC_ACTIVE_BIT) |
+(1 << MTIP_PF_EH_ACTIVE_BIT) |
+(1 << MTIP_PF_SE_ACTIVE_BIT) |
 (1 << MTIP_PF_DM_ACTIVE_BIT)),

 MTIP_PF_SVC_THD_ACTIVE_BIT = 4,

@@ -144,9 +144,9 @@ enum {
 MTIP_DDF_REMOVE_PENDING_BIT = 1,
 MTIP_DDF_OVER_TEMP_BIT = 2,
 MTIP_DDF_WRITE_PROTECT_BIT = 3,
-MTIP_DDF_STOP_IO = ((1 << MTIP_DDF_REMOVE_PENDING_BIT) | \
-(1 << MTIP_DDF_SEC_LOCK_BIT) | \
-(1 << MTIP_DDF_OVER_TEMP_BIT) | \
+MTIP_DDF_STOP_IO = ((1 << MTIP_DDF_REMOVE_PENDING_BIT) |
+(1 << MTIP_DDF_SEC_LOCK_BIT) |
+(1 << MTIP_DDF_OVER_TEMP_BIT) |
 (1 << MTIP_DDF_WRITE_PROTECT_BIT)),

 MTIP_DDF_CLEANUP_BIT = 5,

@@ -180,7 +180,7 @@ struct mtip_work {

 #define MTIP_TRIM_TIMEOUT_MS 240000
 #define MTIP_MAX_TRIM_ENTRIES 8
-#define MTIP_MAX_TRIM_ENTRY_LEN 0xfff8
+#define MTIP_MAX_TRIM_ENTRY_LEN 0xfff8

 struct mtip_trim_entry {
 u32 lba; /* starting lba of region */

@@ -501,6 +501,10 @@ struct driver_data {
 atomic_t irq_workers_active;

 int isr_binding;
+
+struct list_head online_list; /* linkage for online list */
+
+struct list_head remove_list; /* linkage for removing list */
 };

 #endif

@@ -502,7 +502,6 @@ static inline void intel_pstate_set_sample_time(struct cpudata *cpu)

 sample_time = cpu->pstate_policy->sample_rate_ms;
 delay = msecs_to_jiffies(sample_time);
-delay -= jiffies % delay;
 mod_timer_pinned(&cpu->timer, jiffies + delay);
 }

@@ -1750,7 +1750,7 @@ static struct platform_driver cryp_driver = {
 .shutdown = ux500_cryp_shutdown,
 .driver = {
 .owner = THIS_MODULE,
-.name = "cryp1"
+.name = "cryp1",
 .pm = &ux500_cryp_pm,
 }
 };

@@ -276,12 +276,20 @@ static void omap_dma_issue_pending(struct dma_chan *chan)

 spin_lock_irqsave(&c->vc.lock, flags);
 if (vchan_issue_pending(&c->vc) && !c->desc) {
-struct omap_dmadev *d = to_omap_dma_dev(chan->device);
-spin_lock(&d->lock);
-if (list_empty(&c->node))
-	list_add_tail(&c->node, &d->pending);
-spin_unlock(&d->lock);
-tasklet_schedule(&d->task);
+/*
+ * c->cyclic is used only by audio and in this case the DMA need
+ * to be started without delay.
+ */
+if (!c->cyclic) {
+	struct omap_dmadev *d = to_omap_dma_dev(chan->device);
+	spin_lock(&d->lock);
+	if (list_empty(&c->node))
+		list_add_tail(&c->node, &d->pending);
+	spin_unlock(&d->lock);
+	tasklet_schedule(&d->task);
+} else {
+	omap_dma_start_desc(c);
+}
 }
 spin_unlock_irqrestore(&c->vc.lock, flags);
 }

@@ -2882,7 +2882,7 @@ pl330_probe(struct amba_device *adev, const struct amba_id *id)
 {
 struct dma_pl330_platdata *pdat;
 struct dma_pl330_dmac *pdmac;
-struct dma_pl330_chan *pch;
+struct dma_pl330_chan *pch, *_p;
 struct pl330_info *pi;
 struct dma_device *pd;
 struct resource *res;

@@ -2984,7 +2984,16 @@ pl330_probe(struct amba_device *adev, const struct amba_id *id)
 ret = dma_async_device_register(pd);
 if (ret) {
 dev_err(&adev->dev, "unable to register DMAC\n");
-goto probe_err2;
+goto probe_err3;
 }

+if (adev->dev.of_node) {
+	ret = of_dma_controller_register(adev->dev.of_node,
+			of_dma_pl330_xlate, pdmac);
+	if (ret) {
+		dev_err(&adev->dev,
+		"unable to register DMA to the generic DT DMA helpers\n");
+	}
+}
+
 dev_info(&adev->dev,

@@ -2995,16 +3004,21 @@ pl330_probe(struct amba_device *adev, const struct amba_id *id)
 pi->pcfg.data_bus_width / 8, pi->pcfg.num_chan,
 pi->pcfg.num_peri, pi->pcfg.num_events);

-ret = of_dma_controller_register(adev->dev.of_node,
-		of_dma_pl330_xlate, pdmac);
-if (ret) {
-	dev_err(&adev->dev,
-	"unable to register DMA to the generic DT DMA helpers\n");
-	goto probe_err2;
-}
-
 return 0;
+probe_err3:
+amba_set_drvdata(adev, NULL);
+
+/* Idle the DMAC */
+list_for_each_entry_safe(pch, _p, &pdmac->ddma.channels,
+		chan.device_node) {
+
+	/* Remove the channel */
+	list_del(&pch->chan.device_node);
+
+	/* Flush the channel */
+	pl330_control(&pch->chan, DMA_TERMINATE_ALL, 0);
+	pl330_free_chan_resources(&pch->chan);
+}
 probe_err2:
 pl330_del(pi);
 probe_err1:

@@ -3023,8 +3037,10 @@ static int pl330_remove(struct amba_device *adev)
 if (!pdmac)
 return 0;

-of_dma_controller_free(adev->dev.of_node);
+if (adev->dev.of_node)
+	of_dma_controller_free(adev->dev.of_node);

 dma_async_device_unregister(&pdmac->ddma);
 amba_set_drvdata(adev, NULL);

 /* Idle the DMAC */

@@ -575,7 +575,7 @@ static int pca953x_irq_setup(struct pca953x_chip *chip,
 chip->gpio_chip.ngpio,
 irq_base,
 &pca953x_irq_simple_ops,
-NULL);
+chip);
 if (!chip->domain)
 return -ENODEV;

@@ -1544,10 +1544,10 @@ int drm_fb_helper_hotplug_event(struct drm_fb_helper *fb_helper)
 if (!fb_helper->fb)
 return 0;

-drm_modeset_lock_all(dev);
+mutex_lock(&fb_helper->dev->mode_config.mutex);
 if (!drm_fb_helper_is_bound(fb_helper)) {
 fb_helper->delayed_hotplug = true;
-drm_modeset_unlock_all(dev);
+mutex_unlock(&fb_helper->dev->mode_config.mutex);
 return 0;
 }
 DRM_DEBUG_KMS("\n");

@@ -1558,9 +1558,11 @@ int drm_fb_helper_hotplug_event(struct drm_fb_helper *fb_helper)

 count = drm_fb_helper_probe_connector_modes(fb_helper, max_width,
 max_height);
+mutex_unlock(&fb_helper->dev->mode_config.mutex);
+
+drm_modeset_lock_all(dev);
 drm_setup_crtcs(fb_helper);
 drm_modeset_unlock_all(dev);

 drm_fb_helper_set_par(fb_helper->fbdev);

 return 0;

@@ -751,8 +751,6 @@ static int mga_crtc_mode_set(struct drm_crtc *crtc,
 int i;
 unsigned char misc = 0;
 unsigned char ext_vga[6];
-unsigned char ext_vga_index24;
-unsigned char dac_index90 = 0;
 u8 bppshift;

 static unsigned char dacvalue[] = {

@@ -803,7 +801,6 @@ static int mga_crtc_mode_set(struct drm_crtc *crtc,
 option2 = 0x0000b000;
 break;
 case G200_ER:
-dac_index90 = 0;
 break;
 }

@@ -852,10 +849,8 @@ static int mga_crtc_mode_set(struct drm_crtc *crtc,
 WREG_DAC(i, dacvalue[i]);
 }

-if (mdev->type == G200_ER) {
-	WREG_DAC(0x90, dac_index90);
-}
+if (mdev->type == G200_ER)
+	WREG_DAC(0x90, 0);

 if (option)
 pci_write_config_dword(dev->pdev, PCI_MGA_OPTION, option);

@@ -952,8 +947,6 @@ static int mga_crtc_mode_set(struct drm_crtc *crtc,
 if (mdev->type == G200_WB)
 ext_vga[1] |= 0x88;

-ext_vga_index24 = 0x05;
-
 /* Set pixel clocks */
 misc = 0x2d;
 WREG8(MGA_MISC_OUT, misc);

@@ -965,7 +958,7 @@ static int mga_crtc_mode_set(struct drm_crtc *crtc,
 }

 if (mdev->type == G200_ER)
-WREG_ECRT(24, ext_vga_index24);
+WREG_ECRT(0x24, 0x5);

 if (mdev->type == G200_EV) {
 WREG_ECRT(6, 0);

|
@@ -479,7 +479,7 @@ nv50_display_flip_wait(void *data)
{
struct nv50_display_flip *flip = data;
if (nouveau_bo_rd32(flip->disp->sync, flip->chan->addr / 4) ==
flip->chan->data);
flip->chan->data)
return true;
usleep_range(1, 2);
return false;

@@ -61,6 +61,10 @@ static int udl_get_modes(struct drm_connector *connector)
int ret;

edid = (struct edid *)udl_get_edid(udl);
if (!edid) {
drm_mode_connector_update_edid_property(connector, NULL);
return 0;
}

/*
* We only read the main block, but if the monitor reports extension

@@ -416,6 +416,8 @@ static int __hwspin_lock_request(struct hwspinlock *hwlock)
ret = pm_runtime_get_sync(dev);
if (ret < 0) {
dev_err(dev, "%s: can't power on device\n", __func__);
pm_runtime_put_noidle(dev);
module_put(dev->driver->owner);
return ret;
}

@@ -4,7 +4,7 @@

config VMWARE_VMCI
tristate "VMware VMCI Driver"
depends on X86 && PCI
depends on X86 && PCI && NET
help
This is VMware's Virtual Machine Communication Interface. It enables
high-speed communication between host and guest in a virtual

@@ -4846,9 +4846,18 @@ static int __net_init bond_net_init(struct net *net)
static void __net_exit bond_net_exit(struct net *net)
{
struct bond_net *bn = net_generic(net, bond_net_id);
struct bonding *bond, *tmp_bond;
LIST_HEAD(list);

bond_destroy_sysfs(bn);
bond_destroy_proc_dir(bn);

/* Kill off any bonds created after unregistering bond rtnl ops */
rtnl_lock();
list_for_each_entry_safe(bond, tmp_bond, &bn->dev_list, bond_list)
unregister_netdevice_queue(bond->dev, &list);
unregister_netdevice_many(&list);
rtnl_unlock();
}

static struct pernet_operations bond_net_ops = {

@@ -4902,8 +4911,8 @@ static void __exit bonding_exit(void)

bond_destroy_debugfs();

unregister_pernet_subsys(&bond_net_ops);
rtnl_link_unregister(&bond_link_ops);
unregister_pernet_subsys(&bond_net_ops);

#ifdef CONFIG_NET_POLL_CONTROLLER
/*

@@ -13437,13 +13437,7 @@ static void bnx2x_check_kr2_wa(struct link_params *params,
{
struct bnx2x *bp = params->bp;
u16 base_page, next_page, not_kr2_device, lane;
int sigdet = bnx2x_warpcore_get_sigdet(phy, params);

if (!sigdet) {
if (!(vars->link_attr_sync & LINK_ATTR_SYNC_KR2_ENABLE))
bnx2x_kr2_recovery(params, vars, phy);
return;
}
int sigdet;

/* Once KR2 was disabled, wait 5 seconds before checking KR2 recovery
* since some switches tend to reinit the AN process and clear the

@@ -13454,6 +13448,16 @@ static void bnx2x_check_kr2_wa(struct link_params *params,
vars->check_kr2_recovery_cnt--;
return;
}

sigdet = bnx2x_warpcore_get_sigdet(phy, params);
if (!sigdet) {
if (!(vars->link_attr_sync & LINK_ATTR_SYNC_KR2_ENABLE)) {
bnx2x_kr2_recovery(params, vars, phy);
DP(NETIF_MSG_LINK, "No sigdet\n");
}
return;
}

lane = bnx2x_get_warpcore_lane(phy, params);
CL22_WR_OVER_CL45(bp, phy, MDIO_REG_BANK_AER_BLOCK,
MDIO_AER_BLOCK_AER_REG, lane);

@@ -4947,7 +4947,7 @@ static void bnx2x_after_function_update(struct bnx2x *bp)
q);
}

if (!NO_FCOE(bp)) {
if (!NO_FCOE(bp) && CNIC_ENABLED(bp)) {
fp = &bp->fp[FCOE_IDX(bp)];
queue_params.q_obj = &bnx2x_sp_obj(bp, fp).q_obj;

@@ -13354,6 +13354,7 @@ static int bnx2x_unregister_cnic(struct net_device *dev)
RCU_INIT_POINTER(bp->cnic_ops, NULL);
mutex_unlock(&bp->cnic_mutex);
synchronize_rcu();
bp->cnic_enabled = false;
kfree(bp->cnic_kwq);
bp->cnic_kwq = NULL;

@@ -870,7 +870,7 @@ static int e100_exec_cmd(struct nic *nic, u8 cmd, dma_addr_t dma_addr)
}

static int e100_exec_cb(struct nic *nic, struct sk_buff *skb,
void (*cb_prepare)(struct nic *, struct cb *, struct sk_buff *))
int (*cb_prepare)(struct nic *, struct cb *, struct sk_buff *))
{
struct cb *cb;
unsigned long flags;

@@ -888,10 +888,13 @@ static int e100_exec_cb(struct nic *nic, struct sk_buff *skb,
nic->cbs_avail--;
cb->skb = skb;

err = cb_prepare(nic, cb, skb);
if (err)
goto err_unlock;

if (unlikely(!nic->cbs_avail))
err = -ENOSPC;

cb_prepare(nic, cb, skb);

/* Order is important otherwise we'll be in a race with h/w:
* set S-bit in current first, then clear S-bit in previous. */

@@ -1091,7 +1094,7 @@ static void e100_get_defaults(struct nic *nic)
nic->mii.mdio_write = mdio_write;
}

static void e100_configure(struct nic *nic, struct cb *cb, struct sk_buff *skb)
static int e100_configure(struct nic *nic, struct cb *cb, struct sk_buff *skb)
{
struct config *config = &cb->u.config;
u8 *c = (u8 *)config;

@@ -1181,6 +1184,7 @@ static void e100_configure(struct nic *nic, struct cb *cb, struct sk_buff *skb)
netif_printk(nic, hw, KERN_DEBUG, nic->netdev,
"[16-23]=%02X:%02X:%02X:%02X:%02X:%02X:%02X:%02X\n",
c[16], c[17], c[18], c[19], c[20], c[21], c[22], c[23]);
return 0;
}

/*************************************************************************

@@ -1331,7 +1335,7 @@ static const struct firmware *e100_request_firmware(struct nic *nic)
return fw;
}

static void e100_setup_ucode(struct nic *nic, struct cb *cb,
static int e100_setup_ucode(struct nic *nic, struct cb *cb,
struct sk_buff *skb)
{
const struct firmware *fw = (void *)skb;

@@ -1358,6 +1362,7 @@ static void e100_setup_ucode(struct nic *nic, struct cb *cb,
cb->u.ucode[min_size] |= cpu_to_le32((BUNDLESMALL) ? 0xFFFF : 0xFF80);

cb->command = cpu_to_le16(cb_ucode | cb_el);
return 0;
}

static inline int e100_load_ucode_wait(struct nic *nic)

@@ -1400,18 +1405,20 @@ static inline int e100_load_ucode_wait(struct nic *nic)
return err;
}

static void e100_setup_iaaddr(struct nic *nic, struct cb *cb,
static int e100_setup_iaaddr(struct nic *nic, struct cb *cb,
struct sk_buff *skb)
{
cb->command = cpu_to_le16(cb_iaaddr);
memcpy(cb->u.iaaddr, nic->netdev->dev_addr, ETH_ALEN);
return 0;
}

static void e100_dump(struct nic *nic, struct cb *cb, struct sk_buff *skb)
static int e100_dump(struct nic *nic, struct cb *cb, struct sk_buff *skb)
{
cb->command = cpu_to_le16(cb_dump);
cb->u.dump_buffer_addr = cpu_to_le32(nic->dma_addr +
offsetof(struct mem, dump_buf));
return 0;
}

static int e100_phy_check_without_mii(struct nic *nic)

@@ -1581,7 +1588,7 @@ static int e100_hw_init(struct nic *nic)
return 0;
}

static void e100_multi(struct nic *nic, struct cb *cb, struct sk_buff *skb)
static int e100_multi(struct nic *nic, struct cb *cb, struct sk_buff *skb)
{
struct net_device *netdev = nic->netdev;
struct netdev_hw_addr *ha;

@@ -1596,6 +1603,7 @@ static void e100_multi(struct nic *nic, struct cb *cb, struct sk_buff *skb)
memcpy(&cb->u.multi.addr[i++ * ETH_ALEN], &ha->addr,
ETH_ALEN);
}
return 0;
}

static void e100_set_multicast_list(struct net_device *netdev)

@@ -1756,11 +1764,18 @@ static void e100_watchdog(unsigned long data)
round_jiffies(jiffies + E100_WATCHDOG_PERIOD));
}

static void e100_xmit_prepare(struct nic *nic, struct cb *cb,
static int e100_xmit_prepare(struct nic *nic, struct cb *cb,
struct sk_buff *skb)
{
dma_addr_t dma_addr;
cb->command = nic->tx_command;

dma_addr = pci_map_single(nic->pdev,
skb->data, skb->len, PCI_DMA_TODEVICE);
/* If we can't map the skb, have the upper layer try later */
if (pci_dma_mapping_error(nic->pdev, dma_addr))
return -ENOMEM;

/*
* Use the last 4 bytes of the SKB payload packet as the CRC, used for
* testing, ie sending frames with bad CRC.

@@ -1777,11 +1792,10 @@ static void e100_xmit_prepare(struct nic *nic, struct cb *cb,
cb->u.tcb.tcb_byte_count = 0;
cb->u.tcb.threshold = nic->tx_threshold;
cb->u.tcb.tbd_count = 1;
cb->u.tcb.tbd.buf_addr = cpu_to_le32(pci_map_single(nic->pdev,
skb->data, skb->len, PCI_DMA_TODEVICE));
/* check for mapping failure? */
cb->u.tcb.tbd.buf_addr = cpu_to_le32(dma_addr);
cb->u.tcb.tbd.size = cpu_to_le16(skb->len);
skb_tx_timestamp(skb);
return 0;
}

static netdev_tx_t e100_xmit_frame(struct sk_buff *skb,

@@ -2771,16 +2771,17 @@ static int mvneta_probe(struct platform_device *pdev)

netif_napi_add(dev, &pp->napi, mvneta_poll, pp->weight);

dev->features = NETIF_F_SG | NETIF_F_IP_CSUM;
dev->hw_features |= NETIF_F_SG | NETIF_F_IP_CSUM;
dev->vlan_features |= NETIF_F_SG | NETIF_F_IP_CSUM;
dev->priv_flags |= IFF_UNICAST_FLT;

err = register_netdev(dev);
if (err < 0) {
dev_err(&pdev->dev, "failed to register\n");
goto err_deinit;
}

dev->features = NETIF_F_SG | NETIF_F_IP_CSUM;
dev->hw_features = NETIF_F_SG | NETIF_F_IP_CSUM;
dev->priv_flags |= IFF_UNICAST_FLT;

netdev_info(dev, "mac: %pM\n", dev->dev_addr);

platform_set_drvdata(pdev, pp->dev);

@@ -470,8 +470,10 @@ static void netvsc_send_completion(struct hv_device *device,
packet->trans_id;

/* Notify the layer above us */
nvsc_packet->completion.send.send_completion(
nvsc_packet->completion.send.send_completion_ctx);
if (nvsc_packet)
nvsc_packet->completion.send.send_completion(
nvsc_packet->completion.send.
send_completion_ctx);

num_outstanding_sends =
atomic_dec_return(&net_device->num_outstanding_sends);

@@ -498,6 +500,7 @@ int netvsc_send(struct hv_device *device,
int ret = 0;
struct nvsp_message sendMessage;
struct net_device *ndev;
u64 req_id;

net_device = get_outbound_net_device(device);
if (!net_device)

@@ -518,20 +521,24 @@ int netvsc_send(struct hv_device *device,
0xFFFFFFFF;
sendMessage.msg.v1_msg.send_rndis_pkt.send_buf_section_size = 0;

if (packet->completion.send.send_completion)
req_id = (u64)packet;
else
req_id = 0;

if (packet->page_buf_cnt) {
ret = vmbus_sendpacket_pagebuffer(device->channel,
packet->page_buf,
packet->page_buf_cnt,
&sendMessage,
sizeof(struct nvsp_message),
(unsigned long)packet);
req_id);
} else {
ret = vmbus_sendpacket(device->channel, &sendMessage,
sizeof(struct nvsp_message),
(unsigned long)packet,
req_id,
VM_PKT_DATA_INBAND,
VMBUS_DATA_PACKET_FLAG_COMPLETION_REQUESTED);

}

if (ret == 0) {

@@ -241,13 +241,11 @@ void netvsc_linkstatus_callback(struct hv_device *device_obj,

if (status == 1) {
netif_carrier_on(net);
netif_wake_queue(net);
ndev_ctx = netdev_priv(net);
schedule_delayed_work(&ndev_ctx->dwork, 0);
schedule_delayed_work(&ndev_ctx->dwork, msecs_to_jiffies(20));
} else {
netif_carrier_off(net);
netif_tx_disable(net);
}
}

@@ -61,9 +61,6 @@ struct rndis_request {

static void rndis_filter_send_completion(void *ctx);

static void rndis_filter_send_request_completion(void *ctx);



static struct rndis_device *get_rndis_device(void)
{

@@ -241,10 +238,7 @@ static int rndis_filter_send_request(struct rndis_device *dev,
packet->page_buf[0].len;
}

packet->completion.send.send_completion_ctx = req;/* packet; */
packet->completion.send.send_completion =
rndis_filter_send_request_completion;
packet->completion.send.send_completion_tid = (unsigned long)dev;
packet->completion.send.send_completion = NULL;

ret = netvsc_send(dev->net_dev->dev, packet);
return ret;

@@ -999,9 +993,3 @@ static void rndis_filter_send_completion(void *ctx)
/* Pass it back to the original handler */
filter_pkt->completion(filter_pkt->completion_ctx);
}


static void rndis_filter_send_request_completion(void *ctx)
{
/* Noop */
}

@@ -280,6 +280,10 @@ static int ath_reset_internal(struct ath_softc *sc, struct ath9k_channel *hchan)
if (r) {
ath_err(common,
"Unable to reset channel, reset status %d\n", r);

ath9k_hw_enable_interrupts(ah);
ath9k_queue_reset(sc, RESET_TYPE_BB_HANG);

goto out;
}

@@ -3317,15 +3317,15 @@ static int _brcmf_sdbrcm_download_firmware(struct brcmf_sdio *bus)
goto err;
}

/* External image takes precedence if specified */
if (brcmf_sdbrcm_download_code_file(bus)) {
brcmf_err("dongle image file download failed\n");
goto err;
}

/* External nvram takes precedence if specified */
if (brcmf_sdbrcm_download_nvram(bus))
if (brcmf_sdbrcm_download_nvram(bus)) {
brcmf_err("dongle nvram file download failed\n");
goto err;
}

/* Take arm out of reset */
if (brcmf_sdbrcm_download_state(bus, false)) {

@@ -1891,8 +1891,10 @@ static s32
brcmf_add_keyext(struct wiphy *wiphy, struct net_device *ndev,
u8 key_idx, const u8 *mac_addr, struct key_params *params)
{
struct brcmf_if *ifp = netdev_priv(ndev);
struct brcmf_wsec_key key;
s32 err = 0;
u8 keybuf[8];

memset(&key, 0, sizeof(key));
key.index = (u32) key_idx;

@@ -1916,8 +1918,9 @@ brcmf_add_keyext(struct wiphy *wiphy, struct net_device *ndev,
brcmf_dbg(CONN, "Setting the key index %d\n", key.index);
memcpy(key.data, params->key, key.len);

if (params->cipher == WLAN_CIPHER_SUITE_TKIP) {
u8 keybuf[8];
if ((ifp->vif->mode != WL_MODE_AP) &&
(params->cipher == WLAN_CIPHER_SUITE_TKIP)) {
brcmf_dbg(CONN, "Swapping RX/TX MIC key\n");
memcpy(keybuf, &key.data[24], sizeof(keybuf));
memcpy(&key.data[24], &key.data[16], sizeof(keybuf));
memcpy(&key.data[16], keybuf, sizeof(keybuf));

@@ -2013,7 +2016,7 @@ brcmf_cfg80211_add_key(struct wiphy *wiphy, struct net_device *ndev,
break;
case WLAN_CIPHER_SUITE_TKIP:
if (ifp->vif->mode != WL_MODE_AP) {
brcmf_dbg(CONN, "Swapping key\n");
brcmf_dbg(CONN, "Swapping RX/TX MIC key\n");
memcpy(keybuf, &key.data[24], sizeof(keybuf));
memcpy(&key.data[24], &key.data[16], sizeof(keybuf));
memcpy(&key.data[16], keybuf, sizeof(keybuf));

@@ -2118,8 +2121,7 @@ brcmf_cfg80211_get_key(struct wiphy *wiphy, struct net_device *ndev,
err = -EAGAIN;
goto done;
}
switch (wsec & ~SES_OW_ENABLED) {
case WEP_ENABLED:
if (wsec & WEP_ENABLED) {
sec = &profile->sec;
if (sec->cipher_pairwise & WLAN_CIPHER_SUITE_WEP40) {
params.cipher = WLAN_CIPHER_SUITE_WEP40;

@@ -2128,16 +2130,13 @@ brcmf_cfg80211_get_key(struct wiphy *wiphy, struct net_device *ndev,
params.cipher = WLAN_CIPHER_SUITE_WEP104;
brcmf_dbg(CONN, "WLAN_CIPHER_SUITE_WEP104\n");
}
break;
case TKIP_ENABLED:
} else if (wsec & TKIP_ENABLED) {
params.cipher = WLAN_CIPHER_SUITE_TKIP;
brcmf_dbg(CONN, "WLAN_CIPHER_SUITE_TKIP\n");
break;
case AES_ENABLED:
} else if (wsec & AES_ENABLED) {
params.cipher = WLAN_CIPHER_SUITE_AES_CMAC;
brcmf_dbg(CONN, "WLAN_CIPHER_SUITE_AES_CMAC\n");
break;
default:
} else {
brcmf_err("Invalid algo (0x%x)\n", wsec);
err = -EINVAL;
goto done;

@@ -3824,8 +3823,9 @@ brcmf_cfg80211_start_ap(struct wiphy *wiphy, struct net_device *ndev,
static int brcmf_cfg80211_stop_ap(struct wiphy *wiphy, struct net_device *ndev)
{
struct brcmf_if *ifp = netdev_priv(ndev);
s32 err = -EPERM;
s32 err;
struct brcmf_fil_bss_enable_le bss_enable;
struct brcmf_join_params join_params;

brcmf_dbg(TRACE, "Enter\n");

@@ -3833,16 +3833,21 @@ static int brcmf_cfg80211_stop_ap(struct wiphy *wiphy, struct net_device *ndev)
/* Due to most likely deauths outstanding we sleep */
/* first to make sure they get processed by fw. */
msleep(400);
err = brcmf_fil_cmd_int_set(ifp, BRCMF_C_SET_AP, 0);
if (err < 0) {
brcmf_err("setting AP mode failed %d\n", err);
goto exit;
}

memset(&join_params, 0, sizeof(join_params));
err = brcmf_fil_cmd_data_set(ifp, BRCMF_C_SET_SSID,
&join_params, sizeof(join_params));
if (err < 0)
brcmf_err("SET SSID error (%d)\n", err);
err = brcmf_fil_cmd_int_set(ifp, BRCMF_C_UP, 0);
if (err < 0) {
if (err < 0)
brcmf_err("BRCMF_C_UP error %d\n", err);
goto exit;
}
err = brcmf_fil_cmd_int_set(ifp, BRCMF_C_SET_AP, 0);
if (err < 0)
brcmf_err("setting AP mode failed %d\n", err);
err = brcmf_fil_cmd_int_set(ifp, BRCMF_C_SET_INFRA, 0);
if (err < 0)
brcmf_err("setting INFRA mode failed %d\n", err);
} else {
bss_enable.bsscfg_idx = cpu_to_le32(ifp->bssidx);
bss_enable.enable = cpu_to_le32(0);

@@ -3855,7 +3860,6 @@ static int brcmf_cfg80211_stop_ap(struct wiphy *wiphy, struct net_device *ndev)
set_bit(BRCMF_VIF_STATUS_AP_CREATING, &ifp->vif->sme_state);
clear_bit(BRCMF_VIF_STATUS_AP_CREATED, &ifp->vif->sme_state);

exit:
return err;
}

@@ -1393,8 +1393,10 @@ int mwifiex_scan_networks(struct mwifiex_private *priv,
queue_work(adapter->workqueue, &adapter->main_work);

/* Perform internal scan synchronously */
if (!priv->scan_request)
if (!priv->scan_request) {
dev_dbg(adapter->dev, "wait internal scan\n");
mwifiex_wait_queue_complete(adapter, cmd_node);
}
} else {
spin_unlock_irqrestore(&adapter->scan_pending_q_lock,
flags);

@@ -1793,7 +1795,12 @@ int mwifiex_ret_802_11_scan(struct mwifiex_private *priv,
/* Need to indicate IOCTL complete */
if (adapter->curr_cmd->wait_q_enabled) {
adapter->cmd_wait_q.status = 0;
mwifiex_complete_cmd(adapter, adapter->curr_cmd);
if (!priv->scan_request) {
dev_dbg(adapter->dev,
"complete internal scan\n");
mwifiex_complete_cmd(adapter,
adapter->curr_cmd);
}
}
if (priv->report_scan_result)
priv->report_scan_result = false;

@@ -20,6 +20,7 @@ if RT2X00
config RT2400PCI
tristate "Ralink rt2400 (PCI/PCMCIA) support"
depends on PCI
select RT2X00_LIB_MMIO
select RT2X00_LIB_PCI
select EEPROM_93CX6
---help---

@@ -31,6 +32,7 @@ config RT2400PCI
config RT2500PCI
tristate "Ralink rt2500 (PCI/PCMCIA) support"
depends on PCI
select RT2X00_LIB_MMIO
select RT2X00_LIB_PCI
select EEPROM_93CX6
---help---

@@ -43,6 +45,7 @@ config RT61PCI
tristate "Ralink rt2501/rt61 (PCI/PCMCIA) support"
depends on PCI
select RT2X00_LIB_PCI
select RT2X00_LIB_MMIO
select RT2X00_LIB_FIRMWARE
select RT2X00_LIB_CRYPTO
select CRC_ITU_T

@@ -57,6 +60,7 @@ config RT2800PCI
tristate "Ralink rt27xx/rt28xx/rt30xx (PCI/PCIe/PCMCIA) support"
depends on PCI || SOC_RT288X || SOC_RT305X
select RT2800_LIB
select RT2X00_LIB_MMIO
select RT2X00_LIB_PCI if PCI
select RT2X00_LIB_SOC if SOC_RT288X || SOC_RT305X
select RT2X00_LIB_FIRMWARE

@@ -185,6 +189,9 @@ endif
config RT2800_LIB
tristate

config RT2X00_LIB_MMIO
tristate

config RT2X00_LIB_PCI
tristate
select RT2X00_LIB

@@ -9,6 +9,7 @@ rt2x00lib-$(CONFIG_RT2X00_LIB_FIRMWARE) += rt2x00firmware.o
rt2x00lib-$(CONFIG_RT2X00_LIB_LEDS) += rt2x00leds.o

obj-$(CONFIG_RT2X00_LIB) += rt2x00lib.o
obj-$(CONFIG_RT2X00_LIB_MMIO) += rt2x00mmio.o
obj-$(CONFIG_RT2X00_LIB_PCI) += rt2x00pci.o
obj-$(CONFIG_RT2X00_LIB_SOC) += rt2x00soc.o
obj-$(CONFIG_RT2X00_LIB_USB) += rt2x00usb.o

@@ -34,6 +34,7 @@
#include <linux/slab.h>

#include "rt2x00.h"
#include "rt2x00mmio.h"
#include "rt2x00pci.h"
#include "rt2400pci.h"

@@ -34,6 +34,7 @@
#include <linux/slab.h>

#include "rt2x00.h"
#include "rt2x00mmio.h"
#include "rt2x00pci.h"
#include "rt2500pci.h"

@@ -41,6 +41,7 @@
#include <linux/eeprom_93cx6.h>

#include "rt2x00.h"
#include "rt2x00mmio.h"
#include "rt2x00pci.h"
#include "rt2x00soc.h"
#include "rt2800lib.h"

@@ -0,0 +1,216 @@
/*
Copyright (C) 2004 - 2009 Ivo van Doorn <IvDoorn@gmail.com>
<http://rt2x00.serialmonkey.com>

This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2 of the License, or
(at your option) any later version.

This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.

You should have received a copy of the GNU General Public License
along with this program; if not, write to the
Free Software Foundation, Inc.,
59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
*/

/*
Module: rt2x00mmio
Abstract: rt2x00 generic mmio device routines.
*/

#include <linux/dma-mapping.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/slab.h>

#include "rt2x00.h"
#include "rt2x00mmio.h"

/*
* Register access.
*/
int rt2x00pci_regbusy_read(struct rt2x00_dev *rt2x00dev,
const unsigned int offset,
const struct rt2x00_field32 field,
u32 *reg)
{
unsigned int i;

if (!test_bit(DEVICE_STATE_PRESENT, &rt2x00dev->flags))
return 0;

for (i = 0; i < REGISTER_BUSY_COUNT; i++) {
rt2x00pci_register_read(rt2x00dev, offset, reg);
if (!rt2x00_get_field32(*reg, field))
return 1;
udelay(REGISTER_BUSY_DELAY);
}

printk_once(KERN_ERR "%s() Indirect register access failed: "
"offset=0x%.08x, value=0x%.08x\n", __func__, offset, *reg);
*reg = ~0;

return 0;
}
EXPORT_SYMBOL_GPL(rt2x00pci_regbusy_read);

bool rt2x00pci_rxdone(struct rt2x00_dev *rt2x00dev)
{
struct data_queue *queue = rt2x00dev->rx;
struct queue_entry *entry;
struct queue_entry_priv_pci *entry_priv;
struct skb_frame_desc *skbdesc;
int max_rx = 16;

while (--max_rx) {
entry = rt2x00queue_get_entry(queue, Q_INDEX);
entry_priv = entry->priv_data;

if (rt2x00dev->ops->lib->get_entry_state(entry))
break;

/*
* Fill in desc fields of the skb descriptor
*/
skbdesc = get_skb_frame_desc(entry->skb);
skbdesc->desc = entry_priv->desc;
skbdesc->desc_len = entry->queue->desc_size;

/*
* DMA is already done, notify rt2x00lib that
* it finished successfully.
*/
rt2x00lib_dmastart(entry);
rt2x00lib_dmadone(entry);

/*
* Send the frame to rt2x00lib for further processing.
*/
rt2x00lib_rxdone(entry, GFP_ATOMIC);
}

return !max_rx;
}
EXPORT_SYMBOL_GPL(rt2x00pci_rxdone);

void rt2x00pci_flush_queue(struct data_queue *queue, bool drop)
{
unsigned int i;

for (i = 0; !rt2x00queue_empty(queue) && i < 10; i++)
msleep(10);
}
EXPORT_SYMBOL_GPL(rt2x00pci_flush_queue);

/*
* Device initialization handlers.
*/
static int rt2x00pci_alloc_queue_dma(struct rt2x00_dev *rt2x00dev,
struct data_queue *queue)
{
struct queue_entry_priv_pci *entry_priv;
void *addr;
dma_addr_t dma;
unsigned int i;

/*
* Allocate DMA memory for descriptor and buffer.
*/
addr = dma_alloc_coherent(rt2x00dev->dev,
queue->limit * queue->desc_size,
&dma, GFP_KERNEL);
if (!addr)
return -ENOMEM;

memset(addr, 0, queue->limit * queue->desc_size);

/*
* Initialize all queue entries to contain valid addresses.
*/
for (i = 0; i < queue->limit; i++) {
entry_priv = queue->entries[i].priv_data;
entry_priv->desc = addr + i * queue->desc_size;
entry_priv->desc_dma = dma + i * queue->desc_size;
}

return 0;
}

static void rt2x00pci_free_queue_dma(struct rt2x00_dev *rt2x00dev,
struct data_queue *queue)
{
struct queue_entry_priv_pci *entry_priv =
queue->entries[0].priv_data;

if (entry_priv->desc)
dma_free_coherent(rt2x00dev->dev,
queue->limit * queue->desc_size,
entry_priv->desc, entry_priv->desc_dma);
entry_priv->desc = NULL;
}

int rt2x00pci_initialize(struct rt2x00_dev *rt2x00dev)
{
struct data_queue *queue;
int status;

/*
* Allocate DMA
*/
queue_for_each(rt2x00dev, queue) {
status = rt2x00pci_alloc_queue_dma(rt2x00dev, queue);
if (status)
goto exit;
}

/*
* Register interrupt handler.
*/
status = request_irq(rt2x00dev->irq,
rt2x00dev->ops->lib->irq_handler,
IRQF_SHARED, rt2x00dev->name, rt2x00dev);
if (status) {
ERROR(rt2x00dev, "IRQ %d allocation failed (error %d).\n",
rt2x00dev->irq, status);
goto exit;
}

return 0;

exit:
queue_for_each(rt2x00dev, queue)
rt2x00pci_free_queue_dma(rt2x00dev, queue);

return status;
}
EXPORT_SYMBOL_GPL(rt2x00pci_initialize);

void rt2x00pci_uninitialize(struct rt2x00_dev *rt2x00dev)
{
struct data_queue *queue;

/*
* Free irq line.
*/
free_irq(rt2x00dev->irq, rt2x00dev);

/*
* Free DMA
*/
queue_for_each(rt2x00dev, queue)
rt2x00pci_free_queue_dma(rt2x00dev, queue);
}
EXPORT_SYMBOL_GPL(rt2x00pci_uninitialize);

/*
* rt2x00mmio module information.
*/
MODULE_AUTHOR(DRV_PROJECT);
MODULE_VERSION(DRV_VERSION);
MODULE_DESCRIPTION("rt2x00 mmio library");
MODULE_LICENSE("GPL");

@@ -0,0 +1,119 @@
/*
Copyright (C) 2004 - 2009 Ivo van Doorn <IvDoorn@gmail.com>
<http://rt2x00.serialmonkey.com>

This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2 of the License, or
(at your option) any later version.

This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.

You should have received a copy of the GNU General Public License
along with this program; if not, write to the
Free Software Foundation, Inc.,
59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
*/

/*
Module: rt2x00mmio
Abstract: Data structures for the rt2x00mmio module.
*/

#ifndef RT2X00MMIO_H
#define RT2X00MMIO_H

#include <linux/io.h>

/*
* Register access.
*/
static inline void rt2x00pci_register_read(struct rt2x00_dev *rt2x00dev,
const unsigned int offset,
u32 *value)
{
*value = readl(rt2x00dev->csr.base + offset);
}

static inline void rt2x00pci_register_multiread(struct rt2x00_dev *rt2x00dev,
const unsigned int offset,
void *value, const u32 length)
{
memcpy_fromio(value, rt2x00dev->csr.base + offset, length);
}

static inline void rt2x00pci_register_write(struct rt2x00_dev *rt2x00dev,
const unsigned int offset,
u32 value)
{
writel(value, rt2x00dev->csr.base + offset);
}

static inline void rt2x00pci_register_multiwrite(struct rt2x00_dev *rt2x00dev,
const unsigned int offset,
const void *value,
const u32 length)
{
__iowrite32_copy(rt2x00dev->csr.base + offset, value, length >> 2);
}

/**
* rt2x00pci_regbusy_read - Read from register with busy check
* @rt2x00dev: Device pointer, see &struct rt2x00_dev.
* @offset: Register offset
* @field: Field to check if register is busy
* @reg: Pointer to where register contents should be stored
*
* This function will read the given register, and checks if the
* register is busy. If it is, it will sleep for a couple of
* microseconds before reading the register again. If the register
* is not read after a certain timeout, this function will return
* FALSE.
*/
int rt2x00pci_regbusy_read(struct rt2x00_dev *rt2x00dev,
const unsigned int offset,
const struct rt2x00_field32 field,
u32 *reg);

/**
* struct queue_entry_priv_pci: Per entry PCI specific information
*
* @desc: Pointer to device descriptor
* @desc_dma: DMA pointer to &desc.
* @data: Pointer to device's entry memory.
* @data_dma: DMA pointer to &data.
*/
struct queue_entry_priv_pci {
__le32 *desc;
dma_addr_t desc_dma;
};

/**
* rt2x00pci_rxdone - Handle RX done events
* @rt2x00dev: Device pointer, see &struct rt2x00_dev.
*
* Returns true if there are still rx frames pending and false if all
* pending rx frames were processed.
*/
bool rt2x00pci_rxdone(struct rt2x00_dev *rt2x00dev);

/**
* rt2x00pci_flush_queue - Flush data queue
* @queue: Data queue to stop
* @drop: True to drop all pending frames.
*
* This will wait for a maximum of 100ms, waiting for the queues
* to become empty.
*/
void rt2x00pci_flush_queue(struct data_queue *queue, bool drop);

/*
* Device initialization handlers.
*/
int rt2x00pci_initialize(struct rt2x00_dev *rt2x00dev);
void rt2x00pci_uninitialize(struct rt2x00_dev *rt2x00dev);

#endif /* RT2X00MMIO_H */

@@ -32,182 +32,6 @@
#include "rt2x00.h"
#include "rt2x00pci.h"

/*
* Register access.
*/
int rt2x00pci_regbusy_read(struct rt2x00_dev *rt2x00dev,
const unsigned int offset,
const struct rt2x00_field32 field,
u32 *reg)
{
unsigned int i;

if (!test_bit(DEVICE_STATE_PRESENT, &rt2x00dev->flags))
return 0;

for (i = 0; i < REGISTER_BUSY_COUNT; i++) {
rt2x00pci_register_read(rt2x00dev, offset, reg);
if (!rt2x00_get_field32(*reg, field))
return 1;
udelay(REGISTER_BUSY_DELAY);
}

ERROR(rt2x00dev, "Indirect register access failed: "
"offset=0x%.08x, value=0x%.08x\n", offset, *reg);
*reg = ~0;

return 0;
}
EXPORT_SYMBOL_GPL(rt2x00pci_regbusy_read);

bool rt2x00pci_rxdone(struct rt2x00_dev *rt2x00dev)
{
struct data_queue *queue = rt2x00dev->rx;
struct queue_entry *entry;
struct queue_entry_priv_pci *entry_priv;
struct skb_frame_desc *skbdesc;
int max_rx = 16;

while (--max_rx) {
entry = rt2x00queue_get_entry(queue, Q_INDEX);
entry_priv = entry->priv_data;

if (rt2x00dev->ops->lib->get_entry_state(entry))
break;

/*
* Fill in desc fields of the skb descriptor
*/
skbdesc = get_skb_frame_desc(entry->skb);
skbdesc->desc = entry_priv->desc;
skbdesc->desc_len = entry->queue->desc_size;

/*
* DMA is already done, notify rt2x00lib that
* it finished successfully.
*/
rt2x00lib_dmastart(entry);
rt2x00lib_dmadone(entry);

/*
* Send the frame to rt2x00lib for further processing.
*/
rt2x00lib_rxdone(entry, GFP_ATOMIC);
}

return !max_rx;
}
EXPORT_SYMBOL_GPL(rt2x00pci_rxdone);

void rt2x00pci_flush_queue(struct data_queue *queue, bool drop)
{
unsigned int i;

for (i = 0; !rt2x00queue_empty(queue) && i < 10; i++)
msleep(10);
}
EXPORT_SYMBOL_GPL(rt2x00pci_flush_queue);

/*
* Device initialization handlers.
*/
static int rt2x00pci_alloc_queue_dma(struct rt2x00_dev *rt2x00dev,
struct data_queue *queue)
{
struct queue_entry_priv_pci *entry_priv;
void *addr;
dma_addr_t dma;
unsigned int i;

/*
* Allocate DMA memory for descriptor and buffer.
*/
addr = dma_alloc_coherent(rt2x00dev->dev,
queue->limit * queue->desc_size,
&dma, GFP_KERNEL);
if (!addr)
return -ENOMEM;

memset(addr, 0, queue->limit * queue->desc_size);

/*
* Initialize all queue entries to contain valid addresses.
*/
for (i = 0; i < queue->limit; i++) {
entry_priv = queue->entries[i].priv_data;
entry_priv->desc = addr + i * queue->desc_size;
entry_priv->desc_dma = dma + i * queue->desc_size;
}

return 0;
}

static void rt2x00pci_free_queue_dma(struct rt2x00_dev *rt2x00dev,
struct data_queue *queue)
{
struct queue_entry_priv_pci *entry_priv =
queue->entries[0].priv_data;

if (entry_priv->desc)
dma_free_coherent(rt2x00dev->dev,
queue->limit * queue->desc_size,
entry_priv->desc, entry_priv->desc_dma);
entry_priv->desc = NULL;
}

int rt2x00pci_initialize(struct rt2x00_dev *rt2x00dev)
{
struct data_queue *queue;
int status;

/*
* Allocate DMA
*/
queue_for_each(rt2x00dev, queue) {
status = rt2x00pci_alloc_queue_dma(rt2x00dev, queue);
if (status)
goto exit;
}

/*
* Register interrupt handler.
*/
status = request_irq(rt2x00dev->irq,
rt2x00dev->ops->lib->irq_handler,
IRQF_SHARED, rt2x00dev->name, rt2x00dev);
if (status) {
ERROR(rt2x00dev, "IRQ %d allocation failed (error %d).\n",
rt2x00dev->irq, status);
goto exit;
}

return 0;

exit:
queue_for_each(rt2x00dev, queue)
rt2x00pci_free_queue_dma(rt2x00dev, queue);

return status;
}
EXPORT_SYMBOL_GPL(rt2x00pci_initialize);

void rt2x00pci_uninitialize(struct rt2x00_dev *rt2x00dev)
{
struct data_queue *queue;

/*
* Free irq line.
*/
free_irq(rt2x00dev->irq, rt2x00dev);

/*
* Free DMA
*/
queue_for_each(rt2x00dev, queue)
rt2x00pci_free_queue_dma(rt2x00dev, queue);
}
EXPORT_SYMBOL_GPL(rt2x00pci_uninitialize);

/*
* PCI driver handlers.
*/

@@ -35,94 +35,6 @@
*/
#define PCI_DEVICE_DATA(__ops) .driver_data = (kernel_ulong_t)(__ops)

/*
* Register access.
*/
static inline void rt2x00pci_register_read(struct rt2x00_dev *rt2x00dev,
const unsigned int offset,
u32 *value)
{
*value = readl(rt2x00dev->csr.base + offset);
}

static inline void rt2x00pci_register_multiread(struct rt2x00_dev *rt2x00dev,
const unsigned int offset,
void *value, const u32 length)
{
memcpy_fromio(value, rt2x00dev->csr.base + offset, length);
}

static inline void rt2x00pci_register_write(struct rt2x00_dev *rt2x00dev,
const unsigned int offset,
u32 value)
{
writel(value, rt2x00dev->csr.base + offset);
}

static inline void rt2x00pci_register_multiwrite(struct rt2x00_dev *rt2x00dev,
const unsigned int offset,
const void *value,
const u32 length)
{
__iowrite32_copy(rt2x00dev->csr.base + offset, value, length >> 2);
}

/**
* rt2x00pci_regbusy_read - Read from register with busy check
* @rt2x00dev: Device pointer, see &struct rt2x00_dev.
* @offset: Register offset
* @field: Field to check if register is busy
* @reg: Pointer to where register contents should be stored
*
* This function will read the given register, and checks if the
* register is busy. If it is, it will sleep for a couple of
* microseconds before reading the register again. If the register
* is not read after a certain timeout, this function will return
* FALSE.
*/
int rt2x00pci_regbusy_read(struct rt2x00_dev *rt2x00dev,
const unsigned int offset,
const struct rt2x00_field32 field,
u32 *reg);

/**
* struct queue_entry_priv_pci: Per entry PCI specific information
*
* @desc: Pointer to device descriptor
* @desc_dma: DMA pointer to &desc.
* @data: Pointer to device's entry memory.
* @data_dma: DMA pointer to &data.
*/
struct queue_entry_priv_pci {
__le32 *desc;
dma_addr_t desc_dma;
};

/**
* rt2x00pci_rxdone - Handle RX done events
* @rt2x00dev: Device pointer, see &struct rt2x00_dev.
*
* Returns true if there are still rx frames pending and false if all
* pending rx frames were processed.
*/
bool rt2x00pci_rxdone(struct rt2x00_dev *rt2x00dev);

/**
* rt2x00pci_flush_queue - Flush data queue
* @queue: Data queue to stop
* @drop: True to drop all pending frames.
*
* This will wait for a maximum of 100ms, waiting for the queues
* to become empty.
*/
void rt2x00pci_flush_queue(struct data_queue *queue, bool drop);

/*
* Device initialization handlers.
*/
int rt2x00pci_initialize(struct rt2x00_dev *rt2x00dev);
void rt2x00pci_uninitialize(struct rt2x00_dev *rt2x00dev);

/*
* PCI driver handlers.
*/

@@ -35,6 +35,7 @@
#include <linux/eeprom_93cx6.h>

#include "rt2x00.h"
#include "rt2x00mmio.h"
#include "rt2x00pci.h"
#include "rt61pci.h"

@@ -4,7 +4,7 @@ menu "Remoteproc drivers"
config REMOTEPROC
tristate
depends on HAS_DMA
select FW_CONFIG
select FW_LOADER
select VIRTIO

config OMAP_REMOTEPROC

@@ -217,7 +217,7 @@ int rproc_alloc_vring(struct rproc_vdev *rvdev, int i)
* TODO: support predefined notifyids (via resource table)
*/
ret = idr_alloc(&rproc->notifyids, rvring, 0, 0, GFP_KERNEL);
if (ret) {
if (ret < 0) {
dev_err(dev, "idr_alloc failed: %d\n", ret);
dma_free_coherent(dev->parent, size, va, dma);
return ret;

@@ -366,10 +366,12 @@ static int rproc_handle_vdev(struct rproc *rproc, struct fw_rsc_vdev *rsc,
/* it is now safe to add the virtio device */
ret = rproc_add_virtio_dev(rvdev, rsc->id);
if (ret)
goto free_rvdev;
goto remove_rvdev;

return 0;

remove_rvdev:
list_del(&rvdev->node);
free_rvdev:
kfree(rvdev);
return ret;

@@ -240,6 +240,8 @@ static int sproc_drv_remove(struct platform_device *pdev)

/* Unregister as remoteproc device */
rproc_del(sproc->rproc);
dma_free_coherent(sproc->rproc->dev.parent, SPROC_FW_SIZE,
sproc->fw_addr, sproc->fw_dma_addr);
rproc_put(sproc->rproc);

mdev->drv_data = NULL;

@@ -297,10 +299,13 @@ static int sproc_probe(struct platform_device *pdev)
/* Register as a remoteproc device */
err = rproc_add(rproc);
if (err)
goto free_rproc;
goto free_mem;

return 0;

free_mem:
dma_free_coherent(rproc->dev.parent, SPROC_FW_SIZE,
sproc->fw_addr, sproc->fw_dma_addr);
free_rproc:
/* Reset device data upon error */
mdev->drv_data = NULL;

@@ -769,6 +769,7 @@ struct qeth_card {
unsigned long thread_start_mask;
unsigned long thread_allowed_mask;
unsigned long thread_running_mask;
struct task_struct *recovery_task;
spinlock_t ip_lock;
struct list_head ip_list;
struct list_head *ip_tbd_list;

@@ -862,6 +863,8 @@ extern struct qeth_card_list_struct qeth_core_card_list;
extern struct kmem_cache *qeth_core_header_cache;
extern struct qeth_dbf_info qeth_dbf[QETH_DBF_INFOS];

void qeth_set_recovery_task(struct qeth_card *);
void qeth_clear_recovery_task(struct qeth_card *);
void qeth_set_allowed_threads(struct qeth_card *, unsigned long , int);
int qeth_threads_running(struct qeth_card *, unsigned long);
int qeth_wait_for_threads(struct qeth_card *, unsigned long);

@@ -177,6 +177,23 @@ const char *qeth_get_cardname_short(struct qeth_card *card)
return "n/a";
}

void qeth_set_recovery_task(struct qeth_card *card)
{
card->recovery_task = current;
}
EXPORT_SYMBOL_GPL(qeth_set_recovery_task);

void qeth_clear_recovery_task(struct qeth_card *card)
{
card->recovery_task = NULL;
}
EXPORT_SYMBOL_GPL(qeth_clear_recovery_task);

static bool qeth_is_recovery_task(const struct qeth_card *card)
{
return card->recovery_task == current;
}

void qeth_set_allowed_threads(struct qeth_card *card, unsigned long threads,
int clear_start_mask)
{

@@ -205,6 +222,8 @@ EXPORT_SYMBOL_GPL(qeth_threads_running);

int qeth_wait_for_threads(struct qeth_card *card, unsigned long threads)
{
if (qeth_is_recovery_task(card))
return 0;
return wait_event_interruptible(card->wait_q,
qeth_threads_running(card, threads) == 0);
}

@@ -1143,6 +1143,7 @@ static int qeth_l2_recover(void *ptr)
QETH_CARD_TEXT(card, 2, "recover2");
dev_warn(&card->gdev->dev,
"A recovery process has been started for the device\n");
qeth_set_recovery_task(card);
__qeth_l2_set_offline(card->gdev, 1);
rc = __qeth_l2_set_online(card->gdev, 1);
if (!rc)

@@ -1153,6 +1154,7 @@ static int qeth_l2_recover(void *ptr)
dev_warn(&card->gdev->dev, "The qeth device driver "
"failed to recover an error on the device\n");
}
qeth_clear_recovery_task(card);
qeth_clear_thread_start_bit(card, QETH_RECOVER_THREAD);
qeth_clear_thread_running_bit(card, QETH_RECOVER_THREAD);
return 0;

@@ -3515,6 +3515,7 @@ static int qeth_l3_recover(void *ptr)
QETH_CARD_TEXT(card, 2, "recover2");
dev_warn(&card->gdev->dev,
"A recovery process has been started for the device\n");
qeth_set_recovery_task(card);
__qeth_l3_set_offline(card->gdev, 1);
rc = __qeth_l3_set_online(card->gdev, 1);
if (!rc)

@@ -3525,6 +3526,7 @@ static int qeth_l3_recover(void *ptr)
dev_warn(&card->gdev->dev, "The qeth device driver "
"failed to recover an error on the device\n");
}
qeth_clear_recovery_task(card);
qeth_clear_thread_start_bit(card, QETH_RECOVER_THREAD);
qeth_clear_thread_running_bit(card, QETH_RECOVER_THREAD);
return 0;

@@ -1899,8 +1899,8 @@ static int ibmvscsi_slave_configure(struct scsi_device *sdev)
sdev->allow_restart = 1;
blk_queue_rq_timeout(sdev->request_queue, 120 * HZ);
}
scsi_adjust_queue_depth(sdev, 0, shost->cmd_per_lun);
spin_unlock_irqrestore(shost->host_lock, lock_flags);
scsi_adjust_queue_depth(sdev, 0, shost->cmd_per_lun);
return 0;
}

@@ -5148,7 +5148,7 @@ static int ipr_cancel_op(struct scsi_cmnd *scsi_cmd)
ipr_trace;
}

list_add_tail(&ipr_cmd->queue, &hrrq->hrrq_free_q);
list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q);
if (!ipr_is_naca_model(res))
res->needs_sync_complete = 1;

@@ -9349,7 +9349,10 @@ static int ipr_test_msi(struct ipr_ioa_cfg *ioa_cfg, struct pci_dev *pdev)
int_reg = readl(ioa_cfg->regs.sense_interrupt_mask_reg);
spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);

rc = request_irq(pdev->irq, ipr_test_intr, 0, IPR_NAME, ioa_cfg);
if (ioa_cfg->intr_flag == IPR_USE_MSIX)
rc = request_irq(ioa_cfg->vectors_info[0].vec, ipr_test_intr, 0, IPR_NAME, ioa_cfg);
else
rc = request_irq(pdev->irq, ipr_test_intr, 0, IPR_NAME, ioa_cfg);
if (rc) {
dev_err(&pdev->dev, "Can not assign irq %d\n", pdev->irq);
return rc;

@@ -9371,7 +9374,10 @@ static int ipr_test_msi(struct ipr_ioa_cfg *ioa_cfg, struct pci_dev *pdev)

spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);

free_irq(pdev->irq, ioa_cfg);
if (ioa_cfg->intr_flag == IPR_USE_MSIX)
free_irq(ioa_cfg->vectors_info[0].vec, ioa_cfg);
else
free_irq(pdev->irq, ioa_cfg);

LEAVE;

@@ -9722,6 +9728,7 @@ static void __ipr_remove(struct pci_dev *pdev)
spin_unlock_irqrestore(ioa_cfg->host->host_lock, host_lock_flags);
wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
flush_work(&ioa_cfg->work_q);
INIT_LIST_HEAD(&ioa_cfg->used_res_q);
spin_lock_irqsave(ioa_cfg->host->host_lock, host_lock_flags);

spin_lock(&ipr_driver_lock);

@@ -235,6 +235,17 @@ static void sas_set_ex_phy(struct domain_device *dev, int phy_id, void *rsp)
linkrate = phy->linkrate;
memcpy(sas_addr, phy->attached_sas_addr, SAS_ADDR_SIZE);

/* Handle vacant phy - rest of dr data is not valid so skip it */
if (phy->phy_state == PHY_VACANT) {
memset(phy->attached_sas_addr, 0, SAS_ADDR_SIZE);
phy->attached_dev_type = NO_DEVICE;
if (!test_bit(SAS_HA_ATA_EH_ACTIVE, &ha->state)) {
phy->phy_id = phy_id;
goto skip;
} else
goto out;
}

phy->attached_dev_type = to_dev_type(dr);
if (test_bit(SAS_HA_ATA_EH_ACTIVE, &ha->state))
goto out;

@@ -272,6 +283,7 @@ static void sas_set_ex_phy(struct domain_device *dev, int phy_id, void *rsp)
phy->phy->maximum_linkrate = dr->pmax_linkrate;
phy->phy->negotiated_linkrate = phy->linkrate;

skip:
if (new_phy)
if (sas_phy_add(phy->phy)) {
sas_phy_free(phy->phy);

@@ -388,7 +400,7 @@ int sas_ex_phy_discover(struct domain_device *dev, int single)
if (!disc_req)
return -ENOMEM;

disc_resp = alloc_smp_req(DISCOVER_RESP_SIZE);
disc_resp = alloc_smp_resp(DISCOVER_RESP_SIZE);
if (!disc_resp) {
kfree(disc_req);
return -ENOMEM;

@@ -438,11 +438,12 @@ lpfc_sli4_rq_put(struct lpfc_queue *hq, struct lpfc_queue *dq,
struct lpfc_rqe *temp_hrqe;
struct lpfc_rqe *temp_drqe;
struct lpfc_register doorbell;
int put_index = hq->host_index;
int put_index;

/* sanity check on queue memory */
if (unlikely(!hq) || unlikely(!dq))
return -ENOMEM;
put_index = hq->host_index;
temp_hrqe = hq->qe[hq->host_index].rqe;
temp_drqe = dq->qe[dq->host_index].rqe;

@@ -1938,11 +1938,6 @@ qla24xx_vport_delete(struct fc_vport *fc_vport)
"Timer for the VP[%d] has stopped\n", vha->vp_idx);
}

/* No pending activities shall be there on the vha now */
if (ql2xextended_error_logging & ql_dbg_user)
msleep(random32()%10); /* Just to see if something falls on
* the net we have placed below */

BUG_ON(atomic_read(&vha->vref_count));

qla2x00_free_fcports(vha);

@@ -15,6 +15,7 @@
* | Mailbox commands | 0x115b | 0x111a-0x111b |
* | | | 0x112c-0x112e |
* | | | 0x113a |
* | | | 0x1155-0x1158 |
* | Device Discovery | 0x2087 | 0x2020-0x2022, |
* | | | 0x2016 |
* | Queue Command and IO tracing | 0x3031 | 0x3006-0x300b |

@@ -401,7 +402,7 @@ qla2xxx_copy_atioqueues(struct qla_hw_data *ha, void *ptr,
void *ring;
} aq, *aqp;

if (!ha->tgt.atio_q_length)
if (!ha->tgt.atio_ring)
return ptr;

num_queues = 1;

@@ -863,7 +863,6 @@ typedef struct {
#define MBX_1 BIT_1
#define MBX_0 BIT_0

#define RNID_TYPE_SET_VERSION 0x9
#define RNID_TYPE_ASIC_TEMP 0xC

/*

@@ -357,9 +357,6 @@ qla2x00_enable_fce_trace(scsi_qla_host_t *, dma_addr_t, uint16_t , uint16_t *,
extern int
qla2x00_disable_fce_trace(scsi_qla_host_t *, uint64_t *, uint64_t *);

extern int
qla2x00_set_driver_version(scsi_qla_host_t *, char *);

extern int
qla2x00_read_sfp(scsi_qla_host_t *, dma_addr_t, uint8_t *,
uint16_t, uint16_t, uint16_t, uint16_t);

@@ -619,8 +619,6 @@ qla2x00_initialize_adapter(scsi_qla_host_t *vha)
if (IS_QLA24XX_TYPE(ha) || IS_QLA25XX(ha))
qla24xx_read_fcp_prio_cfg(vha);

qla2x00_set_driver_version(vha, QLA2XXX_VERSION);

return (rval);
}

@@ -1399,7 +1397,7 @@ qla2x00_alloc_fw_dump(scsi_qla_host_t *vha)
mq_size += ha->max_rsp_queues *
(rsp->length * sizeof(response_t));
}
if (ha->tgt.atio_q_length)
if (ha->tgt.atio_ring)
mq_size += ha->tgt.atio_q_length * sizeof(request_t);
/* Allocate memory for Fibre Channel Event Buffer. */
if (!IS_QLA25XX(ha) && !IS_QLA81XX(ha) && !IS_QLA83XX(ha))

@@ -3866,64 +3866,6 @@ qla81xx_restart_mpi_firmware(scsi_qla_host_t *vha)
return rval;
}

int
qla2x00_set_driver_version(scsi_qla_host_t *vha, char *version)
{
int rval;
mbx_cmd_t mc;
mbx_cmd_t *mcp = &mc;
int len;
uint16_t dwlen;
uint8_t *str;
dma_addr_t str_dma;
struct qla_hw_data *ha = vha->hw;

if (!IS_FWI2_CAPABLE(ha) || IS_QLA82XX(ha))
return QLA_FUNCTION_FAILED;

ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1155,
"Entered %s.\n", __func__);

str = dma_pool_alloc(ha->s_dma_pool, GFP_KERNEL, &str_dma);
if (!str) {
ql_log(ql_log_warn, vha, 0x1156,
"Failed to allocate driver version param.\n");
return QLA_MEMORY_ALLOC_FAILED;
}

memcpy(str, "\x7\x3\x11\x0", 4);
dwlen = str[0];
len = dwlen * sizeof(uint32_t) - 4;
memset(str + 4, 0, len);
if (len > strlen(version))
len = strlen(version);
memcpy(str + 4, version, len);

mcp->mb[0] = MBC_SET_RNID_PARAMS;
mcp->mb[1] = RNID_TYPE_SET_VERSION << 8 | dwlen;
mcp->mb[2] = MSW(LSD(str_dma));
mcp->mb[3] = LSW(LSD(str_dma));
mcp->mb[6] = MSW(MSD(str_dma));
mcp->mb[7] = LSW(MSD(str_dma));
mcp->out_mb = MBX_7|MBX_6|MBX_3|MBX_2|MBX_1|MBX_0;
mcp->in_mb = MBX_0;
mcp->tov = MBX_TOV_SECONDS;
mcp->flags = 0;
rval = qla2x00_mailbox_command(vha, mcp);

if (rval != QLA_SUCCESS) {
ql_dbg(ql_dbg_mbx, vha, 0x1157,
"Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]);
} else {
ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1158,
"Done %s.\n", __func__);
}

dma_pool_free(ha->s_dma_pool, str, str_dma);

return rval;
}

static int
qla2x00_read_asic_temperature(scsi_qla_host_t *vha, uint16_t *temp)
{

@@ -7,7 +7,7 @@
/*
* Driver version
*/
#define QLA2XXX_VERSION "8.04.00.08-k"
#define QLA2XXX_VERSION "8.04.00.13-k"

#define QLA_DRIVER_MAJOR_VER 8
#define QLA_DRIVER_MINOR_VER 4

@@ -4112,6 +4112,10 @@ static int st_probe(struct device *dev)
tpnt->disk = disk;
disk->private_data = &tpnt->driver;
disk->queue = SDp->request_queue;
/* SCSI tape doesn't register this gendisk via add_disk(). Manually
* take queue reference that release_disk() expects. */
if (!blk_get_queue(disk->queue))
goto out_put_disk;
tpnt->driver = &st_template;

tpnt->device = SDp;

@@ -4185,7 +4189,7 @@ static int st_probe(struct device *dev)
idr_preload_end();
if (error < 0) {
pr_warn("st: idr allocation failed: %d\n", error);
goto out_put_disk;
goto out_put_queue;
}
tpnt->index = error;
sprintf(disk->disk_name, "st%d", tpnt->index);

@@ -4211,6 +4215,8 @@ static int st_probe(struct device *dev)
spin_lock(&st_index_lock);
idr_remove(&st_index_idr, tpnt->index);
spin_unlock(&st_index_lock);
out_put_queue:
blk_put_queue(disk->queue);
out_put_disk:
put_disk(disk);
kfree(tpnt);

@@ -409,6 +409,7 @@ static inline int core_alua_state_standby(
 	case REPORT_LUNS:
 	case RECEIVE_DIAGNOSTIC:
 	case SEND_DIAGNOSTIC:
+		return 0;
 	case MAINTENANCE_IN:
 		switch (cdb[1] & 0x1f) {
 		case MI_REPORT_TARGET_PGS:

@@ -451,6 +452,7 @@ static inline int core_alua_state_unavailable(
 	switch (cdb[0]) {
 	case INQUIRY:
 	case REPORT_LUNS:
+		return 0;
 	case MAINTENANCE_IN:
 		switch (cdb[1] & 0x1f) {
 		case MI_REPORT_TARGET_PGS:

@@ -491,6 +493,7 @@ static inline int core_alua_state_transition(
 	switch (cdb[0]) {
 	case INQUIRY:
 	case REPORT_LUNS:
+		return 0;
 	case MAINTENANCE_IN:
 		switch (cdb[1] & 0x1f) {
 		case MI_REPORT_TARGET_PGS:

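Each hunk inserts a return 0; so the unconditionally allowed opcodes stop falling through into the MAINTENANCE_IN arm, which only admits a matching service action. A minimal standalone illustration of why the missing return mattered, using the standard SCSI opcode values:

#include <stdio.h>

enum { INQUIRY = 0x12, REPORT_LUNS = 0xa0, MAINTENANCE_IN = 0xa3 };
#define MI_REPORT_TARGET_PGS 0x0a

/* Returns 0 if the CDB is allowed in this ALUA state, 1 otherwise. */
static int state_check(const unsigned char *cdb)
{
    switch (cdb[0]) {
    case INQUIRY:
    case REPORT_LUNS:
        return 0;       /* without this, control falls into the arm below */
    case MAINTENANCE_IN:
        switch (cdb[1] & 0x1f) {
        case MI_REPORT_TARGET_PGS:
            return 0;
        default:
            return 1;
        }
    default:
        return 1;
    }
}

int main(void)
{
    unsigned char inquiry[2] = { INQUIRY, 0x00 };

    /* With the return in place INQUIRY is allowed; without it, cdb[1]
     * would be misread as a MAINTENANCE_IN service action and rejected. */
    printf("INQUIRY allowed: %s\n", state_check(inquiry) ? "no" : "yes");
    return 0;
}
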
@@ -2643,9 +2643,9 @@ static int mxser_probe(struct pci_dev *pdev,
 				mxvar_sdriver, brd->idx + i, &pdev->dev);
 		if (IS_ERR(tty_dev)) {
 			retval = PTR_ERR(tty_dev);
-			for (i--; i >= 0; i--)
+			for (; i > 0; i--)
 				tty_unregister_device(mxvar_sdriver,
-					brd->idx + i);
+					brd->idx + i - 1);
 			goto err_relbrd;
 		}
 	}

@@ -2751,9 +2751,9 @@ static int __init mxser_module_init(void)
 		tty_dev = tty_port_register_device(&brd->ports[i].port,
 				mxvar_sdriver, brd->idx + i, NULL);
 		if (IS_ERR(tty_dev)) {
-			for (i--; i >= 0; i--)
+			for (; i > 0; i--)
 				tty_unregister_device(mxvar_sdriver,
-					brd->idx + i);
+					brd->idx + i - 1);
 			for (i = 0; i < brd->info->nports; i++)
 				tty_port_destroy(&brd->ports[i].port);
 			free_irq(brd->irq, brd);

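Both hunks rework the partial-unregistration loop that runs when registering port i fails: ports 0 through i-1 must be torn down, and the new form counts i down to 0 rather than decrementing it below zero first. A standalone sketch of the pattern with stub register/unregister calls:

#include <stdio.h>

#define NPORTS 4

static int register_port(int idx)
{
    if (idx == 2)               /* simulate failure on the third port */
        return -1;
    printf("registered %d\n", idx);
    return 0;
}

static void unregister_port(int idx)
{
    printf("unregistered %d\n", idx);
}

int main(void)
{
    int i;

    for (i = 0; i < NPORTS; i++) {
        if (register_port(i) < 0) {
            /* Ports 0..i-1 succeeded; release them newest-first.
             * This mirrors the driver's for (; i > 0; i--) form and
             * leaves i at 0, not -1, when the loop finishes. */
            for (; i > 0; i--)
                unregister_port(i - 1);
            return 1;
        }
    }
    return 0;
}
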
@@ -429,7 +429,6 @@ serial_pnp_probe(struct pnp_dev *dev, const struct pnp_device_id *dev_id)
 {
 	struct uart_8250_port uart;
 	int ret, line, flags = dev_id->driver_data;
-	struct resource *res = NULL;
 
 	if (flags & UNKNOWN_DEV) {
 		ret = serial_pnp_guess_board(dev);

@@ -440,12 +439,11 @@ serial_pnp_probe(struct pnp_dev *dev, const struct pnp_device_id *dev_id)
 	memset(&uart, 0, sizeof(uart));
 	if (pnp_irq_valid(dev, 0))
 		uart.port.irq = pnp_irq(dev, 0);
-	if ((flags & CIR_PORT) && pnp_port_valid(dev, 2))
-		res = pnp_get_resource(dev, IORESOURCE_IO, 2);
-	else if (pnp_port_valid(dev, 0))
-		res = pnp_get_resource(dev, IORESOURCE_IO, 0);
-	if (pnp_resource_enabled(res)) {
-		uart.port.iobase = res->start;
+	if ((flags & CIR_PORT) && pnp_port_valid(dev, 2)) {
+		uart.port.iobase = pnp_port_start(dev, 2);
+		uart.port.iotype = UPIO_PORT;
+	} else if (pnp_port_valid(dev, 0)) {
+		uart.port.iobase = pnp_port_start(dev, 0);
+		uart.port.iotype = UPIO_PORT;
 	} else if (pnp_mem_valid(dev, 0)) {
 		uart.port.mapbase = pnp_mem_start(dev, 0);

@@ -886,6 +886,17 @@ serial_omap_set_termios(struct uart_port *port, struct ktermios *termios,
 		serial_out(up, UART_MCR, up->mcr | UART_MCR_TCRTLR);
 		/* FIFO ENABLE, DMA MODE */
 
+		up->scr |= OMAP_UART_SCR_RX_TRIG_GRANU1_MASK;
+		/*
+		 * NOTE: Setting OMAP_UART_SCR_RX_TRIG_GRANU1_MASK
+		 * sets Enables the granularity of 1 for TRIGGER RX
+		 * level. Along with setting RX FIFO trigger level
+		 * to 1 (as noted below, 16 characters) and TLR[3:0]
+		 * to zero this will result RX FIFO threshold level
+		 * to 1 character, instead of 16 as noted in comment
+		 * below.
+		 */
+
 		/* Set receive FIFO threshold to 16 characters and
 		 * transmit FIFO threshold to 16 spaces
 		 */

@@ -346,6 +346,7 @@ static long vfio_pci_ioctl(void *device_data,
 
 	if (!(hdr.flags & VFIO_IRQ_SET_DATA_NONE)) {
 		size_t size;
+		int max = vfio_pci_get_irq_count(vdev, hdr.index);
 
 		if (hdr.flags & VFIO_IRQ_SET_DATA_BOOL)
 			size = sizeof(uint8_t);

@@ -355,7 +356,7 @@ static long vfio_pci_ioctl(void *device_data,
 			return -EINVAL;
 
 		if (hdr.argsz - minsz < hdr.count * size ||
-		    hdr.count > vfio_pci_get_irq_count(vdev, hdr.index))
+		    hdr.start >= max || hdr.start + hdr.count > max)
 			return -EINVAL;
 
 		data = memdup_user((void __user *)(arg + minsz),

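The reworked check validates the whole user-supplied window [start, start + count) against the device's IRQ count instead of bounding only count, so a large start value can no longer slip past. A minimal sketch of the same validation with made-up values (in the driver, the earlier argsz check also bounds count):

#include <stdio.h>

/* Reject any [start, start + count) window not contained in [0, max). */
static int range_ok(unsigned int start, unsigned int count, unsigned int max)
{
    if (start >= max || start + count > max)
        return 0;
    return 1;
}

int main(void)
{
    unsigned int max = 8;       /* stand-in for vfio_pci_get_irq_count() */

    printf("%d\n", range_ok(0, 8, max));  /* 1: whole window fits   */
    printf("%d\n", range_ok(7, 2, max));  /* 0: runs past the end   */
    printf("%d\n", range_ok(9, 0, max));  /* 0: starts out of range */
    return 0;
}
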
@@ -74,9 +74,8 @@ enum {
 
 struct vhost_scsi {
 	/* Protected by vhost_scsi->dev.mutex */
-	struct tcm_vhost_tpg *vs_tpg[VHOST_SCSI_MAX_TARGET];
+	struct tcm_vhost_tpg **vs_tpg;
 	char vs_vhost_wwpn[TRANSPORT_IQN_LEN];
-	bool vs_endpoint;
 
 	struct vhost_dev dev;
 	struct vhost_virtqueue vqs[VHOST_SCSI_MAX_VQ];

@@ -579,9 +578,27 @@ static void tcm_vhost_submission_work(struct work_struct *work)
 	}
 }
 
+static void vhost_scsi_send_bad_target(struct vhost_scsi *vs,
+	struct vhost_virtqueue *vq, int head, unsigned out)
+{
+	struct virtio_scsi_cmd_resp __user *resp;
+	struct virtio_scsi_cmd_resp rsp;
+	int ret;
+
+	memset(&rsp, 0, sizeof(rsp));
+	rsp.response = VIRTIO_SCSI_S_BAD_TARGET;
+	resp = vq->iov[out].iov_base;
+	ret = __copy_to_user(resp, &rsp, sizeof(rsp));
+	if (!ret)
+		vhost_add_used_and_signal(&vs->dev, vq, head, 0);
+	else
+		pr_err("Faulted on virtio_scsi_cmd_resp\n");
+}
+
 static void vhost_scsi_handle_vq(struct vhost_scsi *vs,
 	struct vhost_virtqueue *vq)
 {
+	struct tcm_vhost_tpg **vs_tpg;
 	struct virtio_scsi_cmd_req v_req;
 	struct tcm_vhost_tpg *tv_tpg;
 	struct tcm_vhost_cmd *tv_cmd;

@@ -590,8 +607,16 @@ static void vhost_scsi_handle_vq(struct vhost_scsi *vs,
 	int head, ret;
 	u8 target;
 
-	/* Must use ioctl VHOST_SCSI_SET_ENDPOINT */
-	if (unlikely(!vs->vs_endpoint))
+	/*
+	 * We can handle the vq only after the endpoint is setup by calling the
+	 * VHOST_SCSI_SET_ENDPOINT ioctl.
+	 *
+	 * TODO: Check that we are running from vhost_worker which acts
+	 * as read-side critical section for vhost kind of RCU.
+	 * See the comments in struct vhost_virtqueue in drivers/vhost/vhost.h
+	 */
+	vs_tpg = rcu_dereference_check(vq->private_data, 1);
+	if (!vs_tpg)
 		return;
 
 	mutex_lock(&vq->mutex);

@@ -661,23 +686,11 @@ static void vhost_scsi_handle_vq(struct vhost_scsi *vs,
 
 		/* Extract the tpgt */
 		target = v_req.lun[1];
-		tv_tpg = vs->vs_tpg[target];
+		tv_tpg = ACCESS_ONCE(vs_tpg[target]);
 
 		/* Target does not exist, fail the request */
 		if (unlikely(!tv_tpg)) {
-			struct virtio_scsi_cmd_resp __user *resp;
-			struct virtio_scsi_cmd_resp rsp;
-
-			memset(&rsp, 0, sizeof(rsp));
-			rsp.response = VIRTIO_SCSI_S_BAD_TARGET;
-			resp = vq->iov[out].iov_base;
-			ret = __copy_to_user(resp, &rsp, sizeof(rsp));
-			if (!ret)
-				vhost_add_used_and_signal(&vs->dev,
-						vq, head, 0);
-			else
-				pr_err("Faulted on virtio_scsi_cmd_resp\n");
-
+			vhost_scsi_send_bad_target(vs, vq, head, out);
 			continue;
 		}
 

@@ -690,22 +703,13 @@ static void vhost_scsi_handle_vq(struct vhost_scsi *vs,
 		if (IS_ERR(tv_cmd)) {
 			vq_err(vq, "vhost_scsi_allocate_cmd failed %ld\n",
 					PTR_ERR(tv_cmd));
-			break;
+			goto err_cmd;
 		}
 		pr_debug("Allocated tv_cmd: %p exp_data_len: %d, data_direction"
 			": %d\n", tv_cmd, exp_data_len, data_direction);
 
 		tv_cmd->tvc_vhost = vs;
 		tv_cmd->tvc_vq = vq;
-
-		if (unlikely(vq->iov[out].iov_len !=
-			     sizeof(struct virtio_scsi_cmd_resp))) {
-			vq_err(vq, "Expecting virtio_scsi_cmd_resp, got %zu"
-				" bytes, out: %d, in: %d\n",
-				vq->iov[out].iov_len, out, in);
-			break;
-		}
-
 		tv_cmd->tvc_resp = vq->iov[out].iov_base;
 
 		/*

@@ -725,7 +729,7 @@ static void vhost_scsi_handle_vq(struct vhost_scsi *vs,
 				" exceeds SCSI_MAX_VARLEN_CDB_SIZE: %d\n",
 				scsi_command_size(tv_cmd->tvc_cdb),
 				TCM_VHOST_MAX_CDB_SIZE);
-			break; /* TODO */
+			goto err_free;
 		}
 		tv_cmd->tvc_lun = ((v_req.lun[2] << 8) | v_req.lun[3]) & 0x3FFF;
 

@@ -738,7 +742,7 @@ static void vhost_scsi_handle_vq(struct vhost_scsi *vs,
 				data_direction == DMA_TO_DEVICE);
 			if (unlikely(ret)) {
 				vq_err(vq, "Failed to map iov to sgl\n");
-				break; /* TODO */
+				goto err_free;
 			}
 		}
 

@@ -759,6 +763,13 @@ static void vhost_scsi_handle_vq(struct vhost_scsi *vs,
 	}
 
 	mutex_unlock(&vq->mutex);
+	return;
+
+err_free:
+	vhost_scsi_free_cmd(tv_cmd);
+err_cmd:
+	vhost_scsi_send_bad_target(vs, vq, head, out);
+	mutex_unlock(&vq->mutex);
 }
 
 static void vhost_scsi_ctl_handle_kick(struct vhost_work *work)

@@ -780,6 +791,20 @@ static void vhost_scsi_handle_kick(struct vhost_work *work)
 	vhost_scsi_handle_vq(vs, vq);
 }
 
+static void vhost_scsi_flush_vq(struct vhost_scsi *vs, int index)
+{
+	vhost_poll_flush(&vs->dev.vqs[index].poll);
+}
+
+static void vhost_scsi_flush(struct vhost_scsi *vs)
+{
+	int i;
+
+	for (i = 0; i < VHOST_SCSI_MAX_VQ; i++)
+		vhost_scsi_flush_vq(vs, i);
+	vhost_work_flush(&vs->dev, &vs->vs_completion_work);
+}
+
 /*
  * Called from vhost_scsi_ioctl() context to walk the list of available
  * tcm_vhost_tpg with an active struct tcm_vhost_nexus

@@ -790,8 +815,10 @@ static int vhost_scsi_set_endpoint(
 {
 	struct tcm_vhost_tport *tv_tport;
 	struct tcm_vhost_tpg *tv_tpg;
+	struct tcm_vhost_tpg **vs_tpg;
+	struct vhost_virtqueue *vq;
+	int index, ret, i, len;
 	bool match = false;
-	int index, ret;
 
 	mutex_lock(&vs->dev.mutex);
 	/* Verify that ring has been setup correctly. */

@@ -803,6 +830,15 @@ static int vhost_scsi_set_endpoint(
 		}
 	}
 
+	len = sizeof(vs_tpg[0]) * VHOST_SCSI_MAX_TARGET;
+	vs_tpg = kzalloc(len, GFP_KERNEL);
+	if (!vs_tpg) {
+		mutex_unlock(&vs->dev.mutex);
+		return -ENOMEM;
+	}
+	if (vs->vs_tpg)
+		memcpy(vs_tpg, vs->vs_tpg, len);
+
 	mutex_lock(&tcm_vhost_mutex);
 	list_for_each_entry(tv_tpg, &tcm_vhost_list, tv_tpg_list) {
 		mutex_lock(&tv_tpg->tv_tpg_mutex);

@@ -817,14 +853,15 @@ static int vhost_scsi_set_endpoint(
 		tv_tport = tv_tpg->tport;
 
 		if (!strcmp(tv_tport->tport_name, t->vhost_wwpn)) {
-			if (vs->vs_tpg[tv_tpg->tport_tpgt]) {
+			if (vs->vs_tpg && vs->vs_tpg[tv_tpg->tport_tpgt]) {
 				mutex_unlock(&tv_tpg->tv_tpg_mutex);
 				mutex_unlock(&tcm_vhost_mutex);
 				mutex_unlock(&vs->dev.mutex);
+				kfree(vs_tpg);
 				return -EEXIST;
 			}
 			tv_tpg->tv_tpg_vhost_count++;
-			vs->vs_tpg[tv_tpg->tport_tpgt] = tv_tpg;
+			vs_tpg[tv_tpg->tport_tpgt] = tv_tpg;
 			smp_mb__after_atomic_inc();
 			match = true;
 		}

@@ -835,12 +872,27 @@ static int vhost_scsi_set_endpoint(
 	if (match) {
 		memcpy(vs->vs_vhost_wwpn, t->vhost_wwpn,
 		       sizeof(vs->vs_vhost_wwpn));
-		vs->vs_endpoint = true;
+		for (i = 0; i < VHOST_SCSI_MAX_VQ; i++) {
+			vq = &vs->vqs[i];
+			/* Flushing the vhost_work acts as synchronize_rcu */
+			mutex_lock(&vq->mutex);
+			rcu_assign_pointer(vq->private_data, vs_tpg);
+			vhost_init_used(vq);
+			mutex_unlock(&vq->mutex);
+		}
 		ret = 0;
 	} else {
 		ret = -EEXIST;
 	}
 
+	/*
+	 * Act as synchronize_rcu to make sure access to
+	 * old vs->vs_tpg is finished.
+	 */
+	vhost_scsi_flush(vs);
+	kfree(vs->vs_tpg);
+	vs->vs_tpg = vs_tpg;
+
 	mutex_unlock(&vs->dev.mutex);
 	return ret;
 }

@@ -851,6 +903,8 @@ static int vhost_scsi_clear_endpoint(
 {
 	struct tcm_vhost_tport *tv_tport;
 	struct tcm_vhost_tpg *tv_tpg;
+	struct vhost_virtqueue *vq;
+	bool match = false;
 	int index, ret, i;
 	u8 target;
 

@@ -862,9 +916,14 @@ static int vhost_scsi_clear_endpoint(
 			goto err_dev;
 		}
 	}
+
+	if (!vs->vs_tpg) {
+		mutex_unlock(&vs->dev.mutex);
+		return 0;
+	}
 	for (i = 0; i < VHOST_SCSI_MAX_TARGET; i++) {
 		target = i;
 
 		tv_tpg = vs->vs_tpg[target];
 		if (!tv_tpg)
 			continue;

@@ -886,10 +945,27 @@ static int vhost_scsi_clear_endpoint(
 		}
 		tv_tpg->tv_tpg_vhost_count--;
 		vs->vs_tpg[target] = NULL;
-		vs->vs_endpoint = false;
+		match = true;
 		mutex_unlock(&tv_tpg->tv_tpg_mutex);
 	}
+	if (match) {
+		for (i = 0; i < VHOST_SCSI_MAX_VQ; i++) {
+			vq = &vs->vqs[i];
+			/* Flushing the vhost_work acts as synchronize_rcu */
+			mutex_lock(&vq->mutex);
+			rcu_assign_pointer(vq->private_data, NULL);
+			mutex_unlock(&vq->mutex);
+		}
+	}
+	/*
+	 * Act as synchronize_rcu to make sure access to
+	 * old vs->vs_tpg is finished.
+	 */
+	vhost_scsi_flush(vs);
+	kfree(vs->vs_tpg);
+	vs->vs_tpg = NULL;
 	mutex_unlock(&vs->dev.mutex);
 
 	return 0;
 
 err_tpg:

@@ -899,6 +975,24 @@ static int vhost_scsi_clear_endpoint(
 	return ret;
 }
 
+static int vhost_scsi_set_features(struct vhost_scsi *vs, u64 features)
+{
+	if (features & ~VHOST_SCSI_FEATURES)
+		return -EOPNOTSUPP;
+
+	mutex_lock(&vs->dev.mutex);
+	if ((features & (1 << VHOST_F_LOG_ALL)) &&
+	    !vhost_log_access_ok(&vs->dev)) {
+		mutex_unlock(&vs->dev.mutex);
+		return -EFAULT;
+	}
+	vs->dev.acked_features = features;
+	smp_wmb();
+	vhost_scsi_flush(vs);
+	mutex_unlock(&vs->dev.mutex);
+	return 0;
+}
+
 static int vhost_scsi_open(struct inode *inode, struct file *f)
 {
 	struct vhost_scsi *s;

@@ -939,38 +1033,6 @@ static int vhost_scsi_release(struct inode *inode, struct file *f)
 	return 0;
 }
 
-static void vhost_scsi_flush_vq(struct vhost_scsi *vs, int index)
-{
-	vhost_poll_flush(&vs->dev.vqs[index].poll);
-}
-
-static void vhost_scsi_flush(struct vhost_scsi *vs)
-{
-	int i;
-
-	for (i = 0; i < VHOST_SCSI_MAX_VQ; i++)
-		vhost_scsi_flush_vq(vs, i);
-	vhost_work_flush(&vs->dev, &vs->vs_completion_work);
-}
-
-static int vhost_scsi_set_features(struct vhost_scsi *vs, u64 features)
-{
-	if (features & ~VHOST_SCSI_FEATURES)
-		return -EOPNOTSUPP;
-
-	mutex_lock(&vs->dev.mutex);
-	if ((features & (1 << VHOST_F_LOG_ALL)) &&
-	    !vhost_log_access_ok(&vs->dev)) {
-		mutex_unlock(&vs->dev.mutex);
-		return -EFAULT;
-	}
-	vs->dev.acked_features = features;
-	smp_wmb();
-	vhost_scsi_flush(vs);
-	mutex_unlock(&vs->dev.mutex);
-	return 0;
-}
-
 static long vhost_scsi_ioctl(struct file *f, unsigned int ioctl,
 			unsigned long arg)
 {

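The endpoint rework above follows a publish-then-reclaim shape: build a private copy of the target table, publish it through rcu_assign_pointer() while the I/O path reads it with rcu_dereference_check()/ACCESS_ONCE(), then use vhost_scsi_flush() in place of synchronize_rcu() before freeing the old table. A single-threaded userspace sketch of that lifecycle, substituting C11 release/acquire atomics for the kernel's RCU primitives (all names here are illustrative):

#include <stdatomic.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

#define MAX_TARGET 4

/* Stand-in for the published vs_tpg table. */
static _Atomic(int **) published = NULL;

/* Reader side: one acquire load per request, like ACCESS_ONCE(vs_tpg[t]). */
static void handle_request(int target)
{
    int **tpg = atomic_load_explicit(&published, memory_order_acquire);

    if (!tpg || !tpg[target]) {
        printf("target %d: bad target\n", target);
        return;
    }
    printf("target %d: ok\n", target);
}

/* Writer side: copy, update, publish, drain readers, then free the old table. */
static void set_endpoint(int target, int *tpg)
{
    int **old = atomic_load_explicit(&published, memory_order_relaxed);
    int **new = calloc(MAX_TARGET, sizeof(*new));

    if (old)
        memcpy(new, old, MAX_TARGET * sizeof(*new));
    new[target] = tpg;
    atomic_store_explicit(&published, new, memory_order_release);
    /* The driver flushes the vhost work queue at this point, which acts as
     * synchronize_rcu(): no reader can still be using the old table. */
    free(old);
}

int main(void)
{
    int tpg0 = 42;

    handle_request(0);          /* bad target: nothing published yet */
    set_endpoint(0, &tpg0);
    handle_request(0);          /* ok */
    handle_request(1);          /* bad target */
    free(atomic_load_explicit(&published, memory_order_relaxed));
    return 0;
}
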
@@ -117,7 +117,7 @@ config ARM_SP805_WATCHDOG
 
 config AT91RM9200_WATCHDOG
 	tristate "AT91RM9200 watchdog"
-	depends on ARCH_AT91
+	depends on ARCH_AT91RM9200
 	help
 	  Watchdog timer embedded into AT91RM9200 chips. This will reboot your
 	  system when the timeout is reached.

Some files were not shown because too many files have changed in this diff.