Merge git://git.kernel.org/pub/scm/linux/kernel/git/davem/net-next

This merges commit 3f509c6 ("netfilter: nf_nat_sip: fix incorrect handling
of EBUSY for RTCP expectation") into Patrick McHardy's IPv6 NAT changes.
Pablo Neira Ayuso 2012-09-03 15:28:30 +02:00
commit ace1fe1231
368 changed files with 6766 additions and 3350 deletions


@ -0,0 +1,75 @@
Properties for an MDIO bus multiplexer controlled by a memory-mapped device
This is a special case of an MDIO bus multiplexer. A memory-mapped device,
like an FPGA, is used to control which child bus is connected. The mdio-mux
node must be a child of the memory-mapped device. The driver currently only
supports devices with eight-bit registers.
Required properties in addition to the generic multiplexer properties:
- compatible : string, must contain "mdio-mux-mmioreg"
- reg : integer, contains the offset of the register that controls the bus
multiplexer. The size field in the 'reg' property is the size of the
register, and must therefore be 1.
- mux-mask : integer, contains an eight-bit mask that specifies which
bits in the register control the actual bus multiplexer. The
'reg' property of each child mdio-mux node must be constrained by
this mask.
Example:
The FPGA node defines a memory-mapped FPGA with a register space of 0x30 bytes.
For the "EMI2" MDIO bus, register 9 (BRDCFG1) controls the mux on that bus.
A bitmask of 0x6 means that bits 1 and 2 (bit 0 is the LSB) are the bits on
BRDCFG1 that control the actual mux; a sketch of the register update this
implies follows the example.
/* The FPGA node */
fpga: board-control@3,0 {
#address-cells = <1>;
#size-cells = <1>;
compatible = "fsl,p5020ds-fpga", "fsl,fpga-ngpixis";
reg = <3 0 0x30>;
ranges = <0 3 0 0x30>;
mdio-mux-emi2 {
compatible = "mdio-mux-mmioreg", "mdio-mux";
mdio-parent-bus = <&xmdio0>;
#address-cells = <1>;
#size-cells = <0>;
reg = <9 1>; // BRDCFG1
mux-mask = <0x6>; // EMI2
emi2_slot1: mdio@0 { // Slot 1 XAUI (FM2)
reg = <0>;
#address-cells = <1>;
#size-cells = <0>;
phy_xgmii_slot1: ethernet-phy@0 {
compatible = "ethernet-phy-ieee802.3-c45";
reg = <4>;
};
};
emi2_slot2: mdio@2 { // Slot 2 XAUI (FM1)
reg = <2>;
#address-cells = <1>;
#size-cells = <0>;
phy_xgmii_slot2: ethernet-phy@4 {
compatible = "ethernet-phy-ieee802.3-c45";
reg = <0>;
};
};
};
};
/* The parent MDIO bus. */
xmdio0: mdio@f1000 {
#address-cells = <1>;
#size-cells = <0>;
compatible = "fsl,fman-xmdio";
reg = <0xf1000 0x1000>;
interrupts = <100 1 0 0>;
};
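For illustration, the bus switch this binding implies is a masked
read-modify-write of the control register: only the bits covered by
mux-mask change, and everything else in the register is preserved. A
minimal standalone C sketch of that arithmetic (names and values are
hypothetical; the in-kernel driver performs the same update through
readb()/writeb() on the ioremap'd register):

#include <stdint.h>
#include <stdio.h>

/* Hypothetical sketch: compute the new control-register value that
 * selects a child MDIO bus, touching only the bits in mux-mask. */
static uint8_t mmioreg_select_child(uint8_t regval, uint8_t mux_mask,
				    uint8_t child_reg)
{
	return (regval & ~mux_mask) | (child_reg & mux_mask);
}

int main(void)
{
	/* With mux-mask = 0x6, switching to the child with reg = <2>
	 * sets bit 1 of BRDCFG1; bit 0 and bits 3-7 are preserved. */
	uint8_t brdcfg1 = 0xa9;	/* arbitrary current register contents */

	printf("0x%02x -> 0x%02x\n", brdcfg1,
	       mmioreg_select_child(brdcfg1, 0x6, 0x2));
	return 0;
}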


@ -75,9 +75,10 @@ folder:
There is a special folder for debugging information:
# ls /sys/kernel/debug/batman_adv/bat0/
# bla_claim_table log socket transtable_local
# gateways originators transtable_global vis_data
# ls /sys/kernel/debug/batman_adv/bat0/
# bla_backbone_table log transtable_global
# bla_claim_table originators transtable_local
# gateways socket vis_data
Some of the files contain all sorts of status information regarding
the mesh network. For example, you can view the table of


@ -439,7 +439,9 @@ tcp_stdurg - BOOLEAN
tcp_synack_retries - INTEGER
Number of times SYNACKs for a passive TCP connection attempt will
be retransmitted. Should not be higher than 255. Default value
is 5, which corresponds to ~180 seconds.
is 5, which corresponds to 31 seconds till the last retransmission
with the current initial RTO of 1 second (1 + 2 + 4 + 8 + 16 = 31).
With this, the final timeout for a passive TCP connection will happen
after 63 seconds.
tcp_syncookies - BOOLEAN
Only valid when the kernel was compiled with CONFIG_SYNCOOKIES
@ -465,20 +467,37 @@ tcp_syncookies - BOOLEAN
tcp_fastopen - INTEGER
Enable TCP Fast Open feature (draft-ietf-tcpm-fastopen) to send data
in the opening SYN packet. To use this feature, the client application
must not use connect(). Instead, it should use sendmsg() or sendto()
with the MSG_FASTOPEN flag, which performs a TCP handshake automatically.
must use sendmsg() or sendto() with the MSG_FASTOPEN flag rather than
connect() to perform a TCP handshake automatically.
The values (bitmap) are:
1: Enables sending data in the opening SYN on the client
5: Enables sending data in the opening SYN on the client regardless
of cookie availability.
The values (bitmap) are:
1: Enables sending data in the opening SYN on the client.
2: Enables TCP Fast Open on the server side, i.e., allowing data in
a SYN packet to be accepted and passed to the application before the
3-way handshake finishes.
4: Send data in the opening SYN regardless of cookie availability and
without a cookie option.
0x100: Accept SYN data w/o validating the cookie.
0x200: Accept data-in-SYN w/o any cookie option present.
0x400/0x800: Enable Fast Open on all listeners regardless of the
TCP_FASTOPEN socket option. The two different flags designate two
different ways of setting max_qlen without the TCP_FASTOPEN socket
option.
Default: 0
Note that the client & server side Fast Open flags (1 and 2
respectively) must also be enabled before the rest of the flags can
take effect.
See include/net/tcp.h and the code for more details.
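For illustration, a minimal Fast Open client sketch — the server
address, port, and payload are placeholders, and the fallback
MSG_FASTOPEN definition assumes headers older than the flag itself:

#include <stdio.h>
#include <sys/socket.h>
#include <netinet/in.h>
#include <arpa/inet.h>
#include <unistd.h>

#ifndef MSG_FASTOPEN
#define MSG_FASTOPEN 0x20000000	/* value from linux/socket.h */
#endif

int main(void)
{
	struct sockaddr_in addr = { .sin_family = AF_INET,
				    .sin_port = htons(8080) };
	const char req[] = "hello";
	int fd = socket(AF_INET, SOCK_STREAM, 0);

	if (fd < 0) {
		perror("socket");
		return 1;
	}
	inet_pton(AF_INET, "192.0.2.1", &addr.sin_addr); /* placeholder */

	/* No connect(): sendto() with MSG_FASTOPEN starts the handshake
	 * and, once a Fast Open cookie is cached, carries the payload in
	 * the SYN itself. */
	if (sendto(fd, req, sizeof(req) - 1, MSG_FASTOPEN,
		   (struct sockaddr *)&addr, sizeof(addr)) < 0) {
		perror("sendto");
		close(fd);
		return 1;
	}
	close(fd);
	return 0;
}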
tcp_syn_retries - INTEGER
Number of times initial SYNs for an active TCP connection attempt
will be retransmitted. Should not be higher than 255. Default value
is 5, which corresponds to ~180 seconds.
is 6, which corresponds to 63 seconds till the last retransmission
with the current initial RTO of 1 second (1 + 2 + 4 + 8 + 16 + 32 = 63).
With this, the final timeout for an active TCP connection attempt will
happen after 127 seconds.
tcp_timestamps - BOOLEAN
Enable timestamps as defined in RFC1323.


@ -173,7 +173,6 @@ Where:
For the MDIO bus we have:
struct stmmac_mdio_bus_data {
int bus_id;
int (*phy_reset)(void *priv);
unsigned int phy_mask;
int *irqs;
@ -181,7 +180,6 @@ For MDIO bus The we have:
};
Where:
o bus_id: bus identifier;
o phy_reset: hook to reset the phy device attached to the bus.
o phy_mask: phy mask passed when registering the MDIO bus within the driver.
o irqs: list of IRQs, one per PHY.
@ -230,9 +228,6 @@ there are two MAC cores: one MAC is for MDIO Bus/PHY emulation
with fixed_link support.
static struct stmmac_mdio_bus_data stmmac1_mdio_bus = {
.bus_id = 1,
|
|-> phy device on the bus_id 1
.phy_reset = phy_reset;
|
|-> function to provide the phy_reset on this board
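With the hook gone, a board file fills in the remaining fields only; a
minimal sketch (board name and values are hypothetical):

#include <linux/stmmac.h>

/* Hypothetical board setup after the phy_reset removal. */
static struct stmmac_mdio_bus_data board1_mdio_bus_data = {
	.bus_id   = 1,		/* bus identifier */
	.phy_mask = 0,		/* probe all PHY addresses */
	.irqs     = NULL,	/* no per-PHY IRQ list: use polling */
};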


@ -1,7 +1,7 @@
VERSION = 3
PATCHLEVEL = 6
SUBLEVEL = 0
EXTRAVERSION = -rc2
EXTRAVERSION = -rc3
NAME = Saber-toothed Squirrel
# *DOCUMENTATION*


@ -120,7 +120,6 @@ obj-$(CONFIG_VHOST_NET) += vhost/
obj-$(CONFIG_VLYNQ) += vlynq/
obj-$(CONFIG_STAGING) += staging/
obj-y += platform/
obj-y += ieee802154/
#common clk code
obj-y += clk/


@ -387,6 +387,7 @@ acpi_get_table_with_size(char *signature,
return (AE_NOT_FOUND);
}
ACPI_EXPORT_SYMBOL(acpi_get_table_with_size)
acpi_status
acpi_get_table(char *signature,


@ -48,12 +48,12 @@ config BCMA_DRIVER_MIPS
config BCMA_SFLASH
bool
depends on BCMA_DRIVER_MIPS && BROKEN
depends on BCMA_DRIVER_MIPS
default y
config BCMA_NFLASH
bool
depends on BCMA_DRIVER_MIPS && BROKEN
depends on BCMA_DRIVER_MIPS
default y
config BCMA_DRIVER_GMAC_CMN


@ -54,6 +54,7 @@ u32 bcma_pmu_get_clockcpu(struct bcma_drv_cc *cc);
#ifdef CONFIG_BCMA_SFLASH
/* driver_chipcommon_sflash.c */
int bcma_sflash_init(struct bcma_drv_cc *cc);
extern struct platform_device bcma_sflash_dev;
#else
static inline int bcma_sflash_init(struct bcma_drv_cc *cc)
{
@ -65,6 +66,7 @@ static inline int bcma_sflash_init(struct bcma_drv_cc *cc)
#ifdef CONFIG_BCMA_NFLASH
/* driver_chipcommon_nflash.c */
int bcma_nflash_init(struct bcma_drv_cc *cc);
extern struct platform_device bcma_nflash_dev;
#else
static inline int bcma_nflash_init(struct bcma_drv_cc *cc)
{


@ -5,15 +5,37 @@
* Licensed under the GNU/GPL. See COPYING for details.
*/
#include <linux/platform_device.h>
#include <linux/bcma/bcma.h>
#include <linux/bcma/bcma_driver_chipcommon.h>
#include <linux/delay.h>
#include "bcma_private.h"
struct platform_device bcma_nflash_dev = {
.name = "bcma_nflash",
.num_resources = 0,
};
/* Initialize NAND flash access */
int bcma_nflash_init(struct bcma_drv_cc *cc)
{
bcma_err(cc->core->bus, "NAND flash support is broken\n");
struct bcma_bus *bus = cc->core->bus;
if (bus->chipinfo.id != BCMA_CHIP_ID_BCM4706 &&
cc->core->id.rev != 0x38) {
bcma_err(bus, "NAND flash on unsupported board!\n");
return -ENOTSUPP;
}
if (!(cc->capabilities & BCMA_CC_CAP_NFLASH)) {
bcma_err(bus, "NAND flash not present according to ChipCommon\n");
return -ENODEV;
}
cc->nflash.present = true;
/* Prepare platform device, but don't register it yet. It's too early,
* malloc (required by device_private_init) is not available yet. */
bcma_nflash_dev.dev.platform_data = &cc->nflash;
return 0;
}


@ -5,15 +5,132 @@
* Licensed under the GNU/GPL. See COPYING for details.
*/
#include <linux/platform_device.h>
#include <linux/bcma/bcma.h>
#include <linux/bcma/bcma_driver_chipcommon.h>
#include <linux/delay.h>
#include "bcma_private.h"
static struct resource bcma_sflash_resource = {
.name = "bcma_sflash",
.start = BCMA_SFLASH,
.end = 0,
.flags = IORESOURCE_MEM | IORESOURCE_READONLY,
};
struct platform_device bcma_sflash_dev = {
.name = "bcma_sflash",
.resource = &bcma_sflash_resource,
.num_resources = 1,
};
struct bcma_sflash_tbl_e {
char *name;
u32 id;
u32 blocksize;
u16 numblocks;
};
static struct bcma_sflash_tbl_e bcma_sflash_st_tbl[] = {
{ "", 0x14, 0x10000, 32, },
{ 0 },
};
static struct bcma_sflash_tbl_e bcma_sflash_sst_tbl[] = {
{ 0 },
};
static struct bcma_sflash_tbl_e bcma_sflash_at_tbl[] = {
{ 0 },
};
static void bcma_sflash_cmd(struct bcma_drv_cc *cc, u32 opcode)
{
int i;
bcma_cc_write32(cc, BCMA_CC_FLASHCTL,
BCMA_CC_FLASHCTL_START | opcode);
for (i = 0; i < 1000; i++) {
if (!(bcma_cc_read32(cc, BCMA_CC_FLASHCTL) &
BCMA_CC_FLASHCTL_BUSY))
return;
cpu_relax();
}
bcma_err(cc->core->bus, "SFLASH control command failed (timeout)!\n");
}
/* Initialize serial flash access */
int bcma_sflash_init(struct bcma_drv_cc *cc)
{
bcma_err(cc->core->bus, "Serial flash support is broken\n");
struct bcma_bus *bus = cc->core->bus;
struct bcma_sflash *sflash = &cc->sflash;
struct bcma_sflash_tbl_e *e;
u32 id, id2;
switch (cc->capabilities & BCMA_CC_CAP_FLASHT) {
case BCMA_CC_FLASHT_STSER:
bcma_sflash_cmd(cc, BCMA_CC_FLASHCTL_ST_DP);
bcma_cc_write32(cc, BCMA_CC_FLASHADDR, 0);
bcma_sflash_cmd(cc, BCMA_CC_FLASHCTL_ST_RES);
id = bcma_cc_read32(cc, BCMA_CC_FLASHDATA);
bcma_cc_write32(cc, BCMA_CC_FLASHADDR, 1);
bcma_sflash_cmd(cc, BCMA_CC_FLASHCTL_ST_RES);
id2 = bcma_cc_read32(cc, BCMA_CC_FLASHDATA);
switch (id) {
case 0xbf:
for (e = bcma_sflash_sst_tbl; e->name; e++) {
if (e->id == id2)
break;
}
break;
default:
for (e = bcma_sflash_st_tbl; e->name; e++) {
if (e->id == id)
break;
}
break;
}
if (!e->name) {
bcma_err(bus, "Unsupported ST serial flash (id: 0x%X, id2: 0x%X)\n", id, id2);
return -ENOTSUPP;
}
break;
case BCMA_CC_FLASHT_ATSER:
bcma_sflash_cmd(cc, BCMA_CC_FLASHCTL_AT_STATUS);
id = bcma_cc_read32(cc, BCMA_CC_FLASHDATA) & 0x3c;
for (e = bcma_sflash_at_tbl; e->name; e++) {
if (e->id == id)
break;
}
if (!e->name) {
bcma_err(bus, "Unsupported Atmel serial flash (id: 0x%X)\n", id);
return -ENOTSUPP;
}
break;
default:
bcma_err(bus, "Unsupported flash type\n");
return -ENOTSUPP;
}
sflash->window = BCMA_SFLASH;
sflash->blocksize = e->blocksize;
sflash->numblocks = e->numblocks;
sflash->size = sflash->blocksize * sflash->numblocks;
sflash->present = true;
bcma_info(bus, "Found %s serial flash (size: %dKiB, blocksize: 0x%X, blocks: %d)\n",
e->name, sflash->size / 1024, sflash->blocksize,
sflash->numblocks);
/* Prepare platform device, but don't register it yet. It's too early,
* malloc (required by device_private_init) is not available yet. */
bcma_sflash_dev.resource[0].end = bcma_sflash_dev.resource[0].start +
sflash->size;
bcma_sflash_dev.dev.platform_data = sflash;
return 0;
}


@ -7,6 +7,7 @@
#include "bcma_private.h"
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/bcma/bcma.h>
#include <linux/slab.h>
@ -136,6 +137,22 @@ static int bcma_register_cores(struct bcma_bus *bus)
dev_id++;
}
#ifdef CONFIG_BCMA_SFLASH
if (bus->drv_cc.sflash.present) {
err = platform_device_register(&bcma_sflash_dev);
if (err)
bcma_err(bus, "Error registering serial flash\n");
}
#endif
#ifdef CONFIG_BCMA_NFLASH
if (bus->drv_cc.nflash.present) {
err = platform_device_register(&bcma_nflash_dev);
if (err)
bcma_err(bus, "Error registering NAND flash\n");
}
#endif
return 0;
}


@ -64,6 +64,7 @@
#define I830_PTE_SYSTEM_CACHED 0x00000006
/* GT PTE cache control fields */
#define GEN6_PTE_UNCACHED 0x00000002
#define HSW_PTE_UNCACHED 0x00000000
#define GEN6_PTE_LLC 0x00000004
#define GEN6_PTE_LLC_MLC 0x00000006
#define GEN6_PTE_GFDT 0x00000008


@ -1156,6 +1156,30 @@ static bool gen6_check_flags(unsigned int flags)
return true;
}
static void haswell_write_entry(dma_addr_t addr, unsigned int entry,
unsigned int flags)
{
unsigned int type_mask = flags & ~AGP_USER_CACHED_MEMORY_GFDT;
unsigned int gfdt = flags & AGP_USER_CACHED_MEMORY_GFDT;
u32 pte_flags;
if (type_mask == AGP_USER_MEMORY)
pte_flags = HSW_PTE_UNCACHED | I810_PTE_VALID;
else if (type_mask == AGP_USER_CACHED_MEMORY_LLC_MLC) {
pte_flags = GEN6_PTE_LLC_MLC | I810_PTE_VALID;
if (gfdt)
pte_flags |= GEN6_PTE_GFDT;
} else { /* set 'normal'/'cached' to LLC by default */
pte_flags = GEN6_PTE_LLC | I810_PTE_VALID;
if (gfdt)
pte_flags |= GEN6_PTE_GFDT;
}
/* gen6 has bit11-4 for physical addr bit39-32 */
addr |= (addr >> 28) & 0xff0;
writel(addr | pte_flags, intel_private.gtt + entry);
}
static void gen6_write_entry(dma_addr_t addr, unsigned int entry,
unsigned int flags)
{
@ -1382,6 +1406,15 @@ static const struct intel_gtt_driver sandybridge_gtt_driver = {
.check_flags = gen6_check_flags,
.chipset_flush = i9xx_chipset_flush,
};
static const struct intel_gtt_driver haswell_gtt_driver = {
.gen = 6,
.setup = i9xx_setup,
.cleanup = gen6_cleanup,
.write_entry = haswell_write_entry,
.dma_mask_size = 40,
.check_flags = gen6_check_flags,
.chipset_flush = i9xx_chipset_flush,
};
static const struct intel_gtt_driver valleyview_gtt_driver = {
.gen = 7,
.setup = i9xx_setup,
@ -1499,77 +1532,77 @@ static const struct intel_gtt_driver_description {
{ PCI_DEVICE_ID_INTEL_VALLEYVIEW_IG,
"ValleyView", &valleyview_gtt_driver },
{ PCI_DEVICE_ID_INTEL_HASWELL_D_GT1_IG,
"Haswell", &sandybridge_gtt_driver },
"Haswell", &haswell_gtt_driver },
{ PCI_DEVICE_ID_INTEL_HASWELL_D_GT2_IG,
"Haswell", &sandybridge_gtt_driver },
"Haswell", &haswell_gtt_driver },
{ PCI_DEVICE_ID_INTEL_HASWELL_D_GT2_PLUS_IG,
"Haswell", &sandybridge_gtt_driver },
"Haswell", &haswell_gtt_driver },
{ PCI_DEVICE_ID_INTEL_HASWELL_M_GT1_IG,
"Haswell", &sandybridge_gtt_driver },
"Haswell", &haswell_gtt_driver },
{ PCI_DEVICE_ID_INTEL_HASWELL_M_GT2_IG,
"Haswell", &sandybridge_gtt_driver },
"Haswell", &haswell_gtt_driver },
{ PCI_DEVICE_ID_INTEL_HASWELL_M_GT2_PLUS_IG,
"Haswell", &sandybridge_gtt_driver },
"Haswell", &haswell_gtt_driver },
{ PCI_DEVICE_ID_INTEL_HASWELL_S_GT1_IG,
"Haswell", &sandybridge_gtt_driver },
"Haswell", &haswell_gtt_driver },
{ PCI_DEVICE_ID_INTEL_HASWELL_S_GT2_IG,
"Haswell", &sandybridge_gtt_driver },
"Haswell", &haswell_gtt_driver },
{ PCI_DEVICE_ID_INTEL_HASWELL_S_GT2_PLUS_IG,
"Haswell", &sandybridge_gtt_driver },
"Haswell", &haswell_gtt_driver },
{ PCI_DEVICE_ID_INTEL_HASWELL_SDV_D_GT1_IG,
"Haswell", &sandybridge_gtt_driver },
"Haswell", &haswell_gtt_driver },
{ PCI_DEVICE_ID_INTEL_HASWELL_SDV_D_GT2_IG,
"Haswell", &sandybridge_gtt_driver },
"Haswell", &haswell_gtt_driver },
{ PCI_DEVICE_ID_INTEL_HASWELL_SDV_D_GT2_PLUS_IG,
"Haswell", &sandybridge_gtt_driver },
"Haswell", &haswell_gtt_driver },
{ PCI_DEVICE_ID_INTEL_HASWELL_SDV_M_GT1_IG,
"Haswell", &sandybridge_gtt_driver },
"Haswell", &haswell_gtt_driver },
{ PCI_DEVICE_ID_INTEL_HASWELL_SDV_M_GT2_IG,
"Haswell", &sandybridge_gtt_driver },
"Haswell", &haswell_gtt_driver },
{ PCI_DEVICE_ID_INTEL_HASWELL_SDV_M_GT2_PLUS_IG,
"Haswell", &sandybridge_gtt_driver },
"Haswell", &haswell_gtt_driver },
{ PCI_DEVICE_ID_INTEL_HASWELL_SDV_S_GT1_IG,
"Haswell", &sandybridge_gtt_driver },
"Haswell", &haswell_gtt_driver },
{ PCI_DEVICE_ID_INTEL_HASWELL_SDV_S_GT2_IG,
"Haswell", &sandybridge_gtt_driver },
"Haswell", &haswell_gtt_driver },
{ PCI_DEVICE_ID_INTEL_HASWELL_SDV_S_GT2_PLUS_IG,
"Haswell", &sandybridge_gtt_driver },
"Haswell", &haswell_gtt_driver },
{ PCI_DEVICE_ID_INTEL_HASWELL_ULT_D_GT1_IG,
"Haswell", &sandybridge_gtt_driver },
"Haswell", &haswell_gtt_driver },
{ PCI_DEVICE_ID_INTEL_HASWELL_ULT_D_GT2_IG,
"Haswell", &sandybridge_gtt_driver },
"Haswell", &haswell_gtt_driver },
{ PCI_DEVICE_ID_INTEL_HASWELL_ULT_D_GT2_PLUS_IG,
"Haswell", &sandybridge_gtt_driver },
"Haswell", &haswell_gtt_driver },
{ PCI_DEVICE_ID_INTEL_HASWELL_ULT_M_GT1_IG,
"Haswell", &sandybridge_gtt_driver },
"Haswell", &haswell_gtt_driver },
{ PCI_DEVICE_ID_INTEL_HASWELL_ULT_M_GT2_IG,
"Haswell", &sandybridge_gtt_driver },
"Haswell", &haswell_gtt_driver },
{ PCI_DEVICE_ID_INTEL_HASWELL_ULT_M_GT2_PLUS_IG,
"Haswell", &sandybridge_gtt_driver },
"Haswell", &haswell_gtt_driver },
{ PCI_DEVICE_ID_INTEL_HASWELL_ULT_S_GT1_IG,
"Haswell", &sandybridge_gtt_driver },
"Haswell", &haswell_gtt_driver },
{ PCI_DEVICE_ID_INTEL_HASWELL_ULT_S_GT2_IG,
"Haswell", &sandybridge_gtt_driver },
"Haswell", &haswell_gtt_driver },
{ PCI_DEVICE_ID_INTEL_HASWELL_ULT_S_GT2_PLUS_IG,
"Haswell", &sandybridge_gtt_driver },
"Haswell", &haswell_gtt_driver },
{ PCI_DEVICE_ID_INTEL_HASWELL_CRW_D_GT1_IG,
"Haswell", &sandybridge_gtt_driver },
"Haswell", &haswell_gtt_driver },
{ PCI_DEVICE_ID_INTEL_HASWELL_CRW_D_GT2_IG,
"Haswell", &sandybridge_gtt_driver },
"Haswell", &haswell_gtt_driver },
{ PCI_DEVICE_ID_INTEL_HASWELL_CRW_D_GT2_PLUS_IG,
"Haswell", &sandybridge_gtt_driver },
"Haswell", &haswell_gtt_driver },
{ PCI_DEVICE_ID_INTEL_HASWELL_CRW_M_GT1_IG,
"Haswell", &sandybridge_gtt_driver },
"Haswell", &haswell_gtt_driver },
{ PCI_DEVICE_ID_INTEL_HASWELL_CRW_M_GT2_IG,
"Haswell", &sandybridge_gtt_driver },
"Haswell", &haswell_gtt_driver },
{ PCI_DEVICE_ID_INTEL_HASWELL_CRW_M_GT2_PLUS_IG,
"Haswell", &sandybridge_gtt_driver },
"Haswell", &haswell_gtt_driver },
{ PCI_DEVICE_ID_INTEL_HASWELL_CRW_S_GT1_IG,
"Haswell", &sandybridge_gtt_driver },
"Haswell", &haswell_gtt_driver },
{ PCI_DEVICE_ID_INTEL_HASWELL_CRW_S_GT2_IG,
"Haswell", &sandybridge_gtt_driver },
"Haswell", &haswell_gtt_driver },
{ PCI_DEVICE_ID_INTEL_HASWELL_CRW_S_GT2_PLUS_IG,
"Haswell", &sandybridge_gtt_driver },
"Haswell", &haswell_gtt_driver },
{ 0, NULL, NULL }
};


@ -706,9 +706,6 @@ void drm_mode_set_crtcinfo(struct drm_display_mode *p, int adjust_flags)
p->crtc_vblank_end = max(p->crtc_vsync_end, p->crtc_vtotal);
p->crtc_hblank_start = min(p->crtc_hsync_start, p->crtc_hdisplay);
p->crtc_hblank_end = max(p->crtc_hsync_end, p->crtc_htotal);
p->crtc_hadjusted = false;
p->crtc_vadjusted = false;
}
EXPORT_SYMBOL(drm_mode_set_crtcinfo);


@ -89,7 +89,7 @@ static const struct file_operations drm_proc_fops = {
* Create a given set of proc files represented by an array of
* drm_info_list entries in the given root directory.
*/
int drm_proc_create_files(struct drm_info_list *files, int count,
static int drm_proc_create_files(struct drm_info_list *files, int count,
struct proc_dir_entry *root, struct drm_minor *minor)
{
struct drm_device *dev = minor->dev;
@ -172,7 +172,7 @@ int drm_proc_init(struct drm_minor *minor, int minor_id,
return 0;
}
int drm_proc_remove_files(struct drm_info_list *files, int count,
static int drm_proc_remove_files(struct drm_info_list *files, int count,
struct drm_minor *minor)
{
struct list_head *pos, *q;


@ -2365,6 +2365,10 @@ int i915_gpu_idle(struct drm_device *dev)
/* Flush everything onto the inactive list. */
for_each_ring(ring, dev_priv, i) {
ret = i915_switch_context(ring, NULL, DEFAULT_CONTEXT_ID);
if (ret)
return ret;
ret = i915_ring_idle(ring);
if (ret)
return ret;
@ -2372,10 +2376,6 @@ int i915_gpu_idle(struct drm_device *dev)
/* Is the device fubar? */
if (WARN_ON(!list_empty(&ring->gpu_write_list)))
return -EBUSY;
ret = i915_switch_context(ring, NULL, DEFAULT_CONTEXT_ID);
if (ret)
return ret;
}
return 0;


@ -261,7 +261,10 @@ void i915_ppgtt_bind_object(struct i915_hw_ppgtt *ppgtt,
pte_flags |= GEN6_PTE_CACHE_LLC;
break;
case I915_CACHE_NONE:
pte_flags |= GEN6_PTE_UNCACHED;
if (IS_HASWELL(dev))
pte_flags |= HSW_PTE_UNCACHED;
else
pte_flags |= GEN6_PTE_UNCACHED;
break;
default:
BUG();


@ -115,6 +115,7 @@
#define GEN6_PTE_VALID (1 << 0)
#define GEN6_PTE_UNCACHED (1 << 1)
#define HSW_PTE_UNCACHED (0)
#define GEN6_PTE_CACHE_LLC (2 << 1)
#define GEN6_PTE_CACHE_LLC_MLC (3 << 1)
#define GEN6_PTE_CACHE_BITS (3 << 1)


@ -326,6 +326,36 @@ static bool intel_crt_detect_hotplug(struct drm_connector *connector)
return ret;
}
static struct edid *intel_crt_get_edid(struct drm_connector *connector,
struct i2c_adapter *i2c)
{
struct edid *edid;
edid = drm_get_edid(connector, i2c);
if (!edid && !intel_gmbus_is_forced_bit(i2c)) {
DRM_DEBUG_KMS("CRT GMBUS EDID read failed, retry using GPIO bit-banging\n");
intel_gmbus_force_bit(i2c, true);
edid = drm_get_edid(connector, i2c);
intel_gmbus_force_bit(i2c, false);
}
return edid;
}
/* local version of intel_ddc_get_modes() to use intel_crt_get_edid() */
static int intel_crt_ddc_get_modes(struct drm_connector *connector,
struct i2c_adapter *adapter)
{
struct edid *edid;
edid = intel_crt_get_edid(connector, adapter);
if (!edid)
return 0;
return intel_connector_update_modes(connector, edid);
}
static bool intel_crt_detect_ddc(struct drm_connector *connector)
{
struct intel_crt *crt = intel_attached_crt(connector);
@ -336,7 +366,7 @@ static bool intel_crt_detect_ddc(struct drm_connector *connector)
BUG_ON(crt->base.type != INTEL_OUTPUT_ANALOG);
i2c = intel_gmbus_get_adapter(dev_priv, dev_priv->crt_ddc_pin);
edid = drm_get_edid(connector, i2c);
edid = intel_crt_get_edid(connector, i2c);
if (edid) {
bool is_digital = edid->input & DRM_EDID_INPUT_DIGITAL;
@ -544,13 +574,13 @@ static int intel_crt_get_modes(struct drm_connector *connector)
struct i2c_adapter *i2c;
i2c = intel_gmbus_get_adapter(dev_priv, dev_priv->crt_ddc_pin);
ret = intel_ddc_get_modes(connector, i2c);
ret = intel_crt_ddc_get_modes(connector, i2c);
if (ret || !IS_G4X(dev))
return ret;
/* Try to probe digital port for output in DVI-I -> VGA mode. */
i2c = intel_gmbus_get_adapter(dev_priv, GMBUS_PORT_DPB);
return intel_ddc_get_modes(connector, i2c);
return intel_crt_ddc_get_modes(connector, i2c);
}
static int intel_crt_set_property(struct drm_connector *connector,


@ -342,6 +342,8 @@ struct intel_fbc_work {
int interval;
};
int intel_connector_update_modes(struct drm_connector *connector,
struct edid *edid);
int intel_ddc_get_modes(struct drm_connector *c, struct i2c_adapter *adapter);
extern void intel_attach_force_audio_property(struct drm_connector *connector);


@ -32,6 +32,25 @@
#include "intel_drv.h"
#include "i915_drv.h"
/**
* intel_connector_update_modes - update connector from edid
* @connector: DRM connector device to use
* @edid: previously read EDID information
*/
int intel_connector_update_modes(struct drm_connector *connector,
struct edid *edid)
{
int ret;
drm_mode_connector_update_edid_property(connector, edid);
ret = drm_add_edid_modes(connector, edid);
drm_edid_to_eld(connector, edid);
connector->display_info.raw_edid = NULL;
kfree(edid);
return ret;
}
/**
* intel_ddc_get_modes - get modelist from monitor
* @connector: DRM connector device to use
@ -43,18 +62,12 @@ int intel_ddc_get_modes(struct drm_connector *connector,
struct i2c_adapter *adapter)
{
struct edid *edid;
int ret = 0;
edid = drm_get_edid(connector, adapter);
if (edid) {
drm_mode_connector_update_edid_property(connector, edid);
ret = drm_add_edid_modes(connector, edid);
drm_edid_to_eld(connector, edid);
connector->display_info.raw_edid = NULL;
kfree(edid);
}
if (!edid)
return 0;
return ret;
return intel_connector_update_modes(connector, edid);
}
static const struct drm_prop_enum_list force_audio_names[] = {


@ -2441,17 +2441,10 @@ static void gen6_enable_rps(struct drm_device *dev)
dev_priv->max_delay << 24 |
dev_priv->min_delay << 16);
if (IS_HASWELL(dev)) {
I915_WRITE(GEN6_RP_UP_THRESHOLD, 59400);
I915_WRITE(GEN6_RP_DOWN_THRESHOLD, 245000);
I915_WRITE(GEN6_RP_UP_EI, 66000);
I915_WRITE(GEN6_RP_DOWN_EI, 350000);
} else {
I915_WRITE(GEN6_RP_UP_THRESHOLD, 10000);
I915_WRITE(GEN6_RP_DOWN_THRESHOLD, 1000000);
I915_WRITE(GEN6_RP_UP_EI, 100000);
I915_WRITE(GEN6_RP_DOWN_EI, 5000000);
}
I915_WRITE(GEN6_RP_UP_THRESHOLD, 59400);
I915_WRITE(GEN6_RP_DOWN_THRESHOLD, 245000);
I915_WRITE(GEN6_RP_UP_EI, 66000);
I915_WRITE(GEN6_RP_DOWN_EI, 350000);
I915_WRITE(GEN6_RP_IDLE_HYSTERSIS, 10);
I915_WRITE(GEN6_RP_CONTROL,


@ -1692,6 +1692,7 @@ static bool intel_sdvo_detect_hdmi_audio(struct drm_connector *connector)
edid = intel_sdvo_get_edid(connector);
if (edid != NULL && edid->input & DRM_EDID_INPUT_DIGITAL)
has_audio = drm_detect_monitor_audio(edid);
kfree(edid);
return has_audio;
}


@ -444,11 +444,28 @@ union atom_enable_ss {
static void atombios_crtc_program_ss(struct radeon_device *rdev,
int enable,
int pll_id,
int crtc_id,
struct radeon_atom_ss *ss)
{
unsigned i;
int index = GetIndexIntoMasterTable(COMMAND, EnableSpreadSpectrumOnPPLL);
union atom_enable_ss args;
if (!enable) {
for (i = 0; i < rdev->num_crtc; i++) {
if (rdev->mode_info.crtcs[i] &&
rdev->mode_info.crtcs[i]->enabled &&
i != crtc_id &&
pll_id == rdev->mode_info.crtcs[i]->pll_id) {
/* another crtc is using this pll; don't turn
* off spread spectrum, as it might turn off
* the display on the active crtc
*/
return;
}
}
}
memset(&args, 0, sizeof(args));
if (ASIC_IS_DCE5(rdev)) {
@ -1028,7 +1045,7 @@ static void atombios_crtc_set_pll(struct drm_crtc *crtc, struct drm_display_mode
radeon_compute_pll_legacy(pll, adjusted_clock, &pll_clock, &fb_div, &frac_fb_div,
&ref_div, &post_div);
atombios_crtc_program_ss(rdev, ATOM_DISABLE, radeon_crtc->pll_id, &ss);
atombios_crtc_program_ss(rdev, ATOM_DISABLE, radeon_crtc->pll_id, radeon_crtc->crtc_id, &ss);
atombios_crtc_program_pll(crtc, radeon_crtc->crtc_id, radeon_crtc->pll_id,
encoder_mode, radeon_encoder->encoder_id, mode->clock,
@ -1051,7 +1068,7 @@ static void atombios_crtc_set_pll(struct drm_crtc *crtc, struct drm_display_mode
ss.step = step_size;
}
atombios_crtc_program_ss(rdev, ATOM_ENABLE, radeon_crtc->pll_id, &ss);
atombios_crtc_program_ss(rdev, ATOM_ENABLE, radeon_crtc->pll_id, radeon_crtc->crtc_id, &ss);
}
}
@ -1572,11 +1589,11 @@ void radeon_atom_disp_eng_pll_init(struct radeon_device *rdev)
ASIC_INTERNAL_SS_ON_DCPLL,
rdev->clock.default_dispclk);
if (ss_enabled)
atombios_crtc_program_ss(rdev, ATOM_DISABLE, ATOM_DCPLL, &ss);
atombios_crtc_program_ss(rdev, ATOM_DISABLE, ATOM_DCPLL, -1, &ss);
/* XXX: DCE5, make sure voltage, dispclk is high enough */
atombios_crtc_set_disp_eng_pll(rdev, rdev->clock.default_dispclk);
if (ss_enabled)
atombios_crtc_program_ss(rdev, ATOM_ENABLE, ATOM_DCPLL, &ss);
atombios_crtc_program_ss(rdev, ATOM_ENABLE, ATOM_DCPLL, -1, &ss);
}
}


@ -47,13 +47,17 @@ struct r600_cs_track {
u32 npipes;
/* value we track */
u32 sq_config;
u32 log_nsamples;
u32 nsamples;
u32 cb_color_base_last[8];
struct radeon_bo *cb_color_bo[8];
u64 cb_color_bo_mc[8];
u32 cb_color_bo_offset[8];
struct radeon_bo *cb_color_frag_bo[8]; /* unused */
struct radeon_bo *cb_color_tile_bo[8]; /* unused */
u64 cb_color_bo_offset[8];
struct radeon_bo *cb_color_frag_bo[8];
u64 cb_color_frag_offset[8];
struct radeon_bo *cb_color_tile_bo[8];
u64 cb_color_tile_offset[8];
u32 cb_color_mask[8];
u32 cb_color_info[8];
u32 cb_color_view[8];
u32 cb_color_size_idx[8]; /* unused */
@ -349,10 +353,6 @@ static int r600_cs_track_validate_cb(struct radeon_cs_parser *p, int i)
unsigned array_mode;
u32 format;
if (G_0280A0_TILE_MODE(track->cb_color_info[i])) {
dev_warn(p->dev, "FMASK or CMASK buffer are not supported by this kernel\n");
return -EINVAL;
}
size = radeon_bo_size(track->cb_color_bo[i]) - track->cb_color_bo_offset[i];
format = G_0280A0_FORMAT(track->cb_color_info[i]);
if (!r600_fmt_is_valid_color(format)) {
@ -420,7 +420,8 @@ static int r600_cs_track_validate_cb(struct radeon_cs_parser *p, int i)
}
/* check offset */
tmp = r600_fmt_get_nblocksy(format, height) * r600_fmt_get_nblocksx(format, pitch) * r600_fmt_get_blocksize(format);
tmp = r600_fmt_get_nblocksy(format, height) * r600_fmt_get_nblocksx(format, pitch) *
r600_fmt_get_blocksize(format) * track->nsamples;
switch (array_mode) {
default:
case V_0280A0_ARRAY_LINEAR_GENERAL:
@ -441,7 +442,7 @@ static int r600_cs_track_validate_cb(struct radeon_cs_parser *p, int i)
* broken userspace.
*/
} else {
dev_warn(p->dev, "%s offset[%d] %d %d %d %lu too big (%d %d) (%d %d %d)\n",
dev_warn(p->dev, "%s offset[%d] %d %llu %d %lu too big (%d %d) (%d %d %d)\n",
__func__, i, array_mode,
track->cb_color_bo_offset[i], tmp,
radeon_bo_size(track->cb_color_bo[i]),
@ -458,6 +459,51 @@ static int r600_cs_track_validate_cb(struct radeon_cs_parser *p, int i)
tmp = S_028060_PITCH_TILE_MAX((pitch / 8) - 1) |
S_028060_SLICE_TILE_MAX(slice_tile_max - 1);
ib[track->cb_color_size_idx[i]] = tmp;
/* FMASK/CMASK */
switch (G_0280A0_TILE_MODE(track->cb_color_info[i])) {
case V_0280A0_TILE_DISABLE:
break;
case V_0280A0_FRAG_ENABLE:
if (track->nsamples > 1) {
uint32_t tile_max = G_028100_FMASK_TILE_MAX(track->cb_color_mask[i]);
/* the tile size is 8x8, but the size is in units of bits.
* for bytes, do just * 8. */
uint32_t bytes = track->nsamples * track->log_nsamples * 8 * (tile_max + 1);
if (bytes + track->cb_color_frag_offset[i] >
radeon_bo_size(track->cb_color_frag_bo[i])) {
dev_warn(p->dev, "%s FMASK_TILE_MAX too large "
"(tile_max=%u, bytes=%u, offset=%llu, bo_size=%lu)\n",
__func__, tile_max, bytes,
track->cb_color_frag_offset[i],
radeon_bo_size(track->cb_color_frag_bo[i]));
return -EINVAL;
}
}
/* fall through */
case V_0280A0_CLEAR_ENABLE:
{
uint32_t block_max = G_028100_CMASK_BLOCK_MAX(track->cb_color_mask[i]);
/* One block = 128x128 pixels, one 8x8 tile has 4 bits:
* (128*128) / (8*8) / 2 = 128 bytes per block. */
uint32_t bytes = (block_max + 1) * 128;
if (bytes + track->cb_color_tile_offset[i] >
radeon_bo_size(track->cb_color_tile_bo[i])) {
dev_warn(p->dev, "%s CMASK_BLOCK_MAX too large "
"(block_max=%u, bytes=%u, offset=%llu, bo_size=%lu)\n",
__func__, block_max, bytes,
track->cb_color_tile_offset[i],
radeon_bo_size(track->cb_color_tile_bo[i]));
return -EINVAL;
}
break;
}
default:
dev_warn(p->dev, "%s invalid tile mode\n", __func__);
return -EINVAL;
}
return 0;
}
@ -566,7 +612,7 @@ static int r600_cs_track_validate_db(struct radeon_cs_parser *p)
ntiles = G_028000_SLICE_TILE_MAX(track->db_depth_size) + 1;
nviews = G_028004_SLICE_MAX(track->db_depth_view) + 1;
tmp = ntiles * bpe * 64 * nviews;
tmp = ntiles * bpe * 64 * nviews * track->nsamples;
if ((tmp + track->db_offset) > radeon_bo_size(track->db_bo)) {
dev_warn(p->dev, "z/stencil buffer (%d) too small (0x%08X %d %d %d -> %u have %lu)\n",
array_mode,
@ -1231,6 +1277,7 @@ static int r600_cs_check_reg(struct radeon_cs_parser *p, u32 reg, u32 idx)
break;
case R_028C04_PA_SC_AA_CONFIG:
tmp = G_028C04_MSAA_NUM_SAMPLES(radeon_get_ib_value(p, idx));
track->log_nsamples = tmp;
track->nsamples = 1 << tmp;
track->cb_dirty = true;
break;
@ -1312,16 +1359,21 @@ static int r600_cs_check_reg(struct radeon_cs_parser *p, u32 reg, u32 idx)
dev_err(p->dev, "Broken old userspace ? no cb_color0_base supplied before trying to write 0x%08X\n", reg);
return -EINVAL;
}
ib[idx] = track->cb_color_base_last[tmp];
track->cb_color_frag_bo[tmp] = track->cb_color_bo[tmp];
track->cb_color_frag_offset[tmp] = track->cb_color_bo_offset[tmp];
ib[idx] = track->cb_color_base_last[tmp];
} else {
r = r600_cs_packet_next_reloc(p, &reloc);
if (r) {
dev_err(p->dev, "bad SET_CONTEXT_REG 0x%04X\n", reg);
return -EINVAL;
}
ib[idx] += (u32)((reloc->lobj.gpu_offset >> 8) & 0xffffffff);
track->cb_color_frag_bo[tmp] = reloc->robj;
track->cb_color_frag_offset[tmp] = (u64)ib[idx] << 8;
ib[idx] += (u32)((reloc->lobj.gpu_offset >> 8) & 0xffffffff);
}
if (G_0280A0_TILE_MODE(track->cb_color_info[tmp])) {
track->cb_dirty = true;
}
break;
case R_0280C0_CB_COLOR0_TILE:
@ -1338,16 +1390,35 @@ static int r600_cs_check_reg(struct radeon_cs_parser *p, u32 reg, u32 idx)
dev_err(p->dev, "Broken old userspace ? no cb_color0_base supplied before trying to write 0x%08X\n", reg);
return -EINVAL;
}
ib[idx] = track->cb_color_base_last[tmp];
track->cb_color_tile_bo[tmp] = track->cb_color_bo[tmp];
track->cb_color_tile_offset[tmp] = track->cb_color_bo_offset[tmp];
ib[idx] = track->cb_color_base_last[tmp];
} else {
r = r600_cs_packet_next_reloc(p, &reloc);
if (r) {
dev_err(p->dev, "bad SET_CONTEXT_REG 0x%04X\n", reg);
return -EINVAL;
}
ib[idx] += (u32)((reloc->lobj.gpu_offset >> 8) & 0xffffffff);
track->cb_color_tile_bo[tmp] = reloc->robj;
track->cb_color_tile_offset[tmp] = (u64)ib[idx] << 8;
ib[idx] += (u32)((reloc->lobj.gpu_offset >> 8) & 0xffffffff);
}
if (G_0280A0_TILE_MODE(track->cb_color_info[tmp])) {
track->cb_dirty = true;
}
break;
case R_028100_CB_COLOR0_MASK:
case R_028104_CB_COLOR1_MASK:
case R_028108_CB_COLOR2_MASK:
case R_02810C_CB_COLOR3_MASK:
case R_028110_CB_COLOR4_MASK:
case R_028114_CB_COLOR5_MASK:
case R_028118_CB_COLOR6_MASK:
case R_02811C_CB_COLOR7_MASK:
tmp = (reg - R_028100_CB_COLOR0_MASK) / 4;
track->cb_color_mask[tmp] = ib[idx];
if (G_0280A0_TILE_MODE(track->cb_color_info[tmp])) {
track->cb_dirty = true;
}
break;
case CB_COLOR0_BASE:
@ -1492,7 +1563,7 @@ unsigned r600_mip_minify(unsigned size, unsigned level)
}
static void r600_texture_size(unsigned nfaces, unsigned blevel, unsigned llevel,
unsigned w0, unsigned h0, unsigned d0, unsigned format,
unsigned w0, unsigned h0, unsigned d0, unsigned nsamples, unsigned format,
unsigned block_align, unsigned height_align, unsigned base_align,
unsigned *l0_size, unsigned *mipmap_size)
{
@ -1520,7 +1591,7 @@ static void r600_texture_size(unsigned nfaces, unsigned blevel, unsigned llevel,
depth = r600_mip_minify(d0, i);
size = nbx * nby * blocksize;
size = nbx * nby * blocksize * nsamples;
if (nfaces)
size *= nfaces;
else
@ -1672,7 +1743,7 @@ static int r600_check_texture_resource(struct radeon_cs_parser *p, u32 idx,
nfaces = larray - barray + 1;
}
r600_texture_size(nfaces, blevel, llevel, w0, h0, d0, format,
r600_texture_size(nfaces, blevel, llevel, w0, h0, d0, array_check.nsamples, format,
pitch_align, height_align, base_align,
&l0_size, &mipmap_size);
/* using get ib will give us the offset into the texture bo */


@ -92,6 +92,20 @@
#define R_028094_CB_COLOR5_VIEW 0x028094
#define R_028098_CB_COLOR6_VIEW 0x028098
#define R_02809C_CB_COLOR7_VIEW 0x02809C
#define R_028100_CB_COLOR0_MASK 0x028100
#define S_028100_CMASK_BLOCK_MAX(x) (((x) & 0xFFF) << 0)
#define G_028100_CMASK_BLOCK_MAX(x) (((x) >> 0) & 0xFFF)
#define C_028100_CMASK_BLOCK_MAX 0xFFFFF000
#define S_028100_FMASK_TILE_MAX(x) (((x) & 0xFFFFF) << 12)
#define G_028100_FMASK_TILE_MAX(x) (((x) >> 12) & 0xFFFFF)
#define C_028100_FMASK_TILE_MAX 0x00000FFF
#define R_028104_CB_COLOR1_MASK 0x028104
#define R_028108_CB_COLOR2_MASK 0x028108
#define R_02810C_CB_COLOR3_MASK 0x02810C
#define R_028110_CB_COLOR4_MASK 0x028110
#define R_028114_CB_COLOR5_MASK 0x028114
#define R_028118_CB_COLOR6_MASK 0x028118
#define R_02811C_CB_COLOR7_MASK 0x02811C
#define CB_COLOR0_INFO 0x280a0
# define CB_FORMAT(x) ((x) << 2)
# define CB_ARRAY_MODE(x) ((x) << 8)
@ -1400,6 +1414,9 @@
#define S_0280A0_TILE_MODE(x) (((x) & 0x3) << 18)
#define G_0280A0_TILE_MODE(x) (((x) >> 18) & 0x3)
#define C_0280A0_TILE_MODE 0xFFF3FFFF
#define V_0280A0_TILE_DISABLE 0
#define V_0280A0_CLEAR_ENABLE 1
#define V_0280A0_FRAG_ENABLE 2
#define S_0280A0_BLEND_CLAMP(x) (((x) & 0x1) << 20)
#define G_0280A0_BLEND_CLAMP(x) (((x) >> 20) & 0x1)
#define C_0280A0_BLEND_CLAMP 0xFFEFFFFF


@ -142,21 +142,6 @@ struct radeon_device;
/*
* BIOS.
*/
#define ATRM_BIOS_PAGE 4096
#if defined(CONFIG_VGA_SWITCHEROO)
bool radeon_atrm_supported(struct pci_dev *pdev);
int radeon_atrm_get_bios_chunk(uint8_t *bios, int offset, int len);
#else
static inline bool radeon_atrm_supported(struct pci_dev *pdev)
{
return false;
}
static inline int radeon_atrm_get_bios_chunk(uint8_t *bios, int offset, int len){
return -EINVAL;
}
#endif
bool radeon_get_bios(struct radeon_device *rdev);
/*


@ -452,7 +452,7 @@ static bool radeon_atom_apply_quirks(struct drm_device *dev,
}
/* Fujitsu D3003-S2 board lists DVI-I as DVI-D and VGA */
if ((dev->pdev->device == 0x9802) &&
if (((dev->pdev->device == 0x9802) || (dev->pdev->device == 0x9806)) &&
(dev->pdev->subsystem_vendor == 0x1734) &&
(dev->pdev->subsystem_device == 0x11bd)) {
if (*connector_type == DRM_MODE_CONNECTOR_VGA) {


@ -30,57 +30,8 @@ static struct radeon_atpx_priv {
/* handle for device - and atpx */
acpi_handle dhandle;
acpi_handle atpx_handle;
acpi_handle atrm_handle;
} radeon_atpx_priv;
/* retrieve the ROM in 4k blocks */
static int radeon_atrm_call(acpi_handle atrm_handle, uint8_t *bios,
int offset, int len)
{
acpi_status status;
union acpi_object atrm_arg_elements[2], *obj;
struct acpi_object_list atrm_arg;
struct acpi_buffer buffer = { ACPI_ALLOCATE_BUFFER, NULL};
atrm_arg.count = 2;
atrm_arg.pointer = &atrm_arg_elements[0];
atrm_arg_elements[0].type = ACPI_TYPE_INTEGER;
atrm_arg_elements[0].integer.value = offset;
atrm_arg_elements[1].type = ACPI_TYPE_INTEGER;
atrm_arg_elements[1].integer.value = len;
status = acpi_evaluate_object(atrm_handle, NULL, &atrm_arg, &buffer);
if (ACPI_FAILURE(status)) {
printk("failed to evaluate ATRM got %s\n", acpi_format_exception(status));
return -ENODEV;
}
obj = (union acpi_object *)buffer.pointer;
memcpy(bios+offset, obj->buffer.pointer, obj->buffer.length);
len = obj->buffer.length;
kfree(buffer.pointer);
return len;
}
bool radeon_atrm_supported(struct pci_dev *pdev)
{
/* get the discrete ROM only via ATRM */
if (!radeon_atpx_priv.atpx_detected)
return false;
if (radeon_atpx_priv.dhandle == DEVICE_ACPI_HANDLE(&pdev->dev))
return false;
return true;
}
int radeon_atrm_get_bios_chunk(uint8_t *bios, int offset, int len)
{
return radeon_atrm_call(radeon_atpx_priv.atrm_handle, bios, offset, len);
}
static int radeon_atpx_get_version(acpi_handle handle)
{
acpi_status status;
@ -198,7 +149,7 @@ static int radeon_atpx_power_state(enum vga_switcheroo_client_id id,
static bool radeon_atpx_pci_probe_handle(struct pci_dev *pdev)
{
acpi_handle dhandle, atpx_handle, atrm_handle;
acpi_handle dhandle, atpx_handle;
acpi_status status;
dhandle = DEVICE_ACPI_HANDLE(&pdev->dev);
@ -209,13 +160,8 @@ static bool radeon_atpx_pci_probe_handle(struct pci_dev *pdev)
if (ACPI_FAILURE(status))
return false;
status = acpi_get_handle(dhandle, "ATRM", &atrm_handle);
if (ACPI_FAILURE(status))
return false;
radeon_atpx_priv.dhandle = dhandle;
radeon_atpx_priv.atpx_handle = atpx_handle;
radeon_atpx_priv.atrm_handle = atrm_handle;
return true;
}


@ -32,6 +32,7 @@
#include <linux/vga_switcheroo.h>
#include <linux/slab.h>
#include <linux/acpi.h>
/*
* BIOS.
*/
@ -98,16 +99,81 @@ static bool radeon_read_bios(struct radeon_device *rdev)
return true;
}
#ifdef CONFIG_ACPI
/* ATRM is used to get the BIOS on the discrete cards in
* dual-gpu systems.
*/
/* retrieve the ROM in 4k blocks */
#define ATRM_BIOS_PAGE 4096
/**
* radeon_atrm_call - fetch a chunk of the vbios
*
* @atrm_handle: acpi ATRM handle
* @bios: vbios image pointer
* @offset: offset of vbios image data to fetch
* @len: length of vbios image data to fetch
*
* Executes ATRM to fetch a chunk of the discrete
* vbios image on PX systems (all asics).
* Returns the length of the buffer fetched.
*/
static int radeon_atrm_call(acpi_handle atrm_handle, uint8_t *bios,
int offset, int len)
{
acpi_status status;
union acpi_object atrm_arg_elements[2], *obj;
struct acpi_object_list atrm_arg;
struct acpi_buffer buffer = { ACPI_ALLOCATE_BUFFER, NULL};
atrm_arg.count = 2;
atrm_arg.pointer = &atrm_arg_elements[0];
atrm_arg_elements[0].type = ACPI_TYPE_INTEGER;
atrm_arg_elements[0].integer.value = offset;
atrm_arg_elements[1].type = ACPI_TYPE_INTEGER;
atrm_arg_elements[1].integer.value = len;
status = acpi_evaluate_object(atrm_handle, NULL, &atrm_arg, &buffer);
if (ACPI_FAILURE(status)) {
printk("failed to evaluate ATRM got %s\n", acpi_format_exception(status));
return -ENODEV;
}
obj = (union acpi_object *)buffer.pointer;
memcpy(bios+offset, obj->buffer.pointer, obj->buffer.length);
len = obj->buffer.length;
kfree(buffer.pointer);
return len;
}
static bool radeon_atrm_get_bios(struct radeon_device *rdev)
{
int ret;
int size = 256 * 1024;
int i;
struct pci_dev *pdev = NULL;
acpi_handle dhandle, atrm_handle;
acpi_status status;
bool found = false;
if (!radeon_atrm_supported(rdev->pdev))
/* ATRM is for the discrete card only */
if (rdev->flags & RADEON_IS_IGP)
return false;
while ((pdev = pci_get_class(PCI_CLASS_DISPLAY_VGA << 8, pdev)) != NULL) {
dhandle = DEVICE_ACPI_HANDLE(&pdev->dev);
if (!dhandle)
continue;
status = acpi_get_handle(dhandle, "ATRM", &atrm_handle);
if (!ACPI_FAILURE(status)) {
found = true;
break;
}
}
if (!found)
return false;
rdev->bios = kmalloc(size, GFP_KERNEL);
@ -117,9 +183,10 @@ static bool radeon_atrm_get_bios(struct radeon_device *rdev)
}
for (i = 0; i < size / ATRM_BIOS_PAGE; i++) {
ret = radeon_atrm_get_bios_chunk(rdev->bios,
(i * ATRM_BIOS_PAGE),
ATRM_BIOS_PAGE);
ret = radeon_atrm_call(atrm_handle,
rdev->bios,
(i * ATRM_BIOS_PAGE),
ATRM_BIOS_PAGE);
if (ret < ATRM_BIOS_PAGE)
break;
}
@ -130,6 +197,12 @@ static bool radeon_atrm_get_bios(struct radeon_device *rdev)
}
return true;
}
#else
static inline bool radeon_atrm_get_bios(struct radeon_device *rdev)
{
return false;
}
#endif
static bool ni_read_disabled_bios(struct radeon_device *rdev)
{
@ -476,6 +549,61 @@ static bool radeon_read_disabled_bios(struct radeon_device *rdev)
return legacy_read_disabled_bios(rdev);
}
#ifdef CONFIG_ACPI
static bool radeon_acpi_vfct_bios(struct radeon_device *rdev)
{
bool ret = false;
struct acpi_table_header *hdr;
acpi_size tbl_size;
UEFI_ACPI_VFCT *vfct;
GOP_VBIOS_CONTENT *vbios;
VFCT_IMAGE_HEADER *vhdr;
if (!ACPI_SUCCESS(acpi_get_table_with_size("VFCT", 1, &hdr, &tbl_size)))
return false;
if (tbl_size < sizeof(UEFI_ACPI_VFCT)) {
DRM_ERROR("ACPI VFCT table present but broken (too short #1)\n");
goto out_unmap;
}
vfct = (UEFI_ACPI_VFCT *)hdr;
if (vfct->VBIOSImageOffset + sizeof(VFCT_IMAGE_HEADER) > tbl_size) {
DRM_ERROR("ACPI VFCT table present but broken (too short #2)\n");
goto out_unmap;
}
vbios = (GOP_VBIOS_CONTENT *)((char *)hdr + vfct->VBIOSImageOffset);
vhdr = &vbios->VbiosHeader;
DRM_INFO("ACPI VFCT contains a BIOS for %02x:%02x.%d %04x:%04x, size %d\n",
vhdr->PCIBus, vhdr->PCIDevice, vhdr->PCIFunction,
vhdr->VendorID, vhdr->DeviceID, vhdr->ImageLength);
if (vhdr->PCIBus != rdev->pdev->bus->number ||
vhdr->PCIDevice != PCI_SLOT(rdev->pdev->devfn) ||
vhdr->PCIFunction != PCI_FUNC(rdev->pdev->devfn) ||
vhdr->VendorID != rdev->pdev->vendor ||
vhdr->DeviceID != rdev->pdev->device) {
DRM_INFO("ACPI VFCT table is not for this card\n");
goto out_unmap;
};
if (vfct->VBIOSImageOffset + sizeof(VFCT_IMAGE_HEADER) + vhdr->ImageLength > tbl_size) {
DRM_ERROR("ACPI VFCT image truncated\n");
goto out_unmap;
}
rdev->bios = kmemdup(&vbios->VbiosContent, vhdr->ImageLength, GFP_KERNEL);
ret = !!rdev->bios;
out_unmap:
return ret;
}
#else
static inline bool radeon_acpi_vfct_bios(struct radeon_device *rdev)
{
return false;
}
#endif
bool radeon_get_bios(struct radeon_device *rdev)
{
@ -483,6 +611,8 @@ bool radeon_get_bios(struct radeon_device *rdev)
uint16_t tmp;
r = radeon_atrm_get_bios(rdev);
if (r == false)
r = radeon_acpi_vfct_bios(rdev);
if (r == false)
r = igp_read_bios_from_vram(rdev);
if (r == false)


@ -62,9 +62,10 @@
* 2.18.0 - r600-eg: allow "invalid" DB formats
* 2.19.0 - r600-eg: MSAA textures
* 2.20.0 - r600-si: RADEON_INFO_TIMESTAMP query
* 2.21.0 - r600-r700: FMASK and CMASK
*/
#define KMS_DRIVER_MAJOR 2
#define KMS_DRIVER_MINOR 20
#define KMS_DRIVER_MINOR 21
#define KMS_DRIVER_PATCHLEVEL 0
int radeon_driver_load_kms(struct drm_device *dev, unsigned long flags);
int radeon_driver_unload_kms(struct drm_device *dev);


@ -132,6 +132,7 @@ int radeon_bo_create(struct radeon_device *rdev,
acc_size = ttm_bo_dma_acc_size(&rdev->mman.bdev, size,
sizeof(struct radeon_bo));
retry:
bo = kzalloc(sizeof(struct radeon_bo), GFP_KERNEL);
if (bo == NULL)
return -ENOMEM;
@ -145,8 +146,6 @@ int radeon_bo_create(struct radeon_device *rdev,
bo->surface_reg = -1;
INIT_LIST_HEAD(&bo->list);
INIT_LIST_HEAD(&bo->va);
retry:
radeon_ttm_placement_from_domain(bo, domain);
/* Kernel allocation are uninterruptible */
down_read(&rdev->pm.mclk_lock);


@ -706,6 +706,7 @@ int radeon_ring_init(struct radeon_device *rdev, struct radeon_ring *ring, unsig
if (radeon_debugfs_ring_init(rdev, ring)) {
DRM_ERROR("Failed to register debugfs file for rings !\n");
}
radeon_ring_lockup_update(ring);
return 0;
}


@ -744,14 +744,6 @@ r600 0x9400
0x00028C38 CB_CLRCMP_DST
0x00028C3C CB_CLRCMP_MSK
0x00028C34 CB_CLRCMP_SRC
0x00028100 CB_COLOR0_MASK
0x00028104 CB_COLOR1_MASK
0x00028108 CB_COLOR2_MASK
0x0002810C CB_COLOR3_MASK
0x00028110 CB_COLOR4_MASK
0x00028114 CB_COLOR5_MASK
0x00028118 CB_COLOR6_MASK
0x0002811C CB_COLOR7_MASK
0x00028808 CB_COLOR_CONTROL
0x0002842C CB_FOG_BLUE
0x00028428 CB_FOG_GREEN


@ -354,8 +354,7 @@ static int udl_crtc_mode_set(struct drm_crtc *crtc,
static void udl_crtc_disable(struct drm_crtc *crtc)
{
udl_crtc_dpms(crtc, DRM_MODE_DPMS_OFF);
}
static void udl_crtc_destroy(struct drm_crtc *crtc)


@ -1688,15 +1688,19 @@ int vmw_du_page_flip(struct drm_crtc *crtc,
struct vmw_private *dev_priv = vmw_priv(crtc->dev);
struct drm_framebuffer *old_fb = crtc->fb;
struct vmw_framebuffer *vfb = vmw_framebuffer_to_vfb(fb);
struct drm_file *file_priv = event->base.file_priv;
struct drm_file *file_priv;
struct vmw_fence_obj *fence = NULL;
struct drm_clip_rect clips;
int ret;
if (event == NULL)
return -EINVAL;
/* require ScreenObject support for page flipping */
if (!dev_priv->sou_priv)
return -ENOSYS;
file_priv = event->base.file_priv;
if (!vmw_kms_screen_object_flippable(dev_priv, crtc))
return -EINVAL;


@ -405,6 +405,7 @@ static int diolan_usb_xfer(struct i2c_adapter *adapter, struct i2c_msg *msgs,
}
}
}
ret = num;
abort:
sret = diolan_i2c_stop(dev);
if (sret < 0 && ret >= 0)


@ -350,10 +350,6 @@ static void setup_i2c_controller(struct nmk_i2c_dev *dev)
i2c_clk = clk_get_rate(dev->clk);
/* fallback to std. mode if machine has not provided it */
if (dev->cfg.clk_freq == 0)
dev->cfg.clk_freq = 100000;
/*
* The spec says, in case of std. mode the divider is
* 2 whereas it is 3 for fast and fastplus mode of
@ -911,20 +907,32 @@ static const struct i2c_algorithm nmk_i2c_algo = {
.functionality = nmk_i2c_functionality
};
static struct nmk_i2c_controller u8500_i2c = {
/*
* Slave data setup time; 250ns, 100ns, and 10ns, which
* is 14, 6 and 2 respectively for a 48 MHz i2c clock.
*/
.slsu = 0xe,
.tft = 1, /* Tx FIFO threshold */
.rft = 8, /* Rx FIFO threshold */
.clk_freq = 400000, /* fast mode operation */
.timeout = 200, /* Slave response timeout(ms) */
.sm = I2C_FREQ_MODE_FAST,
};
static atomic_t adapter_id = ATOMIC_INIT(0);
static int nmk_i2c_probe(struct amba_device *adev, const struct amba_id *id)
{
int ret = 0;
struct nmk_i2c_controller *pdata =
adev->dev.platform_data;
struct nmk_i2c_controller *pdata = adev->dev.platform_data;
struct nmk_i2c_dev *dev;
struct i2c_adapter *adap;
if (!pdata) {
dev_warn(&adev->dev, "no platform data\n");
return -ENODEV;
}
if (!pdata)
/* No i2c configuration found, using the default. */
pdata = &u8500_i2c;
dev = kzalloc(sizeof(struct nmk_i2c_dev), GFP_KERNEL);
if (!dev) {
dev_err(&adev->dev, "cannot allocate memory\n");


@ -584,7 +584,7 @@ omap_i2c_xfer(struct i2c_adapter *adap, struct i2c_msg msgs[], int num)
r = pm_runtime_get_sync(dev->dev);
if (IS_ERR_VALUE(r))
return r;
goto out;
r = omap_i2c_wait_for_bb(dev);
if (r < 0)


@ -712,7 +712,7 @@ static int __devexit tegra_i2c_remove(struct platform_device *pdev)
return 0;
}
#ifdef CONFIG_PM
#ifdef CONFIG_PM_SLEEP
static int tegra_i2c_suspend(struct device *dev)
{
struct tegra_i2c_dev *i2c_dev = dev_get_drvdata(dev);


@ -107,8 +107,6 @@ config MII
or internal device. It is safe to say Y or M here even if your
ethernet card lacks MII.
source "drivers/ieee802154/Kconfig"
config IFB
tristate "Intermediate Functional Block support"
depends on NET_CLS_ACT
@ -290,6 +288,8 @@ source "drivers/net/wimax/Kconfig"
source "drivers/net/wan/Kconfig"
source "drivers/net/ieee802154/Kconfig"
config XEN_NETDEV_FRONTEND
tristate "Xen network device frontend driver"
depends on XEN


@ -53,6 +53,7 @@ obj-$(CONFIG_SUNGEM_PHY) += sungem_phy.o
obj-$(CONFIG_WAN) += wan/
obj-$(CONFIG_WLAN) += wireless/
obj-$(CONFIG_WIMAX) += wimax/
obj-$(CONFIG_IEEE802154) += ieee802154/
obj-$(CONFIG_VMXNET3) += vmxnet3/
obj-$(CONFIG_XEN_NETDEV_FRONTEND) += xen-netfront.o


@ -2811,12 +2811,13 @@ void bond_loadbalance_arp_mon(struct work_struct *work)
arp_work.work);
struct slave *slave, *oldcurrent;
int do_failover = 0;
int delta_in_ticks;
int delta_in_ticks, extra_ticks;
int i;
read_lock(&bond->lock);
delta_in_ticks = msecs_to_jiffies(bond->params.arp_interval);
extra_ticks = delta_in_ticks / 2;
if (bond->slave_cnt == 0)
goto re_arm;
@ -2839,10 +2840,10 @@ void bond_loadbalance_arp_mon(struct work_struct *work)
if (slave->link != BOND_LINK_UP) {
if (time_in_range(jiffies,
trans_start - delta_in_ticks,
trans_start + delta_in_ticks) &&
trans_start + delta_in_ticks + extra_ticks) &&
time_in_range(jiffies,
slave->dev->last_rx - delta_in_ticks,
slave->dev->last_rx + delta_in_ticks)) {
slave->dev->last_rx + delta_in_ticks + extra_ticks)) {
slave->link = BOND_LINK_UP;
bond_set_active_slave(slave);
@ -2872,10 +2873,10 @@ void bond_loadbalance_arp_mon(struct work_struct *work)
*/
if (!time_in_range(jiffies,
trans_start - delta_in_ticks,
trans_start + 2 * delta_in_ticks) ||
trans_start + 2 * delta_in_ticks + extra_ticks) ||
!time_in_range(jiffies,
slave->dev->last_rx - delta_in_ticks,
slave->dev->last_rx + 2 * delta_in_ticks)) {
slave->dev->last_rx + 2 * delta_in_ticks + extra_ticks)) {
slave->link = BOND_LINK_DOWN;
bond_set_backup_slave(slave);
@ -2933,6 +2934,14 @@ static int bond_ab_arp_inspect(struct bonding *bond, int delta_in_ticks)
struct slave *slave;
int i, commit = 0;
unsigned long trans_start;
int extra_ticks;
/* All the time comparisons below need some extra time. Otherwise, on
* fast networks the ARP probe/reply may arrive within the same jiffy
* as it was sent. Then, the next time the ARP monitor is run, one
* arp_interval will already have passed in the comparisons.
*/
extra_ticks = delta_in_ticks / 2;
bond_for_each_slave(bond, slave, i) {
slave->new_link = BOND_LINK_NOCHANGE;
@ -2940,7 +2949,7 @@ static int bond_ab_arp_inspect(struct bonding *bond, int delta_in_ticks)
if (slave->link != BOND_LINK_UP) {
if (time_in_range(jiffies,
slave_last_rx(bond, slave) - delta_in_ticks,
slave_last_rx(bond, slave) + delta_in_ticks)) {
slave_last_rx(bond, slave) + delta_in_ticks + extra_ticks)) {
slave->new_link = BOND_LINK_UP;
commit++;
@ -2956,7 +2965,7 @@ static int bond_ab_arp_inspect(struct bonding *bond, int delta_in_ticks)
*/
if (time_in_range(jiffies,
slave->jiffies - delta_in_ticks,
slave->jiffies + 2 * delta_in_ticks))
slave->jiffies + 2 * delta_in_ticks + extra_ticks))
continue;
/*
@ -2976,7 +2985,7 @@ static int bond_ab_arp_inspect(struct bonding *bond, int delta_in_ticks)
!bond->current_arp_slave &&
!time_in_range(jiffies,
slave_last_rx(bond, slave) - delta_in_ticks,
slave_last_rx(bond, slave) + 3 * delta_in_ticks)) {
slave_last_rx(bond, slave) + 3 * delta_in_ticks + extra_ticks)) {
slave->new_link = BOND_LINK_DOWN;
commit++;
@ -2992,10 +3001,10 @@ static int bond_ab_arp_inspect(struct bonding *bond, int delta_in_ticks)
if (bond_is_active_slave(slave) &&
(!time_in_range(jiffies,
trans_start - delta_in_ticks,
trans_start + 2 * delta_in_ticks) ||
trans_start + 2 * delta_in_ticks + extra_ticks) ||
!time_in_range(jiffies,
slave_last_rx(bond, slave) - delta_in_ticks,
slave_last_rx(bond, slave) + 2 * delta_in_ticks))) {
slave_last_rx(bond, slave) + 2 * delta_in_ticks + extra_ticks))) {
slave->new_link = BOND_LINK_DOWN;
commit++;
@ -3027,7 +3036,7 @@ static void bond_ab_arp_commit(struct bonding *bond, int delta_in_ticks)
if ((!bond->curr_active_slave &&
time_in_range(jiffies,
trans_start - delta_in_ticks,
trans_start + delta_in_ticks)) ||
trans_start + delta_in_ticks + delta_in_ticks / 2)) ||
bond->curr_active_slave != slave) {
slave->link = BOND_LINK_UP;
if (bond->current_arp_slave) {
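To see the race the extra_ticks comment above describes, a small
standalone sketch with hypothetical tick values — the bounds mirror the
time_in_range() checks, minus the wrap-safe jiffies comparisons:

#include <stdio.h>

int main(void)
{
	/* arp_interval worth 100 ticks: a reply that arrived in the same
	 * jiffy as the probe is a full interval old (plus a tick of
	 * scheduling jitter) when the monitor next runs. */
	unsigned long delta = 100, extra = delta / 2;
	unsigned long now = 1001, last_rx = 900;

	int strict = (now >= last_rx - delta) && (now <= last_rx + delta);
	int slack  = (now >= last_rx - delta) && (now <= last_rx + delta + extra);

	printf("strict window: %d, with extra_ticks: %d\n", strict, slack);
	return 0;	/* prints: strict window: 0, with extra_ticks: 1 */
}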


@ -109,7 +109,9 @@ static int sp_probe(struct platform_device *pdev)
priv = netdev_priv(dev);
dev->irq = res_irq->start;
priv->irq_flags = res_irq->flags & (IRQF_TRIGGER_MASK | IRQF_SHARED);
priv->irq_flags = res_irq->flags & IRQF_TRIGGER_MASK;
if (res_irq->flags & IORESOURCE_IRQ_SHAREABLE)
priv->irq_flags |= IRQF_SHARED;
priv->reg_base = addr;
/* The CAN clock frequency is half the oscillator clock frequency */
priv->can.clock.freq = pdata->osc_freq / 2;


@ -150,7 +150,7 @@ int softing_load_fw(const char *file, struct softing *card,
const uint8_t *mem, *end, *dat;
uint16_t type, len;
uint32_t addr;
uint8_t *buf = NULL;
uint8_t *buf = NULL, *new_buf;
int buflen = 0;
int8_t type_end = 0;
@ -199,11 +199,12 @@ int softing_load_fw(const char *file, struct softing *card,
if (len > buflen) {
/* align buflen */
buflen = (len + (1024-1)) & ~(1024-1);
buf = krealloc(buf, buflen, GFP_KERNEL);
if (!buf) {
new_buf = krealloc(buf, buflen, GFP_KERNEL);
if (!new_buf) {
ret = -ENOMEM;
goto failed;
}
buf = new_buf;
}
/* verify record data */
memcpy_fromio(buf, &dpram[addr + offset], len);


@ -1708,9 +1708,6 @@ struct bnx2x_func_init_params {
continue; \
else
#define for_each_napi_rx_queue(bp, var) \
for ((var) = 0; (var) < bp->num_napi_queues; (var)++)
/* Skip OOO FP */
#define for_each_tx_queue(bp, var) \
for ((var) = 0; (var) < BNX2X_NUM_QUEUES(bp); (var)++) \


@ -2046,6 +2046,8 @@ int bnx2x_nic_load(struct bnx2x *bp, int load_mode)
*/
bnx2x_setup_tc(bp->dev, bp->max_cos);
/* Add all NAPI objects */
bnx2x_add_all_napi(bp);
bnx2x_napi_enable(bp);
/* set pf load just before approaching the MCP */
@ -2408,6 +2410,8 @@ int bnx2x_nic_unload(struct bnx2x *bp, int unload_mode)
/* Disable HW interrupts, NAPI */
bnx2x_netif_stop(bp, 1);
/* Delete all NAPI objects */
bnx2x_del_all_napi(bp);
/* Release IRQs */
bnx2x_free_irq(bp);


@ -792,7 +792,7 @@ static inline void bnx2x_add_all_napi(struct bnx2x *bp)
bp->num_napi_queues = bp->num_queues;
/* Add NAPI objects */
for_each_napi_rx_queue(bp, i)
for_each_rx_queue(bp, i)
netif_napi_add(bp->dev, &bnx2x_fp(bp, i, napi),
bnx2x_poll, BNX2X_NAPI_WEIGHT);
}
@ -801,7 +801,7 @@ static inline void bnx2x_del_all_napi(struct bnx2x *bp)
{
int i;
for_each_napi_rx_queue(bp, i)
for_each_rx_queue(bp, i)
netif_napi_del(&bnx2x_fp(bp, i, napi));
}


@ -2888,11 +2888,9 @@ static void bnx2x_get_channels(struct net_device *dev,
*/
static void bnx2x_change_num_queues(struct bnx2x *bp, int num_rss)
{
bnx2x_del_all_napi(bp);
bnx2x_disable_msi(bp);
BNX2X_NUM_QUEUES(bp) = num_rss + NON_ETH_CONTEXT_USE;
bnx2x_set_int_mode(bp);
bnx2x_add_all_napi(bp);
}
/**


@ -8427,6 +8427,8 @@ void bnx2x_chip_cleanup(struct bnx2x *bp, int unload_mode)
/* Disable HW interrupts, NAPI */
bnx2x_netif_stop(bp, 1);
/* Delete all NAPI objects */
bnx2x_del_all_napi(bp);
/* Release IRQs */
bnx2x_free_irq(bp);
@ -11229,10 +11231,12 @@ static int bnx2x_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
static void poll_bnx2x(struct net_device *dev)
{
struct bnx2x *bp = netdev_priv(dev);
int i;
disable_irq(bp->pdev->irq);
bnx2x_interrupt(bp->pdev->irq, dev);
enable_irq(bp->pdev->irq);
for_each_eth_queue(bp, i) {
struct bnx2x_fastpath *fp = &bp->fp[i];
napi_schedule(&bnx2x_fp(bp, fp->index, napi));
}
}
#endif
@ -11899,9 +11903,6 @@ static int __devinit bnx2x_init_one(struct pci_dev *pdev,
*/
bnx2x_set_int_mode(bp);
/* Add all NAPI objects */
bnx2x_add_all_napi(bp);
rc = register_netdev(dev);
if (rc) {
dev_err(&pdev->dev, "Cannot register net device\n");
@ -11976,9 +11977,6 @@ static void __devexit bnx2x_remove_one(struct pci_dev *pdev)
unregister_netdev(dev);
/* Delete all NAPI objects */
bnx2x_del_all_napi(bp);
/* Power on: we can't let PCI layer write to us while we are in D3 */
bnx2x_set_power_state(bp, PCI_D0);
@ -12025,6 +12023,8 @@ static int bnx2x_eeh_nic_unload(struct bnx2x *bp)
bnx2x_tx_disable(bp);
bnx2x_netif_stop(bp, 0);
/* Delete all NAPI objects */
bnx2x_del_all_napi(bp);
del_timer_sync(&bp->timer);


@ -110,6 +110,7 @@ static inline char *nic_name(struct pci_dev *pdev)
#define MAX_RX_POST BE_NAPI_WEIGHT /* Frags posted at a time */
#define RX_FRAGS_REFILL_WM (RX_Q_LEN - MAX_RX_POST)
#define MAX_VFS 30 /* Max VFs supported by BE3 FW */
#define FW_VER_LEN 32
struct be_dma_mem {

View File

@ -120,7 +120,7 @@ static int be_mcc_compl_process(struct be_adapter *adapter,
if (compl_status == MCC_STATUS_UNAUTHORIZED_REQUEST) {
dev_warn(&adapter->pdev->dev,
"opcode %d-%d is not permitted\n",
"VF is not privileged to issue opcode %d-%d\n",
opcode, subsystem);
} else {
extd_status = (compl->status >> CQE_STATUS_EXTD_SHIFT) &
@ -259,7 +259,7 @@ int be_process_mcc(struct be_adapter *adapter)
int num = 0, status = 0;
struct be_mcc_obj *mcc_obj = &adapter->mcc_obj;
spin_lock_bh(&adapter->mcc_cq_lock);
spin_lock(&adapter->mcc_cq_lock);
while ((compl = be_mcc_compl_get(adapter))) {
if (compl->flags & CQE_FLAGS_ASYNC_MASK) {
/* Interpret flags as an async trailer */
@ -280,7 +280,7 @@ int be_process_mcc(struct be_adapter *adapter)
if (num)
be_cq_notify(adapter, mcc_obj->cq.id, mcc_obj->rearm_cq, num);
spin_unlock_bh(&adapter->mcc_cq_lock);
spin_unlock(&adapter->mcc_cq_lock);
return status;
}
@ -295,7 +295,9 @@ static int be_mcc_wait_compl(struct be_adapter *adapter)
if (be_error(adapter))
return -EIO;
local_bh_disable();
status = be_process_mcc(adapter);
local_bh_enable();
if (atomic_read(&mcc_obj->q.used) == 0)
break;
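These benet hunks push bottom-half protection out to the callers: be_process_mcc() now takes the plain spinlock, and both call sites (the wait loop above and the worker further down) bracket it with local_bh_disable()/local_bh_enable(). A minimal sketch of that convention, with invented names:

#include <linux/interrupt.h>
#include <linux/spinlock.h>

static DEFINE_SPINLOCK(cq_lock);

/* Must be called with bottom halves already disabled. */
static int process_completions(void)
{
	int num = 0;

	spin_lock(&cq_lock);
	/* ... reap completion queue entries, counting them in 'num' ... */
	spin_unlock(&cq_lock);
	return num;
}

static void process_from_task_context(void)
{
	local_bh_disable();	/* callers now provide BH protection */
	process_completions();
	local_bh_enable();
}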

View File

@ -2176,8 +2176,7 @@ static uint be_num_rss_want(struct be_adapter *adapter)
{
u32 num = 0;
if ((adapter->function_caps & BE_FUNCTION_CAPS_RSS) &&
!sriov_want(adapter) && be_physfn(adapter) &&
!be_is_mc(adapter)) {
!sriov_want(adapter) && be_physfn(adapter)) {
num = (adapter->be3_native) ? BE3_MAX_RSS_QS : BE2_MAX_RSS_QS;
num = min_t(u32, num, (u32)netif_get_num_default_rss_queues());
}
@ -2646,8 +2645,8 @@ static int be_vf_setup(struct be_adapter *adapter)
}
for_all_vfs(adapter, vf_cfg, vf) {
status = be_cmd_link_status_query(adapter, NULL, &lnk_speed,
NULL, vf + 1);
lnk_speed = 1000;
status = be_cmd_set_qos(adapter, lnk_speed, vf + 1);
if (status)
goto err;
vf_cfg->tx_rate = lnk_speed * 10;
@ -2724,6 +2723,8 @@ static int be_get_config(struct be_adapter *adapter)
if (pos) {
pci_read_config_word(adapter->pdev, pos + PCI_SRIOV_TOTAL_VF,
&dev_num_vfs);
if (!lancer_chip(adapter))
dev_num_vfs = min_t(u16, dev_num_vfs, MAX_VFS);
adapter->dev_num_vfs = dev_num_vfs;
}
return 0;
@ -3437,6 +3438,7 @@ static void be_ctrl_cleanup(struct be_adapter *adapter)
if (mem->va)
dma_free_coherent(&adapter->pdev->dev, mem->size, mem->va,
mem->dma);
kfree(adapter->pmac_id);
}
static int be_ctrl_init(struct be_adapter *adapter)
@ -3473,6 +3475,12 @@ static int be_ctrl_init(struct be_adapter *adapter)
}
memset(rx_filter->va, 0, rx_filter->size);
/* primary mac needs 1 pmac entry */
adapter->pmac_id = kcalloc(adapter->max_pmac_cnt + 1,
sizeof(*adapter->pmac_id), GFP_KERNEL);
if (!adapter->pmac_id)
return -ENOMEM;
mutex_init(&adapter->mbox_lock);
spin_lock_init(&adapter->mcc_lock);
spin_lock_init(&adapter->mcc_cq_lock);
@ -3609,12 +3617,6 @@ static int be_get_initial_config(struct be_adapter *adapter)
else
adapter->max_pmac_cnt = BE_VF_UC_PMAC_COUNT;
/* primary mac needs 1 pmac entry */
adapter->pmac_id = kcalloc(adapter->max_pmac_cnt + 1,
sizeof(u32), GFP_KERNEL);
if (!adapter->pmac_id)
return -ENOMEM;
status = be_cmd_get_cntl_attributes(adapter);
if (status)
return status;
@ -3763,7 +3765,9 @@ static void be_worker(struct work_struct *work)
/* when interrupts are not yet enabled, just reap any pending
* mcc completions */
if (!netif_running(adapter->netdev)) {
local_bh_disable();
be_process_mcc(adapter);
local_bh_enable();
goto reschedule;
}

View File

@ -62,6 +62,13 @@ config FSL_PQ_MDIO
---help---
This driver supports the MDIO bus used by the gianfar and UCC drivers.
config FSL_XGMAC_MDIO
tristate "Freescale XGMAC MDIO"
depends on FSL_SOC
select PHYLIB
---help---
This driver supports the MDIO bus on the Fman 10G Ethernet MACs.
config UCC_GETH
tristate "Freescale QE Gigabit Ethernet"
depends on QUICC_ENGINE

View File

@ -9,6 +9,7 @@ ifeq ($(CONFIG_FEC_MPC52xx_MDIO),y)
endif
obj-$(CONFIG_FS_ENET) += fs_enet/
obj-$(CONFIG_FSL_PQ_MDIO) += fsl_pq_mdio.o
obj-$(CONFIG_FSL_XGMAC_MDIO) += xgmac_mdio.o
obj-$(CONFIG_GIANFAR) += gianfar_driver.o
obj-$(CONFIG_PTP_1588_CLOCK_GIANFAR) += gianfar_ptp.o
gianfar_driver-objs := gianfar.o \

View File

@ -19,54 +19,90 @@
#include <linux/kernel.h>
#include <linux/string.h>
#include <linux/errno.h>
#include <linux/unistd.h>
#include <linux/slab.h>
#include <linux/interrupt.h>
#include <linux/init.h>
#include <linux/delay.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/skbuff.h>
#include <linux/spinlock.h>
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/crc32.h>
#include <linux/mii.h>
#include <linux/phy.h>
#include <linux/of.h>
#include <linux/of_address.h>
#include <linux/of_mdio.h>
#include <linux/of_platform.h>
#include <linux/of_device.h>
#include <asm/io.h>
#include <asm/irq.h>
#include <asm/uaccess.h>
#include <asm/ucc.h>
#include <asm/ucc.h> /* for ucc_set_qe_mux_mii_mng() */
#include "gianfar.h"
#include "fsl_pq_mdio.h"
#define MIIMIND_BUSY 0x00000001
#define MIIMIND_NOTVALID 0x00000004
#define MIIMCFG_INIT_VALUE 0x00000007
#define MIIMCFG_RESET 0x80000000
#define MII_READ_COMMAND 0x00000001
struct fsl_pq_mii {
u32 miimcfg; /* MII management configuration reg */
u32 miimcom; /* MII management command reg */
u32 miimadd; /* MII management address reg */
u32 miimcon; /* MII management control reg */
u32 miimstat; /* MII management status reg */
u32 miimind; /* MII management indication reg */
};
struct fsl_pq_mdio {
u8 res1[16];
u32 ieventm; /* MDIO Interrupt event register (for etsec2)*/
u32 imaskm; /* MDIO Interrupt mask register (for etsec2)*/
u8 res2[4];
u32 emapm; /* MDIO Event mapping register (for etsec2)*/
u8 res3[1280];
struct fsl_pq_mii mii;
u8 res4[28];
u32 utbipar; /* TBI phy address reg (only on UCC) */
u8 res5[2728];
} __packed;
/* Number of microseconds to wait for an MII register to respond */
#define MII_TIMEOUT 1000
struct fsl_pq_mdio_priv {
void __iomem *map;
struct fsl_pq_mdio __iomem *regs;
struct fsl_pq_mii __iomem *regs;
int irqs[PHY_MAX_ADDR];
};
/*
* Write value to the PHY at mii_id at register regnum,
* on the bus attached to the local interface, which may be different from the
* generic mdio bus (tied to a single interface), waiting until the write is
* done before returning. This is helpful in programming interfaces like
* the TBI which control interfaces like onchip SERDES and are always tied to
* the local mdio pins, which may not be the same as system mdio bus, used for
* Per-device-type data. Each type of device tree node that we support gets
* one of these.
*
* @mii_offset: the offset of the MII registers within the memory map of the
* node. Some nodes define only the MII registers, and some define the whole
* MAC (which includes the MII registers).
*
* @get_tbipa: determines the address of the TBIPA register
*
* @ucc_configure: a special function for extra QE configuration
*/
struct fsl_pq_mdio_data {
unsigned int mii_offset; /* offset of the MII registers */
uint32_t __iomem * (*get_tbipa)(void __iomem *p);
void (*ucc_configure)(phys_addr_t start, phys_addr_t end);
};
/*
* Write value to the PHY at mii_id at register regnum, on the bus attached
* to the local interface, which may be different from the generic mdio bus
* (tied to a single interface), waiting until the write is done before
* returning. This is helpful in programming interfaces like the TBI which
* control interfaces like onchip SERDES and are always tied to the local
* mdio pins, which may not be the same as system mdio bus, used for
* controlling the external PHYs, for example.
*/
int fsl_pq_local_mdio_write(struct fsl_pq_mdio __iomem *regs, int mii_id,
int regnum, u16 value)
static int fsl_pq_mdio_write(struct mii_bus *bus, int mii_id, int regnum,
u16 value)
{
struct fsl_pq_mdio_priv *priv = bus->priv;
struct fsl_pq_mii __iomem *regs = priv->regs;
u32 status;
/* Set the PHY address and the register address we want to write */
@ -83,20 +119,21 @@ int fsl_pq_local_mdio_write(struct fsl_pq_mdio __iomem *regs, int mii_id,
}
/*
* Read the bus for PHY at addr mii_id, register regnum, and
* return the value. Clears miimcom first. All PHY operation
* done on the bus attached to the local interface,
* which may be different from the generic mdio bus
* This is helpful in programming interfaces like
* the TBI which, in turn, control interfaces like onchip SERDES
* and are always tied to the local mdio pins, which may not be the
* Read the bus for PHY at addr mii_id, register regnum, and return the value.
* Clears miimcom first.
*
* All PHY operation done on the bus attached to the local interface, which
* may be different from the generic mdio bus. This is helpful in programming
* interfaces like the TBI which, in turn, control interfaces like on-chip
* SERDES and are always tied to the local mdio pins, which may not be the
* same as the system mdio bus, used for controlling the external PHYs, for example.
*/
int fsl_pq_local_mdio_read(struct fsl_pq_mdio __iomem *regs,
int mii_id, int regnum)
static int fsl_pq_mdio_read(struct mii_bus *bus, int mii_id, int regnum)
{
u16 value;
struct fsl_pq_mdio_priv *priv = bus->priv;
struct fsl_pq_mii __iomem *regs = priv->regs;
u32 status;
u16 value;
/* Set the PHY address and the register address we want to read */
out_be32(&regs->miimadd, (mii_id << 8) | regnum);
@ -115,44 +152,15 @@ int fsl_pq_local_mdio_read(struct fsl_pq_mdio __iomem *regs,
/* Grab the value of the register from miimstat */
value = in_be32(&regs->miimstat);
dev_dbg(&bus->dev, "read %04x from address %x/%x\n", value, mii_id, regnum);
return value;
}
static struct fsl_pq_mdio __iomem *fsl_pq_mdio_get_regs(struct mii_bus *bus)
{
struct fsl_pq_mdio_priv *priv = bus->priv;
return priv->regs;
}
/*
* Write value to the PHY at mii_id at register regnum,
* on the bus, waiting until the write is done before returning.
*/
int fsl_pq_mdio_write(struct mii_bus *bus, int mii_id, int regnum, u16 value)
{
struct fsl_pq_mdio __iomem *regs = fsl_pq_mdio_get_regs(bus);
/* Write to the local MII regs */
return fsl_pq_local_mdio_write(regs, mii_id, regnum, value);
}
/*
* Read the bus for PHY at addr mii_id, register regnum, and
* return the value. Clears miimcom first.
*/
int fsl_pq_mdio_read(struct mii_bus *bus, int mii_id, int regnum)
{
struct fsl_pq_mdio __iomem *regs = fsl_pq_mdio_get_regs(bus);
/* Read the local MII regs */
return fsl_pq_local_mdio_read(regs, mii_id, regnum);
}
/* Reset the MIIM registers, and wait for the bus to free */
static int fsl_pq_mdio_reset(struct mii_bus *bus)
{
struct fsl_pq_mdio __iomem *regs = fsl_pq_mdio_get_regs(bus);
struct fsl_pq_mdio_priv *priv = bus->priv;
struct fsl_pq_mii __iomem *regs = priv->regs;
u32 status;
mutex_lock(&bus->mdio_lock);
@ -170,234 +178,291 @@ static int fsl_pq_mdio_reset(struct mii_bus *bus)
mutex_unlock(&bus->mdio_lock);
if (!status) {
printk(KERN_ERR "%s: The MII Bus is stuck!\n",
bus->name);
dev_err(&bus->dev, "timeout waiting for MII bus\n");
return -EBUSY;
}
return 0;
}
void fsl_pq_mdio_bus_name(char *name, struct device_node *np)
{
const u32 *addr;
u64 taddr = OF_BAD_ADDR;
addr = of_get_address(np, 0, NULL, NULL);
if (addr)
taddr = of_translate_address(np, addr);
snprintf(name, MII_BUS_ID_SIZE, "%s@%llx", np->name,
(unsigned long long)taddr);
}
EXPORT_SYMBOL_GPL(fsl_pq_mdio_bus_name);
static u32 __iomem *get_gfar_tbipa(struct fsl_pq_mdio __iomem *regs, struct device_node *np)
{
#if defined(CONFIG_GIANFAR) || defined(CONFIG_GIANFAR_MODULE)
struct gfar __iomem *enet_regs;
/*
* This is mildly evil, but so is our hardware for doing this.
* Also, we have to cast back to struct gfar because of
* definition weirdness done in gianfar.h.
*/
static uint32_t __iomem *get_gfar_tbipa(void __iomem *p)
{
struct gfar __iomem *enet_regs = p;
/*
* This is mildly evil, but so is our hardware for doing this.
* Also, we have to cast back to struct gfar because of
* definition weirdness done in gianfar.h.
*/
if(of_device_is_compatible(np, "fsl,gianfar-mdio") ||
of_device_is_compatible(np, "fsl,gianfar-tbi") ||
of_device_is_compatible(np, "gianfar")) {
enet_regs = (struct gfar __iomem *)regs;
return &enet_regs->tbipa;
} else if (of_device_is_compatible(np, "fsl,etsec2-mdio") ||
of_device_is_compatible(np, "fsl,etsec2-tbi")) {
return of_iomap(np, 1);
}
#endif
return NULL;
return &enet_regs->tbipa;
}
static int get_ucc_id_for_range(u64 start, u64 end, u32 *ucc_id)
/*
* Return the TBIPAR address for an eTSEC2 node
*/
static uint32_t __iomem *get_etsec_tbipa(void __iomem *p)
{
return p;
}
#endif
#if defined(CONFIG_UCC_GETH) || defined(CONFIG_UCC_GETH_MODULE)
/*
* Return the TBIPAR address for a QE MDIO node
*/
static uint32_t __iomem *get_ucc_tbipa(void __iomem *p)
{
struct fsl_pq_mdio __iomem *mdio = p;
return &mdio->utbipar;
}
/*
* Find the UCC node that controls the given MDIO node
*
* For some reason, the QE MDIO nodes are not children of the UCC devices
* that control them. Therefore, we need to scan all UCC nodes looking for
* the one that encompasses the given MDIO node. We do this by comparing
* physical addresses. The 'start' and 'end' addresses of the MDIO node are
* passed, and the correct UCC node will cover the entire address range.
*
* This assumes that there is only one QE MDIO node in the entire device tree.
*/
static void ucc_configure(phys_addr_t start, phys_addr_t end)
{
static bool found_mii_master;
struct device_node *np = NULL;
int err = 0;
if (found_mii_master)
return;
for_each_compatible_node(np, NULL, "ucc_geth") {
struct resource tempres;
struct resource res;
const uint32_t *iprop;
uint32_t id;
int ret;
err = of_address_to_resource(np, 0, &tempres);
if (err)
ret = of_address_to_resource(np, 0, &res);
if (ret < 0) {
pr_debug("fsl-pq-mdio: no address range in node %s\n",
np->full_name);
continue;
}
/* if our mdio regs fall within this UCC regs range */
if ((start >= tempres.start) && (end <= tempres.end)) {
/* Find the id of the UCC */
const u32 *id;
if ((start < res.start) || (end > res.end))
continue;
id = of_get_property(np, "cell-index", NULL);
if (!id) {
id = of_get_property(np, "device-id", NULL);
if (!id)
continue;
iprop = of_get_property(np, "cell-index", NULL);
if (!iprop) {
iprop = of_get_property(np, "device-id", NULL);
if (!iprop) {
pr_debug("fsl-pq-mdio: no UCC ID in node %s\n",
np->full_name);
continue;
}
*ucc_id = *id;
return 0;
}
}
if (err)
return err;
else
return -EINVAL;
#else
return -ENODEV;
#endif
id = be32_to_cpup(iprop);
/*
* cell-index and device-id for QE nodes are
* numbered from 1, not 0.
*/
if (ucc_set_qe_mux_mii_mng(id - 1) < 0) {
pr_debug("fsl-pq-mdio: invalid UCC ID in node %s\n",
np->full_name);
continue;
}
pr_debug("fsl-pq-mdio: setting node UCC%u to MII master\n", id);
found_mii_master = true;
}
}
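Since the lookup relies purely on physical address containment, the core test reduces to a range check against each candidate node's first 'reg' resource; a hedged restatement with an invented helper name:

#include <linux/ioport.h>
#include <linux/of.h>
#include <linux/of_address.h>

/* Return true if [start, end] lies inside the first 'reg' range of np.
 * Illustrative only; the driver open-codes this inside its scan loop.
 */
static bool node_covers_range(struct device_node *np,
			      phys_addr_t start, phys_addr_t end)
{
	struct resource res;

	if (of_address_to_resource(np, 0, &res) < 0)
		return false;

	return start >= res.start && end <= res.end;
}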
static int fsl_pq_mdio_probe(struct platform_device *ofdev)
#endif
static struct of_device_id fsl_pq_mdio_match[] = {
#if defined(CONFIG_GIANFAR) || defined(CONFIG_GIANFAR_MODULE)
{
.compatible = "fsl,gianfar-tbi",
.data = &(struct fsl_pq_mdio_data) {
.mii_offset = 0,
.get_tbipa = get_gfar_tbipa,
},
},
{
.compatible = "fsl,gianfar-mdio",
.data = &(struct fsl_pq_mdio_data) {
.mii_offset = 0,
.get_tbipa = get_gfar_tbipa,
},
},
{
.type = "mdio",
.compatible = "gianfar",
.data = &(struct fsl_pq_mdio_data) {
.mii_offset = offsetof(struct fsl_pq_mdio, mii),
.get_tbipa = get_gfar_tbipa,
},
},
{
.compatible = "fsl,etsec2-tbi",
.data = &(struct fsl_pq_mdio_data) {
.mii_offset = offsetof(struct fsl_pq_mdio, mii),
.get_tbipa = get_etsec_tbipa,
},
},
{
.compatible = "fsl,etsec2-mdio",
.data = &(struct fsl_pq_mdio_data) {
.mii_offset = offsetof(struct fsl_pq_mdio, mii),
.get_tbipa = get_etsec_tbipa,
},
},
#endif
#if defined(CONFIG_UCC_GETH) || defined(CONFIG_UCC_GETH_MODULE)
{
.compatible = "fsl,ucc-mdio",
.data = &(struct fsl_pq_mdio_data) {
.mii_offset = 0,
.get_tbipa = get_ucc_tbipa,
.ucc_configure = ucc_configure,
},
},
{
/* Legacy UCC MDIO node */
.type = "mdio",
.compatible = "ucc_geth_phy",
.data = &(struct fsl_pq_mdio_data) {
.mii_offset = 0,
.get_tbipa = get_ucc_tbipa,
.ucc_configure = ucc_configure,
},
},
#endif
/* No Kconfig option for Fman support yet */
{
.compatible = "fsl,fman-mdio",
.data = &(struct fsl_pq_mdio_data) {
.mii_offset = 0,
/* Fman TBI operations are handled elsewhere */
},
},
{},
};
MODULE_DEVICE_TABLE(of, fsl_pq_mdio_match);
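The table above is the standard per-compatible driver-data pattern: probe() recovers the matched entry with of_match_device() and reads variant-specific offsets and callbacks from .data, instead of chaining of_device_is_compatible() tests. A minimal sketch with invented names:

#include <linux/of_device.h>
#include <linux/platform_device.h>

struct my_variant_data {
	unsigned int reg_offset;	/* variant-specific register layout */
};

static const struct of_device_id my_match[] = {
	{ .compatible = "vendor,variant-a",
	  .data = &(struct my_variant_data){ .reg_offset = 0 } },
	{ .compatible = "vendor,variant-b",
	  .data = &(struct my_variant_data){ .reg_offset = 0x100 } },
	{},
};

static int my_probe(struct platform_device *pdev)
{
	const struct of_device_id *id =
		of_match_device(my_match, &pdev->dev);
	const struct my_variant_data *data;

	if (!id)
		return -ENODEV;
	data = id->data;
	/* data->reg_offset now selects the variant-specific behaviour */
	return 0;
}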
static int fsl_pq_mdio_probe(struct platform_device *pdev)
{
struct device_node *np = ofdev->dev.of_node;
const struct of_device_id *id =
of_match_device(fsl_pq_mdio_match, &pdev->dev);
const struct fsl_pq_mdio_data *data = id->data;
struct device_node *np = pdev->dev.of_node;
struct resource res;
struct device_node *tbi;
struct fsl_pq_mdio_priv *priv;
struct fsl_pq_mdio __iomem *regs = NULL;
void __iomem *map;
u32 __iomem *tbipa;
struct mii_bus *new_bus;
int tbiaddr = -1;
const u32 *addrp;
u64 addr = 0, size = 0;
int err;
priv = kzalloc(sizeof(*priv), GFP_KERNEL);
if (!priv)
dev_dbg(&pdev->dev, "found %s compatible node\n", id->compatible);
new_bus = mdiobus_alloc_size(sizeof(*priv));
if (!new_bus)
return -ENOMEM;
new_bus = mdiobus_alloc();
if (!new_bus) {
err = -ENOMEM;
goto err_free_priv;
}
priv = new_bus->priv;
new_bus->name = "Freescale PowerQUICC MII Bus",
new_bus->read = &fsl_pq_mdio_read,
new_bus->write = &fsl_pq_mdio_write,
new_bus->reset = &fsl_pq_mdio_reset,
new_bus->priv = priv;
fsl_pq_mdio_bus_name(new_bus->id, np);
new_bus->read = &fsl_pq_mdio_read;
new_bus->write = &fsl_pq_mdio_write;
new_bus->reset = &fsl_pq_mdio_reset;
new_bus->irq = priv->irqs;
addrp = of_get_address(np, 0, &size, NULL);
if (!addrp) {
err = -EINVAL;
goto err_free_bus;
err = of_address_to_resource(np, 0, &res);
if (err < 0) {
dev_err(&pdev->dev, "could not obtain address information\n");
goto error;
}
/* Set the PHY base address */
addr = of_translate_address(np, addrp);
if (addr == OF_BAD_ADDR) {
err = -EINVAL;
goto err_free_bus;
}
snprintf(new_bus->id, MII_BUS_ID_SIZE, "%s@%llx", np->name,
(unsigned long long)res.start);
map = ioremap(addr, size);
if (!map) {
priv->map = of_iomap(np, 0);
if (!priv->map) {
err = -ENOMEM;
goto err_free_bus;
}
priv->map = map;
if (of_device_is_compatible(np, "fsl,gianfar-mdio") ||
of_device_is_compatible(np, "fsl,gianfar-tbi") ||
of_device_is_compatible(np, "fsl,ucc-mdio") ||
of_device_is_compatible(np, "ucc_geth_phy"))
map -= offsetof(struct fsl_pq_mdio, miimcfg);
regs = map;
priv->regs = regs;
new_bus->irq = kcalloc(PHY_MAX_ADDR, sizeof(int), GFP_KERNEL);
if (NULL == new_bus->irq) {
err = -ENOMEM;
goto err_unmap_regs;
goto error;
}
new_bus->parent = &ofdev->dev;
dev_set_drvdata(&ofdev->dev, new_bus);
/*
* Some device tree nodes represent only the MII registers, and
* others represent the MAC and MII registers. The 'mii_offset' field
* contains the offset of the MII registers inside the mapped register
* space.
*/
if (data->mii_offset > resource_size(&res)) {
dev_err(&pdev->dev, "invalid register map\n");
err = -EINVAL;
goto error;
}
priv->regs = priv->map + data->mii_offset;
if (of_device_is_compatible(np, "fsl,gianfar-mdio") ||
of_device_is_compatible(np, "fsl,gianfar-tbi") ||
of_device_is_compatible(np, "fsl,etsec2-mdio") ||
of_device_is_compatible(np, "fsl,etsec2-tbi") ||
of_device_is_compatible(np, "gianfar")) {
tbipa = get_gfar_tbipa(regs, np);
if (!tbipa) {
err = -EINVAL;
goto err_free_irqs;
new_bus->parent = &pdev->dev;
dev_set_drvdata(&pdev->dev, new_bus);
if (data->get_tbipa) {
for_each_child_of_node(np, tbi) {
if (strcmp(tbi->type, "tbi-phy") == 0) {
dev_dbg(&pdev->dev, "found TBI PHY node %s\n",
strrchr(tbi->full_name, '/') + 1);
break;
}
}
} else if (of_device_is_compatible(np, "fsl,ucc-mdio") ||
of_device_is_compatible(np, "ucc_geth_phy")) {
u32 id;
static u32 mii_mng_master;
tbipa = &regs->utbipar;
if (tbi) {
const u32 *prop = of_get_property(tbi, "reg", NULL);
uint32_t __iomem *tbipa;
if ((err = get_ucc_id_for_range(addr, addr + size, &id)))
goto err_free_irqs;
if (!prop) {
dev_err(&pdev->dev,
"missing 'reg' property in node %s\n",
tbi->full_name);
err = -EBUSY;
goto error;
}
if (!mii_mng_master) {
mii_mng_master = id;
ucc_set_qe_mux_mii_mng(id - 1);
}
} else {
err = -ENODEV;
goto err_free_irqs;
}
tbipa = data->get_tbipa(priv->map);
for_each_child_of_node(np, tbi) {
if (!strncmp(tbi->type, "tbi-phy", 8))
break;
}
if (tbi) {
const u32 *prop = of_get_property(tbi, "reg", NULL);
if (prop)
tbiaddr = *prop;
if (tbiaddr == -1) {
err = -EBUSY;
goto err_free_irqs;
} else {
out_be32(tbipa, tbiaddr);
out_be32(tbipa, be32_to_cpup(prop));
}
}
if (data->ucc_configure)
data->ucc_configure(res.start, res.end);
err = of_mdiobus_register(new_bus, np);
if (err) {
printk (KERN_ERR "%s: Cannot register as MDIO bus\n",
new_bus->name);
goto err_free_irqs;
dev_err(&pdev->dev, "cannot register %s as MDIO bus\n",
new_bus->name);
goto error;
}
return 0;
err_free_irqs:
kfree(new_bus->irq);
err_unmap_regs:
iounmap(priv->map);
err_free_bus:
error:
if (priv->map)
iounmap(priv->map);
kfree(new_bus);
err_free_priv:
kfree(priv);
return err;
}
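The rewritten probe also replaces the separate kzalloc() of the private structure with mdiobus_alloc_size(), which appends it to the mii_bus allocation so a single mdiobus_free() releases both. A hedged sketch of that allocation idiom (my_mdio_priv is invented):

#include <linux/phy.h>

struct my_mdio_priv {
	void __iomem *regs;
	int irqs[PHY_MAX_ADDR];
};

static struct mii_bus *alloc_my_bus(void)
{
	struct mii_bus *bus;
	struct my_mdio_priv *priv;

	/* One allocation, one free path: bus->priv points just past the
	 * mii_bus itself, and mdiobus_free() releases the whole block.
	 */
	bus = mdiobus_alloc_size(sizeof(*priv));
	if (!bus)
		return NULL;

	priv = bus->priv;
	bus->irq = priv->irqs;	/* per-PHY IRQ table lives in priv too */
	return bus;
}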
static int fsl_pq_mdio_remove(struct platform_device *ofdev)
static int fsl_pq_mdio_remove(struct platform_device *pdev)
{
struct device *device = &ofdev->dev;
struct device *device = &pdev->dev;
struct mii_bus *bus = dev_get_drvdata(device);
struct fsl_pq_mdio_priv *priv = bus->priv;
@ -406,41 +471,11 @@ static int fsl_pq_mdio_remove(struct platform_device *ofdev)
dev_set_drvdata(device, NULL);
iounmap(priv->map);
bus->priv = NULL;
mdiobus_free(bus);
kfree(priv);
return 0;
}
static struct of_device_id fsl_pq_mdio_match[] = {
{
.type = "mdio",
.compatible = "ucc_geth_phy",
},
{
.type = "mdio",
.compatible = "gianfar",
},
{
.compatible = "fsl,ucc-mdio",
},
{
.compatible = "fsl,gianfar-tbi",
},
{
.compatible = "fsl,gianfar-mdio",
},
{
.compatible = "fsl,etsec2-tbi",
},
{
.compatible = "fsl,etsec2-mdio",
},
{},
};
MODULE_DEVICE_TABLE(of, fsl_pq_mdio_match);
static struct platform_driver fsl_pq_mdio_driver = {
.driver = {
.name = "fsl-pq_mdio",

View File

@ -1,52 +0,0 @@
/*
* Freescale PowerQUICC MDIO Driver -- MII Management Bus Implementation
* Driver for the MDIO bus controller on Freescale PowerQUICC processors
*
* Author: Andy Fleming
* Modifier: Sandeep Gopalpet
*
* Copyright 2002-2004, 2008-2009 Freescale Semiconductor, Inc.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License as published by the
* Free Software Foundation; either version 2 of the License, or (at your
* option) any later version.
*
*/
#ifndef __FSL_PQ_MDIO_H
#define __FSL_PQ_MDIO_H
#define MIIMIND_BUSY 0x00000001
#define MIIMIND_NOTVALID 0x00000004
#define MIIMCFG_INIT_VALUE 0x00000007
#define MIIMCFG_RESET 0x80000000
#define MII_READ_COMMAND 0x00000001
struct fsl_pq_mdio {
u8 res1[16];
u32 ieventm; /* MDIO Interrupt event register (for etsec2)*/
u32 imaskm; /* MDIO Interrupt mask register (for etsec2)*/
u8 res2[4];
u32 emapm; /* MDIO Event mapping register (for etsec2)*/
u8 res3[1280];
u32 miimcfg; /* MII management configuration reg */
u32 miimcom; /* MII management command reg */
u32 miimadd; /* MII management address reg */
u32 miimcon; /* MII management control reg */
u32 miimstat; /* MII management status reg */
u32 miimind; /* MII management indication reg */
u8 reserved[28]; /* Space holder */
u32 utbipar; /* TBI phy address reg (only on UCC) */
u8 res4[2728];
} __packed;
int fsl_pq_mdio_read(struct mii_bus *bus, int mii_id, int regnum);
int fsl_pq_mdio_write(struct mii_bus *bus, int mii_id, int regnum, u16 value);
int fsl_pq_local_mdio_write(struct fsl_pq_mdio __iomem *regs, int mii_id,
int regnum, u16 value);
int fsl_pq_local_mdio_read(struct fsl_pq_mdio __iomem *regs, int mii_id, int regnum);
int __init fsl_pq_mdio_init(void);
void fsl_pq_mdio_exit(void);
void fsl_pq_mdio_bus_name(char *name, struct device_node *np);
#endif /* FSL_PQ_MDIO_H */

View File

@ -100,7 +100,6 @@
#include <linux/of_net.h>
#include "gianfar.h"
#include "fsl_pq_mdio.h"
#define TX_TIMEOUT (1*HZ)
@ -1041,7 +1040,7 @@ static int gfar_probe(struct platform_device *ofdev)
if (priv->device_flags & FSL_GIANFAR_DEV_HAS_VLAN) {
dev->hw_features |= NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX;
dev->features |= NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX;
dev->features |= NETIF_F_HW_VLAN_RX;
}
if (priv->device_flags & FSL_GIANFAR_DEV_HAS_EXTENDED_HASH) {

View File

@ -42,7 +42,6 @@
#include <asm/machdep.h>
#include "ucc_geth.h"
#include "fsl_pq_mdio.h"
#undef DEBUG

View File

@ -0,0 +1,274 @@
/*
* QorIQ 10G MDIO Controller
*
* Copyright 2012 Freescale Semiconductor, Inc.
*
* Authors: Andy Fleming <afleming@freescale.com>
* Timur Tabi <timur@freescale.com>
*
* This file is licensed under the terms of the GNU General Public License
* version 2. This program is licensed "as is" without any warranty of any
* kind, whether express or implied.
*/
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/interrupt.h>
#include <linux/module.h>
#include <linux/phy.h>
#include <linux/mdio.h>
#include <linux/of_platform.h>
#include <linux/of_mdio.h>
/* Number of microseconds to wait for a register to respond */
#define TIMEOUT 1000
struct tgec_mdio_controller {
__be32 reserved[12];
__be32 mdio_stat; /* MDIO configuration and status */
__be32 mdio_ctl; /* MDIO control */
__be32 mdio_data; /* MDIO data */
__be32 mdio_addr; /* MDIO address */
} __packed;
#define MDIO_STAT_CLKDIV(x) (((x>>1) & 0xff) << 8)
#define MDIO_STAT_BSY (1 << 0)
#define MDIO_STAT_RD_ER (1 << 1)
#define MDIO_CTL_DEV_ADDR(x) (x & 0x1f)
#define MDIO_CTL_PORT_ADDR(x) ((x & 0x1f) << 5)
#define MDIO_CTL_PRE_DIS (1 << 10)
#define MDIO_CTL_SCAN_EN (1 << 11)
#define MDIO_CTL_POST_INC (1 << 14)
#define MDIO_CTL_READ (1 << 15)
#define MDIO_DATA(x) (x & 0xffff)
#define MDIO_DATA_BSY (1 << 31)
/*
* Wait until the MDIO bus is free
*/
static int xgmac_wait_until_free(struct device *dev,
struct tgec_mdio_controller __iomem *regs)
{
uint32_t status;
/* Wait till the bus is free */
status = spin_event_timeout(
!((in_be32(&regs->mdio_stat)) & MDIO_STAT_BSY), TIMEOUT, 0);
if (!status) {
dev_err(dev, "timeout waiting for bus to be free\n");
return -ETIMEDOUT;
}
return 0;
}
/*
* Wait till the MDIO read or write operation is complete
*/
static int xgmac_wait_until_done(struct device *dev,
struct tgec_mdio_controller __iomem *regs)
{
uint32_t status;
/* Wait till the MDIO write is complete */
status = spin_event_timeout(
!((in_be32(&regs->mdio_data)) & MDIO_DATA_BSY), TIMEOUT, 0);
if (!status) {
dev_err(dev, "timeout waiting for operation to complete\n");
return -ETIMEDOUT;
}
return 0;
}
/*
* Write value to the PHY for this device to the register at regnum, waiting
* until the write is done before it returns. All PHY configuration has to be
* done through the TSEC1 MIIM regs.
*/
static int xgmac_mdio_write(struct mii_bus *bus, int phy_id, int regnum, u16 value)
{
struct tgec_mdio_controller __iomem *regs = bus->priv;
uint16_t dev_addr = regnum >> 16;
int ret;
/* Setup the MII Mgmt clock speed */
out_be32(&regs->mdio_stat, MDIO_STAT_CLKDIV(100));
ret = xgmac_wait_until_free(&bus->dev, regs);
if (ret)
return ret;
/* Set the port and dev addr */
out_be32(&regs->mdio_ctl,
MDIO_CTL_PORT_ADDR(phy_id) | MDIO_CTL_DEV_ADDR(dev_addr));
/* Set the register address */
out_be32(&regs->mdio_addr, regnum & 0xffff);
ret = xgmac_wait_until_free(&bus->dev, regs);
if (ret)
return ret;
/* Write the value to the register */
out_be32(&regs->mdio_data, MDIO_DATA(value));
ret = xgmac_wait_until_done(&bus->dev, regs);
if (ret)
return ret;
return 0;
}
/*
* Reads from register regnum in the PHY for device dev, returning the value.
* Clears miimcom first. All PHY configuration has to be done through the
* TSEC1 MIIM regs.
*/
static int xgmac_mdio_read(struct mii_bus *bus, int phy_id, int regnum)
{
struct tgec_mdio_controller __iomem *regs = bus->priv;
uint16_t dev_addr = regnum >> 16;
uint32_t mdio_ctl;
uint16_t value;
int ret;
/* Setup the MII Mgmt clock speed */
out_be32(&regs->mdio_stat, MDIO_STAT_CLKDIV(100));
ret = xgmac_wait_until_free(&bus->dev, regs);
if (ret)
return ret;
/* Set the Port and Device Addrs */
mdio_ctl = MDIO_CTL_PORT_ADDR(phy_id) | MDIO_CTL_DEV_ADDR(dev_addr);
out_be32(&regs->mdio_ctl, mdio_ctl);
/* Set the register address */
out_be32(&regs->mdio_addr, regnum & 0xffff);
ret = xgmac_wait_until_free(&bus->dev, regs);
if (ret)
return ret;
/* Initiate the read */
out_be32(&regs->mdio_ctl, mdio_ctl | MDIO_CTL_READ);
ret = xgmac_wait_until_done(&bus->dev, regs);
if (ret)
return ret;
/* Return all Fs if nothing was there */
if (in_be32(&regs->mdio_stat) & MDIO_STAT_RD_ER) {
dev_err(&bus->dev, "MDIO read error\n");
return 0xffff;
}
value = in_be32(&regs->mdio_data) & 0xffff;
dev_dbg(&bus->dev, "read %04x\n", value);
return value;
}
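As the read routine above shows, this controller speaks clause 45: the MMD device address travels in the upper half of regnum and the register number in the lower half. A hedged sketch of that decomposition (decode_c45() is an invented helper; the masking assumes the generic MDIO layer's cookie layout, where MII_ADDR_C45 flags such cookies):

#include <linux/mdio.h>

/* Split a clause-45 register cookie the way xgmac_mdio_read() does;
 * masking with 0x1f keeps the 5-bit device address and drops the
 * MII_ADDR_C45 flag bit.
 */
static void decode_c45(int regnum, u16 *dev_addr, u16 *reg)
{
	*dev_addr = (regnum >> 16) & 0x1f;	/* MMD device address */
	*reg = regnum & 0xffff;			/* register within the MMD */
}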
/* Reset the MIIM registers, and wait for the bus to free */
static int xgmac_mdio_reset(struct mii_bus *bus)
{
struct tgec_mdio_controller __iomem *regs = bus->priv;
int ret;
mutex_lock(&bus->mdio_lock);
/* Setup the MII Mgmt clock speed */
out_be32(&regs->mdio_stat, MDIO_STAT_CLKDIV(100));
ret = xgmac_wait_until_free(&bus->dev, regs);
mutex_unlock(&bus->mdio_lock);
return ret;
}
static int __devinit xgmac_mdio_probe(struct platform_device *pdev)
{
struct device_node *np = pdev->dev.of_node;
struct mii_bus *bus;
struct resource res;
int ret;
ret = of_address_to_resource(np, 0, &res);
if (ret) {
dev_err(&pdev->dev, "could not obtain address\n");
return ret;
}
bus = mdiobus_alloc_size(PHY_MAX_ADDR * sizeof(int));
if (!bus)
return -ENOMEM;
bus->name = "Freescale XGMAC MDIO Bus";
bus->read = xgmac_mdio_read;
bus->write = xgmac_mdio_write;
bus->reset = xgmac_mdio_reset;
bus->irq = bus->priv;
bus->parent = &pdev->dev;
snprintf(bus->id, MII_BUS_ID_SIZE, "%llx", (unsigned long long)res.start);
/* Set the PHY base address */
bus->priv = of_iomap(np, 0);
if (!bus->priv) {
ret = -ENOMEM;
goto err_ioremap;
}
ret = of_mdiobus_register(bus, np);
if (ret) {
dev_err(&pdev->dev, "cannot register MDIO bus\n");
goto err_registration;
}
dev_set_drvdata(&pdev->dev, bus);
return 0;
err_registration:
iounmap(bus->priv);
err_ioremap:
mdiobus_free(bus);
return ret;
}
static int __devexit xgmac_mdio_remove(struct platform_device *pdev)
{
struct mii_bus *bus = dev_get_drvdata(&pdev->dev);
mdiobus_unregister(bus);
iounmap(bus->priv);
mdiobus_free(bus);
return 0;
}
static struct of_device_id xgmac_mdio_match[] = {
{
.compatible = "fsl,fman-xmdio",
},
{},
};
MODULE_DEVICE_TABLE(of, xgmac_mdio_match);
static struct platform_driver xgmac_mdio_driver = {
.driver = {
.name = "fsl-fman_xmdio",
.of_match_table = xgmac_mdio_match,
},
.probe = xgmac_mdio_probe,
.remove = xgmac_mdio_remove,
};
module_platform_driver(xgmac_mdio_driver);
MODULE_DESCRIPTION("Freescale QorIQ 10G MDIO Controller");
MODULE_LICENSE("GPL v2");

View File

@ -653,7 +653,7 @@ static void e1000_put_hw_semaphore_82574(struct e1000_hw *hw)
**/
static s32 e1000_set_d0_lplu_state_82574(struct e1000_hw *hw, bool active)
{
u16 data = er32(POEMB);
u32 data = er32(POEMB);
if (active)
data |= E1000_PHY_CTRL_D0A_LPLU;
@ -677,7 +677,7 @@ static s32 e1000_set_d0_lplu_state_82574(struct e1000_hw *hw, bool active)
**/
static s32 e1000_set_d3_lplu_state_82574(struct e1000_hw *hw, bool active)
{
u16 data = er32(POEMB);
u32 data = er32(POEMB);
if (!active) {
data &= ~E1000_PHY_CTRL_NOND0A_LPLU;

View File

@ -310,6 +310,7 @@ struct e1000_adapter {
*/
struct e1000_ring *tx_ring /* One per active queue */
____cacheline_aligned_in_smp;
u32 tx_fifo_limit;
struct napi_struct napi;

View File

@ -1942,7 +1942,8 @@ static int e1000_set_coalesce(struct net_device *netdev,
return -EINVAL;
if (ec->rx_coalesce_usecs == 4) {
adapter->itr = adapter->itr_setting = 4;
adapter->itr_setting = 4;
adapter->itr = adapter->itr_setting;
} else if (ec->rx_coalesce_usecs <= 3) {
adapter->itr = 20000;
adapter->itr_setting = ec->rx_coalesce_usecs;

View File

@ -56,7 +56,7 @@
#define DRV_EXTRAVERSION "-k"
#define DRV_VERSION "2.0.0" DRV_EXTRAVERSION
#define DRV_VERSION "2.1.4" DRV_EXTRAVERSION
char e1000e_driver_name[] = "e1000e";
const char e1000e_driver_version[] = DRV_VERSION;
@ -3446,7 +3446,7 @@ void e1000e_reset(struct e1000_adapter *adapter)
/*
* if short on Rx space, Rx wins and must trump Tx
* adjustment or use Early Receive if available
* adjustment
*/
if (pba < min_rx_space)
pba = min_rx_space;
@ -3516,6 +3516,15 @@ void e1000e_reset(struct e1000_adapter *adapter)
break;
}
/*
* Alignment of Tx data is on an arbitrary byte boundary with the
* maximum size per Tx descriptor limited only to the transmit
* allocation of the packet buffer minus 96 bytes with an upper
* limit of 24KB due to receive synchronization limitations.
*/
adapter->tx_fifo_limit = min_t(u32, ((er32(PBA) >> 16) << 10) - 96,
24 << 10);
/*
* Disable Adaptive Interrupt Moderation if 2 full packets cannot
* fit in receive buffer.
@ -3746,6 +3755,10 @@ static irqreturn_t e1000_intr_msi_test(int irq, void *data)
e_dbg("icr is %08X\n", icr);
if (icr & E1000_ICR_RXSEQ) {
adapter->flags &= ~FLAG_MSI_TEST_FAILED;
/*
* Force memory writes to complete before acknowledging the
* interrupt is handled.
*/
wmb();
}
@ -3787,6 +3800,10 @@ static int e1000_test_msi_interrupt(struct e1000_adapter *adapter)
goto msi_test_failed;
}
/*
* Force memory writes to complete before enabling and firing an
* interrupt.
*/
wmb();
e1000_irq_enable(adapter);
@ -3798,7 +3815,7 @@ static int e1000_test_msi_interrupt(struct e1000_adapter *adapter)
e1000_irq_disable(adapter);
rmb();
rmb(); /* read flags after interrupt has been fired */
if (adapter->flags & FLAG_MSI_TEST_FAILED) {
adapter->int_mode = E1000E_INT_MODE_LEGACY;
@ -4661,7 +4678,7 @@ static int e1000_tso(struct e1000_ring *tx_ring, struct sk_buff *skb)
struct e1000_buffer *buffer_info;
unsigned int i;
u32 cmd_length = 0;
u16 ipcse = 0, tucse, mss;
u16 ipcse = 0, mss;
u8 ipcss, ipcso, tucss, tucso, hdr_len;
if (!skb_is_gso(skb))
@ -4695,7 +4712,6 @@ static int e1000_tso(struct e1000_ring *tx_ring, struct sk_buff *skb)
ipcso = (void *)&(ip_hdr(skb)->check) - (void *)skb->data;
tucss = skb_transport_offset(skb);
tucso = (void *)&(tcp_hdr(skb)->check) - (void *)skb->data;
tucse = 0;
cmd_length |= (E1000_TXD_CMD_DEXT | E1000_TXD_CMD_TSE |
E1000_TXD_CMD_TCP | (skb->len - (hdr_len)));
@ -4709,7 +4725,7 @@ static int e1000_tso(struct e1000_ring *tx_ring, struct sk_buff *skb)
context_desc->lower_setup.ip_fields.ipcse = cpu_to_le16(ipcse);
context_desc->upper_setup.tcp_fields.tucss = tucss;
context_desc->upper_setup.tcp_fields.tucso = tucso;
context_desc->upper_setup.tcp_fields.tucse = cpu_to_le16(tucse);
context_desc->upper_setup.tcp_fields.tucse = 0;
context_desc->tcp_seg_setup.fields.mss = cpu_to_le16(mss);
context_desc->tcp_seg_setup.fields.hdr_len = hdr_len;
context_desc->cmd_and_length = cpu_to_le32(cmd_length);
@ -4785,12 +4801,9 @@ static bool e1000_tx_csum(struct e1000_ring *tx_ring, struct sk_buff *skb)
return 1;
}
#define E1000_MAX_PER_TXD 8192
#define E1000_MAX_TXD_PWR 12
static int e1000_tx_map(struct e1000_ring *tx_ring, struct sk_buff *skb,
unsigned int first, unsigned int max_per_txd,
unsigned int nr_frags, unsigned int mss)
unsigned int nr_frags)
{
struct e1000_adapter *adapter = tx_ring->adapter;
struct pci_dev *pdev = adapter->pdev;
@ -5023,20 +5036,19 @@ static int __e1000_maybe_stop_tx(struct e1000_ring *tx_ring, int size)
static int e1000_maybe_stop_tx(struct e1000_ring *tx_ring, int size)
{
BUG_ON(size > tx_ring->count);
if (e1000_desc_unused(tx_ring) >= size)
return 0;
return __e1000_maybe_stop_tx(tx_ring, size);
}
#define TXD_USE_COUNT(S, X) (((S) >> (X)) + 1)
static netdev_tx_t e1000_xmit_frame(struct sk_buff *skb,
struct net_device *netdev)
{
struct e1000_adapter *adapter = netdev_priv(netdev);
struct e1000_ring *tx_ring = adapter->tx_ring;
unsigned int first;
unsigned int max_per_txd = E1000_MAX_PER_TXD;
unsigned int max_txd_pwr = E1000_MAX_TXD_PWR;
unsigned int tx_flags = 0;
unsigned int len = skb_headlen(skb);
unsigned int nr_frags;
@ -5056,18 +5068,8 @@ static netdev_tx_t e1000_xmit_frame(struct sk_buff *skb,
}
mss = skb_shinfo(skb)->gso_size;
/*
* The controller does a simple calculation to
* make sure there is enough room in the FIFO before
* initiating the DMA for each buffer. The calc is:
* 4 = ceil(buffer len/mss). To make sure we don't
* overrun the FIFO, adjust the max buffer len if mss
* drops.
*/
if (mss) {
u8 hdr_len;
max_per_txd = min(mss << 2, max_per_txd);
max_txd_pwr = fls(max_per_txd) - 1;
/*
* TSO Workaround for 82571/2/3 Controllers -- if skb->data
@ -5097,12 +5099,12 @@ static netdev_tx_t e1000_xmit_frame(struct sk_buff *skb,
count++;
count++;
count += TXD_USE_COUNT(len, max_txd_pwr);
count += DIV_ROUND_UP(len, adapter->tx_fifo_limit);
nr_frags = skb_shinfo(skb)->nr_frags;
for (f = 0; f < nr_frags; f++)
count += TXD_USE_COUNT(skb_frag_size(&skb_shinfo(skb)->frags[f]),
max_txd_pwr);
count += DIV_ROUND_UP(skb_frag_size(&skb_shinfo(skb)->frags[f]),
adapter->tx_fifo_limit);
if (adapter->hw.mac.tx_pkt_filtering)
e1000_transfer_dhcp_info(adapter, skb);
@ -5144,15 +5146,18 @@ static netdev_tx_t e1000_xmit_frame(struct sk_buff *skb,
tx_flags |= E1000_TX_FLAGS_NO_FCS;
/* if count is 0 then mapping error has occurred */
count = e1000_tx_map(tx_ring, skb, first, max_per_txd, nr_frags, mss);
count = e1000_tx_map(tx_ring, skb, first, adapter->tx_fifo_limit,
nr_frags);
if (count) {
skb_tx_timestamp(skb);
netdev_sent_queue(netdev, skb->len);
e1000_tx_queue(tx_ring, tx_flags, count);
/* Make sure there is space in the ring for the next send. */
e1000_maybe_stop_tx(tx_ring, MAX_SKB_FRAGS + 2);
e1000_maybe_stop_tx(tx_ring,
(MAX_SKB_FRAGS *
DIV_ROUND_UP(PAGE_SIZE,
adapter->tx_fifo_limit) + 2));
} else {
dev_kfree_skb_any(skb);
tx_ring->buffer_info[first].time_stamp = 0;
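The descriptor accounting above replaces the old power-of-two TXD_USE_COUNT() with straight division by tx_fifo_limit. As a worked example with invented numbers: a 20 KB Tx packet-buffer allocation gives (20 << 10) - 96 = 20384 bytes per descriptor, so a 64 KB TSO payload costs DIV_ROUND_UP(65536, 20384) = 4 descriptors. A hedged sketch of the arithmetic:

#include <linux/kernel.h>

/* Illustrative restatement of the new limit and descriptor count;
 * 'pba' is the raw PBA register value and the figures above are
 * examples, not hardware data.
 */
static u32 example_tx_fifo_limit(u32 pba)
{
	/* Tx allocation (KB) in the high word, minus 96 B, capped at 24 KB */
	return min_t(u32, ((pba >> 16) << 10) - 96, 24 << 10);
}

static unsigned int example_descs_needed(unsigned int len, u32 limit)
{
	return DIV_ROUND_UP(len, limit);
}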
@ -6327,8 +6332,8 @@ static int __devinit e1000_probe(struct pci_dev *pdev,
adapter->hw.phy.autoneg_advertised = 0x2f;
/* ring size defaults */
adapter->rx_ring->count = 256;
adapter->tx_ring->count = 256;
adapter->rx_ring->count = E1000_DEFAULT_RXD;
adapter->tx_ring->count = E1000_DEFAULT_TXD;
/*
* Initial Wake on LAN setting - If APM wake is enabled in

View File

@ -101,7 +101,9 @@ struct ixgbevf_ring {
/* Supported Rx Buffer Sizes */
#define IXGBEVF_RXBUFFER_256 256 /* Used for packet split */
#define IXGBEVF_RXBUFFER_2048 2048
#define IXGBEVF_RXBUFFER_3K 3072
#define IXGBEVF_RXBUFFER_7K 7168
#define IXGBEVF_RXBUFFER_15K 15360
#define IXGBEVF_MAX_RXBUFFER 16384 /* largest size for single descriptor */
#define IXGBEVF_RX_HDR_SIZE IXGBEVF_RXBUFFER_256

View File

@ -1057,15 +1057,46 @@ static void ixgbevf_configure_srrctl(struct ixgbevf_adapter *adapter, int index)
srrctl |= IXGBE_SRRCTL_DESCTYPE_ADV_ONEBUF;
if (rx_ring->rx_buf_len == MAXIMUM_ETHERNET_VLAN_SIZE)
srrctl |= IXGBEVF_RXBUFFER_2048 >>
IXGBE_SRRCTL_BSIZEPKT_SHIFT;
else
srrctl |= rx_ring->rx_buf_len >>
IXGBE_SRRCTL_BSIZEPKT_SHIFT;
srrctl |= ALIGN(rx_ring->rx_buf_len, 1024) >>
IXGBE_SRRCTL_BSIZEPKT_SHIFT;
IXGBE_WRITE_REG(hw, IXGBE_VFSRRCTL(index), srrctl);
}
static void ixgbevf_set_rx_buffer_len(struct ixgbevf_adapter *adapter)
{
struct ixgbe_hw *hw = &adapter->hw;
struct net_device *netdev = adapter->netdev;
int max_frame = netdev->mtu + ETH_HLEN + ETH_FCS_LEN;
int i;
u16 rx_buf_len;
/* notify the PF of our intent to use this size of frame */
ixgbevf_rlpml_set_vf(hw, max_frame);
/* PF will allow an extra 4 bytes past for vlan tagged frames */
max_frame += VLAN_HLEN;
/*
* Make best use of allocation by using all but 1K of a
* power of 2 allocation that will be used for skb->head.
*/
if ((hw->mac.type == ixgbe_mac_X540_vf) &&
(max_frame <= MAXIMUM_ETHERNET_VLAN_SIZE))
rx_buf_len = MAXIMUM_ETHERNET_VLAN_SIZE;
else if (max_frame <= IXGBEVF_RXBUFFER_3K)
rx_buf_len = IXGBEVF_RXBUFFER_3K;
else if (max_frame <= IXGBEVF_RXBUFFER_7K)
rx_buf_len = IXGBEVF_RXBUFFER_7K;
else if (max_frame <= IXGBEVF_RXBUFFER_15K)
rx_buf_len = IXGBEVF_RXBUFFER_15K;
else
rx_buf_len = IXGBEVF_MAX_RXBUFFER;
for (i = 0; i < adapter->num_rx_queues; i++)
adapter->rx_ring[i].rx_buf_len = rx_buf_len;
}
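ixgbevf_set_rx_buffer_len() above bins the frame size into 3K/7K/15K buckets so the skb->head allocation stays about 1 KB under a power of two. A hedged sketch of the same binning (macro values mirror the header hunk earlier; the helper name is invented):

#include <linux/kernel.h>

#define RXBUF_3K	3072	/* fits a 4 KB allocation with headroom */
#define RXBUF_7K	7168	/* fits an 8 KB allocation */
#define RXBUF_15K	15360	/* fits a 16 KB allocation */
#define RXBUF_MAX	16384	/* largest single-descriptor buffer */

/* Pick the smallest bucket that holds max_frame; illustrative only. */
static u16 pick_rx_buf_len(int max_frame)
{
	if (max_frame <= RXBUF_3K)
		return RXBUF_3K;
	if (max_frame <= RXBUF_7K)
		return RXBUF_7K;
	if (max_frame <= RXBUF_15K)
		return RXBUF_15K;
	return RXBUF_MAX;
}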
/**
* ixgbevf_configure_rx - Configure 82599 VF Receive Unit after Reset
* @adapter: board private structure
@ -1076,18 +1107,14 @@ static void ixgbevf_configure_rx(struct ixgbevf_adapter *adapter)
{
u64 rdba;
struct ixgbe_hw *hw = &adapter->hw;
struct net_device *netdev = adapter->netdev;
int max_frame = netdev->mtu + ETH_HLEN + ETH_FCS_LEN;
int i, j;
u32 rdlen;
int rx_buf_len;
/* PSRTYPE must be initialized in 82599 */
IXGBE_WRITE_REG(hw, IXGBE_VFPSRTYPE, 0);
if (netdev->mtu <= ETH_DATA_LEN)
rx_buf_len = MAXIMUM_ETHERNET_VLAN_SIZE;
else
rx_buf_len = ALIGN(max_frame, 1024);
/* set_rx_buffer_len must be called before ring initialization */
ixgbevf_set_rx_buffer_len(adapter);
rdlen = adapter->rx_ring[0].count * sizeof(union ixgbe_adv_rx_desc);
/* Setup the HW Rx Head and Tail Descriptor Pointers and
@ -1103,7 +1130,6 @@ static void ixgbevf_configure_rx(struct ixgbevf_adapter *adapter)
IXGBE_WRITE_REG(hw, IXGBE_VFRDT(j), 0);
adapter->rx_ring[i].head = IXGBE_VFRDH(j);
adapter->rx_ring[i].tail = IXGBE_VFRDT(j);
adapter->rx_ring[i].rx_buf_len = rx_buf_len;
ixgbevf_configure_srrctl(adapter, j);
}
@ -1315,7 +1341,6 @@ static void ixgbevf_up_complete(struct ixgbevf_adapter *adapter)
int i, j = 0;
int num_rx_rings = adapter->num_rx_queues;
u32 txdctl, rxdctl;
u32 msg[2];
for (i = 0; i < adapter->num_tx_queues; i++) {
j = adapter->tx_ring[i].reg_idx;
@ -1356,10 +1381,6 @@ static void ixgbevf_up_complete(struct ixgbevf_adapter *adapter)
hw->mac.ops.set_rar(hw, 0, hw->mac.perm_addr, 0);
}
msg[0] = IXGBE_VF_SET_LPE;
msg[1] = netdev->mtu + ETH_HLEN + ETH_FCS_LEN;
hw->mbx.ops.write_posted(hw, msg, 2);
spin_unlock(&adapter->mbx_lock);
clear_bit(__IXGBEVF_DOWN, &adapter->state);
@ -1866,6 +1887,22 @@ static int ixgbevf_init_interrupt_scheme(struct ixgbevf_adapter *adapter)
return err;
}
/**
* ixgbevf_clear_interrupt_scheme - Clear the current interrupt scheme settings
* @adapter: board private structure to clear interrupt scheme on
*
* We go through and clear interrupt specific resources and reset the structure
* to pre-load conditions
**/
static void ixgbevf_clear_interrupt_scheme(struct ixgbevf_adapter *adapter)
{
adapter->num_tx_queues = 0;
adapter->num_rx_queues = 0;
ixgbevf_free_q_vectors(adapter);
ixgbevf_reset_interrupt_capability(adapter);
}
/**
* ixgbevf_sw_init - Initialize general software structures
* (struct ixgbevf_adapter)
@ -2860,10 +2897,8 @@ static int ixgbevf_set_mac(struct net_device *netdev, void *p)
static int ixgbevf_change_mtu(struct net_device *netdev, int new_mtu)
{
struct ixgbevf_adapter *adapter = netdev_priv(netdev);
struct ixgbe_hw *hw = &adapter->hw;
int max_frame = new_mtu + ETH_HLEN + ETH_FCS_LEN;
int max_possible_frame = MAXIMUM_ETHERNET_VLAN_SIZE;
u32 msg[2];
if (adapter->hw.mac.type == ixgbe_mac_X540_vf)
max_possible_frame = IXGBE_MAX_JUMBO_FRAME_SIZE;
@ -2877,35 +2912,91 @@ static int ixgbevf_change_mtu(struct net_device *netdev, int new_mtu)
/* must set new MTU before calling down or up */
netdev->mtu = new_mtu;
if (!netif_running(netdev)) {
msg[0] = IXGBE_VF_SET_LPE;
msg[1] = max_frame;
hw->mbx.ops.write_posted(hw, msg, 2);
}
if (netif_running(netdev))
ixgbevf_reinit_locked(adapter);
return 0;
}
static void ixgbevf_shutdown(struct pci_dev *pdev)
static int ixgbevf_suspend(struct pci_dev *pdev, pm_message_t state)
{
struct net_device *netdev = pci_get_drvdata(pdev);
struct ixgbevf_adapter *adapter = netdev_priv(netdev);
#ifdef CONFIG_PM
int retval = 0;
#endif
netif_device_detach(netdev);
if (netif_running(netdev)) {
rtnl_lock();
ixgbevf_down(adapter);
ixgbevf_free_irq(adapter);
ixgbevf_free_all_tx_resources(adapter);
ixgbevf_free_all_rx_resources(adapter);
rtnl_unlock();
}
ixgbevf_clear_interrupt_scheme(adapter);
#ifdef CONFIG_PM
retval = pci_save_state(pdev);
if (retval)
return retval;
#endif
pci_disable_device(pdev);
return 0;
}
#ifdef CONFIG_PM
static int ixgbevf_resume(struct pci_dev *pdev)
{
struct ixgbevf_adapter *adapter = pci_get_drvdata(pdev);
struct net_device *netdev = adapter->netdev;
u32 err;
pci_set_power_state(pdev, PCI_D0);
pci_restore_state(pdev);
/*
* pci_restore_state clears dev->state_saved so call
* pci_save_state to restore it.
*/
pci_save_state(pdev);
pci_disable_device(pdev);
err = pci_enable_device_mem(pdev);
if (err) {
dev_err(&pdev->dev, "Cannot enable PCI device from suspend\n");
return err;
}
pci_set_master(pdev);
rtnl_lock();
err = ixgbevf_init_interrupt_scheme(adapter);
rtnl_unlock();
if (err) {
dev_err(&pdev->dev, "Cannot initialize interrupts\n");
return err;
}
ixgbevf_reset(adapter);
if (netif_running(netdev)) {
err = ixgbevf_open(netdev);
if (err)
return err;
}
netif_device_attach(netdev);
return err;
}
#endif /* CONFIG_PM */
static void ixgbevf_shutdown(struct pci_dev *pdev)
{
ixgbevf_suspend(pdev, PMSG_SUSPEND);
}
static struct rtnl_link_stats64 *ixgbevf_get_stats(struct net_device *netdev,
@ -2946,7 +3037,7 @@ static struct rtnl_link_stats64 *ixgbevf_get_stats(struct net_device *netdev,
return stats;
}
static const struct net_device_ops ixgbe_netdev_ops = {
static const struct net_device_ops ixgbevf_netdev_ops = {
.ndo_open = ixgbevf_open,
.ndo_stop = ixgbevf_close,
.ndo_start_xmit = ixgbevf_xmit_frame,
@ -2962,7 +3053,7 @@ static const struct net_device_ops ixgbe_netdev_ops = {
static void ixgbevf_assign_netdev_ops(struct net_device *dev)
{
dev->netdev_ops = &ixgbe_netdev_ops;
dev->netdev_ops = &ixgbevf_netdev_ops;
ixgbevf_set_ethtool_ops(dev);
dev->watchdog_timeo = 5 * HZ;
}
@ -3131,6 +3222,7 @@ static int __devinit ixgbevf_probe(struct pci_dev *pdev,
return 0;
err_register:
ixgbevf_clear_interrupt_scheme(adapter);
err_sw_init:
ixgbevf_reset_interrupt_capability(adapter);
iounmap(hw->hw_addr);
@ -3168,6 +3260,7 @@ static void __devexit ixgbevf_remove(struct pci_dev *pdev)
if (netdev->reg_state == NETREG_REGISTERED)
unregister_netdev(netdev);
ixgbevf_clear_interrupt_scheme(adapter);
ixgbevf_reset_interrupt_capability(adapter);
iounmap(adapter->hw.hw_addr);
@ -3267,6 +3360,11 @@ static struct pci_driver ixgbevf_driver = {
.id_table = ixgbevf_pci_tbl,
.probe = ixgbevf_probe,
.remove = __devexit_p(ixgbevf_remove),
#ifdef CONFIG_PM
/* Power Management Hooks */
.suspend = ixgbevf_suspend,
.resume = ixgbevf_resume,
#endif
.shutdown = ixgbevf_shutdown,
.err_handler = &ixgbevf_err_handler
};

View File

@ -419,6 +419,20 @@ static s32 ixgbevf_check_mac_link_vf(struct ixgbe_hw *hw,
return 0;
}
/**
* ixgbevf_rlpml_set_vf - Set the maximum receive packet length
* @hw: pointer to the HW structure
* @max_size: value to assign to max frame size
**/
void ixgbevf_rlpml_set_vf(struct ixgbe_hw *hw, u16 max_size)
{
u32 msgbuf[2];
msgbuf[0] = IXGBE_VF_SET_LPE;
msgbuf[1] = max_size;
ixgbevf_write_msg_read_ack(hw, msgbuf, 2);
}
static const struct ixgbe_mac_operations ixgbevf_mac_ops = {
.init_hw = ixgbevf_init_hw_vf,
.reset_hw = ixgbevf_reset_hw_vf,

View File

@ -170,5 +170,6 @@ struct ixgbevf_info {
const struct ixgbe_mac_operations *mac_ops;
};
void ixgbevf_rlpml_set_vf(struct ixgbe_hw *hw, u16 max_size);
#endif /* __IXGBE_VF_H__ */

View File

@ -3409,7 +3409,7 @@ static int nv_update_linkspeed(struct net_device *dev)
pause_flags = 0;
/* setup pause frame */
if (np->duplex != 0) {
if (netif_running(dev) && (np->duplex != 0)) {
if (np->autoneg && np->pause_flags & NV_PAUSEFRAME_AUTONEG) {
adv_pause = adv & (ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM);
lpa_pause = lpa & (LPA_PAUSE_CAP | LPA_PAUSE_ASYM);
@ -4435,7 +4435,7 @@ static void nv_get_regs(struct net_device *dev, struct ethtool_regs *regs, void
regs->version = FORCEDETH_REGS_VER;
spin_lock_irq(&np->lock);
for (i = 0; i <= np->register_size/sizeof(u32); i++)
for (i = 0; i < np->register_size/sizeof(u32); i++)
rbuf[i] = readl(base + i*sizeof(u32));
spin_unlock_irq(&np->lock);
}
@ -5455,6 +5455,7 @@ static int nv_close(struct net_device *dev)
netif_stop_queue(dev);
spin_lock_irq(&np->lock);
nv_update_pause(dev, 0); /* otherwise stop_tx bricks NIC */
nv_stop_rxtx(dev);
nv_txrx_reset(dev);
@ -5904,11 +5905,19 @@ static int __devinit nv_probe(struct pci_dev *pci_dev, const struct pci_device_i
goto out_error;
}
netif_carrier_off(dev);
/* Some NICs freeze when TX pause is enabled while NIC is
* down, and this stays across warm reboots. The sequence
* below should be enough to recover from that state.
*/
nv_update_pause(dev, 0);
nv_start_tx(dev);
nv_stop_tx(dev);
if (id->driver_data & DEV_HAS_VLAN)
nv_vlan_mode(dev, dev->features);
netif_carrier_off(dev);
dev_info(&pci_dev->dev, "ifname %s, PHY OUI 0x%x @ %d, addr %pM\n",
dev->name, np->phy_oui, np->phyaddr, dev->dev_addr);

View File

@ -287,6 +287,8 @@ static DEFINE_PCI_DEVICE_TABLE(rtl8169_pci_tbl) = {
{ PCI_DEVICE(PCI_VENDOR_ID_REALTEK, 0x8167), 0, 0, RTL_CFG_0 },
{ PCI_DEVICE(PCI_VENDOR_ID_REALTEK, 0x8168), 0, 0, RTL_CFG_1 },
{ PCI_DEVICE(PCI_VENDOR_ID_REALTEK, 0x8169), 0, 0, RTL_CFG_0 },
{ PCI_VENDOR_ID_DLINK, 0x4300,
PCI_VENDOR_ID_DLINK, 0x4b10, 0, 0, RTL_CFG_1 },
{ PCI_DEVICE(PCI_VENDOR_ID_DLINK, 0x4300), 0, 0, RTL_CFG_0 },
{ PCI_DEVICE(PCI_VENDOR_ID_DLINK, 0x4302), 0, 0, RTL_CFG_0 },
{ PCI_DEVICE(PCI_VENDOR_ID_AT, 0xc107), 0, 0, RTL_CFG_0 },
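The new D-Link entry above cannot use the PCI_DEVICE() helper because that macro wildcards the subsystem IDs; spelling out all seven fields lets the 0x4b10 subdevice select a different configuration from the generic 0x4300 match. A hedged sketch of the difference (the driver_data values are invented):

#include <linux/pci.h>

/* The full-field entry must come first so it wins over the wildcard. */
static const struct pci_device_id example_tbl[] = {
	/* vendor, device, subvendor, subdevice, class, class_mask, data */
	{ PCI_VENDOR_ID_DLINK, 0x4300,
	  PCI_VENDOR_ID_DLINK, 0x4b10, 0, 0, 1 },
	{ PCI_DEVICE(PCI_VENDOR_ID_DLINK, 0x4300), 0, 0, 0 },
	{ 0, },
};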

View File

@ -202,11 +202,21 @@ static void efx_stop_all(struct efx_nic *efx);
#define EFX_ASSERT_RESET_SERIALISED(efx) \
do { \
if ((efx->state == STATE_RUNNING) || \
if ((efx->state == STATE_READY) || \
(efx->state == STATE_DISABLED)) \
ASSERT_RTNL(); \
} while (0)
static int efx_check_disabled(struct efx_nic *efx)
{
if (efx->state == STATE_DISABLED) {
netif_err(efx, drv, efx->net_dev,
"device is disabled due to earlier errors\n");
return -EIO;
}
return 0;
}
/**************************************************************************
*
* Event queue processing
@ -630,6 +640,16 @@ static void efx_start_datapath(struct efx_nic *efx)
efx->rx_buffer_order = get_order(efx->rx_buffer_len +
sizeof(struct efx_rx_page_state));
/* We must keep at least one descriptor in a TX ring empty.
* We could avoid this when the queue size does not exactly
* match the hardware ring size, but it's not that important.
* Therefore we stop the queue when one more skb might fill
* the ring completely. We wake it when half way back to
* empty.
*/
efx->txq_stop_thresh = efx->txq_entries - efx_tx_max_skb_descs(efx);
efx->txq_wake_thresh = efx->txq_stop_thresh / 2;
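With the thresholds above, the queue stops as soon as one more worst-case skb could overflow the ring, and wakes half way back to empty. A worked sketch with invented numbers (a 1024-entry ring, 18 descriptors per worst-case skb):

/* Illustrative arithmetic only; the ring size and per-skb descriptor
 * count are invented, not taken from the hardware.
 */
static void example_tx_thresholds(void)
{
	unsigned int entries = 1024, max_skb_descs = 18;
	unsigned int stop = entries - max_skb_descs;	/* 1006 in use */
	unsigned int wake = stop / 2;			/* 503 in use  */

	/* stop the queue once 'stop' descriptors are filled; wake it
	 * when the fill level has drained back down to 'wake'.
	 */
	(void)stop;
	(void)wake;
}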
/* Initialise the channels */
efx_for_each_channel(channel, efx) {
efx_for_each_channel_tx_queue(tx_queue, channel)
@ -730,7 +750,11 @@ efx_realloc_channels(struct efx_nic *efx, u32 rxq_entries, u32 txq_entries)
struct efx_channel *other_channel[EFX_MAX_CHANNELS], *channel;
u32 old_rxq_entries, old_txq_entries;
unsigned i, next_buffer_table = 0;
int rc = 0;
int rc;
rc = efx_check_disabled(efx);
if (rc)
return rc;
/* Not all channels should be reallocated. We must avoid
* reallocating their buffer table entries.
@ -1365,6 +1389,8 @@ static void efx_start_interrupts(struct efx_nic *efx, bool may_keep_eventq)
{
struct efx_channel *channel;
BUG_ON(efx->state == STATE_DISABLED);
if (efx->legacy_irq)
efx->legacy_irq_enabled = true;
efx_nic_enable_interrupts(efx);
@ -1382,6 +1408,9 @@ static void efx_stop_interrupts(struct efx_nic *efx, bool may_keep_eventq)
{
struct efx_channel *channel;
if (efx->state == STATE_DISABLED)
return;
efx_mcdi_mode_poll(efx);
efx_nic_disable_interrupts(efx);
@ -1533,22 +1562,21 @@ static int efx_probe_all(struct efx_nic *efx)
return rc;
}
/* Called after previous invocation(s) of efx_stop_all, restarts the port,
* kernel transmit queues and NAPI processing, and ensures that the port is
* scheduled to be reconfigured. This function is safe to call multiple
* times when the NIC is in any state.
/* If the interface is supposed to be running but is not, start
* the hardware and software data path, regular activity for the port
* (MAC statistics, link polling, etc.) and schedule the port to be
* reconfigured. Interrupts must already be enabled. This function
* is safe to call multiple times, so long as the NIC is not disabled.
* Requires the RTNL lock.
*/
static void efx_start_all(struct efx_nic *efx)
{
EFX_ASSERT_RESET_SERIALISED(efx);
BUG_ON(efx->state == STATE_DISABLED);
/* Check that it is appropriate to restart the interface. All
* of these flags are safe to read under just the rtnl lock */
if (efx->port_enabled)
return;
if ((efx->state != STATE_RUNNING) && (efx->state != STATE_INIT))
return;
if (!netif_running(efx->net_dev))
if (efx->port_enabled || !netif_running(efx->net_dev))
return;
efx_start_port(efx);
@ -1582,11 +1610,11 @@ static void efx_flush_all(struct efx_nic *efx)
cancel_work_sync(&efx->mac_work);
}
/* Quiesce hardware and software without bringing the link down.
* Safe to call multiple times, when the nic and interface is in any
* state. The caller is guaranteed to subsequently be in a position
* to modify any hardware and software state they see fit without
* taking locks. */
/* Quiesce the hardware and software data path, and regular activity
* for the port without bringing the link down. Safe to call multiple
* times with the NIC in almost any state, but interrupts should be
* enabled. Requires the RTNL lock.
*/
static void efx_stop_all(struct efx_nic *efx)
{
EFX_ASSERT_RESET_SERIALISED(efx);
@ -1739,8 +1767,6 @@ static int efx_ioctl(struct net_device *net_dev, struct ifreq *ifr, int cmd)
struct efx_nic *efx = netdev_priv(net_dev);
struct mii_ioctl_data *data = if_mii(ifr);
EFX_ASSERT_RESET_SERIALISED(efx);
/* Convert phy_id from older PRTAD/DEVAD format */
if ((cmd == SIOCGMIIREG || cmd == SIOCSMIIREG) &&
(data->phy_id & 0xfc00) == 0x0400)
@ -1820,13 +1846,14 @@ static void efx_netpoll(struct net_device *net_dev)
static int efx_net_open(struct net_device *net_dev)
{
struct efx_nic *efx = netdev_priv(net_dev);
EFX_ASSERT_RESET_SERIALISED(efx);
int rc;
netif_dbg(efx, ifup, efx->net_dev, "opening device on CPU %d\n",
raw_smp_processor_id());
if (efx->state == STATE_DISABLED)
return -EIO;
rc = efx_check_disabled(efx);
if (rc)
return rc;
if (efx->phy_mode & PHY_MODE_SPECIAL)
return -EBUSY;
if (efx_mcdi_poll_reboot(efx) && efx_reset(efx, RESET_TYPE_ALL))
@ -1852,10 +1879,8 @@ static int efx_net_stop(struct net_device *net_dev)
netif_dbg(efx, ifdown, efx->net_dev, "closing on CPU %d\n",
raw_smp_processor_id());
if (efx->state != STATE_DISABLED) {
/* Stop the device and flush all the channels */
efx_stop_all(efx);
}
/* Stop the device and flush all the channels */
efx_stop_all(efx);
return 0;
}
@ -1915,9 +1940,11 @@ static void efx_watchdog(struct net_device *net_dev)
static int efx_change_mtu(struct net_device *net_dev, int new_mtu)
{
struct efx_nic *efx = netdev_priv(net_dev);
int rc;
EFX_ASSERT_RESET_SERIALISED(efx);
rc = efx_check_disabled(efx);
if (rc)
return rc;
if (new_mtu > EFX_MAX_MTU)
return -EINVAL;
@ -1926,8 +1953,6 @@ static int efx_change_mtu(struct net_device *net_dev, int new_mtu)
netif_dbg(efx, drv, efx->net_dev, "changing MTU to %d\n", new_mtu);
mutex_lock(&efx->mac_lock);
/* Reconfigure the MAC before enabling the dma queues so that
* the RX buffers don't overflow */
net_dev->mtu = new_mtu;
efx->type->reconfigure_mac(efx);
mutex_unlock(&efx->mac_lock);
@ -1942,8 +1967,6 @@ static int efx_set_mac_address(struct net_device *net_dev, void *data)
struct sockaddr *addr = data;
char *new_addr = addr->sa_data;
EFX_ASSERT_RESET_SERIALISED(efx);
if (!is_valid_ether_addr(new_addr)) {
netif_err(efx, drv, efx->net_dev,
"invalid ethernet MAC address requested: %pM\n",
@ -2079,11 +2102,27 @@ static int efx_register_netdev(struct efx_nic *efx)
rtnl_lock();
/* Enable resets to be scheduled and check whether any were
* already requested. If so, the NIC is probably hosed so we
* abort.
*/
efx->state = STATE_READY;
smp_mb(); /* ensure we change state before checking reset_pending */
if (efx->reset_pending) {
netif_err(efx, probe, efx->net_dev,
"aborting probe due to scheduled reset\n");
rc = -EIO;
goto fail_locked;
}
rc = dev_alloc_name(net_dev, net_dev->name);
if (rc < 0)
goto fail_locked;
efx_update_name(efx);
/* Always start with carrier off; PHY events will detect the link */
netif_carrier_off(net_dev);
rc = register_netdevice(net_dev);
if (rc)
goto fail_locked;
@ -2094,9 +2133,6 @@ static int efx_register_netdev(struct efx_nic *efx)
efx_init_tx_queue_core_txq(tx_queue);
}
/* Always start with carrier off; PHY events will detect the link */
netif_carrier_off(net_dev);
rtnl_unlock();
rc = device_create_file(&efx->pci_dev->dev, &dev_attr_phy_type);
@ -2108,14 +2144,14 @@ static int efx_register_netdev(struct efx_nic *efx)
return 0;
fail_registered:
rtnl_lock();
unregister_netdevice(net_dev);
fail_locked:
efx->state = STATE_UNINIT;
rtnl_unlock();
netif_err(efx, drv, efx->net_dev, "could not register net dev\n");
return rc;
fail_registered:
unregister_netdev(net_dev);
return rc;
}
static void efx_unregister_netdev(struct efx_nic *efx)
@ -2138,7 +2174,11 @@ static void efx_unregister_netdev(struct efx_nic *efx)
strlcpy(efx->name, pci_name(efx->pci_dev), sizeof(efx->name));
device_remove_file(&efx->pci_dev->dev, &dev_attr_phy_type);
unregister_netdev(efx->net_dev);
rtnl_lock();
unregister_netdevice(efx->net_dev);
efx->state = STATE_UNINIT;
rtnl_unlock();
}
/**************************************************************************
@ -2154,9 +2194,9 @@ void efx_reset_down(struct efx_nic *efx, enum reset_type method)
EFX_ASSERT_RESET_SERIALISED(efx);
efx_stop_all(efx);
mutex_lock(&efx->mac_lock);
efx_stop_interrupts(efx, false);
mutex_lock(&efx->mac_lock);
if (efx->port_initialized && method != RESET_TYPE_INVISIBLE)
efx->phy_op->fini(efx);
efx->type->fini(efx);
@ -2276,16 +2316,15 @@ static void efx_reset_work(struct work_struct *data)
if (!pending)
return;
/* If we're not RUNNING then don't reset. Leave the reset_pending
* flags set so that efx_pci_probe_main will be retried */
if (efx->state != STATE_RUNNING) {
netif_info(efx, drv, efx->net_dev,
"scheduled reset quenched. NIC not RUNNING\n");
return;
}
rtnl_lock();
(void)efx_reset(efx, fls(pending) - 1);
/* We checked the state in efx_schedule_reset() but it may
* have changed by now. Now that we have the RTNL lock,
* it cannot change again.
*/
if (efx->state == STATE_READY)
(void)efx_reset(efx, fls(pending) - 1);
rtnl_unlock();
}
@ -2311,6 +2350,13 @@ void efx_schedule_reset(struct efx_nic *efx, enum reset_type type)
}
set_bit(method, &efx->reset_pending);
smp_mb(); /* ensure we change reset_pending before checking state */
/* If we're not READY then just leave the flags set as the cue
* to abort probing or reschedule the reset later.
*/
if (ACCESS_ONCE(efx->state) != STATE_READY)
return;
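/* The barrier above pairs with the smp_mb() in efx_register_netdev():
 * either the thread setting STATE_READY observes reset_pending, or this
 * thread observes STATE_READY, so a reset requested during probe is
 * never lost.
 */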
/* efx_process_channel() will no longer read events once a
* reset is scheduled. So switch back to poll'd MCDI completions. */
@ -2376,13 +2422,12 @@ static const struct efx_phy_operations efx_dummy_phy_operations = {
/* This zeroes out and then fills in the invariants in a struct
* efx_nic (including all sub-structures).
*/
static int efx_init_struct(struct efx_nic *efx, const struct efx_nic_type *type,
static int efx_init_struct(struct efx_nic *efx,
struct pci_dev *pci_dev, struct net_device *net_dev)
{
int i;
/* Initialise common structures */
memset(efx, 0, sizeof(*efx));
spin_lock_init(&efx->biu_lock);
#ifdef CONFIG_SFC_MTD
INIT_LIST_HEAD(&efx->mtd_list);
@ -2392,7 +2437,7 @@ static int efx_init_struct(struct efx_nic *efx, const struct efx_nic_type *type,
INIT_DELAYED_WORK(&efx->selftest_work, efx_selftest_async_work);
efx->pci_dev = pci_dev;
efx->msg_enable = debug;
efx->state = STATE_INIT;
efx->state = STATE_UNINIT;
strlcpy(efx->name, pci_name(pci_dev), sizeof(efx->name));
efx->net_dev = net_dev;
@ -2409,8 +2454,6 @@ static int efx_init_struct(struct efx_nic *efx, const struct efx_nic_type *type,
goto fail;
}
efx->type = type;
EFX_BUG_ON_PARANOID(efx->type->phys_addr_channels > EFX_MAX_CHANNELS);
/* Higher numbered interrupt modes are less capable! */
@ -2455,6 +2498,12 @@ static void efx_fini_struct(struct efx_nic *efx)
*/
static void efx_pci_remove_main(struct efx_nic *efx)
{
/* Flush reset_work. It can no longer be scheduled since we
* are not READY.
*/
BUG_ON(efx->state == STATE_READY);
cancel_work_sync(&efx->reset_work);
#ifdef CONFIG_RFS_ACCEL
free_irq_cpu_rmap(efx->net_dev->rx_cpu_rmap);
efx->net_dev->rx_cpu_rmap = NULL;
@ -2480,24 +2529,15 @@ static void efx_pci_remove(struct pci_dev *pci_dev)
/* Mark the NIC as fini, then stop the interface */
rtnl_lock();
efx->state = STATE_FINI;
dev_close(efx->net_dev);
/* Allow any queued efx_resets() to complete */
efx_stop_interrupts(efx, false);
rtnl_unlock();
efx_stop_interrupts(efx, false);
efx_sriov_fini(efx);
efx_unregister_netdev(efx);
efx_mtd_remove(efx);
/* Wait for any scheduled resets to complete. No more will be
* scheduled from this point because efx_stop_all() has been
* called, we are no longer registered with driverlink, and
* the net_device's have been removed. */
cancel_work_sync(&efx->reset_work);
efx_pci_remove_main(efx);
efx_fini_io(efx);
@ -2617,7 +2657,6 @@ static int efx_pci_probe_main(struct efx_nic *efx)
static int __devinit efx_pci_probe(struct pci_dev *pci_dev,
const struct pci_device_id *entry)
{
const struct efx_nic_type *type = (const struct efx_nic_type *) entry->driver_data;
struct net_device *net_dev;
struct efx_nic *efx;
int rc;
@ -2627,10 +2666,12 @@ static int __devinit efx_pci_probe(struct pci_dev *pci_dev,
EFX_MAX_RX_QUEUES);
if (!net_dev)
return -ENOMEM;
net_dev->features |= (type->offload_features | NETIF_F_SG |
efx = netdev_priv(net_dev);
efx->type = (const struct efx_nic_type *) entry->driver_data;
net_dev->features |= (efx->type->offload_features | NETIF_F_SG |
NETIF_F_HIGHDMA | NETIF_F_TSO |
NETIF_F_RXCSUM);
if (type->offload_features & NETIF_F_V6_CSUM)
if (efx->type->offload_features & NETIF_F_V6_CSUM)
net_dev->features |= NETIF_F_TSO6;
/* Mask for features that also apply to VLAN devices */
net_dev->vlan_features |= (NETIF_F_ALL_CSUM | NETIF_F_SG |
@ -2638,10 +2679,9 @@ static int __devinit efx_pci_probe(struct pci_dev *pci_dev,
NETIF_F_RXCSUM);
/* All offloads can be toggled */
net_dev->hw_features = net_dev->features & ~NETIF_F_HIGHDMA;
efx = netdev_priv(net_dev);
pci_set_drvdata(pci_dev, efx);
SET_NETDEV_DEV(net_dev, &pci_dev->dev);
rc = efx_init_struct(efx, type, pci_dev, net_dev);
rc = efx_init_struct(efx, pci_dev, net_dev);
if (rc)
goto fail1;
@ -2656,28 +2696,9 @@ static int __devinit efx_pci_probe(struct pci_dev *pci_dev,
goto fail2;
rc = efx_pci_probe_main(efx);
/* Serialise against efx_reset(). No more resets will be
* scheduled since efx_stop_all() has been called, and we have
* not and never have been registered.
*/
cancel_work_sync(&efx->reset_work);
if (rc)
goto fail3;
/* If there was a scheduled reset during probe, the NIC is
* probably hosed anyway.
*/
if (efx->reset_pending) {
rc = -EIO;
goto fail4;
}
/* Switch to the running state before we expose the device to the OS,
* so that dev_open()|efx_start_all() will actually start the device */
efx->state = STATE_RUNNING;
rc = efx_register_netdev(efx);
if (rc)
goto fail4;
@ -2717,12 +2738,18 @@ static int efx_pm_freeze(struct device *dev)
{
struct efx_nic *efx = pci_get_drvdata(to_pci_dev(dev));
efx->state = STATE_FINI;
rtnl_lock();
netif_device_detach(efx->net_dev);
if (efx->state != STATE_DISABLED) {
efx->state = STATE_UNINIT;
efx_stop_all(efx);
efx_stop_interrupts(efx, false);
netif_device_detach(efx->net_dev);
efx_stop_all(efx);
efx_stop_interrupts(efx, false);
}
rtnl_unlock();
return 0;
}
@ -2731,21 +2758,25 @@ static int efx_pm_thaw(struct device *dev)
{
struct efx_nic *efx = pci_get_drvdata(to_pci_dev(dev));
efx->state = STATE_INIT;
rtnl_lock();
efx_start_interrupts(efx, false);
if (efx->state != STATE_DISABLED) {
efx_start_interrupts(efx, false);
mutex_lock(&efx->mac_lock);
efx->phy_op->reconfigure(efx);
mutex_unlock(&efx->mac_lock);
mutex_lock(&efx->mac_lock);
efx->phy_op->reconfigure(efx);
mutex_unlock(&efx->mac_lock);
efx_start_all(efx);
efx_start_all(efx);
netif_device_attach(efx->net_dev);
netif_device_attach(efx->net_dev);
efx->state = STATE_RUNNING;
efx->state = STATE_READY;
efx->type->resume_wol(efx);
efx->type->resume_wol(efx);
}
rtnl_unlock();
/* Reschedule any quenched resets scheduled during efx_pm_freeze() */
queue_work(reset_workqueue, &efx->reset_work);

View File

@ -529,9 +529,7 @@ static void efx_ethtool_self_test(struct net_device *net_dev,
if (!efx_tests)
goto fail;
ASSERT_RTNL();
if (efx->state != STATE_RUNNING) {
if (efx->state != STATE_READY) {
rc = -EIO;
goto fail1;
}
@ -863,8 +861,8 @@ static int efx_ethtool_get_class_rule(struct efx_nic *efx,
&ip_entry->ip4dst, &ip_entry->pdst);
if (rc != 0) {
rc = efx_filter_get_ipv4_full(
&spec, &proto, &ip_entry->ip4src, &ip_entry->psrc,
&ip_entry->ip4dst, &ip_entry->pdst);
&spec, &proto, &ip_entry->ip4dst, &ip_entry->pdst,
&ip_entry->ip4src, &ip_entry->psrc);
EFX_WARN_ON_PARANOID(rc);
ip_mask->ip4src = ~0;
ip_mask->psrc = ~0;

View File

@ -380,7 +380,7 @@ static ssize_t set_phy_flash_cfg(struct device *dev,
new_mode = PHY_MODE_SPECIAL;
if (!((old_mode ^ new_mode) & PHY_MODE_SPECIAL)) {
err = 0;
} else if (efx->state != STATE_RUNNING || netif_running(efx->net_dev)) {
} else if (efx->state != STATE_READY || netif_running(efx->net_dev)) {
err = -EBUSY;
} else {
/* Reset the PHY, reconfigure the MAC and enable/disable

View File

@ -91,29 +91,31 @@ struct efx_special_buffer {
};
/**
* struct efx_tx_buffer - An Efx TX buffer
* @skb: The associated socket buffer.
* Set only on the final fragment of a packet; %NULL for all other
* fragments. When this fragment completes, then we can free this
* skb.
* @tsoh: The associated TSO header structure, or %NULL if this
* buffer is not a TSO header.
* struct efx_tx_buffer - buffer state for a TX descriptor
* @skb: When @flags & %EFX_TX_BUF_SKB, the associated socket buffer to be
* freed when descriptor completes
* @heap_buf: When @flags & %EFX_TX_BUF_HEAP, the associated heap buffer to be
* freed when descriptor completes.
* @dma_addr: DMA address of the fragment.
* @flags: Flags for allocation and DMA mapping type
* @len: Length of this fragment.
* This field is zero when the queue slot is empty.
* @continuation: True if this fragment is not the end of a packet.
* @unmap_single: True if dma_unmap_single should be used.
* @unmap_len: Length of this fragment to unmap
*/
struct efx_tx_buffer {
const struct sk_buff *skb;
struct efx_tso_header *tsoh;
union {
const struct sk_buff *skb;
void *heap_buf;
};
dma_addr_t dma_addr;
unsigned short flags;
unsigned short len;
bool continuation;
bool unmap_single;
unsigned short unmap_len;
};
#define EFX_TX_BUF_CONT 1 /* not last descriptor of packet */
#define EFX_TX_BUF_SKB 2 /* buffer is last part of skb */
#define EFX_TX_BUF_HEAP 4 /* buffer was allocated with kmalloc() */
#define EFX_TX_BUF_MAP_SINGLE 8 /* buffer was mapped with dma_map_single() */
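With the union discriminated by @flags, a completion path can tear down any buffer from its flags alone; a minimal sketch of that pattern (simplified, and not the driver's exact routine):

static void efx_tx_buffer_cleanup(struct device *dma_dev,
                                  struct efx_tx_buffer *buffer)
{
        if (buffer->unmap_len) {
                /* Simplified: the real driver recomputes the original
                 * mapping address from dma_addr, len and unmap_len. */
                if (buffer->flags & EFX_TX_BUF_MAP_SINGLE)
                        dma_unmap_single(dma_dev, buffer->dma_addr,
                                         buffer->unmap_len, DMA_TO_DEVICE);
                else
                        dma_unmap_page(dma_dev, buffer->dma_addr,
                                       buffer->unmap_len, DMA_TO_DEVICE);
                buffer->unmap_len = 0;
        }
        if (buffer->flags & EFX_TX_BUF_SKB)
                dev_kfree_skb_any((struct sk_buff *)buffer->skb);
        else if (buffer->flags & EFX_TX_BUF_HEAP)
                kfree(buffer->heap_buf);
        buffer->flags = 0;
}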
/**
* struct efx_tx_queue - An Efx TX queue
@ -133,6 +135,7 @@ struct efx_tx_buffer {
* @channel: The associated channel
* @core_txq: The networking core TX queue structure
* @buffer: The software buffer ring
* @tsoh_page: Array of pages of TSO header buffers
* @txd: The hardware descriptor ring
* @ptr_mask: The size of the ring minus 1.
* @initialised: Has hardware queue been initialised?
@ -156,9 +159,6 @@ struct efx_tx_buffer {
* variable indicates that the queue is full. This is to
* avoid cache-line ping-pong between the xmit path and the
* completion path.
* @tso_headers_free: A list of TSO headers allocated for this TX queue
* that are not in use, and so available for new TSO sends. The list
* is protected by the TX queue lock.
* @tso_bursts: Number of times TSO xmit invoked by kernel
* @tso_long_headers: Number of packets with headers too long for standard
* blocks
@ -175,6 +175,7 @@ struct efx_tx_queue {
struct efx_channel *channel;
struct netdev_queue *core_txq;
struct efx_tx_buffer *buffer;
struct efx_buffer *tsoh_page;
struct efx_special_buffer txd;
unsigned int ptr_mask;
bool initialised;
@ -187,7 +188,6 @@ struct efx_tx_queue {
unsigned int insert_count ____cacheline_aligned_in_smp;
unsigned int write_count;
unsigned int old_read_count;
struct efx_tso_header *tso_headers_free;
unsigned int tso_bursts;
unsigned int tso_long_headers;
unsigned int tso_packets;
@ -430,11 +430,9 @@ enum efx_int_mode {
#define EFX_INT_MODE_USE_MSI(x) (((x)->interrupt_mode) <= EFX_INT_MODE_MSI)
enum nic_state {
STATE_INIT = 0,
STATE_RUNNING = 1,
STATE_FINI = 2,
STATE_DISABLED = 3,
STATE_MAX,
STATE_UNINIT = 0, /* device being probed/removed or is frozen */
STATE_READY = 1, /* hardware ready and netdev registered */
STATE_DISABLED = 2, /* device disabled due to hardware errors */
};
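All transitions between these states are serialised by the RTNL lock (see the @state kernel-doc below), so each one can follow the same idiom; a sketch with a hypothetical helper name:

static void efx_set_state(struct efx_nic *efx, enum nic_state state)
{
        ASSERT_RTNL();          /* every transition holds the RTNL lock */
        efx->state = state;
}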
/*
@ -654,7 +652,7 @@ struct vfdi_status;
* @irq_rx_adaptive: Adaptive IRQ moderation enabled for RX event queues
* @irq_rx_moderation: IRQ moderation time for RX event queues
* @msg_enable: Log message enable flags
* @state: Device state flag. Serialised by the rtnl_lock.
* @state: Device state number (%STATE_*). Serialised by the rtnl_lock.
* @reset_pending: Bitmask for pending resets
* @tx_queue: TX DMA queues
* @rx_queue: RX DMA queues
@ -664,6 +662,8 @@ struct vfdi_status;
* should be allocated for this NIC
* @rxq_entries: Size of receive queues requested by user.
* @txq_entries: Size of transmit queues requested by user.
* @txq_stop_thresh: TX queue fill level at or above which we stop it.
* @txq_wake_thresh: TX queue fill level at or below which we wake it.
* @tx_dc_base: Base qword address in SRAM of TX queue descriptor caches
* @rx_dc_base: Base qword address in SRAM of RX queue descriptor caches
* @sram_lim_qw: Qword address limit of SRAM
@ -774,6 +774,9 @@ struct efx_nic {
unsigned rxq_entries;
unsigned txq_entries;
unsigned int txq_stop_thresh;
unsigned int txq_wake_thresh;
unsigned tx_dc_base;
unsigned rx_dc_base;
unsigned sram_lim_qw;
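The two new thresholds give the TX path stop/wake hysteresis; roughly, and ignoring the driver's exact fill-level bookkeeping, the idea is:

unsigned int fill_level = tx_queue->insert_count - tx_queue->old_read_count;

/* xmit path: stop once the queue is nearly full ... */
if (fill_level >= efx->txq_stop_thresh)
        netif_tx_stop_queue(tx_queue->core_txq);

/* ... completion path: wake only after draining below the lower
 * threshold, so the queue does not flap between stopped and started. */
if (fill_level <= efx->txq_wake_thresh)
        netif_tx_wake_queue(tx_queue->core_txq);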

View File

@ -298,7 +298,7 @@ efx_free_special_buffer(struct efx_nic *efx, struct efx_special_buffer *buffer)
/**************************************************************************
*
* Generic buffer handling
* These buffers are used for interrupt status and MAC stats
* These buffers are used for interrupt status, MAC stats, etc.
*
**************************************************************************/
@ -401,8 +401,10 @@ void efx_nic_push_buffers(struct efx_tx_queue *tx_queue)
++tx_queue->write_count;
/* Create TX descriptor ring entry */
BUILD_BUG_ON(EFX_TX_BUF_CONT != 1);
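/* EFX_TX_BUF_CONT must be bit 0: the masked value below is then already
 * 0 or 1 and can feed the one-bit FSF_AZ_TX_KER_CONT field directly. */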
EFX_POPULATE_QWORD_4(*txd,
FSF_AZ_TX_KER_CONT, buffer->continuation,
FSF_AZ_TX_KER_CONT,
buffer->flags & EFX_TX_BUF_CONT,
FSF_AZ_TX_KER_BYTE_COUNT, buffer->len,
FSF_AZ_TX_KER_BUF_REGION, 0,
FSF_AZ_TX_KER_BUF_ADDR, buffer->dma_addr);

File diff suppressed because it is too large

View File

@ -22,6 +22,9 @@
Author: Giuseppe Cavallaro <peppe.cavallaro@st.com>
*******************************************************************************/
#ifndef __COMMON_H__
#define __COMMON_H__
#include <linux/etherdevice.h>
#include <linux/netdevice.h>
#include <linux/phy.h>
@ -366,3 +369,5 @@ extern void stmmac_set_mac(void __iomem *ioaddr, bool enable);
extern void dwmac_dma_flush_tx_fifo(void __iomem *ioaddr);
extern const struct stmmac_ring_mode_ops ring_mode_ops;
#endif /* __COMMON_H__ */

View File

@ -20,6 +20,10 @@
Author: Giuseppe Cavallaro <peppe.cavallaro@st.com>
*******************************************************************************/
#ifndef __DESCS_H__
#define __DESCS_H__
struct dma_desc {
/* Receive descriptor */
union {
@ -166,3 +170,5 @@ enum tdes_csum_insertion {
* is not calculated */
cic_full = 3, /* IP header and pseudoheader */
};
#endif /* __DESCS_H__ */

View File

@ -27,6 +27,9 @@
Author: Giuseppe Cavallaro <peppe.cavallaro@st.com>
*******************************************************************************/
#ifndef __DESC_COM_H__
#define __DESC_COM_H__
#if defined(CONFIG_STMMAC_RING)
static inline void ehn_desc_rx_set_on_ring_chain(struct dma_desc *p, int end)
{
@ -124,3 +127,5 @@ static inline void norm_set_tx_desc_len(struct dma_desc *p, int len)
p->des01.tx.buffer1_size = len;
}
#endif
#endif /* __DESC_COM_H__ */

View File

@ -22,6 +22,9 @@
Author: Giuseppe Cavallaro <peppe.cavallaro@st.com>
*******************************************************************************/
#ifndef __DWMAC100_H__
#define __DWMAC100_H__
#include <linux/phy.h>
#include "common.h"
@ -119,3 +122,5 @@ enum ttc_control {
#define DMA_MISSED_FRAME_M_CNTR 0x0000ffff /* Missed Frame Counter */
extern const struct stmmac_dma_ops dwmac100_dma_ops;
#endif /* __DWMAC100_H__ */

View File

@ -19,6 +19,8 @@
Author: Giuseppe Cavallaro <peppe.cavallaro@st.com>
*******************************************************************************/
#ifndef __DWMAC1000_H__
#define __DWMAC1000_H__
#include <linux/phy.h>
#include "common.h"
@ -229,6 +231,7 @@ enum rtc_control {
#define GMAC_MMC_RX_CSUM_OFFLOAD 0x208
/* Synopsys Core versions */
#define DWMAC_CORE_3_40 34
#define DWMAC_CORE_3_40 0x34
extern const struct stmmac_dma_ops dwmac1000_dma_ops;
#endif /* __DWMAC1000_H__ */

View File

@ -22,6 +22,9 @@
Author: Giuseppe Cavallaro <peppe.cavallaro@st.com>
*******************************************************************************/
#ifndef __DWMAC_DMA_H__
#define __DWMAC_DMA_H__
/* DMA CRS Control and Status Register Mapping */
#define DMA_BUS_MODE 0x00001000 /* Bus Mode */
#define DMA_XMT_POLL_DEMAND 0x00001004 /* Transmit Poll Demand */
@ -109,3 +112,5 @@ extern void dwmac_dma_start_rx(void __iomem *ioaddr);
extern void dwmac_dma_stop_rx(void __iomem *ioaddr);
extern int dwmac_dma_interrupt(void __iomem *ioaddr,
struct stmmac_extra_stats *x);
#endif /* __DWMAC_DMA_H__ */

View File

@ -22,6 +22,9 @@
Author: Giuseppe Cavallaro <peppe.cavallaro@st.com>
*******************************************************************************/
#ifndef __MMC_H__
#define __MMC_H__
/* MMC control register */
/* When set, all counters are reset */
#define MMC_CNTRL_COUNTER_RESET 0x1
@ -129,3 +132,5 @@ struct stmmac_counters {
extern void dwmac_mmc_ctrl(void __iomem *ioaddr, unsigned int mode);
extern void dwmac_mmc_intr_all_mask(void __iomem *ioaddr);
extern void dwmac_mmc_read(void __iomem *ioaddr, struct stmmac_counters *mmc);
#endif /* __MMC_H__ */

View File

@ -33,7 +33,7 @@
#define MMC_TX_INTR 0x00000108 /* MMC TX Interrupt */
#define MMC_RX_INTR_MASK 0x0000010c /* MMC Interrupt Mask */
#define MMC_TX_INTR_MASK 0x00000110 /* MMC Interrupt Mask */
#define MMC_DEFAUL_MASK 0xffffffff
#define MMC_DEFAULT_MASK 0xffffffff
/* MMC TX counter registers */
@ -147,8 +147,8 @@ void dwmac_mmc_ctrl(void __iomem *ioaddr, unsigned int mode)
/* To mask all interrupts. */
void dwmac_mmc_intr_all_mask(void __iomem *ioaddr)
{
writel(MMC_DEFAUL_MASK, ioaddr + MMC_RX_INTR_MASK);
writel(MMC_DEFAUL_MASK, ioaddr + MMC_TX_INTR_MASK);
writel(MMC_DEFAULT_MASK, ioaddr + MMC_RX_INTR_MASK);
writel(MMC_DEFAULT_MASK, ioaddr + MMC_TX_INTR_MASK);
}
/* This reads the MAC core counters (if actually supported).

View File

@ -20,6 +20,9 @@
Author: Giuseppe Cavallaro <peppe.cavallaro@st.com>
*******************************************************************************/
#ifndef __STMMAC_H__
#define __STMMAC_H__
#define STMMAC_RESOURCE_NAME "stmmaceth"
#define DRV_MODULE_VERSION "March_2012"
@ -166,3 +169,5 @@ static inline void stmmac_unregister_pci(void)
{
}
#endif /* CONFIG_STMMAC_PCI */
#endif /* __STMMAC_H__ */

View File

@ -177,7 +177,7 @@ int stmmac_mdio_register(struct net_device *ndev)
new_bus->write = &stmmac_mdio_write;
new_bus->reset = &stmmac_mdio_reset;
snprintf(new_bus->id, MII_BUS_ID_SIZE, "%s-%x",
new_bus->name, mdio_bus_data->bus_id);
new_bus->name, priv->plat->bus_id);
new_bus->priv = ndev;
new_bus->irq = irqlist;
new_bus->phy_mask = mdio_bus_data->phy_mask;
@ -213,12 +213,10 @@ int stmmac_mdio_register(struct net_device *ndev)
* and no PHY number was provided to the MAC,
* use the one probed here.
*/
if ((priv->plat->bus_id == mdio_bus_data->bus_id) &&
(priv->plat->phy_addr == -1))
if (priv->plat->phy_addr == -1)
priv->plat->phy_addr = addr;
act = (priv->plat->bus_id == mdio_bus_data->bus_id) &&
(priv->plat->phy_addr == addr);
act = (priv->plat->phy_addr == addr);
switch (phydev->irq) {
case PHY_POLL:
irq_str = "POLL";
@ -258,6 +256,9 @@ int stmmac_mdio_unregister(struct net_device *ndev)
{
struct stmmac_priv *priv = netdev_priv(ndev);
if (!priv->mii)
return 0;
mdiobus_unregister(priv->mii);
priv->mii->priv = NULL;
mdiobus_free(priv->mii);

View File

@ -40,7 +40,6 @@ static void stmmac_default_data(void)
plat_dat.has_gmac = 1;
plat_dat.force_sf_dma_mode = 1;
mdio_data.bus_id = 1;
mdio_data.phy_reset = NULL;
mdio_data.phy_mask = 0;
plat_dat.mdio_bus_data = &mdio_data;

View File

@ -78,6 +78,7 @@ static int __devinit stmmac_pltfr_probe(struct platform_device *pdev)
{
int ret = 0;
struct resource *res;
struct device *dev = &pdev->dev;
void __iomem *addr = NULL;
struct stmmac_priv *priv = NULL;
struct plat_stmmacenet_data *plat_dat = NULL;
@ -87,18 +88,10 @@ static int __devinit stmmac_pltfr_probe(struct platform_device *pdev)
if (!res)
return -ENODEV;
if (!request_mem_region(res->start, resource_size(res), pdev->name)) {
pr_err("%s: ERROR: memory allocation failed"
"cannot get the I/O addr 0x%x\n",
__func__, (unsigned int)res->start);
return -EBUSY;
}
addr = ioremap(res->start, resource_size(res));
addr = devm_request_and_ioremap(dev, res);
if (!addr) {
pr_err("%s: ERROR: memory mapping failed", __func__);
ret = -ENOMEM;
goto out_release_region;
return -ENOMEM;
}
if (pdev->dev.of_node) {
@ -107,14 +100,13 @@ static int __devinit stmmac_pltfr_probe(struct platform_device *pdev)
GFP_KERNEL);
if (!plat_dat) {
pr_err("%s: ERROR: no memory", __func__);
ret = -ENOMEM;
goto out_unmap;
return -ENOMEM;
}
ret = stmmac_probe_config_dt(pdev, plat_dat, &mac);
if (ret) {
pr_err("%s: main dt probe failed", __func__);
goto out_unmap;
return ret;
}
} else {
plat_dat = pdev->dev.platform_data;
@ -124,13 +116,13 @@ static int __devinit stmmac_pltfr_probe(struct platform_device *pdev)
if (plat_dat->init) {
ret = plat_dat->init(pdev);
if (unlikely(ret))
goto out_unmap;
return ret;
}
priv = stmmac_dvr_probe(&(pdev->dev), plat_dat, addr);
if (!priv) {
pr_err("%s: main driver probe failed", __func__);
goto out_unmap;
return -ENODEV;
}
/* Get MAC address if available (DT) */
@ -142,8 +134,7 @@ static int __devinit stmmac_pltfr_probe(struct platform_device *pdev)
if (priv->dev->irq == -ENXIO) {
pr_err("%s: ERROR: MAC IRQ configuration "
"information not found\n", __func__);
ret = -ENXIO;
goto out_unmap;
return -ENXIO;
}
/*
@ -165,15 +156,6 @@ static int __devinit stmmac_pltfr_probe(struct platform_device *pdev)
pr_debug("STMMAC platform driver registration completed");
return 0;
out_unmap:
iounmap(addr);
platform_set_drvdata(pdev, NULL);
out_release_region:
release_mem_region(res->start, resource_size(res));
return ret;
}
/**
@ -186,7 +168,6 @@ static int stmmac_pltfr_remove(struct platform_device *pdev)
{
struct net_device *ndev = platform_get_drvdata(pdev);
struct stmmac_priv *priv = netdev_priv(ndev);
struct resource *res;
int ret = stmmac_dvr_remove(ndev);
if (priv->plat->exit)
@ -194,10 +175,6 @@ static int stmmac_pltfr_remove(struct platform_device *pdev)
platform_set_drvdata(pdev, NULL);
iounmap((void __force __iomem *)priv->ioaddr);
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
release_mem_region(res->start, resource_size(res));
return ret;
}

View File

@ -21,6 +21,8 @@
Author: Giuseppe Cavallaro <peppe.cavallaro@st.com>
*******************************************************************************/
#ifndef __STMMAC_TIMER_H__
#define __STMMAC_TIMER_H__
struct stmmac_timer {
void (*timer_start) (unsigned int new_freq);
@ -40,3 +42,5 @@ void stmmac_schedule(struct net_device *dev);
extern int tmu2_register_user(void *fnt, void *data);
extern void tmu2_unregister_user(void);
#endif
#endif /* __STMMAC_TIMER_H__ */

View File

@ -1358,7 +1358,6 @@ static int tsi108_open(struct net_device *dev)
break;
}
data->rxskbs[i] = skb;
data->rxskbs[i] = skb;
data->rxring[i].buf0 = virt_to_phys(data->rxskbs[i]->data);
data->rxring[i].misc = TSI108_RX_OWN | TSI108_RX_INT;

View File

@ -637,8 +637,7 @@ static int __devinit w5100_hw_probe(struct platform_device *pdev)
if (data && is_valid_ether_addr(data->mac_addr)) {
memcpy(ndev->dev_addr, data->mac_addr, ETH_ALEN);
} else {
eth_random_addr(ndev->dev_addr);
ndev->addr_assign_type |= NET_ADDR_RANDOM;
eth_hw_addr_random(ndev);
}
mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);

View File

@ -557,8 +557,7 @@ static int __devinit w5300_hw_probe(struct platform_device *pdev)
if (data && is_valid_ether_addr(data->mac_addr)) {
memcpy(ndev->dev_addr, data->mac_addr, ETH_ALEN);
} else {
eth_random_addr(ndev->dev_addr);
ndev->addr_assign_type |= NET_ADDR_RANDOM;
eth_hw_addr_random(ndev);
}
mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);

View File

@ -952,17 +952,7 @@ static struct spi_driver at86rf230_driver = {
.resume = at86rf230_resume,
};
static int __init at86rf230_init(void)
{
return spi_register_driver(&at86rf230_driver);
}
module_init(at86rf230_init);
static void __exit at86rf230_exit(void)
{
spi_unregister_driver(&at86rf230_driver);
}
module_exit(at86rf230_exit);
module_spi_driver(at86rf230_driver);
MODULE_DESCRIPTION("AT86RF230 Transceiver Driver");
MODULE_LICENSE("GPL v2");
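module_spi_driver() generates the init/exit boilerplate removed above; roughly, the macro expands to:

static int __init at86rf230_driver_init(void)
{
        return spi_register_driver(&at86rf230_driver);
}
module_init(at86rf230_driver_init);

static void __exit at86rf230_driver_exit(void)
{
        spi_unregister_driver(&at86rf230_driver);
}
module_exit(at86rf230_driver_exit);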

View File

@ -446,4 +446,3 @@ static __exit void fake_exit(void)
module_init(fake_init);
module_exit(fake_exit);
MODULE_LICENSE("GPL");

View File

@ -159,6 +159,19 @@ config MDIO_BUS_MUX_GPIO
several child MDIO busses to a parent bus. Child bus
selection is under the control of GPIO lines.
config MDIO_BUS_MUX_MMIOREG
tristate "Support for MMIO device-controlled MDIO bus multiplexers"
depends on OF_MDIO
select MDIO_BUS_MUX
help
This module provides a driver for MDIO bus multiplexers that
are controlled via a simple memory-mapped device, like an FPGA.
The multiplexer connects one of several child MDIO busses to a
parent bus. Child bus selection is under the control of one of
the FPGA's registers.
Currently, only 8-bit registers are supported.
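The heart of such a driver is the switch callback it hands to mdio_mux_init(); a hedged sketch of what that callback might look like (the state structure and names here are illustrative, not the module's actual internals):

struct mmioreg_mux_state {              /* hypothetical private state */
        void __iomem *reg;              /* the 8-bit mux control register */
        u8 mask;                        /* bits selecting the child bus */
};

static int mmioreg_switch_fn(int current_child, int desired_child,
                             void *data)
{
        struct mmioreg_mux_state *s = data;
        u8 val;

        if (current_child == desired_child)
                return 0;

        /* Touch only the bits covered by the mux-mask property. */
        val = ioread8(s->reg);
        iowrite8((val & ~s->mask) | (desired_child & s->mask), s->reg);
        return 0;
}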
endif # PHYLIB
config MICREL_KS8995MA

View File

@ -28,3 +28,4 @@ obj-$(CONFIG_MICREL_KS8995MA) += spi_ks8995.o
obj-$(CONFIG_AMD_PHY) += amd.o
obj-$(CONFIG_MDIO_BUS_MUX) += mdio-mux.o
obj-$(CONFIG_MDIO_BUS_MUX_GPIO) += mdio-mux-gpio.o
obj-$(CONFIG_MDIO_BUS_MUX_MMIOREG) += mdio-mux-mmioreg.o

Some files were not shown because too many files have changed in this diff