Merge git://git.kernel.org/pub/scm/linux/kernel/git/netdev/net

Trivial conflict in CAN on file rename.

Conflicts:
	drivers/net/can/m_can/tcan4x5x-core.c

Signed-off-by: Jakub Kicinski <kuba@kernel.org>
commit 833d22f2f9
Jakub Kicinski, 2021-01-08 13:28:00 -08:00
112 changed files with 1305 additions and 479 deletions


@ -19,7 +19,9 @@ description: |
properties:
compatible:
enum:
- nxp,pf8x00
- nxp,pf8100
- nxp,pf8121a
- nxp,pf8200
reg:
maxItems: 1
@ -118,7 +120,7 @@ examples:
#size-cells = <0>;
pmic@8 {
compatible = "nxp,pf8x00";
compatible = "nxp,pf8100";
reg = <0x08>;
regulators {


@ -44,6 +44,7 @@ First Level Nodes - PMIC
Definition: Must be one of below:
"qcom,pm8005-rpmh-regulators"
"qcom,pm8009-rpmh-regulators"
"qcom,pm8009-1-rpmh-regulators"
"qcom,pm8150-rpmh-regulators"
"qcom,pm8150l-rpmh-regulators"
"qcom,pm8350-rpmh-regulators"


@ -164,46 +164,56 @@ Devlink health reporters
NPA Reporters
-------------
The NPA reporters are responsible for reporting and recovering the following group of errors
The NPA reporters are responsible for reporting and recovering the following group of errors:
1. GENERAL events
- Error due to operation of unmapped PF.
- Error due to disabled alloc/free for other HW blocks (NIX, SSO, TIM, DPI and AURA).
2. ERROR events
- Fault due to NPA_AQ_INST_S read or NPA_AQ_RES_S write.
- AQ Doorbell Error.
3. RAS events
- RAS Error Reporting for NPA_AQ_INST_S/NPA_AQ_RES_S.
4. RVU events
- Error due to unmapped slot.
Sample Output
-------------
~# devlink health
pci/0002:01:00.0:
reporter hw_npa_intr
state healthy error 2872 recover 2872 last_dump_date 2020-12-10 last_dump_time 09:39:09 grace_period 0 auto_recover true auto_dump true
reporter hw_npa_gen
state healthy error 2872 recover 2872 last_dump_date 2020-12-11 last_dump_time 04:43:04 grace_period 0 auto_recover true auto_dump true
reporter hw_npa_err
state healthy error 2871 recover 2871 last_dump_date 2020-12-10 last_dump_time 09:39:17 grace_period 0 auto_recover true auto_dump true
reporter hw_npa_ras
state healthy error 0 recover 0 last_dump_date 2020-12-10 last_dump_time 09:32:40 grace_period 0 auto_recover true auto_dump true
Sample Output::
~# devlink health
pci/0002:01:00.0:
reporter hw_npa_intr
state healthy error 2872 recover 2872 last_dump_date 2020-12-10 last_dump_time 09:39:09 grace_period 0 auto_recover true auto_dump true
reporter hw_npa_gen
state healthy error 2872 recover 2872 last_dump_date 2020-12-11 last_dump_time 04:43:04 grace_period 0 auto_recover true auto_dump true
reporter hw_npa_err
state healthy error 2871 recover 2871 last_dump_date 2020-12-10 last_dump_time 09:39:17 grace_period 0 auto_recover true auto_dump true
reporter hw_npa_ras
state healthy error 0 recover 0 last_dump_date 2020-12-10 last_dump_time 09:32:40 grace_period 0 auto_recover true auto_dump true
Each reporter dumps the
- Error Type
- Error Register value
- Reason in words
For eg:
~# devlink health dump show pci/0002:01:00.0 reporter hw_npa_gen
NPA_AF_GENERAL:
NPA General Interrupt Reg : 1
NIX0: free disabled RX
~# devlink health dump show pci/0002:01:00.0 reporter hw_npa_intr
NPA_AF_RVU:
NPA RVU Interrupt Reg : 1
Unmap Slot Error
~# devlink health dump show pci/0002:01:00.0 reporter hw_npa_err
NPA_AF_ERR:
NPA Error Interrupt Reg : 4096
AQ Doorbell Error
For example::
~# devlink health dump show pci/0002:01:00.0 reporter hw_npa_gen
NPA_AF_GENERAL:
NPA General Interrupt Reg : 1
NIX0: free disabled RX
~# devlink health dump show pci/0002:01:00.0 reporter hw_npa_intr
NPA_AF_RVU:
NPA RVU Interrupt Reg : 1
Unmap Slot Error
~# devlink health dump show pci/0002:01:00.0 reporter hw_npa_err
NPA_AF_ERR:
NPA Error Interrupt Reg : 4096
AQ Doorbell Error


@ -64,8 +64,8 @@ ndo_do_ioctl:
Context: process
ndo_get_stats:
Synchronization: dev_base_lock rwlock.
Context: nominally process, but don't sleep inside an rwlock
Synchronization: rtnl_lock() semaphore, dev_base_lock rwlock, or RCU.
Context: atomic (can't sleep under rwlock or RCU)
ndo_start_xmit:
Synchronization: __netif_tx_lock spinlock.
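
The updated entry means ndo_get_stats may be reached under the rtnl_lock() semaphore, dev_base_lock, or RCU, so implementations must not sleep. A minimal sketch of a compliant handler (hypothetical driver; the priv fields are illustrative):

    static struct net_device_stats *foo_get_stats(struct net_device *dev)
    {
            struct foo_priv *priv = netdev_priv(dev);

            /* May run under dev_base_lock or RCU: read cached counters
             * only -- no sleeping locks, no blocking register I/O, no
             * GFP_KERNEL allocations.
             */
            dev->stats.rx_packets = READ_ONCE(priv->rx_packets);
            dev->stats.tx_packets = READ_ONCE(priv->tx_packets);
            return &dev->stats;
    }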


@ -10848,7 +10848,7 @@ F: drivers/media/radio/radio-maxiradio*
MCAN MMIO DEVICE DRIVER
M: Dan Murphy <dmurphy@ti.com>
M: Sriram Dash <sriram.dash@samsung.com>
M: Pankaj Sharma <pankj.sharma@samsung.com>
L: linux-can@vger.kernel.org
S: Maintained
F: Documentation/devicetree/bindings/net/can/bosch,m_can.yaml


@ -60,6 +60,7 @@ static void chacha_doneon(u32 *state, u8 *dst, const u8 *src,
chacha_block_xor_neon(state, d, s, nrounds);
if (d != dst)
memcpy(dst, buf, bytes);
state[12]++;
}
}
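
The one-line fix restores the ChaCha block-counter invariant: state word 12 holds the 32-bit block counter, which must advance once per 64-byte block produced, including the final partial block that the NEON path stages through the bounce buffer. Schematically (generic loop, not the NEON routine; the helper name is illustrative):

    for (i = 0; i < bytes; i += CHACHA_BLOCK_SIZE) {
            chacha_block_xor(state, dst + i, src + i, nrounds);
            state[12]++;    /* 32-bit block counter, word 12 of the state */
    }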


@ -39,7 +39,8 @@ static int ecdh_set_secret(struct crypto_kpp *tfm, const void *buf,
struct ecdh params;
unsigned int ndigits;
if (crypto_ecdh_decode_key(buf, len, &params) < 0)
if (crypto_ecdh_decode_key(buf, len, &params) < 0 ||
params.key_size > sizeof(ctx->private_key))
return -EINVAL;
ndigits = ecdh_supported_curve(params.curve_id);
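
The added bound matters because ecdh_set_secret() later copies the decoded key into a fixed-size array in the tfm context; without it, an oversized key_size from the caller would overflow that buffer. A sketch of the guarded flow (the memcpy is the later step this check protects):

    if (crypto_ecdh_decode_key(buf, len, &params) < 0 ||
        params.key_size > sizeof(ctx->private_key))
            return -EINVAL;         /* reject before any copy */

    /* ... only now is it safe to do: */
    memcpy(ctx->private_key, params.key, params.key_size);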


@ -582,8 +582,12 @@ void regmap_debugfs_init(struct regmap *map)
devname = dev_name(map->dev);
if (name) {
map->debugfs_name = kasprintf(GFP_KERNEL, "%s-%s",
if (!map->debugfs_name) {
map->debugfs_name = kasprintf(GFP_KERNEL, "%s-%s",
devname, name);
if (!map->debugfs_name)
return;
}
name = map->debugfs_name;
} else {
name = devname;
@ -591,9 +595,10 @@ void regmap_debugfs_init(struct regmap *map)
if (!strcmp(name, "dummy")) {
kfree(map->debugfs_name);
map->debugfs_name = kasprintf(GFP_KERNEL, "dummy%d",
dummy_index);
if (!map->debugfs_name)
return;
name = map->debugfs_name;
dummy_index++;
}
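
Reassembled, the hunk changes regmap_debugfs_init() to allocate the name only once (the function can run again after a debugfs teardown) and to return early when kasprintf() fails instead of passing a NULL name on. The resulting flow, as a sketch:

    if (name) {
            if (!map->debugfs_name) {
                    map->debugfs_name = kasprintf(GFP_KERNEL, "%s-%s",
                                                  devname, name);
                    if (!map->debugfs_name)
                            return;
            }
            name = map->debugfs_name;
    } else {
            name = devname;
    }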


@ -13,6 +13,7 @@ if MISDN != n
config MISDN_DSP
tristate "Digital Audio Processing of transparent data"
depends on MISDN
select BITREVERSE
help
Enable support for digital audio processing capability.


@ -645,11 +645,20 @@ static int bareudp_link_config(struct net_device *dev,
return 0;
}
static void bareudp_dellink(struct net_device *dev, struct list_head *head)
{
struct bareudp_dev *bareudp = netdev_priv(dev);
list_del(&bareudp->next);
unregister_netdevice_queue(dev, head);
}
static int bareudp_newlink(struct net *net, struct net_device *dev,
struct nlattr *tb[], struct nlattr *data[],
struct netlink_ext_ack *extack)
{
struct bareudp_conf conf;
LIST_HEAD(list_kill);
int err;
err = bareudp2info(data, &conf, extack);
@ -662,17 +671,14 @@ static int bareudp_newlink(struct net *net, struct net_device *dev,
err = bareudp_link_config(dev, tb);
if (err)
return err;
goto err_unconfig;
return 0;
}
static void bareudp_dellink(struct net_device *dev, struct list_head *head)
{
struct bareudp_dev *bareudp = netdev_priv(dev);
list_del(&bareudp->next);
unregister_netdevice_queue(dev, head);
err_unconfig:
bareudp_dellink(dev, &list_kill);
unregister_netdevice_many(&list_kill);
return err;
}
static size_t bareudp_get_size(const struct net_device *dev)
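
bareudp_dellink() moves above bareudp_newlink() so the latter can reuse it for error unwinding: if bareudp_link_config() fails after the device has been registered, the device must be torn down through the same list-based path or it leaks. The resulting error handling, reconstructed from the hunks:

    err = bareudp_link_config(dev, tb);
    if (err)
            goto err_unconfig;

    return 0;

    err_unconfig:
            bareudp_dellink(dev, &list_kill);
            unregister_netdevice_many(&list_kill);
            return err;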


@ -123,6 +123,7 @@ config CAN_JANZ_ICAN3
config CAN_KVASER_PCIEFD
depends on PCI
tristate "Kvaser PCIe FD cards"
select CRC32
help
This is a driver for the Kvaser PCI Express CAN FD family.


@ -1852,8 +1852,6 @@ EXPORT_SYMBOL_GPL(m_can_class_register);
void m_can_class_unregister(struct m_can_classdev *cdev)
{
unregister_candev(cdev->net);
m_can_clk_stop(cdev);
}
EXPORT_SYMBOL_GPL(m_can_class_unregister);


@ -108,30 +108,6 @@ static inline struct tcan4x5x_priv *cdev_to_priv(struct m_can_classdev *cdev)
}
static struct can_bittiming_const tcan4x5x_bittiming_const = {
.name = KBUILD_MODNAME,
.tseg1_min = 2,
.tseg1_max = 31,
.tseg2_min = 2,
.tseg2_max = 16,
.sjw_max = 16,
.brp_min = 1,
.brp_max = 32,
.brp_inc = 1,
};
static struct can_bittiming_const tcan4x5x_data_bittiming_const = {
.name = KBUILD_MODNAME,
.tseg1_min = 1,
.tseg1_max = 32,
.tseg2_min = 1,
.tseg2_max = 16,
.sjw_max = 16,
.brp_min = 1,
.brp_max = 32,
.brp_inc = 1,
};
static void tcan4x5x_check_wake(struct tcan4x5x_priv *priv)
{
int wake_state = 0;
@ -373,8 +349,6 @@ static int tcan4x5x_can_probe(struct spi_device *spi)
mcan_class->dev = &spi->dev;
mcan_class->ops = &tcan4x5x_ops;
mcan_class->is_peripheral = true;
mcan_class->bit_timing = &tcan4x5x_bittiming_const;
mcan_class->data_timing = &tcan4x5x_data_bittiming_const;
mcan_class->net->irq = spi->irq;
spi_set_drvdata(spi, priv);


@ -1,10 +1,10 @@
# SPDX-License-Identifier: GPL-2.0
config CAN_RCAR
tristate "Renesas R-Car CAN controller"
tristate "Renesas R-Car and RZ/G CAN controller"
depends on ARCH_RENESAS || ARM
help
Say Y here if you want to use CAN controller found on Renesas R-Car
SoCs.
or RZ/G SoCs.
To compile this driver as a module, choose M here: the module will
be called rcar_can.


@ -1368,13 +1368,10 @@ static int mcp251xfd_handle_tefif(struct mcp251xfd_priv *priv)
struct mcp251xfd_tx_ring *tx_ring = priv->tx;
struct spi_transfer *last_xfer;
tx_ring->tail += len;
/* Increment the TEF FIFO tail pointer 'len' times in
* a single SPI message.
*/
/* Note:
*
* Note:
*
* "cs_change == 1" on the last transfer results in an
* active chip select after the complete SPI
@ -1391,6 +1388,8 @@ static int mcp251xfd_handle_tefif(struct mcp251xfd_priv *priv)
if (err)
return err;
tx_ring->tail += len;
err = mcp251xfd_check_tef_tail(priv);
if (err)
return err;
@ -1553,10 +1552,8 @@ mcp251xfd_handle_rxif_ring(struct mcp251xfd_priv *priv,
/* Increment the RX FIFO tail pointer 'len' times in a
* single SPI message.
*/
ring->tail += len;
/* Note:
*
* Note:
*
* "cs_change == 1" on the last transfer results in an
* active chip select after the complete SPI
@ -1572,6 +1569,8 @@ mcp251xfd_handle_rxif_ring(struct mcp251xfd_priv *priv,
last_xfer->cs_change = 1;
if (err)
return err;
ring->tail += len;
}
return 0;
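
Both hunks apply the same rule: the ring tail advances only after the SPI transaction (and, on the TEF path, the tail sanity check) succeeds, so a failed transfer no longer leaves the tail pointing past unprocessed entries. Schematically (the helper name is illustrative):

    err = mcp251xfd_do_spi_transfer(priv);
    if (err)
            return err;             /* tail untouched on failure */

    ring->tail += len;              /* commit progress only on success */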


@ -4,6 +4,7 @@ config NET_DSA_HIRSCHMANN_HELLCREEK
depends on HAS_IOMEM
depends on NET_DSA
depends on PTP_1588_CLOCK
depends on LEDS_CLASS
select NET_DSA_TAG_HELLCREEK
help
This driver adds support for Hirschmann Hellcreek TSN switches.


@ -1436,11 +1436,12 @@ static void gswip_phylink_validate(struct dsa_switch *ds, int port,
phylink_set(mask, Pause);
phylink_set(mask, Asym_Pause);
/* With the exclusion of MII and Reverse MII, we support Gigabit,
* including Half duplex
/* With the exclusion of MII, Reverse MII and Reduced MII, we
* support Gigabit, including Half duplex
*/
if (state->interface != PHY_INTERFACE_MODE_MII &&
state->interface != PHY_INTERFACE_MODE_REVMII) {
state->interface != PHY_INTERFACE_MODE_REVMII &&
state->interface != PHY_INTERFACE_MODE_RMII) {
phylink_set(mask, 1000baseT_Full);
phylink_set(mask, 1000baseT_Half);
}


@ -621,7 +621,7 @@ static void chtls_reset_synq(struct listen_ctx *listen_ctx)
while (!skb_queue_empty(&listen_ctx->synq)) {
struct chtls_sock *csk =
container_of((struct synq *)__skb_dequeue
container_of((struct synq *)skb_peek
(&listen_ctx->synq), struct chtls_sock, synq);
struct sock *child = csk->sk;
@ -1109,6 +1109,7 @@ static struct sock *chtls_recv_sock(struct sock *lsk,
const struct cpl_pass_accept_req *req,
struct chtls_dev *cdev)
{
struct adapter *adap = pci_get_drvdata(cdev->pdev);
struct neighbour *n = NULL;
struct inet_sock *newinet;
const struct iphdr *iph;
@ -1118,9 +1119,10 @@ static struct sock *chtls_recv_sock(struct sock *lsk,
struct dst_entry *dst;
struct tcp_sock *tp;
struct sock *newsk;
bool found = false;
u16 port_id;
int rxq_idx;
int step;
int step, i;
iph = (const struct iphdr *)network_hdr;
newsk = tcp_create_openreq_child(lsk, oreq, cdev->askb);
@ -1152,7 +1154,7 @@ static struct sock *chtls_recv_sock(struct sock *lsk,
n = dst_neigh_lookup(dst, &ip6h->saddr);
#endif
}
if (!n)
if (!n || !n->dev)
goto free_sk;
ndev = n->dev;
@ -1161,6 +1163,13 @@ static struct sock *chtls_recv_sock(struct sock *lsk,
if (is_vlan_dev(ndev))
ndev = vlan_dev_real_dev(ndev);
for_each_port(adap, i)
if (cdev->ports[i] == ndev)
found = true;
if (!found)
goto free_dst;
port_id = cxgb4_port_idx(ndev);
csk = chtls_sock_create(cdev);
@ -1238,6 +1247,7 @@ static struct sock *chtls_recv_sock(struct sock *lsk,
free_csk:
chtls_sock_release(&csk->kref);
free_dst:
neigh_release(n);
dst_release(dst);
free_sk:
inet_csk_prepare_forced_close(newsk);
@ -1387,7 +1397,7 @@ static void chtls_pass_accept_request(struct sock *sk,
newsk = chtls_recv_sock(sk, oreq, network_hdr, req, cdev);
if (!newsk)
goto free_oreq;
goto reject;
if (chtls_get_module(newsk))
goto reject;
@ -1403,8 +1413,6 @@ static void chtls_pass_accept_request(struct sock *sk,
kfree_skb(skb);
return;
free_oreq:
chtls_reqsk_free(oreq);
reject:
mk_tid_release(reply_skb, 0, tid);
cxgb4_ofld_send(cdev->lldi->ports[0], reply_skb);
@ -1589,6 +1597,11 @@ static int chtls_pass_establish(struct chtls_dev *cdev, struct sk_buff *skb)
sk_wake_async(sk, 0, POLL_OUT);
data = lookup_stid(cdev->tids, stid);
if (!data) {
/* listening server close */
kfree_skb(skb);
goto unlock;
}
lsk = ((struct listen_ctx *)data)->lsk;
bh_lock_sock(lsk);
@ -1997,39 +2010,6 @@ static void t4_defer_reply(struct sk_buff *skb, struct chtls_dev *cdev,
spin_unlock_bh(&cdev->deferq.lock);
}
static void send_abort_rpl(struct sock *sk, struct sk_buff *skb,
struct chtls_dev *cdev, int status, int queue)
{
struct cpl_abort_req_rss *req = cplhdr(skb);
struct sk_buff *reply_skb;
struct chtls_sock *csk;
csk = rcu_dereference_sk_user_data(sk);
reply_skb = alloc_skb(sizeof(struct cpl_abort_rpl),
GFP_KERNEL);
if (!reply_skb) {
req->status = (queue << 1);
t4_defer_reply(skb, cdev, send_defer_abort_rpl);
return;
}
set_abort_rpl_wr(reply_skb, GET_TID(req), status);
kfree_skb(skb);
set_wr_txq(reply_skb, CPL_PRIORITY_DATA, queue);
if (csk_conn_inline(csk)) {
struct l2t_entry *e = csk->l2t_entry;
if (e && sk->sk_state != TCP_SYN_RECV) {
cxgb4_l2t_send(csk->egress_dev, reply_skb, e);
return;
}
}
cxgb4_ofld_send(cdev->lldi->ports[0], reply_skb);
}
static void chtls_send_abort_rpl(struct sock *sk, struct sk_buff *skb,
struct chtls_dev *cdev,
int status, int queue)
@ -2078,9 +2058,9 @@ static void bl_abort_syn_rcv(struct sock *lsk, struct sk_buff *skb)
queue = csk->txq_idx;
skb->sk = NULL;
chtls_send_abort_rpl(child, skb, BLOG_SKB_CB(skb)->cdev,
CPL_ABORT_NO_RST, queue);
do_abort_syn_rcv(child, lsk);
send_abort_rpl(child, skb, BLOG_SKB_CB(skb)->cdev,
CPL_ABORT_NO_RST, queue);
}
static int abort_syn_rcv(struct sock *sk, struct sk_buff *skb)
@ -2110,8 +2090,8 @@ static int abort_syn_rcv(struct sock *sk, struct sk_buff *skb)
if (!sock_owned_by_user(psk)) {
int queue = csk->txq_idx;
chtls_send_abort_rpl(sk, skb, cdev, CPL_ABORT_NO_RST, queue);
do_abort_syn_rcv(sk, psk);
send_abort_rpl(sk, skb, cdev, CPL_ABORT_NO_RST, queue);
} else {
skb->sk = sk;
BLOG_SKB_CB(skb)->backlog_rcv = bl_abort_syn_rcv;
@ -2129,9 +2109,6 @@ static void chtls_abort_req_rss(struct sock *sk, struct sk_buff *skb)
int queue = csk->txq_idx;
if (is_neg_adv(req->status)) {
if (sk->sk_state == TCP_SYN_RECV)
chtls_set_tcb_tflag(sk, 0, 0);
kfree_skb(skb);
return;
}
@ -2158,12 +2135,12 @@ static void chtls_abort_req_rss(struct sock *sk, struct sk_buff *skb)
if (sk->sk_state == TCP_SYN_RECV && !abort_syn_rcv(sk, skb))
return;
chtls_release_resources(sk);
chtls_conn_done(sk);
}
chtls_send_abort_rpl(sk, skb, BLOG_SKB_CB(skb)->cdev,
rst_status, queue);
chtls_release_resources(sk);
chtls_conn_done(sk);
}
static void chtls_abort_rpl_rss(struct sock *sk, struct sk_buff *skb)
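
Worth noting on the chtls_recv_sock() changes: dst_neigh_lookup() returns a referenced neighbour, so every error path past that point must drop it, which the free_dst label now does. The unwind tail, as it reads after the fix:

    free_csk:
            chtls_sock_release(&csk->kref);
    free_dst:
            neigh_release(n);       /* drop the ref taken by dst_neigh_lookup() */
            dst_release(dst);
    free_sk:
            inet_csk_prepare_forced_close(newsk);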


@ -223,3 +223,4 @@ static struct platform_driver fs_enet_bb_mdio_driver = {
};
module_platform_driver(fs_enet_bb_mdio_driver);
MODULE_LICENSE("GPL");


@ -224,3 +224,4 @@ static struct platform_driver fs_enet_fec_mdio_driver = {
};
module_platform_driver(fs_enet_fec_mdio_driver);
MODULE_LICENSE("GPL");


@ -169,7 +169,7 @@ struct hclgevf_mbx_arq_ring {
#define hclge_mbx_ring_ptr_move_crq(crq) \
(crq->next_to_use = (crq->next_to_use + 1) % crq->desc_num)
#define hclge_mbx_tail_ptr_move_arq(arq) \
(arq.tail = (arq.tail + 1) % HCLGE_MBX_MAX_ARQ_MSG_SIZE)
(arq.tail = (arq.tail + 1) % HCLGE_MBX_MAX_ARQ_MSG_NUM)
#define hclge_mbx_head_ptr_move_arq(arq) \
(arq.head = (arq.head + 1) % HCLGE_MBX_MAX_ARQ_MSG_SIZE)
(arq.head = (arq.head + 1) % HCLGE_MBX_MAX_ARQ_MSG_NUM)
#endif
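
The macro fix corrects the wrap bound: a ring index must wrap on the number of entries in the ring (HCLGE_MBX_MAX_ARQ_MSG_NUM), not on the byte size of one message (HCLGE_MBX_MAX_ARQ_MSG_SIZE). The general idiom:

    /* advance-and-wrap for a ring with NUM_ENTRIES slots */
    idx = (idx + 1) % NUM_ENTRIES;  /* never modulo sizeof(entry) */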


@ -752,7 +752,8 @@ static int hclge_get_sset_count(struct hnae3_handle *handle, int stringset)
handle->flags |= HNAE3_SUPPORT_SERDES_SERIAL_LOOPBACK;
handle->flags |= HNAE3_SUPPORT_SERDES_PARALLEL_LOOPBACK;
if (hdev->hw.mac.phydev) {
if (hdev->hw.mac.phydev && hdev->hw.mac.phydev->drv &&
hdev->hw.mac.phydev->drv->set_loopback) {
count += 1;
handle->flags |= HNAE3_SUPPORT_PHY_LOOPBACK;
}
@ -4537,8 +4538,8 @@ static int hclge_set_rss_tuple(struct hnae3_handle *handle,
req->ipv4_sctp_en = tuple_sets;
break;
case SCTP_V6_FLOW:
if ((nfc->data & RXH_L4_B_0_1) ||
(nfc->data & RXH_L4_B_2_3))
if (hdev->ae_dev->dev_version <= HNAE3_DEVICE_VERSION_V2 &&
(nfc->data & (RXH_L4_B_0_1 | RXH_L4_B_2_3)))
return -EINVAL;
req->ipv6_sctp_en = tuple_sets;
@ -4730,6 +4731,8 @@ static void hclge_rss_init_cfg(struct hclge_dev *hdev)
vport[i].rss_tuple_sets.ipv6_udp_en =
HCLGE_RSS_INPUT_TUPLE_OTHER;
vport[i].rss_tuple_sets.ipv6_sctp_en =
hdev->ae_dev->dev_version <= HNAE3_DEVICE_VERSION_V2 ?
HCLGE_RSS_INPUT_TUPLE_SCTP_NO_PORT :
HCLGE_RSS_INPUT_TUPLE_SCTP;
vport[i].rss_tuple_sets.ipv6_fragment_en =
HCLGE_RSS_INPUT_TUPLE_OTHER;


@ -107,6 +107,8 @@
#define HCLGE_D_IP_BIT BIT(2)
#define HCLGE_S_IP_BIT BIT(3)
#define HCLGE_V_TAG_BIT BIT(4)
#define HCLGE_RSS_INPUT_TUPLE_SCTP_NO_PORT \
(HCLGE_D_IP_BIT | HCLGE_S_IP_BIT | HCLGE_V_TAG_BIT)
#define HCLGE_RSS_TC_SIZE_0 1
#define HCLGE_RSS_TC_SIZE_1 2


@ -917,8 +917,8 @@ static int hclgevf_set_rss_tuple(struct hnae3_handle *handle,
req->ipv4_sctp_en = tuple_sets;
break;
case SCTP_V6_FLOW:
if ((nfc->data & RXH_L4_B_0_1) ||
(nfc->data & RXH_L4_B_2_3))
if (hdev->ae_dev->dev_version <= HNAE3_DEVICE_VERSION_V2 &&
(nfc->data & (RXH_L4_B_0_1 | RXH_L4_B_2_3)))
return -EINVAL;
req->ipv6_sctp_en = tuple_sets;
@ -2502,7 +2502,10 @@ static void hclgevf_rss_init_cfg(struct hclgevf_dev *hdev)
tuple_sets->ipv4_fragment_en = HCLGEVF_RSS_INPUT_TUPLE_OTHER;
tuple_sets->ipv6_tcp_en = HCLGEVF_RSS_INPUT_TUPLE_OTHER;
tuple_sets->ipv6_udp_en = HCLGEVF_RSS_INPUT_TUPLE_OTHER;
tuple_sets->ipv6_sctp_en = HCLGEVF_RSS_INPUT_TUPLE_SCTP;
tuple_sets->ipv6_sctp_en =
hdev->ae_dev->dev_version <= HNAE3_DEVICE_VERSION_V2 ?
HCLGEVF_RSS_INPUT_TUPLE_SCTP_NO_PORT :
HCLGEVF_RSS_INPUT_TUPLE_SCTP;
tuple_sets->ipv6_fragment_en = HCLGEVF_RSS_INPUT_TUPLE_OTHER;
}


@ -122,6 +122,8 @@
#define HCLGEVF_D_IP_BIT BIT(2)
#define HCLGEVF_S_IP_BIT BIT(3)
#define HCLGEVF_V_TAG_BIT BIT(4)
#define HCLGEVF_RSS_INPUT_TUPLE_SCTP_NO_PORT \
(HCLGEVF_D_IP_BIT | HCLGEVF_S_IP_BIT | HCLGEVF_V_TAG_BIT)
#define HCLGEVF_STATS_TIMER_INTERVAL 36U


@ -4432,7 +4432,7 @@ static int mvneta_xdp_setup(struct net_device *dev, struct bpf_prog *prog,
struct bpf_prog *old_prog;
if (prog && dev->mtu > MVNETA_MAX_RX_BUF_SIZE) {
NL_SET_ERR_MSG_MOD(extack, "Jumbo frames not supported on XDP");
NL_SET_ERR_MSG_MOD(extack, "MTU too large for XDP");
return -EOPNOTSUPP;
}


@ -871,8 +871,10 @@ static int cgx_lmac_init(struct cgx *cgx)
if (!lmac)
return -ENOMEM;
lmac->name = kcalloc(1, sizeof("cgx_fwi_xxx_yyy"), GFP_KERNEL);
if (!lmac->name)
return -ENOMEM;
if (!lmac->name) {
err = -ENOMEM;
goto err_lmac_free;
}
sprintf(lmac->name, "cgx_fwi_%d_%d", cgx->cgx_id, i);
lmac->lmac_id = i;
lmac->cgx = cgx;
@ -883,7 +885,7 @@ static int cgx_lmac_init(struct cgx *cgx)
CGX_LMAC_FWI + i * 9),
cgx_fwi_event_handler, 0, lmac->name, lmac);
if (err)
return err;
goto err_irq;
/* Enable interrupt */
cgx_write(cgx, lmac->lmac_id, CGXX_CMRX_INT_ENA_W1S,
@ -895,6 +897,12 @@ static int cgx_lmac_init(struct cgx *cgx)
}
return cgx_lmac_verify_fwi_version(cgx);
err_irq:
kfree(lmac->name);
err_lmac_free:
kfree(lmac);
return err;
}
static int cgx_lmac_exit(struct cgx *cgx)


@ -626,6 +626,11 @@ bool mlx5e_rep_tc_update_skb(struct mlx5_cqe64 *cqe,
if (!reg_c0)
return true;
/* If reg_c0 is not equal to the default flow tag then skb->mark
* is not supported and must be reset back to 0.
*/
skb->mark = 0;
priv = netdev_priv(skb->dev);
esw = priv->mdev->priv.eswitch;


@ -118,16 +118,17 @@ struct mlx5_ct_tuple {
u16 zone;
};
struct mlx5_ct_shared_counter {
struct mlx5_ct_counter {
struct mlx5_fc *counter;
refcount_t refcount;
bool is_shared;
};
struct mlx5_ct_entry {
struct rhash_head node;
struct rhash_head tuple_node;
struct rhash_head tuple_nat_node;
struct mlx5_ct_shared_counter *shared_counter;
struct mlx5_ct_counter *counter;
unsigned long cookie;
unsigned long restore_cookie;
struct mlx5_ct_tuple tuple;
@ -394,13 +395,14 @@ mlx5_tc_ct_set_tuple_match(struct mlx5e_priv *priv, struct mlx5_flow_spec *spec,
}
static void
mlx5_tc_ct_shared_counter_put(struct mlx5_tc_ct_priv *ct_priv, struct mlx5_ct_entry *entry)
mlx5_tc_ct_counter_put(struct mlx5_tc_ct_priv *ct_priv, struct mlx5_ct_entry *entry)
{
if (!refcount_dec_and_test(&entry->shared_counter->refcount))
if (entry->counter->is_shared &&
!refcount_dec_and_test(&entry->counter->refcount))
return;
mlx5_fc_destroy(ct_priv->dev, entry->shared_counter->counter);
kfree(entry->shared_counter);
mlx5_fc_destroy(ct_priv->dev, entry->counter->counter);
kfree(entry->counter);
}
static void
@ -699,7 +701,7 @@ mlx5_tc_ct_entry_add_rule(struct mlx5_tc_ct_priv *ct_priv,
attr->dest_ft = ct_priv->post_ct;
attr->ft = nat ? ct_priv->ct_nat : ct_priv->ct;
attr->outer_match_level = MLX5_MATCH_L4;
attr->counter = entry->shared_counter->counter;
attr->counter = entry->counter->counter;
attr->flags |= MLX5_ESW_ATTR_FLAG_NO_IN_PORT;
mlx5_tc_ct_set_tuple_match(netdev_priv(ct_priv->netdev), spec, flow_rule);
@ -732,13 +734,34 @@ mlx5_tc_ct_entry_add_rule(struct mlx5_tc_ct_priv *ct_priv,
return err;
}
static struct mlx5_ct_shared_counter *
static struct mlx5_ct_counter *
mlx5_tc_ct_counter_create(struct mlx5_tc_ct_priv *ct_priv)
{
struct mlx5_ct_counter *counter;
int ret;
counter = kzalloc(sizeof(*counter), GFP_KERNEL);
if (!counter)
return ERR_PTR(-ENOMEM);
counter->is_shared = false;
counter->counter = mlx5_fc_create(ct_priv->dev, true);
if (IS_ERR(counter->counter)) {
ct_dbg("Failed to create counter for ct entry");
ret = PTR_ERR(counter->counter);
kfree(counter);
return ERR_PTR(ret);
}
return counter;
}
static struct mlx5_ct_counter *
mlx5_tc_ct_shared_counter_get(struct mlx5_tc_ct_priv *ct_priv,
struct mlx5_ct_entry *entry)
{
struct mlx5_ct_tuple rev_tuple = entry->tuple;
struct mlx5_ct_shared_counter *shared_counter;
struct mlx5_core_dev *dev = ct_priv->dev;
struct mlx5_ct_counter *shared_counter;
struct mlx5_ct_entry *rev_entry;
__be16 tmp_port;
int ret;
@ -767,25 +790,20 @@ mlx5_tc_ct_shared_counter_get(struct mlx5_tc_ct_priv *ct_priv,
rev_entry = rhashtable_lookup_fast(&ct_priv->ct_tuples_ht, &rev_tuple,
tuples_ht_params);
if (rev_entry) {
if (refcount_inc_not_zero(&rev_entry->shared_counter->refcount)) {
if (refcount_inc_not_zero(&rev_entry->counter->refcount)) {
mutex_unlock(&ct_priv->shared_counter_lock);
return rev_entry->shared_counter;
return rev_entry->counter;
}
}
mutex_unlock(&ct_priv->shared_counter_lock);
shared_counter = kzalloc(sizeof(*shared_counter), GFP_KERNEL);
if (!shared_counter)
return ERR_PTR(-ENOMEM);
shared_counter->counter = mlx5_fc_create(dev, true);
if (IS_ERR(shared_counter->counter)) {
ct_dbg("Failed to create counter for ct entry");
ret = PTR_ERR(shared_counter->counter);
kfree(shared_counter);
shared_counter = mlx5_tc_ct_counter_create(ct_priv);
if (IS_ERR(shared_counter)) {
ret = PTR_ERR(shared_counter);
return ERR_PTR(ret);
}
shared_counter->is_shared = true;
refcount_set(&shared_counter->refcount, 1);
return shared_counter;
}
@ -798,10 +816,13 @@ mlx5_tc_ct_entry_add_rules(struct mlx5_tc_ct_priv *ct_priv,
{
int err;
entry->shared_counter = mlx5_tc_ct_shared_counter_get(ct_priv, entry);
if (IS_ERR(entry->shared_counter)) {
err = PTR_ERR(entry->shared_counter);
ct_dbg("Failed to create counter for ct entry");
if (nf_ct_acct_enabled(dev_net(ct_priv->netdev)))
entry->counter = mlx5_tc_ct_counter_create(ct_priv);
else
entry->counter = mlx5_tc_ct_shared_counter_get(ct_priv, entry);
if (IS_ERR(entry->counter)) {
err = PTR_ERR(entry->counter);
return err;
}
@ -820,7 +841,7 @@ mlx5_tc_ct_entry_add_rules(struct mlx5_tc_ct_priv *ct_priv,
err_nat:
mlx5_tc_ct_entry_del_rule(ct_priv, entry, false);
err_orig:
mlx5_tc_ct_shared_counter_put(ct_priv, entry);
mlx5_tc_ct_counter_put(ct_priv, entry);
return err;
}
@ -918,7 +939,7 @@ mlx5_tc_ct_del_ft_entry(struct mlx5_tc_ct_priv *ct_priv,
rhashtable_remove_fast(&ct_priv->ct_tuples_ht, &entry->tuple_node,
tuples_ht_params);
mutex_unlock(&ct_priv->shared_counter_lock);
mlx5_tc_ct_shared_counter_put(ct_priv, entry);
mlx5_tc_ct_counter_put(ct_priv, entry);
}
@ -956,7 +977,7 @@ mlx5_tc_ct_block_flow_offload_stats(struct mlx5_ct_ft *ft,
if (!entry)
return -ENOENT;
mlx5_fc_query_cached(entry->shared_counter->counter, &bytes, &packets, &lastuse);
mlx5_fc_query_cached(entry->counter->counter, &bytes, &packets, &lastuse);
flow_stats_update(&f->stats, bytes, packets, 0, lastuse,
FLOW_ACTION_HW_STATS_DELAYED);


@ -371,6 +371,15 @@ struct mlx5e_swp_spec {
u8 tun_l4_proto;
};
static inline void mlx5e_eseg_swp_offsets_add_vlan(struct mlx5_wqe_eth_seg *eseg)
{
/* SWP offsets are in 2-bytes words */
eseg->swp_outer_l3_offset += VLAN_HLEN / 2;
eseg->swp_outer_l4_offset += VLAN_HLEN / 2;
eseg->swp_inner_l3_offset += VLAN_HLEN / 2;
eseg->swp_inner_l4_offset += VLAN_HLEN / 2;
}
static inline void
mlx5e_set_eseg_swp(struct sk_buff *skb, struct mlx5_wqe_eth_seg *eseg,
struct mlx5e_swp_spec *swp_spec)
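
The arithmetic behind the new helper: software-parser offsets are stored in 2-byte words, and a VLAN tag is VLAN_HLEN = 4 bytes, so inlining the tag into the headers shifts every offset by 4 / 2 = 2 words. The companion hunk below applies the fixup only when a VLAN tag is present and headers are inlined (ihs):

    /* VLAN_HLEN = 4 bytes; offsets count 2-byte words, so each of the
     * four SWP offsets shifts by VLAN_HLEN / 2 = 2 words.
     */
    eseg->swp_outer_l3_offset += VLAN_HLEN / 2;
    eseg->swp_outer_l4_offset += VLAN_HLEN / 2;
    eseg->swp_inner_l3_offset += VLAN_HLEN / 2;
    eseg->swp_inner_l4_offset += VLAN_HLEN / 2;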


@ -51,7 +51,7 @@ static inline bool mlx5_geneve_tx_allowed(struct mlx5_core_dev *mdev)
}
static inline void
mlx5e_tx_tunnel_accel(struct sk_buff *skb, struct mlx5_wqe_eth_seg *eseg)
mlx5e_tx_tunnel_accel(struct sk_buff *skb, struct mlx5_wqe_eth_seg *eseg, u16 ihs)
{
struct mlx5e_swp_spec swp_spec = {};
unsigned int offset = 0;
@ -85,6 +85,8 @@ mlx5e_tx_tunnel_accel(struct sk_buff *skb, struct mlx5_wqe_eth_seg *eseg)
}
mlx5e_set_eseg_swp(skb, eseg, &swp_spec);
if (skb_vlan_tag_present(skb) && ihs)
mlx5e_eseg_swp_offsets_add_vlan(eseg);
}
#else
@ -163,7 +165,7 @@ static inline unsigned int mlx5e_accel_tx_ids_len(struct mlx5e_txqsq *sq,
static inline bool mlx5e_accel_tx_eseg(struct mlx5e_priv *priv,
struct sk_buff *skb,
struct mlx5_wqe_eth_seg *eseg)
struct mlx5_wqe_eth_seg *eseg, u16 ihs)
{
#ifdef CONFIG_MLX5_EN_IPSEC
if (xfrm_offload(skb))
@ -172,7 +174,7 @@ static inline bool mlx5e_accel_tx_eseg(struct mlx5e_priv *priv,
#if IS_ENABLED(CONFIG_GENEVE)
if (skb->encapsulation)
mlx5e_tx_tunnel_accel(skb, eseg);
mlx5e_tx_tunnel_accel(skb, eseg, ihs);
#endif
return true;


@ -1010,6 +1010,22 @@ static int mlx5e_get_link_ksettings(struct net_device *netdev,
return mlx5e_ethtool_get_link_ksettings(priv, link_ksettings);
}
static int mlx5e_speed_validate(struct net_device *netdev, bool ext,
const unsigned long link_modes, u8 autoneg)
{
/* Extended link-mode has no speed limitations. */
if (ext)
return 0;
if ((link_modes & MLX5E_PROT_MASK(MLX5E_56GBASE_R4)) &&
autoneg != AUTONEG_ENABLE) {
netdev_err(netdev, "%s: 56G link speed requires autoneg enabled\n",
__func__);
return -EINVAL;
}
return 0;
}
static u32 mlx5e_ethtool2ptys_adver_link(const unsigned long *link_modes)
{
u32 i, ptys_modes = 0;
@ -1103,13 +1119,9 @@ int mlx5e_ethtool_set_link_ksettings(struct mlx5e_priv *priv,
link_modes = autoneg == AUTONEG_ENABLE ? ethtool2ptys_adver_func(adver) :
mlx5e_port_speed2linkmodes(mdev, speed, !ext);
if ((link_modes & MLX5E_PROT_MASK(MLX5E_56GBASE_R4)) &&
autoneg != AUTONEG_ENABLE) {
netdev_err(priv->netdev, "%s: 56G link speed requires autoneg enabled\n",
__func__);
err = -EINVAL;
err = mlx5e_speed_validate(priv->netdev, ext, link_modes, autoneg);
if (err)
goto out;
}
link_modes = link_modes & eproto.cap;
if (!link_modes) {


@ -942,6 +942,7 @@ static int mlx5e_create_ttc_table_groups(struct mlx5e_ttc_table *ttc,
in = kvzalloc(inlen, GFP_KERNEL);
if (!in) {
kfree(ft->g);
ft->g = NULL;
return -ENOMEM;
}
@ -1087,6 +1088,7 @@ static int mlx5e_create_inner_ttc_table_groups(struct mlx5e_ttc_table *ttc)
in = kvzalloc(inlen, GFP_KERNEL);
if (!in) {
kfree(ft->g);
ft->g = NULL;
return -ENOMEM;
}
@ -1390,6 +1392,7 @@ static int mlx5e_create_l2_table_groups(struct mlx5e_l2_table *l2_table)
ft->g[ft->num_groups] = NULL;
mlx5e_destroy_groups(ft);
kvfree(in);
kfree(ft->g);
return err;
}
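
The first two hunks in this file add ft->g = NULL after kfree() so a later retry or the destroy path cannot double-free or dereference the stale group array; the third adds a missing kfree(ft->g) on the group-creation error path. The idiom, in brief:

    if (!in) {
            kfree(ft->g);
            ft->g = NULL;   /* guard against double-free on teardown */
            return -ENOMEM;
    }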


@ -3161,7 +3161,8 @@ static void mlx5e_modify_admin_state(struct mlx5_core_dev *mdev,
mlx5_set_port_admin_status(mdev, state);
if (mlx5_eswitch_mode(mdev) != MLX5_ESWITCH_LEGACY)
if (mlx5_eswitch_mode(mdev) == MLX5_ESWITCH_OFFLOADS ||
!MLX5_CAP_GEN(mdev, uplink_follow))
return;
if (state == MLX5_PORT_UP)


@ -682,9 +682,9 @@ void mlx5e_tx_mpwqe_ensure_complete(struct mlx5e_txqsq *sq)
static bool mlx5e_txwqe_build_eseg(struct mlx5e_priv *priv, struct mlx5e_txqsq *sq,
struct sk_buff *skb, struct mlx5e_accel_tx_state *accel,
struct mlx5_wqe_eth_seg *eseg)
struct mlx5_wqe_eth_seg *eseg, u16 ihs)
{
if (unlikely(!mlx5e_accel_tx_eseg(priv, skb, eseg)))
if (unlikely(!mlx5e_accel_tx_eseg(priv, skb, eseg, ihs)))
return false;
mlx5e_txwqe_build_eseg_csum(sq, skb, accel, eseg);
@ -714,7 +714,8 @@ netdev_tx_t mlx5e_xmit(struct sk_buff *skb, struct net_device *dev)
if (mlx5e_tx_skb_supports_mpwqe(skb, &attr)) {
struct mlx5_wqe_eth_seg eseg = {};
if (unlikely(!mlx5e_txwqe_build_eseg(priv, sq, skb, &accel, &eseg)))
if (unlikely(!mlx5e_txwqe_build_eseg(priv, sq, skb, &accel, &eseg,
attr.ihs)))
return NETDEV_TX_OK;
mlx5e_sq_xmit_mpwqe(sq, skb, &eseg, netdev_xmit_more());
@ -731,7 +732,7 @@ netdev_tx_t mlx5e_xmit(struct sk_buff *skb, struct net_device *dev)
/* May update the WQE, but may not post other WQEs. */
mlx5e_accel_tx_finish(sq, wqe, &accel,
(struct mlx5_wqe_inline_seg *)(wqe->data + wqe_attr.ds_cnt_inl));
if (unlikely(!mlx5e_txwqe_build_eseg(priv, sq, skb, &accel, &wqe->eth)))
if (unlikely(!mlx5e_txwqe_build_eseg(priv, sq, skb, &accel, &wqe->eth, attr.ihs)))
return NETDEV_TX_OK;
mlx5e_sq_xmit_wqe(sq, skb, &attr, &wqe_attr, wqe, pi, netdev_xmit_more());


@ -95,22 +95,21 @@ int esw_acl_egress_lgcy_setup(struct mlx5_eswitch *esw,
return 0;
}
if (!IS_ERR_OR_NULL(vport->egress.acl))
return 0;
if (!vport->egress.acl) {
vport->egress.acl = esw_acl_table_create(esw, vport->vport,
MLX5_FLOW_NAMESPACE_ESW_EGRESS,
table_size);
if (IS_ERR(vport->egress.acl)) {
err = PTR_ERR(vport->egress.acl);
vport->egress.acl = NULL;
goto out;
}
vport->egress.acl = esw_acl_table_create(esw, vport->vport,
MLX5_FLOW_NAMESPACE_ESW_EGRESS,
table_size);
if (IS_ERR(vport->egress.acl)) {
err = PTR_ERR(vport->egress.acl);
vport->egress.acl = NULL;
goto out;
err = esw_acl_egress_lgcy_groups_create(esw, vport);
if (err)
goto out;
}
err = esw_acl_egress_lgcy_groups_create(esw, vport);
if (err)
goto out;
esw_debug(esw->dev,
"vport[%d] configure egress rules, vlan(%d) qos(%d)\n",
vport->vport, vport->info.vlan, vport->info.qos);


@ -564,7 +564,9 @@ void mlx5_lag_add(struct mlx5_core_dev *dev, struct net_device *netdev)
struct mlx5_core_dev *tmp_dev;
int i, err;
if (!MLX5_CAP_GEN(dev, vport_group_manager))
if (!MLX5_CAP_GEN(dev, vport_group_manager) ||
!MLX5_CAP_GEN(dev, lag_master) ||
MLX5_CAP_GEN(dev, num_lag_ports) != MLX5_MAX_PORTS)
return;
tmp_dev = mlx5_get_next_phys_dev(dev);
@ -582,12 +584,9 @@ void mlx5_lag_add(struct mlx5_core_dev *dev, struct net_device *netdev)
if (mlx5_lag_dev_add_pf(ldev, dev, netdev) < 0)
return;
for (i = 0; i < MLX5_MAX_PORTS; i++) {
tmp_dev = ldev->pf[i].dev;
if (!tmp_dev || !MLX5_CAP_GEN(tmp_dev, lag_master) ||
MLX5_CAP_GEN(tmp_dev, num_lag_ports) != MLX5_MAX_PORTS)
for (i = 0; i < MLX5_MAX_PORTS; i++)
if (!ldev->pf[i].dev)
break;
}
if (i >= MLX5_MAX_PORTS)
ldev->flags |= MLX5_LAG_FLAG_READY;


@ -1368,8 +1368,10 @@ static int init_one(struct pci_dev *pdev, const struct pci_device_id *id)
MLX5_COREDEV_VF : MLX5_COREDEV_PF;
dev->priv.adev_idx = mlx5_adev_idx_alloc();
if (dev->priv.adev_idx < 0)
return dev->priv.adev_idx;
if (dev->priv.adev_idx < 0) {
err = dev->priv.adev_idx;
goto adev_init_err;
}
err = mlx5_mdev_init(dev, prof_sel);
if (err)
@ -1403,6 +1405,7 @@ static int init_one(struct pci_dev *pdev, const struct pci_device_id *id)
mlx5_mdev_uninit(dev);
mdev_init_err:
mlx5_adev_idx_free(dev->priv.adev_idx);
adev_init_err:
mlx5_devlink_free(devlink);
return err;


@ -116,7 +116,7 @@ static int mlx5_rdma_enable_roce_steering(struct mlx5_core_dev *dev)
static void mlx5_rdma_del_roce_addr(struct mlx5_core_dev *dev)
{
mlx5_core_roce_gid_set(dev, 0, 0, 0,
NULL, NULL, false, 0, 0);
NULL, NULL, false, 0, 1);
}
static void mlx5_rdma_make_default_gid(struct mlx5_core_dev *dev, union ib_gid *gid)


@ -506,10 +506,14 @@ static int mac_sonic_platform_probe(struct platform_device *pdev)
err = register_netdev(dev);
if (err)
goto out;
goto undo_probe;
return 0;
undo_probe:
dma_free_coherent(lp->device,
SIZEOF_SONIC_DESC * SONIC_BUS_SCALE(lp->dma_bitmode),
lp->descriptors, lp->descriptors_laddr);
out:
free_netdev(dev);
@ -584,12 +588,16 @@ static int mac_sonic_nubus_probe(struct nubus_board *board)
err = register_netdev(ndev);
if (err)
goto out;
goto undo_probe;
nubus_set_drvdata(board, ndev);
return 0;
undo_probe:
dma_free_coherent(lp->device,
SIZEOF_SONIC_DESC * SONIC_BUS_SCALE(lp->dma_bitmode),
lp->descriptors, lp->descriptors_laddr);
out:
free_netdev(ndev);
return err;


@ -229,11 +229,14 @@ int xtsonic_probe(struct platform_device *pdev)
sonic_msg_init(dev);
if ((err = register_netdev(dev)))
goto out1;
goto undo_probe1;
return 0;
out1:
undo_probe1:
dma_free_coherent(lp->device,
SIZEOF_SONIC_DESC * SONIC_BUS_SCALE(lp->dma_bitmode),
lp->descriptors, lp->descriptors_laddr);
release_region(dev->base_addr, SONIC_MEM_SIZE);
out:
free_netdev(dev);


@ -78,6 +78,7 @@ config QED
depends on PCI
select ZLIB_INFLATE
select CRC8
select CRC32
select NET_DEVLINK
help
This enables the support for Marvell FastLinQ adapters family.


@ -64,6 +64,7 @@ struct emac_variant {
* @variant: reference to the current board variant
* @regmap: regmap for using the syscon
* @internal_phy_powered: Does the internal PHY is enabled
* @use_internal_phy: Is the internal PHY selected for use
* @mux_handle: Internal pointer used by mdio-mux lib
*/
struct sunxi_priv_data {
@ -74,6 +75,7 @@ struct sunxi_priv_data {
const struct emac_variant *variant;
struct regmap_field *regmap_field;
bool internal_phy_powered;
bool use_internal_phy;
void *mux_handle;
};
@ -539,8 +541,11 @@ static const struct stmmac_dma_ops sun8i_dwmac_dma_ops = {
.dma_interrupt = sun8i_dwmac_dma_interrupt,
};
static int sun8i_dwmac_power_internal_phy(struct stmmac_priv *priv);
static int sun8i_dwmac_init(struct platform_device *pdev, void *priv)
{
struct net_device *ndev = platform_get_drvdata(pdev);
struct sunxi_priv_data *gmac = priv;
int ret;
@ -554,13 +559,25 @@ static int sun8i_dwmac_init(struct platform_device *pdev, void *priv)
ret = clk_prepare_enable(gmac->tx_clk);
if (ret) {
if (gmac->regulator)
regulator_disable(gmac->regulator);
dev_err(&pdev->dev, "Could not enable AHB clock\n");
return ret;
goto err_disable_regulator;
}
if (gmac->use_internal_phy) {
ret = sun8i_dwmac_power_internal_phy(netdev_priv(ndev));
if (ret)
goto err_disable_clk;
}
return 0;
err_disable_clk:
clk_disable_unprepare(gmac->tx_clk);
err_disable_regulator:
if (gmac->regulator)
regulator_disable(gmac->regulator);
return ret;
}
static void sun8i_dwmac_core_init(struct mac_device_info *hw,
@ -831,7 +848,6 @@ static int mdio_mux_syscon_switch_fn(int current_child, int desired_child,
struct sunxi_priv_data *gmac = priv->plat->bsp_priv;
u32 reg, val;
int ret = 0;
bool need_power_ephy = false;
if (current_child ^ desired_child) {
regmap_field_read(gmac->regmap_field, &reg);
@ -839,13 +855,12 @@ static int mdio_mux_syscon_switch_fn(int current_child, int desired_child,
case DWMAC_SUN8I_MDIO_MUX_INTERNAL_ID:
dev_info(priv->device, "Switch mux to internal PHY");
val = (reg & ~H3_EPHY_MUX_MASK) | H3_EPHY_SELECT;
need_power_ephy = true;
gmac->use_internal_phy = true;
break;
case DWMAC_SUN8I_MDIO_MUX_EXTERNAL_ID:
dev_info(priv->device, "Switch mux to external PHY");
val = (reg & ~H3_EPHY_MUX_MASK) | H3_EPHY_SHUTDOWN;
need_power_ephy = false;
gmac->use_internal_phy = false;
break;
default:
dev_err(priv->device, "Invalid child ID %x\n",
@ -853,7 +868,7 @@ static int mdio_mux_syscon_switch_fn(int current_child, int desired_child,
return -EINVAL;
}
regmap_field_write(gmac->regmap_field, val);
if (need_power_ephy) {
if (gmac->use_internal_phy) {
ret = sun8i_dwmac_power_internal_phy(priv);
if (ret)
return ret;
@ -883,22 +898,23 @@ static int sun8i_dwmac_register_mdio_mux(struct stmmac_priv *priv)
return ret;
}
static int sun8i_dwmac_set_syscon(struct stmmac_priv *priv)
static int sun8i_dwmac_set_syscon(struct device *dev,
struct plat_stmmacenet_data *plat)
{
struct sunxi_priv_data *gmac = priv->plat->bsp_priv;
struct device_node *node = priv->device->of_node;
struct sunxi_priv_data *gmac = plat->bsp_priv;
struct device_node *node = dev->of_node;
int ret;
u32 reg, val;
ret = regmap_field_read(gmac->regmap_field, &val);
if (ret) {
dev_err(priv->device, "Fail to read from regmap field.\n");
dev_err(dev, "Fail to read from regmap field.\n");
return ret;
}
reg = gmac->variant->default_syscon_value;
if (reg != val)
dev_warn(priv->device,
dev_warn(dev,
"Current syscon value is not the default %x (expect %x)\n",
val, reg);
@ -911,9 +927,9 @@ static int sun8i_dwmac_set_syscon(struct stmmac_priv *priv)
/* Force EPHY xtal frequency to 24MHz. */
reg |= H3_EPHY_CLK_SEL;
ret = of_mdio_parse_addr(priv->device, priv->plat->phy_node);
ret = of_mdio_parse_addr(dev, plat->phy_node);
if (ret < 0) {
dev_err(priv->device, "Could not parse MDIO addr\n");
dev_err(dev, "Could not parse MDIO addr\n");
return ret;
}
/* of_mdio_parse_addr returns a valid (0 ~ 31) PHY
@ -929,17 +945,17 @@ static int sun8i_dwmac_set_syscon(struct stmmac_priv *priv)
if (!of_property_read_u32(node, "allwinner,tx-delay-ps", &val)) {
if (val % 100) {
dev_err(priv->device, "tx-delay must be a multiple of 100\n");
dev_err(dev, "tx-delay must be a multiple of 100\n");
return -EINVAL;
}
val /= 100;
dev_dbg(priv->device, "set tx-delay to %x\n", val);
dev_dbg(dev, "set tx-delay to %x\n", val);
if (val <= gmac->variant->tx_delay_max) {
reg &= ~(gmac->variant->tx_delay_max <<
SYSCON_ETXDC_SHIFT);
reg |= (val << SYSCON_ETXDC_SHIFT);
} else {
dev_err(priv->device, "Invalid TX clock delay: %d\n",
dev_err(dev, "Invalid TX clock delay: %d\n",
val);
return -EINVAL;
}
@ -947,17 +963,17 @@ static int sun8i_dwmac_set_syscon(struct stmmac_priv *priv)
if (!of_property_read_u32(node, "allwinner,rx-delay-ps", &val)) {
if (val % 100) {
dev_err(priv->device, "rx-delay must be a multiple of 100\n");
dev_err(dev, "rx-delay must be a multiple of 100\n");
return -EINVAL;
}
val /= 100;
dev_dbg(priv->device, "set rx-delay to %x\n", val);
dev_dbg(dev, "set rx-delay to %x\n", val);
if (val <= gmac->variant->rx_delay_max) {
reg &= ~(gmac->variant->rx_delay_max <<
SYSCON_ERXDC_SHIFT);
reg |= (val << SYSCON_ERXDC_SHIFT);
} else {
dev_err(priv->device, "Invalid RX clock delay: %d\n",
dev_err(dev, "Invalid RX clock delay: %d\n",
val);
return -EINVAL;
}
@ -968,7 +984,7 @@ static int sun8i_dwmac_set_syscon(struct stmmac_priv *priv)
if (gmac->variant->support_rmii)
reg &= ~SYSCON_RMII_EN;
switch (priv->plat->interface) {
switch (plat->interface) {
case PHY_INTERFACE_MODE_MII:
/* default */
break;
@ -982,8 +998,8 @@ static int sun8i_dwmac_set_syscon(struct stmmac_priv *priv)
reg |= SYSCON_RMII_EN | SYSCON_ETCS_EXT_GMII;
break;
default:
dev_err(priv->device, "Unsupported interface mode: %s",
phy_modes(priv->plat->interface));
dev_err(dev, "Unsupported interface mode: %s",
phy_modes(plat->interface));
return -EINVAL;
}
@ -1004,17 +1020,10 @@ static void sun8i_dwmac_exit(struct platform_device *pdev, void *priv)
struct sunxi_priv_data *gmac = priv;
if (gmac->variant->soc_has_internal_phy) {
/* sun8i_dwmac_exit could be called with mdiomux uninit */
if (gmac->mux_handle)
mdio_mux_uninit(gmac->mux_handle);
if (gmac->internal_phy_powered)
sun8i_dwmac_unpower_internal_phy(gmac);
}
sun8i_dwmac_unset_syscon(gmac);
reset_control_put(gmac->rst_ephy);
clk_disable_unprepare(gmac->tx_clk);
if (gmac->regulator)
@ -1049,16 +1058,11 @@ static struct mac_device_info *sun8i_dwmac_setup(void *ppriv)
{
struct mac_device_info *mac;
struct stmmac_priv *priv = ppriv;
int ret;
mac = devm_kzalloc(priv->device, sizeof(*mac), GFP_KERNEL);
if (!mac)
return NULL;
ret = sun8i_dwmac_set_syscon(priv);
if (ret)
return NULL;
mac->pcsr = priv->ioaddr;
mac->mac = &sun8i_dwmac_ops;
mac->dma = &sun8i_dwmac_dma_ops;
@ -1134,10 +1138,6 @@ static int sun8i_dwmac_probe(struct platform_device *pdev)
if (ret)
return ret;
plat_dat = stmmac_probe_config_dt(pdev, &stmmac_res.mac);
if (IS_ERR(plat_dat))
return PTR_ERR(plat_dat);
gmac = devm_kzalloc(dev, sizeof(*gmac), GFP_KERNEL);
if (!gmac)
return -ENOMEM;
@ -1201,11 +1201,15 @@ static int sun8i_dwmac_probe(struct platform_device *pdev)
ret = of_get_phy_mode(dev->of_node, &interface);
if (ret)
return -EINVAL;
plat_dat->interface = interface;
plat_dat = stmmac_probe_config_dt(pdev, &stmmac_res.mac);
if (IS_ERR(plat_dat))
return PTR_ERR(plat_dat);
/* platform data specifying hardware features and callbacks.
* hardware features were copied from Allwinner drivers.
*/
plat_dat->interface = interface;
plat_dat->rx_coe = STMMAC_RX_COE_TYPE2;
plat_dat->tx_coe = 1;
plat_dat->has_sun8i = true;
@ -1214,9 +1218,13 @@ static int sun8i_dwmac_probe(struct platform_device *pdev)
plat_dat->exit = sun8i_dwmac_exit;
plat_dat->setup = sun8i_dwmac_setup;
ret = sun8i_dwmac_set_syscon(&pdev->dev, plat_dat);
if (ret)
goto dwmac_deconfig;
ret = sun8i_dwmac_init(pdev, plat_dat->bsp_priv);
if (ret)
return ret;
goto dwmac_syscon;
ret = stmmac_dvr_probe(&pdev->dev, plat_dat, &stmmac_res);
if (ret)
@ -1230,7 +1238,7 @@ static int sun8i_dwmac_probe(struct platform_device *pdev)
if (gmac->variant->soc_has_internal_phy) {
ret = get_ephy_nodes(priv);
if (ret)
goto dwmac_exit;
goto dwmac_remove;
ret = sun8i_dwmac_register_mdio_mux(priv);
if (ret) {
dev_err(&pdev->dev, "Failed to register mux\n");
@ -1239,15 +1247,42 @@ static int sun8i_dwmac_probe(struct platform_device *pdev)
} else {
ret = sun8i_dwmac_reset(priv);
if (ret)
goto dwmac_exit;
goto dwmac_remove;
}
return ret;
dwmac_mux:
sun8i_dwmac_unset_syscon(gmac);
reset_control_put(gmac->rst_ephy);
clk_put(gmac->ephy_clk);
dwmac_remove:
stmmac_dvr_remove(&pdev->dev);
dwmac_exit:
sun8i_dwmac_exit(pdev, gmac);
dwmac_syscon:
sun8i_dwmac_unset_syscon(gmac);
dwmac_deconfig:
stmmac_remove_config_dt(pdev, plat_dat);
return ret;
}
static int sun8i_dwmac_remove(struct platform_device *pdev)
{
struct net_device *ndev = platform_get_drvdata(pdev);
struct stmmac_priv *priv = netdev_priv(ndev);
struct sunxi_priv_data *gmac = priv->plat->bsp_priv;
if (gmac->variant->soc_has_internal_phy) {
mdio_mux_uninit(gmac->mux_handle);
sun8i_dwmac_unpower_internal_phy(gmac);
reset_control_put(gmac->rst_ephy);
clk_put(gmac->ephy_clk);
}
stmmac_pltfr_remove(pdev);
return ret;
sun8i_dwmac_unset_syscon(gmac);
return 0;
}
static const struct of_device_id sun8i_dwmac_match[] = {
@ -1269,7 +1304,7 @@ MODULE_DEVICE_TABLE(of, sun8i_dwmac_match);
static struct platform_driver sun8i_dwmac_driver = {
.probe = sun8i_dwmac_probe,
.remove = stmmac_pltfr_remove,
.remove = sun8i_dwmac_remove,
.driver = {
.name = "dwmac-sun8i",
.pm = &stmmac_pltfr_pm_ops,
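
Because this rework is spread across many hunks, the new removal path is easier to read reassembled: mdio_mux_uninit(), the EPHY power-down, and the reset/clock puts move here out of sun8i_dwmac_exit(), so exit only undoes what init set up. A sketch assembled from the hunks above:

    static int sun8i_dwmac_remove(struct platform_device *pdev)
    {
            struct net_device *ndev = platform_get_drvdata(pdev);
            struct stmmac_priv *priv = netdev_priv(ndev);
            struct sunxi_priv_data *gmac = priv->plat->bsp_priv;

            if (gmac->variant->soc_has_internal_phy) {
                    mdio_mux_uninit(gmac->mux_handle);
                    sun8i_dwmac_unpower_internal_phy(gmac);
                    reset_control_put(gmac->rst_ephy);
                    clk_put(gmac->ephy_clk);
            }

            stmmac_pltfr_remove(pdev);
            sun8i_dwmac_unset_syscon(gmac);

            return 0;
    }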


@ -1199,7 +1199,10 @@ cdc_ncm_fill_tx_frame(struct usbnet *dev, struct sk_buff *skb, __le32 sign)
* accordingly. Otherwise, we should check here.
*/
if (ctx->drvflags & CDC_NCM_FLAG_NDP_TO_END)
delayed_ndp_size = ALIGN(ctx->max_ndp_size, ctx->tx_ndp_modulus);
delayed_ndp_size = ctx->max_ndp_size +
max_t(u32,
ctx->tx_ndp_modulus,
ctx->tx_modulus + ctx->tx_remainder) - 1;
else
delayed_ndp_size = 0;
@ -1410,7 +1413,8 @@ cdc_ncm_fill_tx_frame(struct usbnet *dev, struct sk_buff *skb, __le32 sign)
if (!(dev->driver_info->flags & FLAG_SEND_ZLP) &&
skb_out->len > ctx->min_tx_pkt) {
padding_count = ctx->tx_curr_size - skb_out->len;
skb_put_zero(skb_out, padding_count);
if (!WARN_ON(padding_count > ctx->tx_curr_size))
skb_put_zero(skb_out, padding_count);
} else if (skb_out->len < ctx->tx_curr_size &&
(skb_out->len % dev->maxpacket) == 0) {
skb_put_u8(skb_out, 0); /* force short packet */
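
Two related bounds fixes here. First, when the NDP is deferred to the end of the NTB, the reserved room must cover the NDP itself plus the worst-case alignment padding, which is bounded by max(tx_ndp_modulus, tx_modulus + tx_remainder) - 1 bytes; the old ALIGN() of only max_ndp_size could under-reserve when datagram alignment dominates. Second, the zero-padding at transmit time is now skipped (with a WARN_ON) if skb_out->len already exceeds tx_curr_size, where padding_count would exceed the buffer and skb_put_zero() would write past it. The corrected reserve computation:

    delayed_ndp_size = ctx->max_ndp_size +
            max_t(u32, ctx->tx_ndp_modulus,
                  ctx->tx_modulus + ctx->tx_remainder) - 1;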


@ -282,6 +282,7 @@ config SLIC_DS26522
tristate "Slic Maxim ds26522 card support"
depends on SPI
depends on FSL_SOC || ARCH_MXC || ARCH_LAYERSCAPE || COMPILE_TEST
select BITREVERSE
help
This module initializes and configures the slic maxim card
in T1 or E1 mode.


@ -2,6 +2,7 @@
config WIL6210
tristate "Wilocity 60g WiFi card wil6210 support"
select WANT_DEV_COREDUMP
select CRC32
depends on CFG80211
depends on PCI
default n


@ -64,6 +64,7 @@ config DP83640_PHY
depends on NETWORK_PHY_TIMESTAMPING
depends on PHYLIB
depends on PTP_1588_CLOCK
select CRC32
help
Supports the DP83640 PHYTER with IEEE 1588 features.
@ -78,6 +79,7 @@ config DP83640_PHY
config PTP_1588_CLOCK_INES
tristate "ZHAW InES PTP time stamping IP core"
depends on NETWORK_PHY_TIMESTAMPING
depends on HAS_IOMEM
depends on PHYLIB
depends on PTP_1588_CLOCK
help


@ -881,6 +881,7 @@ config REGULATOR_QCOM_RPM
config REGULATOR_QCOM_RPMH
tristate "Qualcomm Technologies, Inc. RPMh regulator driver"
depends on QCOM_RPMH || (QCOM_RPMH=n && COMPILE_TEST)
depends on QCOM_COMMAND_DB || (QCOM_COMMAND_DB=n && COMPILE_TEST)
help
This driver supports control of PMIC regulators via the RPMh hardware
block found on Qualcomm Technologies Inc. SoCs. RPMh regulator


@ -15,6 +15,36 @@
#include <linux/regulator/of_regulator.h>
#include <linux/slab.h>
/* Typical regulator startup times as per data sheet in uS */
#define BD71847_BUCK1_STARTUP_TIME 144
#define BD71847_BUCK2_STARTUP_TIME 162
#define BD71847_BUCK3_STARTUP_TIME 162
#define BD71847_BUCK4_STARTUP_TIME 240
#define BD71847_BUCK5_STARTUP_TIME 270
#define BD71847_BUCK6_STARTUP_TIME 200
#define BD71847_LDO1_STARTUP_TIME 440
#define BD71847_LDO2_STARTUP_TIME 370
#define BD71847_LDO3_STARTUP_TIME 310
#define BD71847_LDO4_STARTUP_TIME 400
#define BD71847_LDO5_STARTUP_TIME 530
#define BD71847_LDO6_STARTUP_TIME 400
#define BD71837_BUCK1_STARTUP_TIME 160
#define BD71837_BUCK2_STARTUP_TIME 180
#define BD71837_BUCK3_STARTUP_TIME 180
#define BD71837_BUCK4_STARTUP_TIME 180
#define BD71837_BUCK5_STARTUP_TIME 160
#define BD71837_BUCK6_STARTUP_TIME 240
#define BD71837_BUCK7_STARTUP_TIME 220
#define BD71837_BUCK8_STARTUP_TIME 200
#define BD71837_LDO1_STARTUP_TIME 440
#define BD71837_LDO2_STARTUP_TIME 370
#define BD71837_LDO3_STARTUP_TIME 310
#define BD71837_LDO4_STARTUP_TIME 400
#define BD71837_LDO5_STARTUP_TIME 310
#define BD71837_LDO6_STARTUP_TIME 400
#define BD71837_LDO7_STARTUP_TIME 530
/*
* BD718(37/47/50) have two "enable control modes". ON/OFF can either be
* controlled by software - or by PMIC internal HW state machine. Whether
@ -613,6 +643,7 @@ static struct bd718xx_regulator_data bd71847_regulators[] = {
.vsel_mask = DVS_BUCK_RUN_MASK,
.enable_reg = BD718XX_REG_BUCK1_CTRL,
.enable_mask = BD718XX_BUCK_EN,
.enable_time = BD71847_BUCK1_STARTUP_TIME,
.owner = THIS_MODULE,
.of_parse_cb = buck_set_hw_dvs_levels,
},
@ -646,6 +677,7 @@ static struct bd718xx_regulator_data bd71847_regulators[] = {
.vsel_mask = DVS_BUCK_RUN_MASK,
.enable_reg = BD718XX_REG_BUCK2_CTRL,
.enable_mask = BD718XX_BUCK_EN,
.enable_time = BD71847_BUCK2_STARTUP_TIME,
.owner = THIS_MODULE,
.of_parse_cb = buck_set_hw_dvs_levels,
},
@ -680,6 +712,7 @@ static struct bd718xx_regulator_data bd71847_regulators[] = {
.linear_range_selectors = bd71847_buck3_volt_range_sel,
.enable_reg = BD718XX_REG_1ST_NODVS_BUCK_CTRL,
.enable_mask = BD718XX_BUCK_EN,
.enable_time = BD71847_BUCK3_STARTUP_TIME,
.owner = THIS_MODULE,
},
.init = {
@ -706,6 +739,7 @@ static struct bd718xx_regulator_data bd71847_regulators[] = {
.vsel_range_mask = BD71847_BUCK4_RANGE_MASK,
.linear_range_selectors = bd71847_buck4_volt_range_sel,
.enable_mask = BD718XX_BUCK_EN,
.enable_time = BD71847_BUCK4_STARTUP_TIME,
.owner = THIS_MODULE,
},
.init = {
@ -727,6 +761,7 @@ static struct bd718xx_regulator_data bd71847_regulators[] = {
.vsel_mask = BD718XX_3RD_NODVS_BUCK_MASK,
.enable_reg = BD718XX_REG_3RD_NODVS_BUCK_CTRL,
.enable_mask = BD718XX_BUCK_EN,
.enable_time = BD71847_BUCK5_STARTUP_TIME,
.owner = THIS_MODULE,
},
.init = {
@ -750,6 +785,7 @@ static struct bd718xx_regulator_data bd71847_regulators[] = {
.vsel_mask = BD718XX_4TH_NODVS_BUCK_MASK,
.enable_reg = BD718XX_REG_4TH_NODVS_BUCK_CTRL,
.enable_mask = BD718XX_BUCK_EN,
.enable_time = BD71847_BUCK6_STARTUP_TIME,
.owner = THIS_MODULE,
},
.init = {
@ -775,6 +811,7 @@ static struct bd718xx_regulator_data bd71847_regulators[] = {
.linear_range_selectors = bd718xx_ldo1_volt_range_sel,
.enable_reg = BD718XX_REG_LDO1_VOLT,
.enable_mask = BD718XX_LDO_EN,
.enable_time = BD71847_LDO1_STARTUP_TIME,
.owner = THIS_MODULE,
},
.init = {
@ -796,6 +833,7 @@ static struct bd718xx_regulator_data bd71847_regulators[] = {
.n_voltages = ARRAY_SIZE(ldo_2_volts),
.enable_reg = BD718XX_REG_LDO2_VOLT,
.enable_mask = BD718XX_LDO_EN,
.enable_time = BD71847_LDO2_STARTUP_TIME,
.owner = THIS_MODULE,
},
.init = {
@ -818,6 +856,7 @@ static struct bd718xx_regulator_data bd71847_regulators[] = {
.vsel_mask = BD718XX_LDO3_MASK,
.enable_reg = BD718XX_REG_LDO3_VOLT,
.enable_mask = BD718XX_LDO_EN,
.enable_time = BD71847_LDO3_STARTUP_TIME,
.owner = THIS_MODULE,
},
.init = {
@ -840,6 +879,7 @@ static struct bd718xx_regulator_data bd71847_regulators[] = {
.vsel_mask = BD718XX_LDO4_MASK,
.enable_reg = BD718XX_REG_LDO4_VOLT,
.enable_mask = BD718XX_LDO_EN,
.enable_time = BD71847_LDO4_STARTUP_TIME,
.owner = THIS_MODULE,
},
.init = {
@ -865,6 +905,7 @@ static struct bd718xx_regulator_data bd71847_regulators[] = {
.linear_range_selectors = bd71847_ldo5_volt_range_sel,
.enable_reg = BD718XX_REG_LDO5_VOLT,
.enable_mask = BD718XX_LDO_EN,
.enable_time = BD71847_LDO5_STARTUP_TIME,
.owner = THIS_MODULE,
},
.init = {
@ -889,6 +930,7 @@ static struct bd718xx_regulator_data bd71847_regulators[] = {
.vsel_mask = BD718XX_LDO6_MASK,
.enable_reg = BD718XX_REG_LDO6_VOLT,
.enable_mask = BD718XX_LDO_EN,
.enable_time = BD71847_LDO6_STARTUP_TIME,
.owner = THIS_MODULE,
},
.init = {
@ -942,6 +984,7 @@ static struct bd718xx_regulator_data bd71837_regulators[] = {
.vsel_mask = DVS_BUCK_RUN_MASK,
.enable_reg = BD718XX_REG_BUCK1_CTRL,
.enable_mask = BD718XX_BUCK_EN,
.enable_time = BD71837_BUCK1_STARTUP_TIME,
.owner = THIS_MODULE,
.of_parse_cb = buck_set_hw_dvs_levels,
},
@ -975,6 +1018,7 @@ static struct bd718xx_regulator_data bd71837_regulators[] = {
.vsel_mask = DVS_BUCK_RUN_MASK,
.enable_reg = BD718XX_REG_BUCK2_CTRL,
.enable_mask = BD718XX_BUCK_EN,
.enable_time = BD71837_BUCK2_STARTUP_TIME,
.owner = THIS_MODULE,
.of_parse_cb = buck_set_hw_dvs_levels,
},
@ -1005,6 +1049,7 @@ static struct bd718xx_regulator_data bd71837_regulators[] = {
.vsel_mask = DVS_BUCK_RUN_MASK,
.enable_reg = BD71837_REG_BUCK3_CTRL,
.enable_mask = BD718XX_BUCK_EN,
.enable_time = BD71837_BUCK3_STARTUP_TIME,
.owner = THIS_MODULE,
.of_parse_cb = buck_set_hw_dvs_levels,
},
@ -1033,6 +1078,7 @@ static struct bd718xx_regulator_data bd71837_regulators[] = {
.vsel_mask = DVS_BUCK_RUN_MASK,
.enable_reg = BD71837_REG_BUCK4_CTRL,
.enable_mask = BD718XX_BUCK_EN,
.enable_time = BD71837_BUCK4_STARTUP_TIME,
.owner = THIS_MODULE,
.of_parse_cb = buck_set_hw_dvs_levels,
},
@ -1065,6 +1111,7 @@ static struct bd718xx_regulator_data bd71837_regulators[] = {
.linear_range_selectors = bd71837_buck5_volt_range_sel,
.enable_reg = BD718XX_REG_1ST_NODVS_BUCK_CTRL,
.enable_mask = BD718XX_BUCK_EN,
.enable_time = BD71837_BUCK5_STARTUP_TIME,
.owner = THIS_MODULE,
},
.init = {
@ -1088,6 +1135,7 @@ static struct bd718xx_regulator_data bd71837_regulators[] = {
.vsel_mask = BD71837_BUCK6_MASK,
.enable_reg = BD718XX_REG_2ND_NODVS_BUCK_CTRL,
.enable_mask = BD718XX_BUCK_EN,
.enable_time = BD71837_BUCK6_STARTUP_TIME,
.owner = THIS_MODULE,
},
.init = {
@ -1109,6 +1157,7 @@ static struct bd718xx_regulator_data bd71837_regulators[] = {
.vsel_mask = BD718XX_3RD_NODVS_BUCK_MASK,
.enable_reg = BD718XX_REG_3RD_NODVS_BUCK_CTRL,
.enable_mask = BD718XX_BUCK_EN,
.enable_time = BD71837_BUCK7_STARTUP_TIME,
.owner = THIS_MODULE,
},
.init = {
@ -1132,6 +1181,7 @@ static struct bd718xx_regulator_data bd71837_regulators[] = {
.vsel_mask = BD718XX_4TH_NODVS_BUCK_MASK,
.enable_reg = BD718XX_REG_4TH_NODVS_BUCK_CTRL,
.enable_mask = BD718XX_BUCK_EN,
.enable_time = BD71837_BUCK8_STARTUP_TIME,
.owner = THIS_MODULE,
},
.init = {
@ -1157,6 +1207,7 @@ static struct bd718xx_regulator_data bd71837_regulators[] = {
.linear_range_selectors = bd718xx_ldo1_volt_range_sel,
.enable_reg = BD718XX_REG_LDO1_VOLT,
.enable_mask = BD718XX_LDO_EN,
.enable_time = BD71837_LDO1_STARTUP_TIME,
.owner = THIS_MODULE,
},
.init = {
@ -1178,6 +1229,7 @@ static struct bd718xx_regulator_data bd71837_regulators[] = {
.n_voltages = ARRAY_SIZE(ldo_2_volts),
.enable_reg = BD718XX_REG_LDO2_VOLT,
.enable_mask = BD718XX_LDO_EN,
.enable_time = BD71837_LDO2_STARTUP_TIME,
.owner = THIS_MODULE,
},
.init = {
@ -1200,6 +1252,7 @@ static struct bd718xx_regulator_data bd71837_regulators[] = {
.vsel_mask = BD718XX_LDO3_MASK,
.enable_reg = BD718XX_REG_LDO3_VOLT,
.enable_mask = BD718XX_LDO_EN,
.enable_time = BD71837_LDO3_STARTUP_TIME,
.owner = THIS_MODULE,
},
.init = {
@ -1222,6 +1275,7 @@ static struct bd718xx_regulator_data bd71837_regulators[] = {
.vsel_mask = BD718XX_LDO4_MASK,
.enable_reg = BD718XX_REG_LDO4_VOLT,
.enable_mask = BD718XX_LDO_EN,
.enable_time = BD71837_LDO4_STARTUP_TIME,
.owner = THIS_MODULE,
},
.init = {
@ -1246,6 +1300,7 @@ static struct bd718xx_regulator_data bd71837_regulators[] = {
.vsel_mask = BD71837_LDO5_MASK,
.enable_reg = BD718XX_REG_LDO5_VOLT,
.enable_mask = BD718XX_LDO_EN,
.enable_time = BD71837_LDO5_STARTUP_TIME,
.owner = THIS_MODULE,
},
.init = {
@ -1272,6 +1327,7 @@ static struct bd718xx_regulator_data bd71837_regulators[] = {
.vsel_mask = BD718XX_LDO6_MASK,
.enable_reg = BD718XX_REG_LDO6_VOLT,
.enable_mask = BD718XX_LDO_EN,
.enable_time = BD71837_LDO6_STARTUP_TIME,
.owner = THIS_MODULE,
},
.init = {
@ -1296,6 +1352,7 @@ static struct bd718xx_regulator_data bd71837_regulators[] = {
.vsel_mask = BD71837_LDO7_MASK,
.enable_reg = BD71837_REG_LDO7_VOLT,
.enable_mask = BD718XX_LDO_EN,
.enable_time = BD71837_LDO7_STARTUP_TIME,
.owner = THIS_MODULE,
},
.init = {


@ -469,13 +469,17 @@ static int pf8x00_i2c_probe(struct i2c_client *client)
}
static const struct of_device_id pf8x00_dt_ids[] = {
{ .compatible = "nxp,pf8x00",},
{ .compatible = "nxp,pf8100",},
{ .compatible = "nxp,pf8121a",},
{ .compatible = "nxp,pf8200",},
{ }
};
MODULE_DEVICE_TABLE(of, pf8x00_dt_ids);
static const struct i2c_device_id pf8x00_i2c_id[] = {
{ "pf8x00", 0 },
{ "pf8100", 0 },
{ "pf8121a", 0 },
{ "pf8200", 0 },
{},
};
MODULE_DEVICE_TABLE(i2c, pf8x00_i2c_id);


@ -726,7 +726,7 @@ static const struct rpmh_vreg_hw_data pmic5_ftsmps510 = {
static const struct rpmh_vreg_hw_data pmic5_hfsmps515 = {
.regulator_type = VRM,
.ops = &rpmh_regulator_vrm_ops,
.voltage_range = REGULATOR_LINEAR_RANGE(2800000, 0, 4, 1600),
.voltage_range = REGULATOR_LINEAR_RANGE(2800000, 0, 4, 16000),
.n_voltages = 5,
.pmic_mode_map = pmic_mode_map_pmic5_smps,
.of_map_mode = rpmh_regulator_pmic4_smps_of_map_mode,
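
The one-character change is a units fix. With REGULATOR_LINEAR_RANGE(min_uV, min_sel, max_sel, step_uV), the worked arithmetic is:

    /* REGULATOR_LINEAR_RANGE(min_uV, min_sel, max_sel, step_uV)   */
    /*   new: 2800000 + (4 - 0) * 16000 = 2864000 uV top of range  */
    /*   old: 2800000 + (4 - 0) *  1600 = 2806400 uV (too low)     */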


@ -1079,7 +1079,8 @@ struct qeth_card *qeth_get_card_by_busid(char *bus_id);
void qeth_set_allowed_threads(struct qeth_card *card, unsigned long threads,
int clear_start_mask);
int qeth_threads_running(struct qeth_card *, unsigned long);
int qeth_set_offline(struct qeth_card *card, bool resetting);
int qeth_set_offline(struct qeth_card *card, const struct qeth_discipline *disc,
bool resetting);
int qeth_send_ipa_cmd(struct qeth_card *, struct qeth_cmd_buffer *,
int (*reply_cb)


@ -5507,12 +5507,12 @@ static int qeth_hardsetup_card(struct qeth_card *card, bool *carrier_ok)
return rc;
}
static int qeth_set_online(struct qeth_card *card)
static int qeth_set_online(struct qeth_card *card,
const struct qeth_discipline *disc)
{
bool carrier_ok;
int rc;
mutex_lock(&card->discipline_mutex);
mutex_lock(&card->conf_mutex);
QETH_CARD_TEXT(card, 2, "setonlin");
@ -5529,7 +5529,7 @@ static int qeth_set_online(struct qeth_card *card)
/* no need for locking / error handling at this early stage: */
qeth_set_real_num_tx_queues(card, qeth_tx_actual_queues(card));
rc = card->discipline->set_online(card, carrier_ok);
rc = disc->set_online(card, carrier_ok);
if (rc)
goto err_online;
@ -5537,7 +5537,6 @@ static int qeth_set_online(struct qeth_card *card)
kobject_uevent(&card->gdev->dev.kobj, KOBJ_CHANGE);
mutex_unlock(&card->conf_mutex);
mutex_unlock(&card->discipline_mutex);
return 0;
err_online:
@ -5552,15 +5551,14 @@ static int qeth_set_online(struct qeth_card *card)
qdio_free(CARD_DDEV(card));
mutex_unlock(&card->conf_mutex);
mutex_unlock(&card->discipline_mutex);
return rc;
}
int qeth_set_offline(struct qeth_card *card, bool resetting)
int qeth_set_offline(struct qeth_card *card, const struct qeth_discipline *disc,
bool resetting)
{
int rc, rc2, rc3;
mutex_lock(&card->discipline_mutex);
mutex_lock(&card->conf_mutex);
QETH_CARD_TEXT(card, 3, "setoffl");
@ -5581,7 +5579,7 @@ int qeth_set_offline(struct qeth_card *card, bool resetting)
cancel_work_sync(&card->rx_mode_work);
card->discipline->set_offline(card);
disc->set_offline(card);
qeth_qdio_clear_card(card, 0);
qeth_drain_output_queues(card);
@ -5602,16 +5600,19 @@ int qeth_set_offline(struct qeth_card *card, bool resetting)
kobject_uevent(&card->gdev->dev.kobj, KOBJ_CHANGE);
mutex_unlock(&card->conf_mutex);
mutex_unlock(&card->discipline_mutex);
return 0;
}
EXPORT_SYMBOL_GPL(qeth_set_offline);
static int qeth_do_reset(void *data)
{
const struct qeth_discipline *disc;
struct qeth_card *card = data;
int rc;
/* Lock-free, other users will block until we are done. */
disc = card->discipline;
QETH_CARD_TEXT(card, 2, "recover1");
if (!qeth_do_run_thread(card, QETH_RECOVER_THREAD))
return 0;
@ -5619,8 +5620,8 @@ static int qeth_do_reset(void *data)
dev_warn(&card->gdev->dev,
"A recovery process has been started for the device\n");
qeth_set_offline(card, true);
rc = qeth_set_online(card);
qeth_set_offline(card, disc, true);
rc = qeth_set_online(card, disc);
if (!rc) {
dev_info(&card->gdev->dev,
"Device successfully recovered!\n");
@ -6584,6 +6585,7 @@ static int qeth_core_probe_device(struct ccwgroup_device *gdev)
break;
default:
card->info.layer_enforced = true;
/* It's so early that we don't need the discipline_mutex yet. */
rc = qeth_core_load_discipline(card, enforced_disc);
if (rc)
goto err_load;
@ -6616,10 +6618,12 @@ static void qeth_core_remove_device(struct ccwgroup_device *gdev)
QETH_CARD_TEXT(card, 2, "removedv");
mutex_lock(&card->discipline_mutex);
if (card->discipline) {
card->discipline->remove(gdev);
qeth_core_free_discipline(card);
}
mutex_unlock(&card->discipline_mutex);
qeth_free_qdio_queues(card);
@ -6634,6 +6638,7 @@ static int qeth_core_set_online(struct ccwgroup_device *gdev)
int rc = 0;
enum qeth_discipline_id def_discipline;
mutex_lock(&card->discipline_mutex);
if (!card->discipline) {
def_discipline = IS_IQD(card) ? QETH_DISCIPLINE_LAYER3 :
QETH_DISCIPLINE_LAYER2;
@ -6647,16 +6652,23 @@ static int qeth_core_set_online(struct ccwgroup_device *gdev)
}
}
rc = qeth_set_online(card);
rc = qeth_set_online(card, card->discipline);
err:
mutex_unlock(&card->discipline_mutex);
return rc;
}
static int qeth_core_set_offline(struct ccwgroup_device *gdev)
{
struct qeth_card *card = dev_get_drvdata(&gdev->dev);
int rc;
return qeth_set_offline(card, false);
mutex_lock(&card->discipline_mutex);
rc = qeth_set_offline(card, card->discipline, false);
mutex_unlock(&card->discipline_mutex);
return rc;
}
static void qeth_core_shutdown(struct ccwgroup_device *gdev)

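An aside on the recovery path above: qeth_do_reset() now reads card->discipline into a local disc once and hands that snapshot to both the offline and online helpers. A minimal user-space sketch of the same idea, with all names invented for illustration:

#include <stdio.h>

struct discipline { const char *name; };

/* Read the pointer once and use the copy throughout, so both halves of
 * the recovery are guaranteed to talk to the same discipline even if a
 * concurrent path retargets the field later. */
static void recover(struct discipline **slot)
{
	const struct discipline *disc = *slot;	/* single snapshot */

	printf("set offline via %s\n", disc->name);
	printf("set online  via %s\n", disc->name);
}

int main(void)
{
	struct discipline l2 = { "layer2" };
	struct discipline *current_disc = &l2;

	recover(&current_disc);
	return 0;
}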
View File

@ -2208,7 +2208,7 @@ static void qeth_l2_remove_device(struct ccwgroup_device *gdev)
wait_event(card->wait_q, qeth_threads_running(card, 0xffffffff) == 0);
if (gdev->state == CCWGROUP_ONLINE)
qeth_set_offline(card, false);
qeth_set_offline(card, card->discipline, false);
cancel_work_sync(&card->close_dev_work);
if (card->dev->reg_state == NETREG_REGISTERED)

View File

@ -1813,7 +1813,7 @@ static netdev_features_t qeth_l3_osa_features_check(struct sk_buff *skb,
struct net_device *dev,
netdev_features_t features)
{
if (qeth_get_ip_version(skb) != 4)
if (vlan_get_protocol(skb) != htons(ETH_P_IP))
features &= ~NETIF_F_HW_VLAN_CTAG_TX;
return qeth_features_check(skb, dev, features);
}
@ -1971,7 +1971,7 @@ static void qeth_l3_remove_device(struct ccwgroup_device *cgdev)
wait_event(card->wait_q, qeth_threads_running(card, 0xffffffff) == 0);
if (cgdev->state == CCWGROUP_ONLINE)
qeth_set_offline(card, false);
qeth_set_offline(card, card->discipline, false);
cancel_work_sync(&card->close_dev_work);
if (card->dev->reg_state == NETREG_REGISTERED)

View File

@ -189,24 +189,26 @@ static int altera_spi_txrx(struct spi_master *master,
/* send the first byte */
altera_spi_tx_word(hw);
} else {
while (hw->count < hw->len) {
altera_spi_tx_word(hw);
for (;;) {
altr_spi_readl(hw, ALTERA_SPI_STATUS, &val);
if (val & ALTERA_SPI_STATUS_RRDY_MSK)
break;
cpu_relax();
}
altera_spi_rx_word(hw);
}
spi_finalize_current_transfer(master);
return 1;
}
return t->len;
while (hw->count < hw->len) {
altera_spi_tx_word(hw);
for (;;) {
altr_spi_readl(hw, ALTERA_SPI_STATUS, &val);
if (val & ALTERA_SPI_STATUS_RRDY_MSK)
break;
cpu_relax();
}
altera_spi_rx_word(hw);
}
spi_finalize_current_transfer(master);
return 0;
}
static irqreturn_t altera_spi_irq(int irq, void *dev)

View File

@ -83,6 +83,7 @@ struct spi_geni_master {
spinlock_t lock;
int irq;
bool cs_flag;
bool abort_failed;
};
static int get_spi_clk_cfg(unsigned int speed_hz,
@ -141,8 +142,49 @@ static void handle_fifo_timeout(struct spi_master *spi,
spin_unlock_irq(&mas->lock);
time_left = wait_for_completion_timeout(&mas->abort_done, HZ);
if (!time_left)
if (!time_left) {
dev_err(mas->dev, "Failed to cancel/abort m_cmd\n");
/*
* No need for a lock since SPI core has a lock and we never
* access this from an interrupt.
*/
mas->abort_failed = true;
}
}
static bool spi_geni_is_abort_still_pending(struct spi_geni_master *mas)
{
struct geni_se *se = &mas->se;
u32 m_irq, m_irq_en;
if (!mas->abort_failed)
return false;
/*
* The only known case where a transfer times out, then a cancel
* times out, and then an abort times out is if something is blocking
* our interrupt handler from running. Avoid starting any new transfers
* until that sorts itself out.
*/
spin_lock_irq(&mas->lock);
m_irq = readl(se->base + SE_GENI_M_IRQ_STATUS);
m_irq_en = readl(se->base + SE_GENI_M_IRQ_EN);
spin_unlock_irq(&mas->lock);
if (m_irq & m_irq_en) {
dev_err(mas->dev, "Interrupts pending after abort: %#010x\n",
m_irq & m_irq_en);
return true;
}
/*
* If we're here, the problem resolved itself, so there is no need to
* check again on future transfers.
*/
mas->abort_failed = false;
return false;
}
static void spi_geni_set_cs(struct spi_device *slv, bool set_flag)
@ -158,10 +200,21 @@ static void spi_geni_set_cs(struct spi_device *slv, bool set_flag)
if (set_flag == mas->cs_flag)
return;
mas->cs_flag = set_flag;
pm_runtime_get_sync(mas->dev);
if (spi_geni_is_abort_still_pending(mas)) {
dev_err(mas->dev, "Can't set chip select\n");
goto exit;
}
spin_lock_irq(&mas->lock);
if (mas->cur_xfer) {
dev_err(mas->dev, "Can't set CS when prev xfer running\n");
spin_unlock_irq(&mas->lock);
goto exit;
}
mas->cs_flag = set_flag;
reinit_completion(&mas->cs_done);
if (set_flag)
geni_se_setup_m_cmd(se, SPI_CS_ASSERT, 0);
@ -170,9 +223,12 @@ static void spi_geni_set_cs(struct spi_device *slv, bool set_flag)
spin_unlock_irq(&mas->lock);
time_left = wait_for_completion_timeout(&mas->cs_done, HZ);
if (!time_left)
if (!time_left) {
dev_warn(mas->dev, "Timeout setting chip select\n");
handle_fifo_timeout(spi, NULL);
}
exit:
pm_runtime_put(mas->dev);
}
@ -280,6 +336,9 @@ static int spi_geni_prepare_message(struct spi_master *spi,
int ret;
struct spi_geni_master *mas = spi_master_get_devdata(spi);
if (spi_geni_is_abort_still_pending(mas))
return -EBUSY;
ret = setup_fifo_params(spi_msg->spi, spi);
if (ret)
dev_err(mas->dev, "Couldn't select mode %d\n", ret);
@ -354,6 +413,12 @@ static bool geni_spi_handle_tx(struct spi_geni_master *mas)
unsigned int bytes_per_fifo_word = geni_byte_per_fifo_word(mas);
unsigned int i = 0;
/* Stop the watermark IRQ if nothing to send */
if (!mas->cur_xfer) {
writel(0, se->base + SE_GENI_TX_WATERMARK_REG);
return false;
}
max_bytes = (mas->tx_fifo_depth - mas->tx_wm) * bytes_per_fifo_word;
if (mas->tx_rem_bytes < max_bytes)
max_bytes = mas->tx_rem_bytes;
@ -396,6 +461,14 @@ static void geni_spi_handle_rx(struct spi_geni_master *mas)
if (rx_last_byte_valid && rx_last_byte_valid < 4)
rx_bytes -= bytes_per_fifo_word - rx_last_byte_valid;
}
/* Clear out the FIFO and bail if nowhere to put it */
if (!mas->cur_xfer) {
for (i = 0; i < DIV_ROUND_UP(rx_bytes, bytes_per_fifo_word); i++)
readl(se->base + SE_GENI_RX_FIFOn);
return;
}
if (mas->rx_rem_bytes < rx_bytes)
rx_bytes = mas->rx_rem_bytes;
@ -495,6 +568,9 @@ static int spi_geni_transfer_one(struct spi_master *spi,
{
struct spi_geni_master *mas = spi_master_get_devdata(spi);
if (spi_geni_is_abort_still_pending(mas))
return -EBUSY;
/* Terminate and return success for 0 byte length transfer */
if (!xfer->len)
return 0;

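A toy model of the abort_failed latch introduced above, assuming a single-threaded caller (the real driver takes mas->lock around the register reads); the struct and bit values are illustrative only:

#include <stdbool.h>
#include <stdio.h>

/* Illustrative stand-in for the controller state; irq_status models the
 * masked SE_GENI_M_IRQ_STATUS read. */
struct ctlr {
	bool abort_failed;
	unsigned int irq_status;
};

static bool abort_still_pending(struct ctlr *c)
{
	if (!c->abort_failed)
		return false;
	if (c->irq_status) {
		fprintf(stderr, "interrupts pending after abort: %#x\n",
			c->irq_status);
		return true;		/* keep refusing new work */
	}
	c->abort_failed = false;	/* drained: clear the latch */
	return false;
}

int main(void)
{
	struct ctlr c = { .abort_failed = true, .irq_status = 0x4 };

	printf("%d\n", abort_still_pending(&c));	/* 1: still pending */
	c.irq_status = 0;
	printf("%d\n", abort_still_pending(&c));	/* 0: latch cleared */
	return 0;
}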
View File

@ -493,9 +493,9 @@ static u32 stm32h7_spi_prepare_fthlv(struct stm32_spi *spi, u32 xfer_len)
/* align packet size with data registers access */
if (spi->cur_bpw > 8)
fthlv -= (fthlv % 2); /* multiple of 2 */
fthlv += (fthlv % 2) ? 1 : 0;
else
fthlv -= (fthlv % 4); /* multiple of 4 */
fthlv += (fthlv % 4) ? (4 - (fthlv % 4)) : 0;
if (!fthlv)
fthlv = 1;

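For reference, the arithmetic change above swaps truncation for rounding up: a threshold that is not a multiple of the register access width is now bumped to the next multiple rather than cut down (possibly below the packet size). A standalone sketch of that rounding, using a hypothetical helper name:

#include <stdio.h>

/* Hypothetical helper mirroring the new rounding: raise fthlv to the
 * next multiple of the register access width instead of truncating. */
static unsigned int round_up_fthlv(unsigned int fthlv, unsigned int align)
{
	unsigned int rem = fthlv % align;

	if (rem)
		fthlv += align - rem;
	if (!fthlv)
		fthlv = 1;
	return fthlv;
}

int main(void)
{
	printf("%u\n", round_up_fthlv(5, 2));	/* 6; the old code gave 4 */
	printf("%u\n", round_up_fthlv(5, 4));	/* 8; the old code gave 4 */
	return 0;
}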
View File

@ -1108,6 +1108,7 @@ static int spi_transfer_wait(struct spi_controller *ctlr,
{
struct spi_statistics *statm = &ctlr->statistics;
struct spi_statistics *stats = &msg->spi->statistics;
u32 speed_hz = xfer->speed_hz;
unsigned long long ms;
if (spi_controller_is_slave(ctlr)) {
@ -1116,8 +1117,11 @@ static int spi_transfer_wait(struct spi_controller *ctlr,
return -EINTR;
}
} else {
if (!speed_hz)
speed_hz = 100000;
ms = 8LL * 1000LL * xfer->len;
do_div(ms, xfer->speed_hz);
do_div(ms, speed_hz);
ms += ms + 200; /* some tolerance */
if (ms > UINT_MAX)
@ -3378,8 +3382,9 @@ int spi_setup(struct spi_device *spi)
if (status)
return status;
if (!spi->max_speed_hz ||
spi->max_speed_hz > spi->controller->max_speed_hz)
if (spi->controller->max_speed_hz &&
(!spi->max_speed_hz ||
spi->max_speed_hz > spi->controller->max_speed_hz))
spi->max_speed_hz = spi->controller->max_speed_hz;
mutex_lock(&spi->controller->io_mutex);

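The first hunk above makes the wait budget robust against xfer->speed_hz being zero. Roughly, the estimate is 8 bits per byte times len divided by the clock speed, then doubled with 200 ms of slack. A user-space sketch of that calculation, assuming the 100 kHz fallback from the patch:

#include <stdint.h>
#include <stdio.h>

/* Model of the transfer wait budget: ~8 clock edges per byte, a
 * conservative 100 kHz fallback when no speed is configured, then
 * double the estimate and add 200 ms of tolerance. */
static unsigned long xfer_timeout_ms(unsigned int len, uint32_t speed_hz)
{
	unsigned long long ms;

	if (!speed_hz)
		speed_hz = 100000;

	ms = 8ULL * 1000ULL * len;
	ms /= speed_hz;			/* do_div() in the kernel */
	ms += ms + 200;			/* some tolerance */
	if (ms > 0xffffffffULL)
		ms = 0xffffffffULL;	/* clamped to UINT_MAX upstream */
	return (unsigned long)ms;
}

int main(void)
{
	printf("%lu ms\n", xfer_timeout_ms(4096, 1000000));	/* 264 ms */
	printf("%lu ms\n", xfer_timeout_ms(4096, 0));		/* 854 ms, fallback */
	return 0;
}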
View File

@ -401,6 +401,20 @@ config MIPS_EJTAG_FDC_KGDB_CHAN
help
FDC channel number to use for KGDB.
config NULL_TTY
tristate "NULL TTY driver"
help
Say Y here if you want a NULL TTY which simply discards messages.
This is useful to allow userspace applications which expect a console
device to work without modifications even when no console is
available or desired.
In order to use this driver, you should redirect the console to this
TTY, or boot the kernel with console=ttynull.
If unsure, say N.
config TRACE_ROUTER
tristate "Trace data router for MIPI P1149.7 cJTAG standard"
depends on TRACE_SINK

View File

@ -2,7 +2,7 @@
obj-$(CONFIG_TTY) += tty_io.o n_tty.o tty_ioctl.o tty_ldisc.o \
tty_buffer.o tty_port.o tty_mutex.o \
tty_ldsem.o tty_baudrate.o tty_jobctrl.o \
n_null.o ttynull.o
n_null.o
obj-$(CONFIG_LEGACY_PTYS) += pty.o
obj-$(CONFIG_UNIX98_PTYS) += pty.o
obj-$(CONFIG_AUDIT) += tty_audit.o
@ -25,6 +25,7 @@ obj-$(CONFIG_ISI) += isicom.o
obj-$(CONFIG_MOXA_INTELLIO) += moxa.o
obj-$(CONFIG_MOXA_SMARTIO) += mxser.o
obj-$(CONFIG_NOZOMI) += nozomi.o
obj-$(CONFIG_NULL_TTY) += ttynull.o
obj-$(CONFIG_ROCKETPORT) += rocket.o
obj-$(CONFIG_SYNCLINK_GT) += synclink_gt.o
obj-$(CONFIG_PPC_EPAPR_HV_BYTECHAN) += ehv_bytechan.o

View File

@ -2,13 +2,6 @@
/*
* Copyright (C) 2019 Axis Communications AB
*
* The console is useful for userspace applications which expect a console
* device to work without modifications even when no console is available
* or desired.
*
* In order to use this driver, you should redirect the console to this
* TTY, or boot the kernel with console=ttynull.
*
* Based on ttyprintk.c:
* Copyright (C) 2010 Samo Pogacnik
*/
@ -66,17 +59,6 @@ static struct console ttynull_console = {
.device = ttynull_device,
};
void __init register_ttynull_console(void)
{
if (!ttynull_driver)
return;
if (add_preferred_console(ttynull_console.name, 0, NULL))
return;
register_console(&ttynull_console);
}
static int __init ttynull_init(void)
{
struct tty_driver *driver;

View File

@ -30,7 +30,12 @@
#define VHOST_VSOCK_PKT_WEIGHT 256
enum {
VHOST_VSOCK_FEATURES = VHOST_FEATURES,
VHOST_VSOCK_FEATURES = VHOST_FEATURES |
(1ULL << VIRTIO_F_ACCESS_PLATFORM)
};
enum {
VHOST_VSOCK_BACKEND_FEATURES = (1ULL << VHOST_BACKEND_F_IOTLB_MSG_V2)
};
/* Used to track all the vhost_vsock instances on the system. */
@ -94,6 +99,9 @@ vhost_transport_do_send_pkt(struct vhost_vsock *vsock,
if (!vhost_vq_get_backend(vq))
goto out;
if (!vq_meta_prefetch(vq))
goto out;
/* Avoid further vmexits, we're already processing the virtqueue */
vhost_disable_notify(&vsock->dev, vq);
@ -449,6 +457,9 @@ static void vhost_vsock_handle_tx_kick(struct vhost_work *work)
if (!vhost_vq_get_backend(vq))
goto out;
if (!vq_meta_prefetch(vq))
goto out;
vhost_disable_notify(&vsock->dev, vq);
do {
u32 len;
@ -766,8 +777,12 @@ static int vhost_vsock_set_features(struct vhost_vsock *vsock, u64 features)
mutex_lock(&vsock->dev.mutex);
if ((features & (1 << VHOST_F_LOG_ALL)) &&
!vhost_log_access_ok(&vsock->dev)) {
mutex_unlock(&vsock->dev.mutex);
return -EFAULT;
goto err;
}
if ((features & (1ULL << VIRTIO_F_ACCESS_PLATFORM))) {
if (vhost_init_device_iotlb(&vsock->dev, true))
goto err;
}
for (i = 0; i < ARRAY_SIZE(vsock->vqs); i++) {
@ -778,6 +793,10 @@ static int vhost_vsock_set_features(struct vhost_vsock *vsock, u64 features)
}
mutex_unlock(&vsock->dev.mutex);
return 0;
err:
mutex_unlock(&vsock->dev.mutex);
return -EFAULT;
}
static long vhost_vsock_dev_ioctl(struct file *f, unsigned int ioctl,
@ -811,6 +830,18 @@ static long vhost_vsock_dev_ioctl(struct file *f, unsigned int ioctl,
if (copy_from_user(&features, argp, sizeof(features)))
return -EFAULT;
return vhost_vsock_set_features(vsock, features);
case VHOST_GET_BACKEND_FEATURES:
features = VHOST_VSOCK_BACKEND_FEATURES;
if (copy_to_user(argp, &features, sizeof(features)))
return -EFAULT;
return 0;
case VHOST_SET_BACKEND_FEATURES:
if (copy_from_user(&features, argp, sizeof(features)))
return -EFAULT;
if (features & ~VHOST_VSOCK_BACKEND_FEATURES)
return -EOPNOTSUPP;
vhost_set_backend_features(&vsock->dev, features);
return 0;
default:
mutex_lock(&vsock->dev.mutex);
r = vhost_dev_ioctl(&vsock->dev, ioctl, argp);
@ -823,6 +854,34 @@ static long vhost_vsock_dev_ioctl(struct file *f, unsigned int ioctl,
}
}
static ssize_t vhost_vsock_chr_read_iter(struct kiocb *iocb, struct iov_iter *to)
{
struct file *file = iocb->ki_filp;
struct vhost_vsock *vsock = file->private_data;
struct vhost_dev *dev = &vsock->dev;
int noblock = file->f_flags & O_NONBLOCK;
return vhost_chr_read_iter(dev, to, noblock);
}
static ssize_t vhost_vsock_chr_write_iter(struct kiocb *iocb,
struct iov_iter *from)
{
struct file *file = iocb->ki_filp;
struct vhost_vsock *vsock = file->private_data;
struct vhost_dev *dev = &vsock->dev;
return vhost_chr_write_iter(dev, from);
}
static __poll_t vhost_vsock_chr_poll(struct file *file, poll_table *wait)
{
struct vhost_vsock *vsock = file->private_data;
struct vhost_dev *dev = &vsock->dev;
return vhost_chr_poll(file, dev, wait);
}
static const struct file_operations vhost_vsock_fops = {
.owner = THIS_MODULE,
.open = vhost_vsock_dev_open,
@ -830,6 +889,9 @@ static const struct file_operations vhost_vsock_fops = {
.llseek = noop_llseek,
.unlocked_ioctl = vhost_vsock_dev_ioctl,
.compat_ioctl = compat_ptr_ioctl,
.read_iter = vhost_vsock_chr_read_iter,
.write_iter = vhost_vsock_chr_write_iter,
.poll = vhost_vsock_chr_poll,
};
static struct miscdevice vhost_vsock_misc = {

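The VHOST_SET_BACKEND_FEATURES branch above follows the usual negotiation shape: copy the mask in, reject unknown bits, then apply. A compact user-space model of that check (the bit value and names here are illustrative, not the real vhost ABI):

#include <stdint.h>
#include <stdio.h>

#define BACKEND_F_IOTLB_MSG_V2	(1ULL << 1)	/* illustrative bit */
#define SUPPORTED_FEATURES	BACKEND_F_IOTLB_MSG_V2

/* Reject any requested bit the backend does not advertise; otherwise
 * latch the negotiated set. Mirrors the -EOPNOTSUPP path above. */
static int set_backend_features(uint64_t *negotiated, uint64_t requested)
{
	if (requested & ~SUPPORTED_FEATURES)
		return -1;
	*negotiated = requested;
	return 0;
}

int main(void)
{
	uint64_t f = 0;

	printf("%d\n", set_backend_features(&f, BACKEND_F_IOTLB_MSG_V2)); /* 0 */
	printf("%d\n", set_backend_features(&f, 1ULL << 7));		  /* -1 */
	return 0;
}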
View File

@ -42,6 +42,15 @@ enum {
* to an inode.
*/
BTRFS_INODE_NO_XATTRS,
/*
* Set when we are in a context where we need to start a transaction and
* have dirty pages with the respective file range locked. This is to
* ensure that when reserving space for the transaction, if we are low
* on available space and need to flush delalloc, we will not flush
* delalloc for this inode, because that could result in a deadlock (on
* the file range, inode's io_tree).
*/
BTRFS_INODE_NO_DELALLOC_FLUSH,
};
/* in memory btrfs inode */

View File

@ -2555,8 +2555,14 @@ static struct extent_buffer *btrfs_search_slot_get_root(struct btrfs_root *root,
* @p: Holds all btree nodes along the search path
* @root: The root node of the tree
* @key: The key we are looking for
* @ins_len: Indicates purpose of search, for inserts it is 1, for
* deletions it's -1. 0 for plain searches
* @ins_len: Indicates purpose of search:
* >0 for inserts, it's the size of the item inserted (*)
* <0 for deletions
* 0 for plain searches, not modifying the tree
*
* (*) If size of item inserted doesn't include
* sizeof(struct btrfs_item), then p->search_for_extension must
* be set.
* @cow: boolean should CoW operations be performed. Must always be 1
* when modifying the tree.
*
@ -2717,6 +2723,20 @@ int btrfs_search_slot(struct btrfs_trans_handle *trans, struct btrfs_root *root,
if (level == 0) {
p->slots[level] = slot;
/*
* Item key already exists. In this case, if we are
* allowed to insert the item (for example, in dir_item
* case, item key collision is allowed), it will be
* merged with the original item. Only the item size
* grows; no new btrfs item will be added. If
* search_for_extension is not set, ins_len already
* accounts for the size of struct btrfs_item; deduct it here
* so the leaf space check is correct.
*/
if (ret == 0 && ins_len > 0 && !p->search_for_extension) {
ASSERT(ins_len >= sizeof(struct btrfs_item));
ins_len -= sizeof(struct btrfs_item);
}
if (ins_len > 0 &&
btrfs_leaf_free_space(b) < ins_len) {
if (write_lock_level < 1) {

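To make the ins_len accounting above concrete: a brand-new item needs its data size plus an item header's worth of leaf space, while a merge into an existing key needs only the data. A worked sketch with an assumed header size (struct btrfs_item is 25 bytes on disk, but treat the constant as illustrative):

#include <stdio.h>

#define ITEM_HEADER_SIZE 25	/* assumed sizeof(struct btrfs_item) */

/* Leaf space the search must reserve for an insert of data_size bytes. */
static int needed_leaf_space(int data_size, int key_exists,
			     int search_for_extension)
{
	int ins_len = data_size + ITEM_HEADER_SIZE;

	if (search_for_extension)
		return data_size;	/* caller already left out the header */
	if (key_exists)
		ins_len -= ITEM_HEADER_SIZE;	/* merge: no new header */
	return ins_len;
}

int main(void)
{
	printf("new item: %d\n", needed_leaf_space(100, 0, 0));	/* 125 */
	printf("merge:    %d\n", needed_leaf_space(100, 1, 0));	/* 100 */
	return 0;
}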
View File

@ -131,6 +131,8 @@ enum {
* defrag
*/
BTRFS_FS_STATE_REMOUNTING,
/* Filesystem in RO mode */
BTRFS_FS_STATE_RO,
/* Track if a transaction abort has been reported on this filesystem */
BTRFS_FS_STATE_TRANS_ABORTED,
/*
@ -367,6 +369,12 @@ struct btrfs_path {
unsigned int search_commit_root:1;
unsigned int need_commit_sem:1;
unsigned int skip_release_on_error:1;
/*
* Indicate that new item (btrfs_search_slot) is extending already
* existing item and ins_len contains only the data size and not item
* header (ie. sizeof(struct btrfs_item) is not included).
*/
unsigned int search_for_extension:1;
};
#define BTRFS_MAX_EXTENT_ITEM_SIZE(r) ((BTRFS_LEAF_DATA_SIZE(r->fs_info) >> 4) - \
sizeof(struct btrfs_item))
@ -2885,10 +2893,26 @@ static inline int btrfs_fs_closing(struct btrfs_fs_info *fs_info)
* If we remount the fs to be R/O or umount the fs, the cleaner needn't do
* anything except sleeping. This function is used to check the status of
* the fs.
* We check for BTRFS_FS_STATE_RO to avoid races with a concurrent remount,
* since setting and checking for SB_RDONLY in the superblock's flags is not
* atomic.
*/
static inline int btrfs_need_cleaner_sleep(struct btrfs_fs_info *fs_info)
{
return fs_info->sb->s_flags & SB_RDONLY || btrfs_fs_closing(fs_info);
return test_bit(BTRFS_FS_STATE_RO, &fs_info->fs_state) ||
btrfs_fs_closing(fs_info);
}
static inline void btrfs_set_sb_rdonly(struct super_block *sb)
{
sb->s_flags |= SB_RDONLY;
set_bit(BTRFS_FS_STATE_RO, &btrfs_sb(sb)->fs_state);
}
static inline void btrfs_clear_sb_rdonly(struct super_block *sb)
{
sb->s_flags &= ~SB_RDONLY;
clear_bit(BTRFS_FS_STATE_RO, &btrfs_sb(sb)->fs_state);
}
/* tree mod log functions from ctree.c */
@ -3073,7 +3097,8 @@ int btrfs_truncate_inode_items(struct btrfs_trans_handle *trans,
u32 min_type);
int btrfs_start_delalloc_snapshot(struct btrfs_root *root);
int btrfs_start_delalloc_roots(struct btrfs_fs_info *fs_info, u64 nr);
int btrfs_start_delalloc_roots(struct btrfs_fs_info *fs_info, u64 nr,
bool in_reclaim_context);
int btrfs_set_extent_delalloc(struct btrfs_inode *inode, u64 start, u64 end,
unsigned int extra_bits,
struct extent_state **cached_state);

View File

@ -715,7 +715,7 @@ static int btrfs_dev_replace_finishing(struct btrfs_fs_info *fs_info,
* flush all outstanding I/O and inode extent mappings before the
* copy operation is declared as being finished
*/
ret = btrfs_start_delalloc_roots(fs_info, U64_MAX);
ret = btrfs_start_delalloc_roots(fs_info, U64_MAX, false);
if (ret) {
mutex_unlock(&dev_replace->lock_finishing_cancel_unmount);
return ret;

View File

@ -199,16 +199,15 @@ static struct btrfs_block_group *find_next_block_group(
static struct btrfs_block_group *peek_discard_list(
struct btrfs_discard_ctl *discard_ctl,
enum btrfs_discard_state *discard_state,
int *discard_index)
int *discard_index, u64 now)
{
struct btrfs_block_group *block_group;
const u64 now = ktime_get_ns();
spin_lock(&discard_ctl->lock);
again:
block_group = find_next_block_group(discard_ctl, now);
if (block_group && now > block_group->discard_eligible_time) {
if (block_group && now >= block_group->discard_eligible_time) {
if (block_group->discard_index == BTRFS_DISCARD_INDEX_UNUSED &&
block_group->used != 0) {
if (btrfs_is_block_group_data_only(block_group))
@ -222,12 +221,11 @@ static struct btrfs_block_group *peek_discard_list(
block_group->discard_state = BTRFS_DISCARD_EXTENTS;
}
discard_ctl->block_group = block_group;
}
if (block_group) {
*discard_state = block_group->discard_state;
*discard_index = block_group->discard_index;
} else {
block_group = NULL;
}
spin_unlock(&discard_ctl->lock);
return block_group;
@ -330,28 +328,15 @@ void btrfs_discard_queue_work(struct btrfs_discard_ctl *discard_ctl,
btrfs_discard_schedule_work(discard_ctl, false);
}
/**
* btrfs_discard_schedule_work - responsible for scheduling the discard work
* @discard_ctl: discard control
* @override: override the current timer
*
* Discards are issued by a delayed workqueue item. @override is used to
* update the current delay as the baseline delay interval is reevaluated on
* transaction commit. This is also maxed with any other rate limit.
*/
void btrfs_discard_schedule_work(struct btrfs_discard_ctl *discard_ctl,
bool override)
static void __btrfs_discard_schedule_work(struct btrfs_discard_ctl *discard_ctl,
u64 now, bool override)
{
struct btrfs_block_group *block_group;
const u64 now = ktime_get_ns();
spin_lock(&discard_ctl->lock);
if (!btrfs_run_discard_work(discard_ctl))
goto out;
return;
if (!override && delayed_work_pending(&discard_ctl->work))
goto out;
return;
block_group = find_next_block_group(discard_ctl, now);
if (block_group) {
@ -393,7 +378,24 @@ void btrfs_discard_schedule_work(struct btrfs_discard_ctl *discard_ctl,
mod_delayed_work(discard_ctl->discard_workers,
&discard_ctl->work, nsecs_to_jiffies(delay));
}
out:
}
/*
* btrfs_discard_schedule_work - responsible for scheduling the discard work
* @discard_ctl: discard control
* @override: override the current timer
*
* Discards are issued by a delayed workqueue item. @override is used to
* update the current delay as the baseline delay interval is reevaluated on
* transaction commit. This is also maxed with any other rate limit.
*/
void btrfs_discard_schedule_work(struct btrfs_discard_ctl *discard_ctl,
bool override)
{
const u64 now = ktime_get_ns();
spin_lock(&discard_ctl->lock);
__btrfs_discard_schedule_work(discard_ctl, now, override);
spin_unlock(&discard_ctl->lock);
}
@ -438,13 +440,18 @@ static void btrfs_discard_workfn(struct work_struct *work)
int discard_index = 0;
u64 trimmed = 0;
u64 minlen = 0;
u64 now = ktime_get_ns();
discard_ctl = container_of(work, struct btrfs_discard_ctl, work.work);
block_group = peek_discard_list(discard_ctl, &discard_state,
&discard_index);
&discard_index, now);
if (!block_group || !btrfs_run_discard_work(discard_ctl))
return;
if (now < block_group->discard_eligible_time) {
btrfs_discard_schedule_work(discard_ctl, false);
return;
}
/* Perform discarding */
minlen = discard_minlen[discard_index];
@ -474,13 +481,6 @@ static void btrfs_discard_workfn(struct work_struct *work)
discard_ctl->discard_extent_bytes += trimmed;
}
/*
* Updated without locks as this is inside the workfn and nothing else
* is reading the values
*/
discard_ctl->prev_discard = trimmed;
discard_ctl->prev_discard_time = ktime_get_ns();
/* Determine next steps for a block_group */
if (block_group->discard_cursor >= btrfs_block_group_end(block_group)) {
if (discard_state == BTRFS_DISCARD_BITMAPS) {
@ -496,11 +496,13 @@ static void btrfs_discard_workfn(struct work_struct *work)
}
}
now = ktime_get_ns();
spin_lock(&discard_ctl->lock);
discard_ctl->prev_discard = trimmed;
discard_ctl->prev_discard_time = now;
discard_ctl->block_group = NULL;
__btrfs_discard_schedule_work(discard_ctl, now, false);
spin_unlock(&discard_ctl->lock);
btrfs_discard_schedule_work(discard_ctl, false);
}
/**

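One more note on the discard rework above: the helpers now receive a single now timestamp instead of each sampling the clock, so eligibility checks and rescheduling agree on the same instant (and the comparison is now inclusive, >=). A small sketch of that pattern:

#include <stdio.h>
#include <time.h>

static unsigned long long now_ns(void)
{
	struct timespec ts;

	clock_gettime(CLOCK_MONOTONIC, &ts);
	return (unsigned long long)ts.tv_sec * 1000000000ULL + ts.tv_nsec;
}

/* Inclusive check, matching the now >= discard_eligible_time fix. */
static int eligible(unsigned long long now, unsigned long long when)
{
	return now >= when;
}

int main(void)
{
	unsigned long long now = now_ns();	/* sampled once, reused */

	printf("%d\n", eligible(now, now));		/* 1 */
	printf("%d\n", eligible(now, now + 1000));	/* 0 */
	return 0;
}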
View File

@ -1729,7 +1729,7 @@ static int cleaner_kthread(void *arg)
*/
btrfs_delete_unused_bgs(fs_info);
sleep:
clear_bit(BTRFS_FS_CLEANER_RUNNING, &fs_info->flags);
clear_and_wake_up_bit(BTRFS_FS_CLEANER_RUNNING, &fs_info->flags);
if (kthread_should_park())
kthread_parkme();
if (kthread_should_stop())
@ -2830,6 +2830,9 @@ static int init_mount_fs_info(struct btrfs_fs_info *fs_info, struct super_block
return -ENOMEM;
btrfs_init_delayed_root(fs_info->delayed_root);
if (sb_rdonly(sb))
set_bit(BTRFS_FS_STATE_RO, &fs_info->fs_state);
return btrfs_alloc_stripe_hash_table(fs_info);
}
@ -2969,6 +2972,7 @@ int btrfs_start_pre_rw_mount(struct btrfs_fs_info *fs_info)
}
}
ret = btrfs_find_orphan_roots(fs_info);
out:
return ret;
}
@ -3383,10 +3387,6 @@ int __cold open_ctree(struct super_block *sb, struct btrfs_fs_devices *fs_device
}
}
ret = btrfs_find_orphan_roots(fs_info);
if (ret)
goto fail_qgroup;
fs_info->fs_root = btrfs_get_fs_root(fs_info, BTRFS_FS_TREE_OBJECTID, true);
if (IS_ERR(fs_info->fs_root)) {
err = PTR_ERR(fs_info->fs_root);
@ -4181,6 +4181,9 @@ void __cold close_ctree(struct btrfs_fs_info *fs_info)
invalidate_inode_pages2(fs_info->btree_inode->i_mapping);
btrfs_stop_all_workers(fs_info);
/* We shouldn't have any transaction open at this point */
ASSERT(list_empty(&fs_info->trans_list));
clear_bit(BTRFS_FS_OPEN, &fs_info->flags);
free_root_pointers(fs_info, true);
btrfs_free_fs_roots(fs_info);

View File

@ -844,6 +844,7 @@ int lookup_inline_extent_backref(struct btrfs_trans_handle *trans,
want = extent_ref_type(parent, owner);
if (insert) {
extra_size = btrfs_extent_inline_ref_size(want);
path->search_for_extension = 1;
path->keep_locks = 1;
} else
extra_size = -1;
@ -996,6 +997,7 @@ int lookup_inline_extent_backref(struct btrfs_trans_handle *trans,
out:
if (insert) {
path->keep_locks = 0;
path->search_for_extension = 0;
btrfs_unlock_up_safe(path, 1);
}
return err;

View File

@ -1016,8 +1016,10 @@ int btrfs_csum_file_blocks(struct btrfs_trans_handle *trans,
}
btrfs_release_path(path);
path->search_for_extension = 1;
ret = btrfs_search_slot(trans, root, &file_key, path,
csum_size, 1);
path->search_for_extension = 0;
if (ret < 0)
goto out;

View File

@ -9390,7 +9390,8 @@ static struct btrfs_delalloc_work *btrfs_alloc_delalloc_work(struct inode *inode
* some fairly slow code that needs optimization. This walks the list
* of all the inodes with pending delalloc and forces them to disk.
*/
static int start_delalloc_inodes(struct btrfs_root *root, u64 *nr, bool snapshot)
static int start_delalloc_inodes(struct btrfs_root *root, u64 *nr, bool snapshot,
bool in_reclaim_context)
{
struct btrfs_inode *binode;
struct inode *inode;
@ -9411,6 +9412,11 @@ static int start_delalloc_inodes(struct btrfs_root *root, u64 *nr, bool snapshot
list_move_tail(&binode->delalloc_inodes,
&root->delalloc_inodes);
if (in_reclaim_context &&
test_bit(BTRFS_INODE_NO_DELALLOC_FLUSH, &binode->runtime_flags))
continue;
inode = igrab(&binode->vfs_inode);
if (!inode) {
cond_resched_lock(&root->delalloc_lock);
@ -9464,10 +9470,11 @@ int btrfs_start_delalloc_snapshot(struct btrfs_root *root)
if (test_bit(BTRFS_FS_STATE_ERROR, &fs_info->fs_state))
return -EROFS;
return start_delalloc_inodes(root, &nr, true);
return start_delalloc_inodes(root, &nr, true, false);
}
int btrfs_start_delalloc_roots(struct btrfs_fs_info *fs_info, u64 nr)
int btrfs_start_delalloc_roots(struct btrfs_fs_info *fs_info, u64 nr,
bool in_reclaim_context)
{
struct btrfs_root *root;
struct list_head splice;
@ -9490,7 +9497,7 @@ int btrfs_start_delalloc_roots(struct btrfs_fs_info *fs_info, u64 nr)
&fs_info->delalloc_roots);
spin_unlock(&fs_info->delalloc_root_lock);
ret = start_delalloc_inodes(root, &nr, false);
ret = start_delalloc_inodes(root, &nr, false, in_reclaim_context);
btrfs_put_root(root);
if (ret < 0)
goto out;

View File

@ -4951,7 +4951,7 @@ long btrfs_ioctl(struct file *file, unsigned int
case BTRFS_IOC_SYNC: {
int ret;
ret = btrfs_start_delalloc_roots(fs_info, U64_MAX);
ret = btrfs_start_delalloc_roots(fs_info, U64_MAX, false);
if (ret)
return ret;
ret = btrfs_sync_fs(inode->i_sb, 1);

View File

@ -3190,6 +3190,12 @@ static int qgroup_rescan_leaf(struct btrfs_trans_handle *trans,
return ret;
}
static bool rescan_should_stop(struct btrfs_fs_info *fs_info)
{
return btrfs_fs_closing(fs_info) ||
test_bit(BTRFS_FS_STATE_REMOUNTING, &fs_info->fs_state);
}
static void btrfs_qgroup_rescan_worker(struct btrfs_work *work)
{
struct btrfs_fs_info *fs_info = container_of(work, struct btrfs_fs_info,
@ -3198,6 +3204,7 @@ static void btrfs_qgroup_rescan_worker(struct btrfs_work *work)
struct btrfs_trans_handle *trans = NULL;
int err = -ENOMEM;
int ret = 0;
bool stopped = false;
path = btrfs_alloc_path();
if (!path)
@ -3210,7 +3217,7 @@ static void btrfs_qgroup_rescan_worker(struct btrfs_work *work)
path->skip_locking = 1;
err = 0;
while (!err && !btrfs_fs_closing(fs_info)) {
while (!err && !(stopped = rescan_should_stop(fs_info))) {
trans = btrfs_start_transaction(fs_info->fs_root, 0);
if (IS_ERR(trans)) {
err = PTR_ERR(trans);
@ -3253,7 +3260,7 @@ static void btrfs_qgroup_rescan_worker(struct btrfs_work *work)
}
mutex_lock(&fs_info->qgroup_rescan_lock);
if (!btrfs_fs_closing(fs_info))
if (!stopped)
fs_info->qgroup_flags &= ~BTRFS_QGROUP_STATUS_FLAG_RESCAN;
if (trans) {
ret = update_qgroup_status_item(trans);
@ -3272,7 +3279,7 @@ static void btrfs_qgroup_rescan_worker(struct btrfs_work *work)
btrfs_end_transaction(trans);
if (btrfs_fs_closing(fs_info)) {
if (stopped) {
btrfs_info(fs_info, "qgroup scan paused");
} else if (err >= 0) {
btrfs_info(fs_info, "qgroup scan completed%s",
@ -3530,16 +3537,6 @@ static int try_flush_qgroup(struct btrfs_root *root)
int ret;
bool can_commit = true;
/*
* We don't want to run flush again and again, so if there is a running
* one, we won't try to start a new flush, but exit directly.
*/
if (test_and_set_bit(BTRFS_ROOT_QGROUP_FLUSHING, &root->state)) {
wait_event(root->qgroup_flush_wait,
!test_bit(BTRFS_ROOT_QGROUP_FLUSHING, &root->state));
return 0;
}
/*
* If current process holds a transaction, we shouldn't flush, as we
* assume all space reservation happens before a transaction handle is
@ -3554,6 +3551,26 @@ static int try_flush_qgroup(struct btrfs_root *root)
current->journal_info != BTRFS_SEND_TRANS_STUB)
can_commit = false;
/*
* We don't want to run flush again and again, so if there is a running
* one, we won't try to start a new flush, but exit directly.
*/
if (test_and_set_bit(BTRFS_ROOT_QGROUP_FLUSHING, &root->state)) {
/*
* We are already holding a transaction, thus we can block other
* threads from flushing. So exit right now. This increases
* the chance of EDQUOT for heavy load and near limit cases.
* But we can argue that if we're already near the limit, EDQUOT is
* unavoidable anyway.
*/
if (!can_commit)
return 0;
wait_event(root->qgroup_flush_wait,
!test_bit(BTRFS_ROOT_QGROUP_FLUSHING, &root->state));
return 0;
}
ret = btrfs_start_delalloc_snapshot(root);
if (ret < 0)
goto out;

View File

@ -89,6 +89,19 @@ static int copy_inline_to_page(struct btrfs_inode *inode,
if (ret)
goto out_unlock;
/*
* After dirtying the page our caller will need to start a transaction,
* and if we are low on metadata free space, that can cause flushing of
* delalloc for all inodes in order to get metadata space released.
* However we are holding the range locked for the whole duration of
* the clone/dedupe operation, so we may deadlock if that happens and no
* other task releases enough space. So mark this inode as not
* flushable, to avoid such a deadlock. We will clear that flag
* when we finish cloning all extents, since a transaction is started
* after finding each extent to clone.
*/
set_bit(BTRFS_INODE_NO_DELALLOC_FLUSH, &inode->runtime_flags);
if (comp_type == BTRFS_COMPRESS_NONE) {
char *map;
@ -549,6 +562,8 @@ static int btrfs_clone(struct inode *src, struct inode *inode,
out:
btrfs_free_path(path);
kvfree(buf);
clear_bit(BTRFS_INODE_NO_DELALLOC_FLUSH, &BTRFS_I(inode)->runtime_flags);
return ret;
}

View File

@ -236,6 +236,7 @@ struct waiting_dir_move {
* after this directory is moved, we can try to rmdir the ino rmdir_ino.
*/
u64 rmdir_ino;
u64 rmdir_gen;
bool orphanized;
};
@ -316,7 +317,7 @@ static int is_waiting_for_move(struct send_ctx *sctx, u64 ino);
static struct waiting_dir_move *
get_waiting_dir_move(struct send_ctx *sctx, u64 ino);
static int is_waiting_for_rm(struct send_ctx *sctx, u64 dir_ino);
static int is_waiting_for_rm(struct send_ctx *sctx, u64 dir_ino, u64 gen);
static int need_send_hole(struct send_ctx *sctx)
{
@ -2299,7 +2300,7 @@ static int get_cur_path(struct send_ctx *sctx, u64 ino, u64 gen,
fs_path_reset(name);
if (is_waiting_for_rm(sctx, ino)) {
if (is_waiting_for_rm(sctx, ino, gen)) {
ret = gen_unique_name(sctx, ino, gen, name);
if (ret < 0)
goto out;
@ -2858,8 +2859,8 @@ static int orphanize_inode(struct send_ctx *sctx, u64 ino, u64 gen,
return ret;
}
static struct orphan_dir_info *
add_orphan_dir_info(struct send_ctx *sctx, u64 dir_ino)
static struct orphan_dir_info *add_orphan_dir_info(struct send_ctx *sctx,
u64 dir_ino, u64 dir_gen)
{
struct rb_node **p = &sctx->orphan_dirs.rb_node;
struct rb_node *parent = NULL;
@ -2868,20 +2869,23 @@ add_orphan_dir_info(struct send_ctx *sctx, u64 dir_ino)
while (*p) {
parent = *p;
entry = rb_entry(parent, struct orphan_dir_info, node);
if (dir_ino < entry->ino) {
if (dir_ino < entry->ino)
p = &(*p)->rb_left;
} else if (dir_ino > entry->ino) {
else if (dir_ino > entry->ino)
p = &(*p)->rb_right;
} else {
else if (dir_gen < entry->gen)
p = &(*p)->rb_left;
else if (dir_gen > entry->gen)
p = &(*p)->rb_right;
else
return entry;
}
}
odi = kmalloc(sizeof(*odi), GFP_KERNEL);
if (!odi)
return ERR_PTR(-ENOMEM);
odi->ino = dir_ino;
odi->gen = 0;
odi->gen = dir_gen;
odi->last_dir_index_offset = 0;
rb_link_node(&odi->node, parent, p);
@ -2889,8 +2893,8 @@ add_orphan_dir_info(struct send_ctx *sctx, u64 dir_ino)
return odi;
}
static struct orphan_dir_info *
get_orphan_dir_info(struct send_ctx *sctx, u64 dir_ino)
static struct orphan_dir_info *get_orphan_dir_info(struct send_ctx *sctx,
u64 dir_ino, u64 gen)
{
struct rb_node *n = sctx->orphan_dirs.rb_node;
struct orphan_dir_info *entry;
@ -2901,15 +2905,19 @@ get_orphan_dir_info(struct send_ctx *sctx, u64 dir_ino)
n = n->rb_left;
else if (dir_ino > entry->ino)
n = n->rb_right;
else if (gen < entry->gen)
n = n->rb_left;
else if (gen > entry->gen)
n = n->rb_right;
else
return entry;
}
return NULL;
}
static int is_waiting_for_rm(struct send_ctx *sctx, u64 dir_ino)
static int is_waiting_for_rm(struct send_ctx *sctx, u64 dir_ino, u64 gen)
{
struct orphan_dir_info *odi = get_orphan_dir_info(sctx, dir_ino);
struct orphan_dir_info *odi = get_orphan_dir_info(sctx, dir_ino, gen);
return odi != NULL;
}
@ -2954,7 +2962,7 @@ static int can_rmdir(struct send_ctx *sctx, u64 dir, u64 dir_gen,
key.type = BTRFS_DIR_INDEX_KEY;
key.offset = 0;
odi = get_orphan_dir_info(sctx, dir);
odi = get_orphan_dir_info(sctx, dir, dir_gen);
if (odi)
key.offset = odi->last_dir_index_offset;
@ -2985,7 +2993,7 @@ static int can_rmdir(struct send_ctx *sctx, u64 dir, u64 dir_gen,
dm = get_waiting_dir_move(sctx, loc.objectid);
if (dm) {
odi = add_orphan_dir_info(sctx, dir);
odi = add_orphan_dir_info(sctx, dir, dir_gen);
if (IS_ERR(odi)) {
ret = PTR_ERR(odi);
goto out;
@ -2993,12 +3001,13 @@ static int can_rmdir(struct send_ctx *sctx, u64 dir, u64 dir_gen,
odi->gen = dir_gen;
odi->last_dir_index_offset = found_key.offset;
dm->rmdir_ino = dir;
dm->rmdir_gen = dir_gen;
ret = 0;
goto out;
}
if (loc.objectid > send_progress) {
odi = add_orphan_dir_info(sctx, dir);
odi = add_orphan_dir_info(sctx, dir, dir_gen);
if (IS_ERR(odi)) {
ret = PTR_ERR(odi);
goto out;
@ -3038,6 +3047,7 @@ static int add_waiting_dir_move(struct send_ctx *sctx, u64 ino, bool orphanized)
return -ENOMEM;
dm->ino = ino;
dm->rmdir_ino = 0;
dm->rmdir_gen = 0;
dm->orphanized = orphanized;
while (*p) {
@ -3183,7 +3193,7 @@ static int path_loop(struct send_ctx *sctx, struct fs_path *name,
while (ino != BTRFS_FIRST_FREE_OBJECTID) {
fs_path_reset(name);
if (is_waiting_for_rm(sctx, ino))
if (is_waiting_for_rm(sctx, ino, gen))
break;
if (is_waiting_for_move(sctx, ino)) {
if (*ancestor_ino == 0)
@ -3223,6 +3233,7 @@ static int apply_dir_move(struct send_ctx *sctx, struct pending_dir_move *pm)
u64 parent_ino, parent_gen;
struct waiting_dir_move *dm = NULL;
u64 rmdir_ino = 0;
u64 rmdir_gen;
u64 ancestor;
bool is_orphan;
int ret;
@ -3237,6 +3248,7 @@ static int apply_dir_move(struct send_ctx *sctx, struct pending_dir_move *pm)
dm = get_waiting_dir_move(sctx, pm->ino);
ASSERT(dm);
rmdir_ino = dm->rmdir_ino;
rmdir_gen = dm->rmdir_gen;
is_orphan = dm->orphanized;
free_waiting_dir_move(sctx, dm);
@ -3273,6 +3285,7 @@ static int apply_dir_move(struct send_ctx *sctx, struct pending_dir_move *pm)
dm = get_waiting_dir_move(sctx, pm->ino);
ASSERT(dm);
dm->rmdir_ino = rmdir_ino;
dm->rmdir_gen = rmdir_gen;
}
goto out;
}
@ -3291,7 +3304,7 @@ static int apply_dir_move(struct send_ctx *sctx, struct pending_dir_move *pm)
struct orphan_dir_info *odi;
u64 gen;
odi = get_orphan_dir_info(sctx, rmdir_ino);
odi = get_orphan_dir_info(sctx, rmdir_ino, rmdir_gen);
if (!odi) {
/* already deleted */
goto finish;

View File

@ -532,7 +532,7 @@ static void shrink_delalloc(struct btrfs_fs_info *fs_info,
loops = 0;
while ((delalloc_bytes || dio_bytes) && loops < 3) {
btrfs_start_delalloc_roots(fs_info, items);
btrfs_start_delalloc_roots(fs_info, items, true);
loops++;
if (wait_ordered && !trans) {

View File

@ -175,7 +175,7 @@ void __btrfs_handle_fs_error(struct btrfs_fs_info *fs_info, const char *function
btrfs_discard_stop(fs_info);
/* btrfs handle error by forcing the filesystem readonly */
sb->s_flags |= SB_RDONLY;
btrfs_set_sb_rdonly(sb);
btrfs_info(fs_info, "forced readonly");
/*
* Note that a running device replace operation is not canceled here
@ -1953,7 +1953,7 @@ static int btrfs_remount(struct super_block *sb, int *flags, char *data)
/* avoid complains from lockdep et al. */
up(&fs_info->uuid_tree_rescan_sem);
sb->s_flags |= SB_RDONLY;
btrfs_set_sb_rdonly(sb);
/*
* Setting SB_RDONLY will put the cleaner thread to
@ -1964,10 +1964,42 @@ static int btrfs_remount(struct super_block *sb, int *flags, char *data)
*/
btrfs_delete_unused_bgs(fs_info);
/*
* The cleaner task could already be running before we set the
* flag BTRFS_FS_STATE_RO (and SB_RDONLY in the superblock).
* We must make sure that after we finish the remount, i.e. after
* we call btrfs_commit_super(), the cleaner can no longer start
* a transaction - either because it was dropping a dead root,
* running delayed iputs or deleting an unused block group (the
* cleaner picked a block group from the list of unused block
* groups before we were able to in the previous call to
* btrfs_delete_unused_bgs()).
*/
wait_on_bit(&fs_info->flags, BTRFS_FS_CLEANER_RUNNING,
TASK_UNINTERRUPTIBLE);
/*
* We've set the superblock to RO mode, so we might have made
* the cleaner task sleep without running all pending delayed
* iputs. Go through all the delayed iputs here, so that if an
* unmount happens without remounting RW we don't reach
* close_ctree() with a non-empty list of delayed iputs.
*/
btrfs_run_delayed_iputs(fs_info);
btrfs_dev_replace_suspend_for_unmount(fs_info);
btrfs_scrub_cancel(fs_info);
btrfs_pause_balance(fs_info);
/*
* Pause the qgroup rescan worker if it is running. We don't want
* it to still be running after we are in RO mode, as after that,
* by the time we unmount, it might have left a transaction open,
* so we would leak the transaction and/or crash.
*/
btrfs_qgroup_wait_for_completion(fs_info, false);
ret = btrfs_commit_super(fs_info);
if (ret)
goto restore;
@ -2006,7 +2038,7 @@ static int btrfs_remount(struct super_block *sb, int *flags, char *data)
if (ret)
goto restore;
sb->s_flags &= ~SB_RDONLY;
btrfs_clear_sb_rdonly(sb);
set_bit(BTRFS_FS_OPEN, &fs_info->flags);
}
@ -2028,6 +2060,8 @@ static int btrfs_remount(struct super_block *sb, int *flags, char *data)
/* We've hit an error - don't reset SB_RDONLY */
if (sb_rdonly(sb))
old_flags |= SB_RDONLY;
if (!(old_flags & SB_RDONLY))
clear_bit(BTRFS_FS_STATE_RO, &fs_info->fs_state);
sb->s_flags = old_flags;
fs_info->mount_opt = old_opts;
fs_info->compress_type = old_compress_type;

View File

@ -55,8 +55,14 @@ struct inode *btrfs_new_test_inode(void)
struct inode *inode;
inode = new_inode(test_mnt->mnt_sb);
if (inode)
inode_init_owner(inode, NULL, S_IFREG);
if (!inode)
return NULL;
inode->i_mode = S_IFREG;
BTRFS_I(inode)->location.type = BTRFS_INODE_ITEM_KEY;
BTRFS_I(inode)->location.objectid = BTRFS_FIRST_FREE_OBJECTID;
BTRFS_I(inode)->location.offset = 0;
inode_init_owner(inode, NULL, S_IFREG);
return inode;
}

View File

@ -232,11 +232,6 @@ static noinline int test_btrfs_get_extent(u32 sectorsize, u32 nodesize)
return ret;
}
inode->i_mode = S_IFREG;
BTRFS_I(inode)->location.type = BTRFS_INODE_ITEM_KEY;
BTRFS_I(inode)->location.objectid = BTRFS_FIRST_FREE_OBJECTID;
BTRFS_I(inode)->location.offset = 0;
fs_info = btrfs_alloc_dummy_fs_info(nodesize, sectorsize);
if (!fs_info) {
test_std_err(TEST_ALLOC_FS_INFO);
@ -835,10 +830,6 @@ static int test_hole_first(u32 sectorsize, u32 nodesize)
return ret;
}
BTRFS_I(inode)->location.type = BTRFS_INODE_ITEM_KEY;
BTRFS_I(inode)->location.objectid = BTRFS_FIRST_FREE_OBJECTID;
BTRFS_I(inode)->location.offset = 0;
fs_info = btrfs_alloc_dummy_fs_info(nodesize, sectorsize);
if (!fs_info) {
test_std_err(TEST_ALLOC_FS_INFO);

View File

@ -2592,7 +2592,7 @@ int btrfs_init_new_device(struct btrfs_fs_info *fs_info, const char *device_path
set_blocksize(device->bdev, BTRFS_BDEV_BLOCKSIZE);
if (seeding_dev) {
sb->s_flags &= ~SB_RDONLY;
btrfs_clear_sb_rdonly(sb);
ret = btrfs_prepare_sprout(fs_info);
if (ret) {
btrfs_abort_transaction(trans, ret);
@ -2728,7 +2728,7 @@ int btrfs_init_new_device(struct btrfs_fs_info *fs_info, const char *device_path
mutex_unlock(&fs_info->fs_devices->device_list_mutex);
error_trans:
if (seeding_dev)
sb->s_flags |= SB_RDONLY;
btrfs_set_sb_rdonly(sb);
if (trans)
btrfs_end_transaction(trans);
error_free_zone:

View File

@ -1011,14 +1011,17 @@ static int do_sys_poll(struct pollfd __user *ufds, unsigned int nfds,
fdcount = do_poll(head, &table, end_time);
poll_freewait(&table);
if (!user_write_access_begin(ufds, nfds * sizeof(*ufds)))
goto out_fds;
for (walk = head; walk; walk = walk->next) {
struct pollfd *fds = walk->entries;
int j;
for (j = 0; j < walk->len; j++, ufds++)
if (__put_user(fds[j].revents, &ufds->revents))
goto out_fds;
for (j = walk->len; j; fds++, ufds++, j--)
unsafe_put_user(fds->revents, &ufds->revents, Efault);
}
user_write_access_end();
err = fdcount;
out_fds:
@ -1030,6 +1033,11 @@ static int do_sys_poll(struct pollfd __user *ufds, unsigned int nfds,
}
return err;
Efault:
user_write_access_end();
err = -EFAULT;
goto out_fds;
}
static long do_restart_poll(struct restart_block *restart_block)

View File

@ -186,12 +186,9 @@ extern int braille_register_console(struct console *, int index,
extern int braille_unregister_console(struct console *);
#ifdef CONFIG_TTY
extern void console_sysfs_notify(void);
extern void register_ttynull_console(void);
#else
static inline void console_sysfs_notify(void)
{ }
static inline void register_ttynull_console(void)
{ }
#endif
extern bool console_suspend_enabled;

View File

@ -1280,7 +1280,8 @@ struct mlx5_ifc_cmd_hca_cap_bits {
u8 ece_support[0x1];
u8 reserved_at_a4[0x7];
u8 log_max_srq[0x5];
u8 reserved_at_b0[0x2];
u8 reserved_at_b0[0x1];
u8 uplink_follow[0x1];
u8 ts_cqe_to_dest_cqn[0x1];
u8 reserved_at_b3[0xd];

View File

@ -75,8 +75,9 @@ struct rtnl_link_stats {
*
* @rx_dropped: Number of packets received but not processed,
* e.g. due to lack of resources or unsupported protocol.
* For hardware interfaces this counter should not include packets
* dropped by the device which are counted separately in
* For hardware interfaces this counter may include packets discarded
* due to L2 address filtering but should not include packets dropped
* by the device due to buffer exhaustion, which are counted separately in
* @rx_missed_errors (since procfs folds those two counters together).
*
* @tx_dropped: Number of packets dropped on their way to transmission,

View File

@ -1480,14 +1480,8 @@ void __init console_on_rootfs(void)
struct file *file = filp_open("/dev/console", O_RDWR, 0);
if (IS_ERR(file)) {
pr_err("Warning: unable to open an initial console. Fallback to ttynull.\n");
register_ttynull_console();
file = filp_open("/dev/console", O_RDWR, 0);
if (IS_ERR(file)) {
pr_err("Warning: Failed to add ttynull console. No stdin, stdout, and stderr for the init process!\n");
return;
}
pr_err("Warning: unable to open an initial console.\n");
return;
}
init_dup(file);
init_dup(file);

View File

@ -159,6 +159,7 @@ task_file_seq_get_next(struct bpf_iter_seq_task_file_info *info)
}
/* set info->task and info->tid */
info->task = curr_task;
if (curr_tid == info->tid) {
curr_fd = info->fd;
} else {

View File

@ -284,7 +284,8 @@ static int register_vlan_device(struct net_device *real_dev, u16 vlan_id)
return 0;
out_free_newdev:
if (new_dev->reg_state == NETREG_UNINITIALIZED)
if (new_dev->reg_state == NETREG_UNINITIALIZED ||
new_dev->reg_state == NETREG_UNREGISTERED)
free_netdev(new_dev);
return err;
}

View File

@ -302,7 +302,7 @@ static int __ip_finish_output(struct net *net, struct sock *sk, struct sk_buff *
if (skb_is_gso(skb))
return ip_finish_output_gso(net, sk, skb, mtu);
if (skb->len > mtu || (IPCB(skb)->flags & IPSKB_FRAG_PMTU))
if (skb->len > mtu || IPCB(skb)->frag_max_size)
return ip_fragment(net, sk, skb, mtu, ip_finish_output2);
return ip_finish_output2(net, sk, skb);

View File

@ -759,8 +759,11 @@ void ip_tunnel_xmit(struct sk_buff *skb, struct net_device *dev,
goto tx_error;
}
if (tnl_update_pmtu(dev, skb, rt, tnl_params->frag_off, inner_iph,
0, 0, false)) {
df = tnl_params->frag_off;
if (skb->protocol == htons(ETH_P_IP) && !tunnel->ignore_df)
df |= (inner_iph->frag_off & htons(IP_DF));
if (tnl_update_pmtu(dev, skb, rt, df, inner_iph, 0, 0, false)) {
ip_rt_put(rt);
goto tx_error;
}
@ -788,10 +791,6 @@ void ip_tunnel_xmit(struct sk_buff *skb, struct net_device *dev,
ttl = ip4_dst_hoplimit(&rt->dst);
}
df = tnl_params->frag_off;
if (skb->protocol == htons(ETH_P_IP) && !tunnel->ignore_df)
df |= (inner_iph->frag_off&htons(IP_DF));
max_headroom = LL_RESERVED_SPACE(rt->dst.dev) + sizeof(struct iphdr)
+ rt->dst.header_len + ip_encap_hlen(&tunnel->encap);
if (max_headroom > dev->needed_headroom)

View File

@ -627,7 +627,7 @@ static int nh_check_attr_group(struct net *net, struct nlattr *tb[],
for (i = NHA_GROUP_TYPE + 1; i < __NHA_MAX; ++i) {
if (!tb[i])
continue;
if (tb[NHA_FDB])
if (i == NHA_FDB)
continue;
NL_SET_ERR_MSG(extack,
"No other attributes can be set in nexthop groups");
@ -1459,8 +1459,10 @@ static struct nexthop *nexthop_create_group(struct net *net,
return nh;
out_no_nh:
for (; i >= 0; --i)
for (i--; i >= 0; --i) {
list_del(&nhg->nh_entries[i].nh_list);
nexthop_put(nhg->nh_entries[i].nh);
}
kfree(nhg->spare);
kfree(nhg);

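The error-path fix above is the classic partial-construction unwind: on failure at index i, entry i was never fully linked, so cleanup must start at i - 1 and must undo both the list insertion and the reference. A freestanding sketch of the idiom, with invented names:

#include <stdio.h>

struct item { int id; int live; };

static int setup_one(struct item *it)
{
	if (it->id == 2)	/* pretend the third entry fails */
		return -1;
	it->live = 1;
	return 0;
}

static void teardown_one(struct item *it)
{
	it->live = 0;
	printf("undo %d\n", it->id);
}

static int setup_all(struct item *items, int n)
{
	int i;

	for (i = 0; i < n; i++)
		if (setup_one(&items[i]))
			goto undo;
	return 0;
undo:
	for (i--; i >= 0; --i)	/* skip the entry that failed */
		teardown_one(&items[i]);
	return -1;
}

int main(void)
{
	struct item v[] = { {0, 0}, {1, 0}, {2, 0}, {3, 0} };

	printf("setup_all: %d\n", setup_all(v, 4));	/* -1, after undoing 1 and 0 */
	return 0;
}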
View File

@ -1025,6 +1025,8 @@ static void fib6_purge_rt(struct fib6_info *rt, struct fib6_node *fn,
{
struct fib6_table *table = rt->fib6_table;
/* Flush all cached dst in exception table */
rt6_flush_exceptions(rt);
fib6_drop_pcpu_from(rt, table);
if (rt->nh && !list_empty(&rt->nh_list))
@ -1927,9 +1929,6 @@ static void fib6_del_route(struct fib6_table *table, struct fib6_node *fn,
net->ipv6.rt6_stats->fib_rt_entries--;
net->ipv6.rt6_stats->fib_discarded_routes++;
/* Flush all cached dst in exception table */
rt6_flush_exceptions(rt);
/* Reset round-robin state, if necessary */
if (rcu_access_pointer(fn->rr_ptr) == rt)
fn->rr_ptr = NULL;

View File

@ -755,7 +755,7 @@ static void qrtr_ns_data_ready(struct sock *sk)
queue_work(qrtr_ns.workqueue, &qrtr_ns.work);
}
void qrtr_ns_init(void)
int qrtr_ns_init(void)
{
struct sockaddr_qrtr sq;
int ret;
@ -766,7 +766,7 @@ void qrtr_ns_init(void)
ret = sock_create_kern(&init_net, AF_QIPCRTR, SOCK_DGRAM,
PF_QIPCRTR, &qrtr_ns.sock);
if (ret < 0)
return;
return ret;
ret = kernel_getsockname(qrtr_ns.sock, (struct sockaddr *)&sq);
if (ret < 0) {
@ -797,12 +797,13 @@ void qrtr_ns_init(void)
if (ret < 0)
goto err_wq;
return;
return 0;
err_wq:
destroy_workqueue(qrtr_ns.workqueue);
err_sock:
sock_release(qrtr_ns.sock);
return ret;
}
EXPORT_SYMBOL_GPL(qrtr_ns_init);

View File

@ -1287,13 +1287,19 @@ static int __init qrtr_proto_init(void)
return rc;
rc = sock_register(&qrtr_family);
if (rc) {
proto_unregister(&qrtr_proto);
return rc;
}
if (rc)
goto err_proto;
qrtr_ns_init();
rc = qrtr_ns_init();
if (rc)
goto err_sock;
return 0;
err_sock:
sock_unregister(qrtr_family.family);
err_proto:
proto_unregister(&qrtr_proto);
return rc;
}
postcore_initcall(qrtr_proto_init);

View File

@ -29,7 +29,7 @@ void qrtr_endpoint_unregister(struct qrtr_endpoint *ep);
int qrtr_endpoint_post(struct qrtr_endpoint *ep, const void *data, size_t len);
void qrtr_ns_init(void);
int qrtr_ns_init(void);
void qrtr_ns_remove(void);

View File

@ -21,6 +21,7 @@ config CFG80211
tristate "cfg80211 - wireless configuration API"
depends on RFKILL || !RFKILL
select FW_LOADER
select CRC32
# may need to update this when certificates are changed and are
# using a different algorithm, though right now they shouldn't
# (this is here rather than below to allow it to be a module)

View File

@ -22,9 +22,9 @@ always-y += $(GCC_PLUGIN)
GCC_PLUGINS_DIR = $(shell $(CC) -print-file-name=plugin)
plugin_cxxflags = -Wp,-MMD,$(depfile) $(KBUILD_HOSTCXXFLAGS) -fPIC \
-I $(GCC_PLUGINS_DIR)/include -I $(obj) -std=gnu++98 \
-I $(GCC_PLUGINS_DIR)/include -I $(obj) -std=gnu++11 \
-fno-rtti -fno-exceptions -fasynchronous-unwind-tables \
-ggdb -Wno-narrowing -Wno-unused-variable -Wno-c++11-compat \
-ggdb -Wno-narrowing -Wno-unused-variable \
-Wno-format-diag
plugin_ldflags = -shared

View File

@ -2220,8 +2220,6 @@ static const struct snd_pci_quirk power_save_denylist[] = {
SND_PCI_QUIRK(0x1849, 0x7662, "Asrock H81M-HDS", 0),
/* https://bugzilla.redhat.com/show_bug.cgi?id=1525104 */
SND_PCI_QUIRK(0x1043, 0x8733, "Asus Prime X370-Pro", 0),
/* https://bugzilla.redhat.com/show_bug.cgi?id=1581607 */
SND_PCI_QUIRK(0x1558, 0x3501, "Clevo W35xSS_370SS", 0),
/* https://bugzilla.redhat.com/show_bug.cgi?id=1525104 */
SND_PCI_QUIRK(0x1558, 0x6504, "Clevo W65_67SB", 0),
/* https://bugzilla.redhat.com/show_bug.cgi?id=1525104 */

View File

@ -1070,6 +1070,7 @@ static int patch_conexant_auto(struct hda_codec *codec)
static const struct hda_device_id snd_hda_id_conexant[] = {
HDA_CODEC_ENTRY(0x14f11f86, "CX8070", patch_conexant_auto),
HDA_CODEC_ENTRY(0x14f12008, "CX8200", patch_conexant_auto),
HDA_CODEC_ENTRY(0x14f120d0, "CX11970", patch_conexant_auto),
HDA_CODEC_ENTRY(0x14f15045, "CX20549 (Venice)", patch_conexant_auto),
HDA_CODEC_ENTRY(0x14f15047, "CX20551 (Waikiki)", patch_conexant_auto),
HDA_CODEC_ENTRY(0x14f15051, "CX20561 (Hermosa)", patch_conexant_auto),

View File

@ -1733,7 +1733,7 @@ static void silent_stream_disable(struct hda_codec *codec,
per_pin->silent_stream = false;
unlock_out:
mutex_unlock(&spec->pcm_lock);
mutex_unlock(&per_pin->lock);
}
/* update ELD and jack state via audio component */

Some files were not shown because too many files have changed in this diff.