linux/net/bluetooth/rfcomm/sock.c


/*
RFCOMM implementation for Linux Bluetooth stack (BlueZ).
Copyright (C) 2002 Maxim Krasnyansky <maxk@qualcomm.com>
Copyright (C) 2002 Marcel Holtmann <marcel@holtmann.org>
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License version 2 as
published by the Free Software Foundation;
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
SOFTWARE IS DISCLAIMED.
*/
/*
* RFCOMM sockets.
*/
#include <linux/export.h>
#include <linux/debugfs.h>
#include <net/bluetooth/bluetooth.h>
#include <net/bluetooth/hci_core.h>
#include <net/bluetooth/l2cap.h>
#include <net/bluetooth/rfcomm.h>
static const struct proto_ops rfcomm_sock_ops;
static struct bt_sock_list rfcomm_sk_list = {
.lock = __RW_LOCK_UNLOCKED(rfcomm_sk_list.lock)
};
static void rfcomm_sock_close(struct sock *sk);
static void rfcomm_sock_kill(struct sock *sk);
/* ---- DLC callbacks ----
*
* called under rfcomm_dlc_lock()
*/
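/* Queue an incoming skb on the owner socket, account it in
 * sk_rmem_alloc and wake any reader. The DLC is throttled once the
 * socket receive buffer is full.
 */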
static void rfcomm_sk_data_ready(struct rfcomm_dlc *d, struct sk_buff *skb)
{
struct sock *sk = d->owner;
if (!sk)
return;
atomic_add(skb->len, &sk->sk_rmem_alloc);
skb_queue_tail(&sk->sk_receive_queue, skb);
sk->sk_data_ready(sk, skb->len);
if (atomic_read(&sk->sk_rmem_alloc) >= sk->sk_rcvbuf)
rfcomm_dlc_throttle(d);
}
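/* Propagate a DLC state change to the owner socket. A closed DLC on a
 * not-yet-accepted child unlinks it from the parent's accept queue and
 * wakes the listening parent; for other sockets the source address is
 * refreshed on BT_CONNECTED and sk_state_change() is called.
 */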
static void rfcomm_sk_state_change(struct rfcomm_dlc *d, int err)
{
struct sock *sk = d->owner, *parent;
unsigned long flags;
if (!sk)
return;
BT_DBG("dlc %p state %ld err %d", d, d->state, err);
local_irq_save(flags);
bh_lock_sock(sk);
if (err)
sk->sk_err = err;
sk->sk_state = d->state;
parent = bt_sk(sk)->parent;
if (parent) {
if (d->state == BT_CLOSED) {
sock_set_flag(sk, SOCK_ZAPPED);
bt_accept_unlink(sk);
}
parent->sk_data_ready(parent, 0);
} else {
if (d->state == BT_CONNECTED)
rfcomm_session_getaddr(d->session, &bt_sk(sk)->src, NULL);
sk->sk_state_change(sk);
}
bh_unlock_sock(sk);
local_irq_restore(flags);
if (parent && sock_flag(sk, SOCK_ZAPPED)) {
/* We have to drop the DLC lock here, otherwise
 * rfcomm_sock_destruct() will deadlock. */
rfcomm_dlc_unlock(d);
rfcomm_sock_kill(sk);
rfcomm_dlc_lock(d);
}
}
/* ---- Socket functions ---- */
static struct sock *__rfcomm_get_sock_by_addr(u8 channel, bdaddr_t *src)
{
struct sock *sk = NULL;
sk_for_each(sk, &rfcomm_sk_list.head) {
if (rfcomm_pi(sk)->channel == channel &&
!bacmp(&bt_sk(sk)->src, src))
break;
}
return sk ? sk : NULL;
}
/* Find socket with channel and source bdaddr.
* Returns closest match.
*/
static struct sock *rfcomm_get_sock_by_channel(int state, u8 channel, bdaddr_t *src)
{
struct sock *sk = NULL, *sk1 = NULL;
read_lock(&rfcomm_sk_list.lock);
sk_for_each(sk, &rfcomm_sk_list.head) {
if (state && sk->sk_state != state)
continue;
if (rfcomm_pi(sk)->channel == channel) {
/* Exact match. */
if (!bacmp(&bt_sk(sk)->src, src))
break;
/* Closest match */
if (!bacmp(&bt_sk(sk)->src, BDADDR_ANY))
sk1 = sk;
}
}
read_unlock(&rfcomm_sk_list.lock);
return sk ? sk : sk1;
}
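/* Socket destructor: purge the receive and write queues and detach
 * the socket from its DLC.
 */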
static void rfcomm_sock_destruct(struct sock *sk)
{
struct rfcomm_dlc *d = rfcomm_pi(sk)->dlc;
BT_DBG("sk %p dlc %p", sk, d);
skb_queue_purge(&sk->sk_receive_queue);
skb_queue_purge(&sk->sk_write_queue);
rfcomm_dlc_lock(d);
rfcomm_pi(sk)->dlc = NULL;
/* Detach DLC if it's owned by this socket */
if (d->owner == sk)
d->owner = NULL;
rfcomm_dlc_unlock(d);
rfcomm_dlc_put(d);
}
static void rfcomm_sock_cleanup_listen(struct sock *parent)
{
struct sock *sk;
BT_DBG("parent %p", parent);
/* Close not yet accepted dlcs */
while ((sk = bt_accept_dequeue(parent, NULL))) {
rfcomm_sock_close(sk);
rfcomm_sock_kill(sk);
}
parent->sk_state = BT_CLOSED;
sock_set_flag(parent, SOCK_ZAPPED);
}
/* Kill socket (only if zapped and orphan)
* Must be called on unlocked socket.
*/
static void rfcomm_sock_kill(struct sock *sk)
{
if (!sock_flag(sk, SOCK_ZAPPED) || sk->sk_socket)
return;
BT_DBG("sk %p state %d refcnt %d", sk, sk->sk_state, atomic_read(&sk->sk_refcnt));
/* Kill poor orphan */
bt_sock_unlink(&rfcomm_sk_list, sk);
sock_set_flag(sk, SOCK_DEAD);
sock_put(sk);
}
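/* Close a socket according to its state: tear down not yet accepted
 * connections of a listening socket, close an active DLC, and mark
 * the socket zapped. Caller must hold the socket lock.
 */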
static void __rfcomm_sock_close(struct sock *sk)
{
struct rfcomm_dlc *d = rfcomm_pi(sk)->dlc;
BT_DBG("sk %p state %d socket %p", sk, sk->sk_state, sk->sk_socket);
switch (sk->sk_state) {
case BT_LISTEN:
rfcomm_sock_cleanup_listen(sk);
break;
case BT_CONNECT:
case BT_CONNECT2:
case BT_CONFIG:
case BT_CONNECTED:
rfcomm_dlc_close(d, 0);
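/* fall through */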
default:
sock_set_flag(sk, SOCK_ZAPPED);
break;
}
}
/* Close socket.
* Must be called on unlocked socket.
*/
static void rfcomm_sock_close(struct sock *sk)
{
lock_sock(sk);
__rfcomm_sock_close(sk);
release_sock(sk);
}
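/* Initialize protocol info, inheriting security level, role switch
 * and deferred setup from the parent (listening) socket if given.
 */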
static void rfcomm_sock_init(struct sock *sk, struct sock *parent)
{
struct rfcomm_pinfo *pi = rfcomm_pi(sk);
BT_DBG("sk %p", sk);
if (parent) {
sk->sk_type = parent->sk_type;
pi->dlc->defer_setup = test_bit(BT_SK_DEFER_SETUP,
&bt_sk(parent)->flags);
pi->sec_level = rfcomm_pi(parent)->sec_level;
pi->role_switch = rfcomm_pi(parent)->role_switch;
security_sk_clone(parent, sk);
} else {
pi->dlc->defer_setup = 0;
pi->sec_level = BT_SECURITY_LOW;
pi->role_switch = 0;
}
pi->dlc->sec_level = pi->sec_level;
pi->dlc->role_switch = pi->role_switch;
}
static struct proto rfcomm_proto = {
.name = "RFCOMM",
.owner = THIS_MODULE,
.obj_size = sizeof(struct rfcomm_pinfo)
};
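/* Allocate a new RFCOMM socket together with its DLC and link it into
 * the global rfcomm_sk_list.
 */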
static struct sock *rfcomm_sock_alloc(struct net *net, struct socket *sock, int proto, gfp_t prio)
{
struct rfcomm_dlc *d;
struct sock *sk;
sk = sk_alloc(net, PF_BLUETOOTH, prio, &rfcomm_proto);
if (!sk)
return NULL;
sock_init_data(sock, sk);
INIT_LIST_HEAD(&bt_sk(sk)->accept_q);
d = rfcomm_dlc_alloc(prio);
if (!d) {
sk_free(sk);
return NULL;
}
d->data_ready = rfcomm_sk_data_ready;
d->state_change = rfcomm_sk_state_change;
rfcomm_pi(sk)->dlc = d;
d->owner = sk;
sk->sk_destruct = rfcomm_sock_destruct;
sk->sk_sndtimeo = RFCOMM_CONN_TIMEOUT;
sk->sk_sndbuf = RFCOMM_MAX_CREDITS * RFCOMM_DEFAULT_MTU * 10;
sk->sk_rcvbuf = RFCOMM_MAX_CREDITS * RFCOMM_DEFAULT_MTU * 10;
sock_reset_flag(sk, SOCK_ZAPPED);
sk->sk_protocol = proto;
sk->sk_state = BT_OPEN;
bt_sock_link(&rfcomm_sk_list, sk);
BT_DBG("sk %p", sk);
return sk;
}
static int rfcomm_sock_create(struct net *net, struct socket *sock,
int protocol, int kern)
{
struct sock *sk;
BT_DBG("sock %p", sock);
sock->state = SS_UNCONNECTED;
if (sock->type != SOCK_STREAM && sock->type != SOCK_RAW)
return -ESOCKTNOSUPPORT;
sock->ops = &rfcomm_sock_ops;
sk = rfcomm_sock_alloc(net, sock, protocol, GFP_ATOMIC);
if (!sk)
return -ENOMEM;
rfcomm_sock_init(sk, NULL);
return 0;
}
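/* Bind the socket to a source bdaddr and server channel. Channel 0
 * leaves the channel unassigned; listen() will then pick a free one.
 */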
static int rfcomm_sock_bind(struct socket *sock, struct sockaddr *addr, int addr_len)
{
struct sockaddr_rc *sa = (struct sockaddr_rc *) addr;
struct sock *sk = sock->sk;
int err = 0;
BT_DBG("sk %p %pMR", sk, &sa->rc_bdaddr);
if (!addr || addr->sa_family != AF_BLUETOOTH)
return -EINVAL;
lock_sock(sk);
if (sk->sk_state != BT_OPEN) {
err = -EBADFD;
goto done;
}
if (sk->sk_type != SOCK_STREAM) {
err = -EINVAL;
goto done;
}
write_lock(&rfcomm_sk_list.lock);
if (sa->rc_channel && __rfcomm_get_sock_by_addr(sa->rc_channel, &sa->rc_bdaddr)) {
err = -EADDRINUSE;
} else {
/* Save source address */
bacpy(&bt_sk(sk)->src, &sa->rc_bdaddr);
rfcomm_pi(sk)->channel = sa->rc_channel;
sk->sk_state = BT_BOUND;
}
write_unlock(&rfcomm_sk_list.lock);
done:
release_sock(sk);
return err;
}
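/* Connect to a remote bdaddr/channel: open the DLC and wait for it to
 * reach BT_CONNECTED, subject to the socket send timeout.
 */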
static int rfcomm_sock_connect(struct socket *sock, struct sockaddr *addr, int alen, int flags)
{
struct sockaddr_rc *sa = (struct sockaddr_rc *) addr;
struct sock *sk = sock->sk;
struct rfcomm_dlc *d = rfcomm_pi(sk)->dlc;
int err = 0;
BT_DBG("sk %p", sk);
if (alen < sizeof(struct sockaddr_rc) ||
addr->sa_family != AF_BLUETOOTH)
return -EINVAL;
lock_sock(sk);
if (sk->sk_state != BT_OPEN && sk->sk_state != BT_BOUND) {
err = -EBADFD;
goto done;
}
if (sk->sk_type != SOCK_STREAM) {
err = -EINVAL;
goto done;
}
sk->sk_state = BT_CONNECT;
bacpy(&bt_sk(sk)->dst, &sa->rc_bdaddr);
rfcomm_pi(sk)->channel = sa->rc_channel;
d->sec_level = rfcomm_pi(sk)->sec_level;
d->role_switch = rfcomm_pi(sk)->role_switch;
err = rfcomm_dlc_open(d, &bt_sk(sk)->src, &sa->rc_bdaddr, sa->rc_channel);
if (!err)
err = bt_sock_wait_state(sk, BT_CONNECTED,
sock_sndtimeo(sk, flags & O_NONBLOCK));
done:
release_sock(sk);
return err;
}
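/* Switch a bound socket to the listening state. If no channel was
 * bound, the first free server channel (1-30) is assigned here.
 */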
static int rfcomm_sock_listen(struct socket *sock, int backlog)
{
struct sock *sk = sock->sk;
int err = 0;
BT_DBG("sk %p backlog %d", sk, backlog);
lock_sock(sk);
if (sk->sk_state != BT_BOUND) {
err = -EBADFD;
goto done;
}
if (sk->sk_type != SOCK_STREAM) {
err = -EINVAL;
goto done;
}
if (!rfcomm_pi(sk)->channel) {
bdaddr_t *src = &bt_sk(sk)->src;
u8 channel;
err = -EINVAL;
write_lock(&rfcomm_sk_list.lock);
for (channel = 1; channel < 31; channel++)
if (!__rfcomm_get_sock_by_addr(channel, src)) {
rfcomm_pi(sk)->channel = channel;
err = 0;
break;
}
write_unlock(&rfcomm_sk_list.lock);
if (err < 0)
goto done;
}
sk->sk_max_ack_backlog = backlog;
sk->sk_ack_backlog = 0;
sk->sk_state = BT_LISTEN;
done:
release_sock(sk);
return err;
}
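/* Wait for an incoming connection on a listening socket and return
 * the first queued child socket. Honors the receive timeout,
 * O_NONBLOCK and pending signals.
 */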
static int rfcomm_sock_accept(struct socket *sock, struct socket *newsock, int flags)
{
DECLARE_WAITQUEUE(wait, current);
struct sock *sk = sock->sk, *nsk;
long timeo;
int err = 0;
lock_sock_nested(sk, SINGLE_DEPTH_NESTING);
if (sk->sk_type != SOCK_STREAM) {
err = -EINVAL;
goto done;
}
timeo = sock_rcvtimeo(sk, flags & O_NONBLOCK);
BT_DBG("sk %p timeo %ld", sk, timeo);
/* Wait for an incoming connection. (wake-one). */
add_wait_queue_exclusive(sk_sleep(sk), &wait);
while (1) {
set_current_state(TASK_INTERRUPTIBLE);
if (sk->sk_state != BT_LISTEN) {
err = -EBADFD;
break;
}
nsk = bt_accept_dequeue(sk, newsock);
if (nsk)
break;
if (!timeo) {
err = -EAGAIN;
break;
}
if (signal_pending(current)) {
err = sock_intr_errno(timeo);
break;
}
release_sock(sk);
timeo = schedule_timeout(timeo);
lock_sock_nested(sk, SINGLE_DEPTH_NESTING);
}
__set_current_state(TASK_RUNNING);
remove_wait_queue(sk_sleep(sk), &wait);
if (err)
goto done;
newsock->state = SS_CONNECTED;
BT_DBG("new socket %p", nsk);
done:
release_sock(sk);
return err;
}
static int rfcomm_sock_getname(struct socket *sock, struct sockaddr *addr, int *len, int peer)
{
struct sockaddr_rc *sa = (struct sockaddr_rc *) addr;
struct sock *sk = sock->sk;
BT_DBG("sock %p, sk %p", sock, sk);
memset(sa, 0, sizeof(*sa));
sa->rc_family = AF_BLUETOOTH;
sa->rc_channel = rfcomm_pi(sk)->channel;
if (peer)
bacpy(&sa->rc_bdaddr, &bt_sk(sk)->dst);
else
bacpy(&sa->rc_bdaddr, &bt_sk(sk)->src);
*len = sizeof(struct sockaddr_rc);
return 0;
}
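/* Split the user buffer into MTU-sized skbs and hand them to the DLC.
 * Returns the number of bytes queued, or a negative error if nothing
 * was sent.
 */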
static int rfcomm_sock_sendmsg(struct kiocb *iocb, struct socket *sock,
struct msghdr *msg, size_t len)
{
struct sock *sk = sock->sk;
struct rfcomm_dlc *d = rfcomm_pi(sk)->dlc;
struct sk_buff *skb;
int sent;
if (test_bit(RFCOMM_DEFER_SETUP, &d->flags))
return -ENOTCONN;
if (msg->msg_flags & MSG_OOB)
return -EOPNOTSUPP;
if (sk->sk_shutdown & SEND_SHUTDOWN)
return -EPIPE;
BT_DBG("sock %p, sk %p", sock, sk);
lock_sock(sk);
sent = bt_sock_wait_ready(sk, msg->msg_flags);
if (sent)
goto done;
while (len) {
size_t size = min_t(size_t, len, d->mtu);
int err;
skb = sock_alloc_send_skb(sk, size + RFCOMM_SKB_RESERVE,
msg->msg_flags & MSG_DONTWAIT, &err);
if (!skb) {
if (sent == 0)
sent = err;
break;
}
skb_reserve(skb, RFCOMM_SKB_HEAD_RESERVE);
err = memcpy_fromiovec(skb_put(skb, size), msg->msg_iov, size);
if (err) {
kfree_skb(skb);
if (sent == 0)
sent = err;
break;
}
skb->priority = sk->sk_priority;
err = rfcomm_dlc_send(d, skb);
if (err < 0) {
kfree_skb(skb);
if (sent == 0)
sent = err;
break;
}
sent += size;
len -= size;
}
done:
release_sock(sk);
return sent;
}
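/* Receive data through the common stream recvmsg helper. A deferred
 * connection is accepted on the first read. Consumed bytes are
 * subtracted from sk_rmem_alloc and the DLC is unthrottled once the
 * receive buffer drops below a quarter of sk_rcvbuf.
 */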
static int rfcomm_sock_recvmsg(struct kiocb *iocb, struct socket *sock,
struct msghdr *msg, size_t size, int flags)
{
struct sock *sk = sock->sk;
struct rfcomm_dlc *d = rfcomm_pi(sk)->dlc;
int len;
if (test_and_clear_bit(RFCOMM_DEFER_SETUP, &d->flags)) {
rfcomm_dlc_accept(d);
msg->msg_namelen = 0;
return 0;
}
len = bt_sock_stream_recvmsg(iocb, sock, msg, size, flags);
lock_sock(sk);
if (!(flags & MSG_PEEK) && len > 0)
atomic_sub(len, &sk->sk_rmem_alloc);
if (atomic_read(&sk->sk_rmem_alloc) <= (sk->sk_rcvbuf >> 2))
rfcomm_dlc_unthrottle(rfcomm_pi(sk)->dlc);
release_sock(sk);
return len;
}
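/* Legacy SOL_RFCOMM options: RFCOMM_LM maps link mode bits onto a
 * security level and the role switch setting.
 */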
static int rfcomm_sock_setsockopt_old(struct socket *sock, int optname, char __user *optval, unsigned int optlen)
{
struct sock *sk = sock->sk;
int err = 0;
u32 opt;
BT_DBG("sk %p", sk);
lock_sock(sk);
switch (optname) {
case RFCOMM_LM:
if (get_user(opt, (u32 __user *) optval)) {
err = -EFAULT;
break;
}
if (opt & RFCOMM_LM_AUTH)
rfcomm_pi(sk)->sec_level = BT_SECURITY_LOW;
if (opt & RFCOMM_LM_ENCRYPT)
rfcomm_pi(sk)->sec_level = BT_SECURITY_MEDIUM;
if (opt & RFCOMM_LM_SECURE)
rfcomm_pi(sk)->sec_level = BT_SECURITY_HIGH;
rfcomm_pi(sk)->role_switch = (opt & RFCOMM_LM_MASTER);
break;
default:
err = -ENOPROTOOPT;
break;
}
release_sock(sk);
return err;
}
static int rfcomm_sock_setsockopt(struct socket *sock, int level, int optname, char __user *optval, unsigned int optlen)
{
struct sock *sk = sock->sk;
struct bt_security sec;
int err = 0;
size_t len;
u32 opt;
BT_DBG("sk %p", sk);
if (level == SOL_RFCOMM)
return rfcomm_sock_setsockopt_old(sock, optname, optval, optlen);
if (level != SOL_BLUETOOTH)
return -ENOPROTOOPT;
lock_sock(sk);
switch (optname) {
case BT_SECURITY:
if (sk->sk_type != SOCK_STREAM) {
err = -EINVAL;
break;
}
sec.level = BT_SECURITY_LOW;
len = min_t(unsigned int, sizeof(sec), optlen);
if (copy_from_user((char *) &sec, optval, len)) {
err = -EFAULT;
break;
}
if (sec.level > BT_SECURITY_HIGH) {
err = -EINVAL;
break;
}
rfcomm_pi(sk)->sec_level = sec.level;
break;
case BT_DEFER_SETUP:
if (sk->sk_state != BT_BOUND && sk->sk_state != BT_LISTEN) {
err = -EINVAL;
break;
}
if (get_user(opt, (u32 __user *) optval)) {
err = -EFAULT;
break;
}
if (opt)
set_bit(BT_SK_DEFER_SETUP, &bt_sk(sk)->flags);
else
clear_bit(BT_SK_DEFER_SETUP, &bt_sk(sk)->flags);
break;
default:
err = -ENOPROTOOPT;
break;
}
release_sock(sk);
return err;
}
static int rfcomm_sock_getsockopt_old(struct socket *sock, int optname, char __user *optval, int __user *optlen)
{
struct sock *sk = sock->sk;
struct rfcomm_conninfo cinfo;
struct sock *l2cap_sk;
struct l2cap_conn *conn;
int len, err = 0;
u32 opt;
BT_DBG("sk %p", sk);
if (get_user(len, optlen))
return -EFAULT;
lock_sock(sk);
switch (optname) {
case RFCOMM_LM:
switch (rfcomm_pi(sk)->sec_level) {
case BT_SECURITY_LOW:
opt = RFCOMM_LM_AUTH;
break;
case BT_SECURITY_MEDIUM:
opt = RFCOMM_LM_AUTH | RFCOMM_LM_ENCRYPT;
break;
case BT_SECURITY_HIGH:
opt = RFCOMM_LM_AUTH | RFCOMM_LM_ENCRYPT |
RFCOMM_LM_SECURE;
break;
default:
opt = 0;
break;
}
if (rfcomm_pi(sk)->role_switch)
opt |= RFCOMM_LM_MASTER;
if (put_user(opt, (u32 __user *) optval))
err = -EFAULT;
break;
case RFCOMM_CONNINFO:
if (sk->sk_state != BT_CONNECTED &&
!rfcomm_pi(sk)->dlc->defer_setup) {
err = -ENOTCONN;
break;
}
l2cap_sk = rfcomm_pi(sk)->dlc->session->sock->sk;
conn = l2cap_pi(l2cap_sk)->chan->conn;
memset(&cinfo, 0, sizeof(cinfo));
cinfo.hci_handle = conn->hcon->handle;
memcpy(cinfo.dev_class, conn->hcon->dev_class, 3);
len = min_t(unsigned int, len, sizeof(cinfo));
if (copy_to_user(optval, (char *) &cinfo, len))
err = -EFAULT;
break;
default:
err = -ENOPROTOOPT;
break;
}
release_sock(sk);
return err;
}
static int rfcomm_sock_getsockopt(struct socket *sock, int level, int optname, char __user *optval, int __user *optlen)
{
struct sock *sk = sock->sk;
struct bt_security sec;
int len, err = 0;
BT_DBG("sk %p", sk);
if (level == SOL_RFCOMM)
return rfcomm_sock_getsockopt_old(sock, optname, optval, optlen);
if (level != SOL_BLUETOOTH)
return -ENOPROTOOPT;
if (get_user(len, optlen))
return -EFAULT;
lock_sock(sk);
switch (optname) {
case BT_SECURITY:
if (sk->sk_type != SOCK_STREAM) {
err = -EINVAL;
break;
}
sec.level = rfcomm_pi(sk)->sec_level;
sec.key_size = 0;
len = min_t(unsigned int, len, sizeof(sec));
if (copy_to_user(optval, (char *) &sec, len))
err = -EFAULT;
break;
case BT_DEFER_SETUP:
if (sk->sk_state != BT_BOUND && sk->sk_state != BT_LISTEN) {
err = -EINVAL;
break;
}
if (put_user(test_bit(BT_SK_DEFER_SETUP, &bt_sk(sk)->flags),
(u32 __user *) optval))
err = -EFAULT;
break;
default:
err = -ENOPROTOOPT;
break;
}
release_sock(sk);
return err;
}
static int rfcomm_sock_ioctl(struct socket *sock, unsigned int cmd, unsigned long arg)
{
struct sock *sk __maybe_unused = sock->sk;
int err;
BT_DBG("sk %p cmd %x arg %lx", sk, cmd, arg);
err = bt_sock_ioctl(sock, cmd, arg);
if (err == -ENOIOCTLCMD) {
#ifdef CONFIG_BT_RFCOMM_TTY
lock_sock(sk);
err = rfcomm_dev_ioctl(sk, cmd, (void __user *) arg);
release_sock(sk);
#else
err = -EOPNOTSUPP;
#endif
}
return err;
}
static int rfcomm_sock_shutdown(struct socket *sock, int how)
{
struct sock *sk = sock->sk;
int err = 0;
BT_DBG("sock %p, sk %p", sock, sk);
if (!sk)
return 0;
lock_sock(sk);
if (!sk->sk_shutdown) {
sk->sk_shutdown = SHUTDOWN_MASK;
__rfcomm_sock_close(sk);
if (sock_flag(sk, SOCK_LINGER) && sk->sk_lingertime)
err = bt_sock_wait_state(sk, BT_CLOSED, sk->sk_lingertime);
}
release_sock(sk);
return err;
}
static int rfcomm_sock_release(struct socket *sock)
{
struct sock *sk = sock->sk;
int err;
BT_DBG("sock %p, sk %p", sock, sk);
if (!sk)
return 0;
err = rfcomm_sock_shutdown(sock, 2);
sock_orphan(sk);
rfcomm_sock_kill(sk);
return err;
}
/* ---- RFCOMM core layer callbacks ----
*
* called under rfcomm_lock()
*/
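/* Incoming connection indication: find a listening socket for the
 * channel, allocate a child socket for the new DLC and queue it on
 * the parent's accept queue. Returns 1 if the connection is accepted.
 */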
int rfcomm_connect_ind(struct rfcomm_session *s, u8 channel, struct rfcomm_dlc **d)
{
struct sock *sk, *parent;
bdaddr_t src, dst;
int result = 0;
BT_DBG("session %p channel %d", s, channel);
rfcomm_session_getaddr(s, &src, &dst);
/* Check if we have socket listening on channel */
parent = rfcomm_get_sock_by_channel(BT_LISTEN, channel, &src);
if (!parent)
return 0;
bh_lock_sock(parent);
/* Check for backlog size */
if (sk_acceptq_is_full(parent)) {
BT_DBG("backlog full %d", parent->sk_ack_backlog);
goto done;
}
sk = rfcomm_sock_alloc(sock_net(parent), NULL, BTPROTO_RFCOMM, GFP_ATOMIC);
if (!sk)
goto done;
bt_sock_reclassify_lock(sk, BTPROTO_RFCOMM);
rfcomm_sock_init(sk, parent);
bacpy(&bt_sk(sk)->src, &src);
bacpy(&bt_sk(sk)->dst, &dst);
rfcomm_pi(sk)->channel = channel;
sk->sk_state = BT_CONFIG;
bt_accept_enqueue(parent, sk);
/* Accept connection and return socket DLC */
*d = rfcomm_pi(sk)->dlc;
result = 1;
done:
bh_unlock_sock(parent);
if (test_bit(BT_SK_DEFER_SETUP, &bt_sk(parent)->flags))
parent->sk_state_change(parent);
return result;
}
static int rfcomm_sock_debugfs_show(struct seq_file *f, void *p)
{
struct sock *sk;
read_lock(&rfcomm_sk_list.lock);
sk_for_each(sk, &rfcomm_sk_list.head) {
seq_printf(f, "%pMR %pMR %d %d\n",
&bt_sk(sk)->src, &bt_sk(sk)->dst,
sk->sk_state, rfcomm_pi(sk)->channel);
}
read_unlock(&rfcomm_sk_list.lock);
return 0;
}
static int rfcomm_sock_debugfs_open(struct inode *inode, struct file *file)
{
return single_open(file, rfcomm_sock_debugfs_show, inode->i_private);
}
static const struct file_operations rfcomm_sock_debugfs_fops = {
.open = rfcomm_sock_debugfs_open,
.read = seq_read,
.llseek = seq_lseek,
.release = single_release,
};
static struct dentry *rfcomm_sock_debugfs;
static const struct proto_ops rfcomm_sock_ops = {
.family = PF_BLUETOOTH,
.owner = THIS_MODULE,
.release = rfcomm_sock_release,
.bind = rfcomm_sock_bind,
.connect = rfcomm_sock_connect,
.listen = rfcomm_sock_listen,
.accept = rfcomm_sock_accept,
.getname = rfcomm_sock_getname,
.sendmsg = rfcomm_sock_sendmsg,
.recvmsg = rfcomm_sock_recvmsg,
.shutdown = rfcomm_sock_shutdown,
.setsockopt = rfcomm_sock_setsockopt,
.getsockopt = rfcomm_sock_getsockopt,
.ioctl = rfcomm_sock_ioctl,
.poll = bt_sock_poll,
.socketpair = sock_no_socketpair,
.mmap = sock_no_mmap
};
static const struct net_proto_family rfcomm_sock_family_ops = {
.family = PF_BLUETOOTH,
.owner = THIS_MODULE,
.create = rfcomm_sock_create
};
int __init rfcomm_init_sockets(void)
{
int err;
err = proto_register(&rfcomm_proto, 0);
if (err < 0)
return err;
err = bt_sock_register(BTPROTO_RFCOMM, &rfcomm_sock_family_ops);
if (err < 0) {
BT_ERR("RFCOMM socket layer registration failed");
goto error;
}
err = bt_procfs_init(&init_net, "rfcomm", &rfcomm_sk_list, NULL);
if (err < 0) {
BT_ERR("Failed to create RFCOMM proc file");
bt_sock_unregister(BTPROTO_RFCOMM);
goto error;
}
if (bt_debugfs) {
rfcomm_sock_debugfs = debugfs_create_file("rfcomm", 0444,
bt_debugfs, NULL, &rfcomm_sock_debugfs_fops);
if (!rfcomm_sock_debugfs)
BT_ERR("Failed to create RFCOMM debug file");
}
BT_INFO("RFCOMM socket layer initialized");
return 0;
error:
proto_unregister(&rfcomm_proto);
return err;
}
void __exit rfcomm_cleanup_sockets(void)
{
bt_procfs_cleanup(&init_net, "rfcomm");
debugfs_remove(rfcomm_sock_debugfs);
bt_sock_unregister(BTPROTO_RFCOMM);
proto_unregister(&rfcomm_proto);
}