Merge git://git.kernel.org/pub/scm/linux/kernel/git/linville/wireless-next
@@ -210,7 +210,7 @@ struct sock *bt_accept_dequeue(struct sock *parent, struct socket *newsock)
}

if (sk->sk_state == BT_CONNECTED || !newsock ||
bt_sk(parent)->defer_setup) {
test_bit(BT_SK_DEFER_SETUP, &bt_sk(parent)->flags)) {
bt_accept_unlink(sk);
if (newsock)
sock_graft(sk, newsock);
@@ -410,8 +410,8 @@ static inline unsigned int bt_accept_poll(struct sock *parent)
list_for_each_safe(p, n, &bt_sk(parent)->accept_q) {
sk = (struct sock *) list_entry(p, struct bt_sock, accept_q);
if (sk->sk_state == BT_CONNECTED ||
(bt_sk(parent)->defer_setup &&
sk->sk_state == BT_CONNECT2))
(test_bit(BT_SK_DEFER_SETUP, &bt_sk(parent)->flags) &&
sk->sk_state == BT_CONNECT2))
return POLLIN | POLLRDNORM;
}

@@ -450,7 +450,7 @@ unsigned int bt_sock_poll(struct file *file, struct socket *sock, poll_table *wa
sk->sk_state == BT_CONFIG)
return mask;

if (!bt_sk(sk)->suspended && sock_writeable(sk))
if (!test_bit(BT_SK_SUSPEND, &bt_sk(sk)->flags) && sock_writeable(sk))
mask |= POLLOUT | POLLWRNORM | POLLWRBAND;
else
set_bit(SOCK_ASYNC_NOSPACE, &sk->sk_socket->flags);
@@ -340,7 +340,7 @@ static inline int bnep_rx_frame(struct bnep_session *s, struct sk_buff *skb)
}

/* Strip 802.1p header */
if (ntohs(s->eh.h_proto) == 0x8100) {
if (ntohs(s->eh.h_proto) == ETH_P_8021Q) {
if (!skb_pull(skb, 4))
goto badframe;
s->eh.h_proto = get_unaligned((__be16 *) (skb->data - 2));
@@ -223,36 +223,6 @@ void hci_le_start_enc(struct hci_conn *conn, __le16 ediv, __u8 rand[8],
}
EXPORT_SYMBOL(hci_le_start_enc);

void hci_le_ltk_reply(struct hci_conn *conn, u8 ltk[16])
{
struct hci_dev *hdev = conn->hdev;
struct hci_cp_le_ltk_reply cp;

BT_DBG("%p", conn);

memset(&cp, 0, sizeof(cp));

cp.handle = cpu_to_le16(conn->handle);
memcpy(cp.ltk, ltk, sizeof(ltk));

hci_send_cmd(hdev, HCI_OP_LE_LTK_REPLY, sizeof(cp), &cp);
}
EXPORT_SYMBOL(hci_le_ltk_reply);

void hci_le_ltk_neg_reply(struct hci_conn *conn)
{
struct hci_dev *hdev = conn->hdev;
struct hci_cp_le_ltk_neg_reply cp;

BT_DBG("%p", conn);

memset(&cp, 0, sizeof(cp));

cp.handle = cpu_to_le16(conn->handle);

hci_send_cmd(hdev, HCI_OP_LE_LTK_NEG_REPLY, sizeof(cp), &cp);
}

/* Device _must_ be locked */
void hci_sco_setup(struct hci_conn *conn, __u8 status)
{
@@ -513,7 +483,8 @@ EXPORT_SYMBOL(hci_get_route);

/* Create SCO, ACL or LE connection.
* Device _must_ be locked */
struct hci_conn *hci_connect(struct hci_dev *hdev, int type, bdaddr_t *dst, __u8 sec_level, __u8 auth_type)
struct hci_conn *hci_connect(struct hci_dev *hdev, int type, bdaddr_t *dst,
__u8 dst_type, __u8 sec_level, __u8 auth_type)
{
struct hci_conn *acl;
struct hci_conn *sco;
@@ -522,23 +493,18 @@ struct hci_conn *hci_connect(struct hci_dev *hdev, int type, bdaddr_t *dst, __u8
BT_DBG("%s dst %s", hdev->name, batostr(dst));

if (type == LE_LINK) {
struct adv_entry *entry;

le = hci_conn_hash_lookup_ba(hdev, LE_LINK, dst);
if (le)
return ERR_PTR(-EBUSY);
if (!le) {
le = hci_conn_add(hdev, LE_LINK, dst);
if (!le)
return ERR_PTR(-ENOMEM);

entry = hci_find_adv_entry(hdev, dst);
if (!entry)
return ERR_PTR(-EHOSTUNREACH);
le->dst_type = bdaddr_to_le(dst_type);
hci_le_connect(le);
}

le = hci_conn_add(hdev, LE_LINK, dst);
if (!le)
return ERR_PTR(-ENOMEM);

le->dst_type = entry->bdaddr_type;

hci_le_connect(le);
le->pending_sec_level = sec_level;
le->auth_type = auth_type;

hci_conn_hold(le);

@@ -83,6 +83,7 @@ void hci_req_complete(struct hci_dev *hdev, __u16 cmd, int result)
*/
if (test_bit(HCI_INIT, &hdev->flags) && hdev->init_last_cmd != cmd) {
struct hci_command_hdr *sent = (void *) hdev->sent_cmd->data;
u16 opcode = __le16_to_cpu(sent->opcode);
struct sk_buff *skb;

/* Some CSR based controllers generate a spontaneous
@@ -92,7 +93,7 @@ void hci_req_complete(struct hci_dev *hdev, __u16 cmd, int result)
* command.
*/

if (cmd != HCI_OP_RESET || sent->opcode == HCI_OP_RESET)
if (cmd != HCI_OP_RESET || opcode == HCI_OP_RESET)
return;

skb = skb_clone(hdev->sent_cmd, GFP_ATOMIC);
@@ -251,6 +252,9 @@ static void amp_init(struct hci_dev *hdev)

/* Read Local Version */
hci_send_cmd(hdev, HCI_OP_READ_LOCAL_VERSION, 0, NULL);

/* Read Local AMP Info */
hci_send_cmd(hdev, HCI_OP_READ_LOCAL_AMP_INFO, 0, NULL);
}

static void hci_init_req(struct hci_dev *hdev, unsigned long opt)
@@ -384,7 +388,6 @@ void hci_discovery_set_state(struct hci_dev *hdev, int state)
case DISCOVERY_STOPPED:
if (hdev->discovery.state != DISCOVERY_STARTING)
mgmt_discovering(hdev, 0);
hdev->discovery.type = 0;
break;
case DISCOVERY_STARTING:
break;
@@ -1089,32 +1092,6 @@ static const struct rfkill_ops hci_rfkill_ops = {
.set_block = hci_rfkill_set_block,
};

/* Alloc HCI device */
struct hci_dev *hci_alloc_dev(void)
{
struct hci_dev *hdev;

hdev = kzalloc(sizeof(struct hci_dev), GFP_KERNEL);
if (!hdev)
return NULL;

hci_init_sysfs(hdev);
skb_queue_head_init(&hdev->driver_init);

return hdev;
}
EXPORT_SYMBOL(hci_alloc_dev);

/* Free HCI device */
void hci_free_dev(struct hci_dev *hdev)
{
skb_queue_purge(&hdev->driver_init);

/* will free via device release */
put_device(&hdev->dev);
}
EXPORT_SYMBOL(hci_free_dev);

static void hci_power_on(struct work_struct *work)
{
struct hci_dev *hdev = container_of(work, struct hci_dev, power_on);
@@ -1336,7 +1313,7 @@ int hci_add_link_key(struct hci_dev *hdev, struct hci_conn *conn, int new_key,
}

int hci_add_ltk(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 addr_type, u8 type,
int new_key, u8 authenticated, u8 tk[16], u8 enc_size, u16
int new_key, u8 authenticated, u8 tk[16], u8 enc_size, __le16
ediv, u8 rand[8])
{
struct smp_ltk *key, *old_key;
@@ -1544,75 +1521,6 @@ int hci_blacklist_del(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 type)
return mgmt_device_unblocked(hdev, bdaddr, type);
}

static void hci_clear_adv_cache(struct work_struct *work)
{
struct hci_dev *hdev = container_of(work, struct hci_dev,
adv_work.work);

hci_dev_lock(hdev);

hci_adv_entries_clear(hdev);

hci_dev_unlock(hdev);
}

int hci_adv_entries_clear(struct hci_dev *hdev)
{
struct adv_entry *entry, *tmp;

list_for_each_entry_safe(entry, tmp, &hdev->adv_entries, list) {
list_del(&entry->list);
kfree(entry);
}

BT_DBG("%s adv cache cleared", hdev->name);

return 0;
}

struct adv_entry *hci_find_adv_entry(struct hci_dev *hdev, bdaddr_t *bdaddr)
{
struct adv_entry *entry;

list_for_each_entry(entry, &hdev->adv_entries, list)
if (bacmp(bdaddr, &entry->bdaddr) == 0)
return entry;

return NULL;
}

static inline int is_connectable_adv(u8 evt_type)
{
if (evt_type == ADV_IND || evt_type == ADV_DIRECT_IND)
return 1;

return 0;
}

int hci_add_adv_entry(struct hci_dev *hdev,
struct hci_ev_le_advertising_info *ev)
{
struct adv_entry *entry;

if (!is_connectable_adv(ev->evt_type))
return -EINVAL;

/* Only new entries should be added to adv_entries. So, if
* bdaddr was found, don't add it. */
if (hci_find_adv_entry(hdev, &ev->bdaddr))
return 0;

entry = kzalloc(sizeof(*entry), GFP_KERNEL);
if (!entry)
return -ENOMEM;

bacpy(&entry->bdaddr, &ev->bdaddr);
entry->bdaddr_type = ev->bdaddr_type;

list_add(&entry->list, &hdev->adv_entries);

BT_DBG("%s adv entry added: address %s type %u", hdev->name,
batostr(&entry->bdaddr), entry->bdaddr_type);

return 0;
}

static void le_scan_param_req(struct hci_dev *hdev, unsigned long opt)
{
struct le_scan_params *param = (struct le_scan_params *) opt;
@@ -1670,6 +1578,24 @@ static int hci_do_le_scan(struct hci_dev *hdev, u8 type, u16 interval,
return 0;
}

int hci_cancel_le_scan(struct hci_dev *hdev)
{
BT_DBG("%s", hdev->name);

if (!test_bit(HCI_LE_SCAN, &hdev->dev_flags))
return -EALREADY;

if (cancel_delayed_work(&hdev->le_scan_disable)) {
struct hci_cp_le_set_scan_enable cp;

/* Send HCI command to disable LE Scan */
memset(&cp, 0, sizeof(cp));
hci_send_cmd(hdev, HCI_OP_LE_SET_SCAN_ENABLE, sizeof(cp), &cp);
}

return 0;
}

static void le_scan_disable_work(struct work_struct *work)
{
struct hci_dev *hdev = container_of(work, struct hci_dev,
@@ -1714,95 +1640,103 @@ int hci_le_scan(struct hci_dev *hdev, u8 type, u16 interval, u16 window,
return 0;
}

/* Register HCI device */
int hci_register_dev(struct hci_dev *hdev)
/* Alloc HCI device */
struct hci_dev *hci_alloc_dev(void)
{
struct list_head *head = &hci_dev_list, *p;
int i, id, error;
struct hci_dev *hdev;

BT_DBG("%p name %s bus %d", hdev, hdev->name, hdev->bus);
hdev = kzalloc(sizeof(struct hci_dev), GFP_KERNEL);
if (!hdev)
return NULL;

if (!hdev->open || !hdev->close)
return -EINVAL;

/* Do not allow HCI_AMP devices to register at index 0,
* so the index can be used as the AMP controller ID.
*/
id = (hdev->dev_type == HCI_BREDR) ? 0 : 1;

write_lock(&hci_dev_list_lock);

/* Find first available device id */
list_for_each(p, &hci_dev_list) {
if (list_entry(p, struct hci_dev, list)->id != id)
break;
head = p; id++;
}

sprintf(hdev->name, "hci%d", id);
hdev->id = id;
list_add_tail(&hdev->list, head);

mutex_init(&hdev->lock);

hdev->flags = 0;
hdev->dev_flags = 0;
hdev->pkt_type = (HCI_DM1 | HCI_DH1 | HCI_HV1);
hdev->esco_type = (ESCO_HV1);
hdev->link_mode = (HCI_LM_ACCEPT);
hdev->io_capability = 0x03; /* No Input No Output */

hdev->idle_timeout = 0;
hdev->sniff_max_interval = 800;
hdev->sniff_min_interval = 80;

mutex_init(&hdev->lock);
mutex_init(&hdev->req_lock);

INIT_LIST_HEAD(&hdev->mgmt_pending);
INIT_LIST_HEAD(&hdev->blacklist);
INIT_LIST_HEAD(&hdev->uuids);
INIT_LIST_HEAD(&hdev->link_keys);
INIT_LIST_HEAD(&hdev->long_term_keys);
INIT_LIST_HEAD(&hdev->remote_oob_data);

INIT_WORK(&hdev->rx_work, hci_rx_work);
INIT_WORK(&hdev->cmd_work, hci_cmd_work);
INIT_WORK(&hdev->tx_work, hci_tx_work);
INIT_WORK(&hdev->power_on, hci_power_on);
INIT_WORK(&hdev->le_scan, le_scan_work);

INIT_DELAYED_WORK(&hdev->power_off, hci_power_off);
INIT_DELAYED_WORK(&hdev->discov_off, hci_discov_off);
INIT_DELAYED_WORK(&hdev->le_scan_disable, le_scan_disable_work);

skb_queue_head_init(&hdev->driver_init);
skb_queue_head_init(&hdev->rx_q);
skb_queue_head_init(&hdev->cmd_q);
skb_queue_head_init(&hdev->raw_q);

init_waitqueue_head(&hdev->req_wait_q);

setup_timer(&hdev->cmd_timer, hci_cmd_timer, (unsigned long) hdev);

for (i = 0; i < NUM_REASSEMBLY; i++)
hdev->reassembly[i] = NULL;

init_waitqueue_head(&hdev->req_wait_q);
mutex_init(&hdev->req_lock);

hci_init_sysfs(hdev);
discovery_init(hdev);

hci_conn_hash_init(hdev);

INIT_LIST_HEAD(&hdev->mgmt_pending);
return hdev;
}
EXPORT_SYMBOL(hci_alloc_dev);

INIT_LIST_HEAD(&hdev->blacklist);
/* Free HCI device */
void hci_free_dev(struct hci_dev *hdev)
{
skb_queue_purge(&hdev->driver_init);

INIT_LIST_HEAD(&hdev->uuids);
/* will free via device release */
put_device(&hdev->dev);
}
EXPORT_SYMBOL(hci_free_dev);

INIT_LIST_HEAD(&hdev->link_keys);
INIT_LIST_HEAD(&hdev->long_term_keys);
/* Register HCI device */
int hci_register_dev(struct hci_dev *hdev)
{
struct list_head *head, *p;
int id, error;

INIT_LIST_HEAD(&hdev->remote_oob_data);
if (!hdev->open || !hdev->close)
return -EINVAL;

INIT_LIST_HEAD(&hdev->adv_entries);
write_lock(&hci_dev_list_lock);

INIT_DELAYED_WORK(&hdev->adv_work, hci_clear_adv_cache);
INIT_WORK(&hdev->power_on, hci_power_on);
INIT_DELAYED_WORK(&hdev->power_off, hci_power_off);
/* Do not allow HCI_AMP devices to register at index 0,
* so the index can be used as the AMP controller ID.
*/
id = (hdev->dev_type == HCI_BREDR) ? 0 : 1;
head = &hci_dev_list;

INIT_DELAYED_WORK(&hdev->discov_off, hci_discov_off);
/* Find first available device id */
list_for_each(p, &hci_dev_list) {
int nid = list_entry(p, struct hci_dev, list)->id;
if (nid > id)
break;
if (nid == id)
id++;
head = p;
}

memset(&hdev->stat, 0, sizeof(struct hci_dev_stats));
sprintf(hdev->name, "hci%d", id);
hdev->id = id;

atomic_set(&hdev->promisc, 0);
BT_DBG("%p name %s bus %d", hdev, hdev->name, hdev->bus);

INIT_WORK(&hdev->le_scan, le_scan_work);

INIT_DELAYED_WORK(&hdev->le_scan_disable, le_scan_disable_work);
list_add(&hdev->list, head);

write_unlock(&hci_dev_list_lock);

@@ -1884,8 +1818,6 @@ void hci_unregister_dev(struct hci_dev *hdev)

hci_del_sysfs(hdev);

cancel_delayed_work_sync(&hdev->adv_work);

destroy_workqueue(hdev->workqueue);

hci_dev_lock(hdev);
@@ -1894,7 +1826,6 @@ void hci_unregister_dev(struct hci_dev *hdev)
hci_link_keys_clear(hdev);
hci_smp_ltks_clear(hdev);
hci_remote_oob_data_clear(hdev);
hci_adv_entries_clear(hdev);
hci_dev_unlock(hdev);

hci_dev_put(hdev);
@@ -2231,6 +2162,12 @@ static void hci_queue_acl(struct hci_conn *conn, struct sk_buff_head *queue,
struct hci_dev *hdev = conn->hdev;
struct sk_buff *list;

skb->len = skb_headlen(skb);
skb->data_len = 0;

bt_cb(skb)->pkt_type = HCI_ACLDATA_PKT;
hci_add_acl_hdr(skb, conn->handle, flags);

list = skb_shinfo(skb)->frag_list;
if (!list) {
/* Non fragmented */
@@ -2274,8 +2211,6 @@ void hci_send_acl(struct hci_chan *chan, struct sk_buff *skb, __u16 flags)
BT_DBG("%s chan %p flags 0x%x", hdev->name, chan, flags);

skb->dev = (void *) hdev;
bt_cb(skb)->pkt_type = HCI_ACLDATA_PKT;
hci_add_acl_hdr(skb, conn->handle, flags);

hci_queue_acl(conn, &chan->data_q, skb, flags);

@@ -2313,7 +2248,7 @@ static inline struct hci_conn *hci_low_sent(struct hci_dev *hdev, __u8 type, int
{
struct hci_conn_hash *h = &hdev->conn_hash;
struct hci_conn *conn = NULL, *c;
int num = 0, min = ~0;
unsigned int num = 0, min = ~0;

/* We don't have to lock device here. Connections are always
* added and removed with TX task disabled. */
@@ -2394,7 +2329,7 @@ static inline struct hci_chan *hci_chan_sent(struct hci_dev *hdev, __u8 type,
{
struct hci_conn_hash *h = &hdev->conn_hash;
struct hci_chan *chan = NULL;
int num = 0, min = ~0, cur_prio = 0;
unsigned int num = 0, min = ~0, cur_prio = 0;
struct hci_conn *conn;
int cnt, q, conn_num = 0;

@@ -2945,7 +2880,19 @@ int hci_cancel_inquiry(struct hci_dev *hdev)
BT_DBG("%s", hdev->name);

if (!test_bit(HCI_INQUIRY, &hdev->flags))
return -EPERM;
return -EALREADY;

return hci_send_cmd(hdev, HCI_OP_INQUIRY_CANCEL, 0, NULL);
}

u8 bdaddr_to_le(u8 bdaddr_type)
{
switch (bdaddr_type) {
case BDADDR_LE_PUBLIC:
return ADDR_LE_DEV_PUBLIC;

default:
/* Fallback to LE Random address type */
return ADDR_LE_DEV_RANDOM;
}
}
@@ -69,6 +69,18 @@ static void hci_cc_inquiry_cancel(struct hci_dev *hdev, struct sk_buff *skb)
hci_conn_check_pending(hdev);
}

static void hci_cc_periodic_inq(struct hci_dev *hdev, struct sk_buff *skb)
{
__u8 status = *((__u8 *) skb->data);

BT_DBG("%s status 0x%x", hdev->name, status);

if (status)
return;

set_bit(HCI_PERIODIC_INQ, &hdev->dev_flags);
}

static void hci_cc_exit_periodic_inq(struct hci_dev *hdev, struct sk_buff *skb)
{
__u8 status = *((__u8 *) skb->data);
@@ -78,6 +90,8 @@ static void hci_cc_exit_periodic_inq(struct hci_dev *hdev, struct sk_buff *skb)
if (status)
return;

clear_bit(HCI_PERIODIC_INQ, &hdev->dev_flags);

hci_conn_check_pending(hdev);
}

@@ -192,7 +206,8 @@ static void hci_cc_reset(struct hci_dev *hdev, struct sk_buff *skb)
hci_req_complete(hdev, HCI_OP_RESET, status);

/* Reset all non-persistent flags */
hdev->dev_flags &= ~(BIT(HCI_LE_SCAN) | BIT(HCI_PENDING_CLASS));
hdev->dev_flags &= ~(BIT(HCI_LE_SCAN) | BIT(HCI_PENDING_CLASS) |
BIT(HCI_PERIODIC_INQ));

hdev->discovery.state = DISCOVERY_STOPPED;
}
@@ -505,7 +520,7 @@ static void hci_setup_event_mask(struct hci_dev *hdev)
events[5] |= 0x10; /* Synchronous Connection Changed */

if (hdev->features[3] & LMP_RSSI_INQ)
events[4] |= 0x04; /* Inquiry Result with RSSI */
events[4] |= 0x02; /* Inquiry Result with RSSI */

if (hdev->features[5] & LMP_SNIFF_SUBR)
events[5] |= 0x20; /* Sniff Subrating */
@@ -615,6 +630,7 @@ done:

static void hci_setup_link_policy(struct hci_dev *hdev)
{
struct hci_cp_write_def_link_policy cp;
u16 link_policy = 0;

if (hdev->features[0] & LMP_RSWITCH)
@@ -626,9 +642,8 @@ static void hci_setup_link_policy(struct hci_dev *hdev)
if (hdev->features[1] & LMP_PARK)
link_policy |= HCI_LP_PARK;

link_policy = cpu_to_le16(link_policy);
hci_send_cmd(hdev, HCI_OP_WRITE_DEF_LINK_POLICY, sizeof(link_policy),
&link_policy);
cp.policy = cpu_to_le16(link_policy);
hci_send_cmd(hdev, HCI_OP_WRITE_DEF_LINK_POLICY, sizeof(cp), &cp);
}

static void hci_cc_read_local_commands(struct hci_dev *hdev, struct sk_buff *skb)
@@ -710,7 +725,7 @@ static void hci_set_le_support(struct hci_dev *hdev)

memset(&cp, 0, sizeof(cp));

if (enable_le && test_bit(HCI_LE_ENABLED, &hdev->dev_flags)) {
if (test_bit(HCI_LE_ENABLED, &hdev->dev_flags)) {
cp.le = 1;
cp.simul = !!(hdev->features[6] & LMP_SIMUL_LE_BR);
}
@@ -887,11 +902,14 @@ static void hci_cc_write_inquiry_mode(struct hci_dev *hdev,
static void hci_cc_read_inq_rsp_tx_power(struct hci_dev *hdev,
struct sk_buff *skb)
{
__u8 status = *((__u8 *) skb->data);
struct hci_rp_read_inq_rsp_tx_power *rp = (void *) skb->data;

BT_DBG("%s status 0x%x", hdev->name, status);
BT_DBG("%s status 0x%x", hdev->name, rp->status);

hci_req_complete(hdev, HCI_OP_READ_INQ_RSP_TX_POWER, status);
if (!rp->status)
hdev->inq_tx_power = rp->tx_power;

hci_req_complete(hdev, HCI_OP_READ_INQ_RSP_TX_POWER, rp->status);
}

static void hci_cc_set_event_flt(struct hci_dev *hdev, struct sk_buff *skb)
@@ -1082,23 +1100,23 @@ static void hci_cc_le_set_scan_enable(struct hci_dev *hdev,

set_bit(HCI_LE_SCAN, &hdev->dev_flags);

cancel_delayed_work_sync(&hdev->adv_work);

hci_dev_lock(hdev);
hci_adv_entries_clear(hdev);
hci_discovery_set_state(hdev, DISCOVERY_FINDING);
hci_dev_unlock(hdev);
break;

case LE_SCANNING_DISABLED:
if (status)
if (status) {
hci_dev_lock(hdev);
mgmt_stop_discovery_failed(hdev, status);
hci_dev_unlock(hdev);
return;
}

clear_bit(HCI_LE_SCAN, &hdev->dev_flags);

schedule_delayed_work(&hdev->adv_work, ADV_CLEAR_TIMEOUT);

if (hdev->discovery.type == DISCOV_TYPE_INTERLEAVED) {
if (hdev->discovery.type == DISCOV_TYPE_INTERLEAVED &&
hdev->discovery.state == DISCOVERY_FINDING) {
mgmt_interleaved_discovery(hdev);
} else {
hci_dev_lock(hdev);
@@ -1625,6 +1643,8 @@ static void hci_cs_le_create_conn(struct hci_dev *hdev, __u8 status)
if (status) {
if (conn && conn->state == BT_CONNECT) {
conn->state = BT_CLOSED;
mgmt_connect_failed(hdev, &cp->peer_addr, conn->type,
conn->dst_type, status);
hci_proto_connect_cfm(conn, status);
hci_conn_del(conn);
}
@@ -1699,6 +1719,9 @@ static inline void hci_inquiry_result_evt(struct hci_dev *hdev, struct sk_buff *
if (!num_rsp)
return;

if (test_bit(HCI_PERIODIC_INQ, &hdev->dev_flags))
return;

hci_dev_lock(hdev);

for (; num_rsp; num_rsp--, info++) {
@@ -2040,7 +2063,7 @@ static inline void hci_encrypt_change_evt(struct hci_dev *hdev, struct sk_buff *
clear_bit(HCI_CONN_ENCRYPT_PEND, &conn->flags);

if (ev->status && conn->state == BT_CONNECTED) {
hci_acl_disconn(conn, 0x13);
hci_acl_disconn(conn, HCI_ERROR_AUTH_FAILURE);
hci_conn_put(conn);
goto unlock;
}
@@ -2154,6 +2177,10 @@ static inline void hci_cmd_complete_evt(struct hci_dev *hdev, struct sk_buff *sk
hci_cc_inquiry_cancel(hdev, skb);
break;

case HCI_OP_PERIODIC_INQ:
hci_cc_periodic_inq(hdev, skb);
break;

case HCI_OP_EXIT_PERIODIC_INQ:
hci_cc_exit_periodic_inq(hdev, skb);
break;
@@ -2806,6 +2833,9 @@ static inline void hci_inquiry_result_with_rssi_evt(struct hci_dev *hdev, struct
if (!num_rsp)
return;

if (test_bit(HCI_PERIODIC_INQ, &hdev->dev_flags))
return;

hci_dev_lock(hdev);

if ((skb->len - 1) / num_rsp != sizeof(struct inquiry_info_with_rssi)) {
@@ -2971,12 +3001,16 @@ static inline void hci_extended_inquiry_result_evt(struct hci_dev *hdev, struct
struct inquiry_data data;
struct extended_inquiry_info *info = (void *) (skb->data + 1);
int num_rsp = *((__u8 *) skb->data);
size_t eir_len;

BT_DBG("%s num_rsp %d", hdev->name, num_rsp);

if (!num_rsp)
return;

if (test_bit(HCI_PERIODIC_INQ, &hdev->dev_flags))
return;

hci_dev_lock(hdev);

for (; num_rsp; num_rsp--, info++) {
@@ -3000,9 +3034,10 @@ static inline void hci_extended_inquiry_result_evt(struct hci_dev *hdev, struct

name_known = hci_inquiry_cache_update(hdev, &data, name_known,
&ssp);
eir_len = eir_get_length(info->data, sizeof(info->data));
mgmt_device_found(hdev, &info->bdaddr, ACL_LINK, 0x00,
info->dev_class, info->rssi, !name_known,
ssp, info->data, sizeof(info->data));
ssp, info->data, eir_len);
}

hci_dev_unlock(hdev);
@@ -3322,8 +3357,6 @@ static inline void hci_le_adv_report_evt(struct hci_dev *hdev,
while (num_reports--) {
struct hci_ev_le_advertising_info *ev = ptr;

hci_add_adv_entry(hdev, ev);

rssi = ev->data[ev->length];
mgmt_device_found(hdev, &ev->bdaddr, LE_LINK, ev->bdaddr_type,
NULL, rssi, 0, 1, ev->data, ev->length);
@@ -3343,7 +3376,7 @@ static inline void hci_le_ltk_request_evt(struct hci_dev *hdev,
struct hci_conn *conn;
struct smp_ltk *ltk;

BT_DBG("%s handle %d", hdev->name, cpu_to_le16(ev->handle));
BT_DBG("%s handle %d", hdev->name, __le16_to_cpu(ev->handle));

hci_dev_lock(hdev);

@@ -444,8 +444,8 @@ static const struct file_operations blacklist_fops = {

static void print_bt_uuid(struct seq_file *f, u8 *uuid)
{
u32 data0, data4;
u16 data1, data2, data3, data5;
__be32 data0, data4;
__be16 data1, data2, data3, data5;

memcpy(&data0, &uuid[0], 4);
memcpy(&data1, &uuid[4], 2);
@@ -533,7 +533,6 @@ int hci_add_sysfs(struct hci_dev *hdev)

BT_DBG("%p name %s bus %d", hdev, hdev->name, hdev->bus);

dev->parent = hdev->parent;
dev_set_name(dev, "%s", hdev->name);

err = device_add(dev);
[File diff suppressed because it is too large]
@@ -124,7 +124,7 @@ static int l2cap_sock_connect(struct socket *sock, struct sockaddr *addr, int al
return -EINVAL;

err = l2cap_chan_connect(chan, la.l2_psm, __le16_to_cpu(la.l2_cid),
&la.l2_bdaddr);
&la.l2_bdaddr, la.l2_bdaddr_type);
if (err)
return err;

@@ -148,12 +148,16 @@ static int l2cap_sock_listen(struct socket *sock, int backlog)

lock_sock(sk);

if ((sock->type != SOCK_SEQPACKET && sock->type != SOCK_STREAM)
|| sk->sk_state != BT_BOUND) {
if (sk->sk_state != BT_BOUND) {
err = -EBADFD;
goto done;
}

if (sk->sk_type != SOCK_SEQPACKET && sk->sk_type != SOCK_STREAM) {
err = -EINVAL;
goto done;
}

switch (chan->mode) {
case L2CAP_MODE_BASIC:
break;
@@ -320,8 +324,8 @@ static int l2cap_sock_getsockopt_old(struct socket *sock, int optname, char __us

case L2CAP_CONNINFO:
if (sk->sk_state != BT_CONNECTED &&
!(sk->sk_state == BT_CONNECT2 &&
bt_sk(sk)->defer_setup)) {
!(sk->sk_state == BT_CONNECT2 &&
test_bit(BT_SK_DEFER_SETUP, &bt_sk(sk)->flags))) {
err = -ENOTCONN;
break;
}
@@ -375,7 +379,10 @@ static int l2cap_sock_getsockopt(struct socket *sock, int level, int optname, ch
}

memset(&sec, 0, sizeof(sec));
sec.level = chan->sec_level;
if (chan->conn)
sec.level = chan->conn->hcon->sec_level;
else
sec.level = chan->sec_level;

if (sk->sk_state == BT_CONNECTED)
sec.key_size = chan->conn->hcon->enc_key_size;
@@ -392,7 +399,8 @@ static int l2cap_sock_getsockopt(struct socket *sock, int level, int optname, ch
break;
}

if (put_user(bt_sk(sk)->defer_setup, (u32 __user *) optval))
if (put_user(test_bit(BT_SK_DEFER_SETUP, &bt_sk(sk)->flags),
(u32 __user *) optval))
err = -EFAULT;

break;
@@ -594,10 +602,10 @@ static int l2cap_sock_setsockopt(struct socket *sock, int level, int optname, ch

/* or for ACL link */
} else if ((sk->sk_state == BT_CONNECT2 &&
bt_sk(sk)->defer_setup) ||
test_bit(BT_SK_DEFER_SETUP, &bt_sk(sk)->flags)) ||
sk->sk_state == BT_CONNECTED) {
if (!l2cap_chan_check_security(chan))
bt_sk(sk)->suspended = true;
set_bit(BT_SK_SUSPEND, &bt_sk(sk)->flags);
else
sk->sk_state_change(sk);
} else {
@@ -616,7 +624,10 @@ static int l2cap_sock_setsockopt(struct socket *sock, int level, int optname, ch
break;
}

bt_sk(sk)->defer_setup = opt;
if (opt)
set_bit(BT_SK_DEFER_SETUP, &bt_sk(sk)->flags);
else
clear_bit(BT_SK_DEFER_SETUP, &bt_sk(sk)->flags);
break;

case BT_FLUSHABLE:
@@ -716,16 +727,13 @@ static int l2cap_sock_sendmsg(struct kiocb *iocb, struct socket *sock, struct ms
if (msg->msg_flags & MSG_OOB)
return -EOPNOTSUPP;

lock_sock(sk);

if (sk->sk_state != BT_CONNECTED) {
release_sock(sk);
if (sk->sk_state != BT_CONNECTED)
return -ENOTCONN;
}

l2cap_chan_lock(chan);
err = l2cap_chan_send(chan, msg, len, sk->sk_priority);
l2cap_chan_unlock(chan);

release_sock(sk);
return err;
}

@@ -737,7 +745,8 @@ static int l2cap_sock_recvmsg(struct kiocb *iocb, struct socket *sock, struct ms

lock_sock(sk);

if (sk->sk_state == BT_CONNECT2 && bt_sk(sk)->defer_setup) {
if (sk->sk_state == BT_CONNECT2 && test_bit(BT_SK_DEFER_SETUP,
&bt_sk(sk)->flags)) {
sk->sk_state = BT_CONFIG;
pi->chan->state = BT_CONFIG;

@@ -931,12 +940,19 @@ static void l2cap_sock_state_change_cb(void *data, int state)
}

static struct sk_buff *l2cap_sock_alloc_skb_cb(struct l2cap_chan *chan,
unsigned long len, int nb,
int *err)
unsigned long len, int nb)
{
struct sock *sk = chan->sk;
struct sk_buff *skb;
int err;

return bt_skb_send_alloc(sk, len, nb, err);
l2cap_chan_unlock(chan);
skb = bt_skb_send_alloc(chan->sk, len, nb, &err);
l2cap_chan_lock(chan);

if (!skb)
return ERR_PTR(err);

return skb;
}

static struct l2cap_ops l2cap_chan_ops = {
@@ -952,6 +968,7 @@ static void l2cap_sock_destruct(struct sock *sk)
{
BT_DBG("sk %p", sk);

l2cap_chan_put(l2cap_pi(sk)->chan);
if (l2cap_pi(sk)->rx_busy_skb) {
kfree_skb(l2cap_pi(sk)->rx_busy_skb);
l2cap_pi(sk)->rx_busy_skb = NULL;
@@ -972,7 +989,7 @@ static void l2cap_sock_init(struct sock *sk, struct sock *parent)
struct l2cap_chan *pchan = l2cap_pi(parent)->chan;

sk->sk_type = parent->sk_type;
bt_sk(sk)->defer_setup = bt_sk(parent)->defer_setup;
bt_sk(sk)->flags = bt_sk(parent)->flags;

chan->chan_type = pchan->chan_type;
chan->imtu = pchan->imtu;
@@ -1010,13 +1027,8 @@ static void l2cap_sock_init(struct sock *sk, struct sock *parent)
} else {
chan->mode = L2CAP_MODE_BASIC;
}
chan->max_tx = L2CAP_DEFAULT_MAX_TX;
chan->fcs = L2CAP_FCS_CRC16;
chan->tx_win = L2CAP_DEFAULT_TX_WINDOW;
chan->tx_win_max = L2CAP_DEFAULT_TX_WINDOW;
chan->sec_level = BT_SECURITY_LOW;
chan->flags = 0;
set_bit(FLAG_FORCE_ACTIVE, &chan->flags);

l2cap_chan_set_defaults(chan);
}

/* Default config options */
@@ -1052,12 +1064,16 @@ static struct sock *l2cap_sock_alloc(struct net *net, struct socket *sock, int p
sk->sk_protocol = proto;
sk->sk_state = BT_OPEN;

chan = l2cap_chan_create(sk);
chan = l2cap_chan_create();
if (!chan) {
l2cap_sock_kill(sk);
return NULL;
}

l2cap_chan_hold(chan);

chan->sk = sk;

l2cap_pi(sk)->chan = chan;

return sk;
@@ -35,10 +35,9 @@
#include <net/bluetooth/smp.h>

bool enable_hs;
bool enable_le;

#define MGMT_VERSION 1
#define MGMT_REVISION 0
#define MGMT_REVISION 1

static const u16 mgmt_commands[] = {
MGMT_OP_READ_INDEX_LIST,
@@ -78,6 +77,7 @@ static const u16 mgmt_commands[] = {
MGMT_OP_CONFIRM_NAME,
MGMT_OP_BLOCK_DEVICE,
MGMT_OP_UNBLOCK_DEVICE,
MGMT_OP_SET_DEVICE_ID,
};

static const u16 mgmt_events[] = {
@@ -224,7 +224,7 @@ static int cmd_status(struct sock *sk, u16 index, u16 cmd, u8 status)

ev = (void *) skb_put(skb, sizeof(*ev));
ev->status = status;
put_unaligned_le16(cmd, &ev->opcode);
ev->opcode = cpu_to_le16(cmd);

err = sock_queue_rcv_skb(sk, skb);
if (err < 0)
@@ -254,7 +254,7 @@ static int cmd_complete(struct sock *sk, u16 index, u16 cmd, u8 status,
hdr->len = cpu_to_le16(sizeof(*ev) + rp_len);

ev = (void *) skb_put(skb, sizeof(*ev) + rp_len);
put_unaligned_le16(cmd, &ev->opcode);
ev->opcode = cpu_to_le16(cmd);
ev->status = status;

if (rp)
@@ -275,7 +275,7 @@ static int read_version(struct sock *sk, struct hci_dev *hdev, void *data,
BT_DBG("sock %p", sk);

rp.version = MGMT_VERSION;
put_unaligned_le16(MGMT_REVISION, &rp.revision);
rp.revision = __constant_cpu_to_le16(MGMT_REVISION);

return cmd_complete(sk, MGMT_INDEX_NONE, MGMT_OP_READ_VERSION, 0, &rp,
sizeof(rp));
@@ -285,9 +285,9 @@ static int read_commands(struct sock *sk, struct hci_dev *hdev, void *data,
u16 data_len)
{
struct mgmt_rp_read_commands *rp;
u16 num_commands = ARRAY_SIZE(mgmt_commands);
u16 num_events = ARRAY_SIZE(mgmt_events);
u16 *opcode;
const u16 num_commands = ARRAY_SIZE(mgmt_commands);
const u16 num_events = ARRAY_SIZE(mgmt_events);
__le16 *opcode;
size_t rp_size;
int i, err;

@@ -299,8 +299,8 @@ static int read_commands(struct sock *sk, struct hci_dev *hdev, void *data,
if (!rp)
return -ENOMEM;

put_unaligned_le16(num_commands, &rp->num_commands);
put_unaligned_le16(num_events, &rp->num_events);
rp->num_commands = __constant_cpu_to_le16(num_commands);
rp->num_events = __constant_cpu_to_le16(num_events);

for (i = 0, opcode = rp->opcodes; i < num_commands; i++, opcode++)
put_unaligned_le16(mgmt_commands[i], opcode);
@@ -341,14 +341,14 @@ static int read_index_list(struct sock *sk, struct hci_dev *hdev, void *data,
return -ENOMEM;
}

put_unaligned_le16(count, &rp->num_controllers);
rp->num_controllers = cpu_to_le16(count);

i = 0;
list_for_each_entry(d, &hci_dev_list, list) {
if (test_bit(HCI_SETUP, &d->dev_flags))
continue;

put_unaligned_le16(d->id, &rp->index[i++]);
rp->index[i++] = cpu_to_le16(d->id);
BT_DBG("Added hci%u", d->id);
}

@@ -383,10 +383,8 @@ static u32 get_supported_settings(struct hci_dev *hdev)
if (enable_hs)
settings |= MGMT_SETTING_HS;

if (enable_le) {
if (hdev->features[4] & LMP_LE)
settings |= MGMT_SETTING_LE;
}
if (hdev->features[4] & LMP_LE)
settings |= MGMT_SETTING_LE;

return settings;
}
@@ -442,9 +440,7 @@ static u16 get_uuid16(u8 *uuid128)
return 0;
}

memcpy(&val, &uuid128[12], 4);

val = le32_to_cpu(val);
val = get_unaligned_le32(&uuid128[12]);
if (val > 0xffff)
return 0;

@@ -479,6 +475,28 @@ static void create_eir(struct hci_dev *hdev, u8 *data)
ptr += (name_len + 2);
}

if (hdev->inq_tx_power) {
ptr[0] = 2;
ptr[1] = EIR_TX_POWER;
ptr[2] = (u8) hdev->inq_tx_power;

eir_len += 3;
ptr += 3;
}

if (hdev->devid_source > 0) {
ptr[0] = 9;
ptr[1] = EIR_DEVICE_ID;

put_unaligned_le16(hdev->devid_source, ptr + 2);
put_unaligned_le16(hdev->devid_vendor, ptr + 4);
put_unaligned_le16(hdev->devid_product, ptr + 6);
put_unaligned_le16(hdev->devid_version, ptr + 8);

eir_len += 10;
ptr += 10;
}

memset(uuid16_list, 0, sizeof(uuid16_list));

/* Group all UUID16 types */
@@ -642,8 +660,7 @@ static int read_controller_info(struct sock *sk, struct hci_dev *hdev,
bacpy(&rp.bdaddr, &hdev->bdaddr);

rp.version = hdev->hci_ver;

put_unaligned_le16(hdev->manufacturer, &rp.manufacturer);
rp.manufacturer = cpu_to_le16(hdev->manufacturer);

rp.supported_settings = cpu_to_le32(get_supported_settings(hdev));
rp.current_settings = cpu_to_le32(get_current_settings(hdev));
@@ -840,7 +857,7 @@ static int set_discoverable(struct sock *sk, struct hci_dev *hdev, void *data,

BT_DBG("request for %s", hdev->name);

timeout = get_unaligned_le16(&cp->timeout);
timeout = __le16_to_cpu(cp->timeout);
if (!cp->val && timeout > 0)
return cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
MGMT_STATUS_INVALID_PARAMS);
@@ -1122,8 +1139,8 @@ static int set_ssp(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)
}

if (mgmt_pending_find(MGMT_OP_SET_SSP, hdev)) {
err = cmd_status(sk, hdev->id, MGMT_OP_SET_SSP,
MGMT_STATUS_BUSY);
err = cmd_status(sk, hdev->id, MGMT_OP_SET_SSP,
MGMT_STATUS_BUSY);
goto failed;
}

@@ -1179,7 +1196,7 @@ static int set_le(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)

hci_dev_lock(hdev);

if (!enable_le || !(hdev->features[4] & LMP_LE)) {
if (!(hdev->features[4] & LMP_LE)) {
err = cmd_status(sk, hdev->id, MGMT_OP_SET_LE,
MGMT_STATUS_NOT_SUPPORTED);
goto unlock;
@@ -1227,10 +1244,8 @@ static int set_le(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)

err = hci_send_cmd(hdev, HCI_OP_WRITE_LE_HOST_SUPPORTED, sizeof(hci_cp),
&hci_cp);
if (err < 0) {
if (err < 0)
mgmt_pending_remove(cmd);
goto unlock;
}

unlock:
hci_dev_unlock(hdev);
@@ -1280,10 +1295,8 @@ static int add_uuid(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)
}

cmd = mgmt_pending_add(sk, MGMT_OP_ADD_UUID, hdev, data, len);
if (!cmd) {
if (!cmd)
err = -ENOMEM;
goto failed;
}

failed:
hci_dev_unlock(hdev);
@@ -1368,10 +1381,8 @@ update_class:
}

cmd = mgmt_pending_add(sk, MGMT_OP_REMOVE_UUID, hdev, data, len);
if (!cmd) {
if (!cmd)
err = -ENOMEM;
goto unlock;
}

unlock:
hci_dev_unlock(hdev);
@@ -1422,10 +1433,8 @@ static int set_dev_class(struct sock *sk, struct hci_dev *hdev, void *data,
}

cmd = mgmt_pending_add(sk, MGMT_OP_SET_DEV_CLASS, hdev, data, len);
if (!cmd) {
if (!cmd)
err = -ENOMEM;
goto unlock;
}

unlock:
hci_dev_unlock(hdev);
@@ -1439,7 +1448,7 @@ static int load_link_keys(struct sock *sk, struct hci_dev *hdev, void *data,
u16 key_count, expected_len;
int i;

key_count = get_unaligned_le16(&cp->key_count);
key_count = __le16_to_cpu(cp->key_count);

expected_len = sizeof(*cp) + key_count *
sizeof(struct mgmt_link_key_info);
@@ -1512,7 +1521,7 @@ static int unpair_device(struct sock *sk, struct hci_dev *hdev, void *data,
goto unlock;
}

if (cp->addr.type == MGMT_ADDR_BREDR)
if (cp->addr.type == BDADDR_BREDR)
err = hci_remove_link_key(hdev, &cp->addr.bdaddr);
else
err = hci_remove_ltk(hdev, &cp->addr.bdaddr);
@@ -1524,7 +1533,7 @@ static int unpair_device(struct sock *sk, struct hci_dev *hdev, void *data,
}

if (cp->disconnect) {
if (cp->addr.type == MGMT_ADDR_BREDR)
if (cp->addr.type == BDADDR_BREDR)
conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK,
&cp->addr.bdaddr);
else
@@ -1548,7 +1557,7 @@ static int unpair_device(struct sock *sk, struct hci_dev *hdev, void *data,
goto unlock;
}

put_unaligned_le16(conn->handle, &dc.handle);
dc.handle = cpu_to_le16(conn->handle);
dc.reason = 0x13; /* Remote User Terminated Connection */
err = hci_send_cmd(hdev, HCI_OP_DISCONNECT, sizeof(dc), &dc);
if (err < 0)
@@ -1584,7 +1593,7 @@ static int disconnect(struct sock *sk, struct hci_dev *hdev, void *data,
goto failed;
}

if (cp->addr.type == MGMT_ADDR_BREDR)
if (cp->addr.type == BDADDR_BREDR)
conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &cp->addr.bdaddr);
else
conn = hci_conn_hash_lookup_ba(hdev, LE_LINK, &cp->addr.bdaddr);
@@ -1601,7 +1610,7 @@ static int disconnect(struct sock *sk, struct hci_dev *hdev, void *data,
goto failed;
}

put_unaligned_le16(conn->handle, &dc.handle);
dc.handle = cpu_to_le16(conn->handle);
dc.reason = 0x13; /* Remote User Terminated Connection */

err = hci_send_cmd(hdev, HCI_OP_DISCONNECT, sizeof(dc), &dc);
@@ -1613,22 +1622,22 @@ failed:
return err;
}

static u8 link_to_mgmt(u8 link_type, u8 addr_type)
static u8 link_to_bdaddr(u8 link_type, u8 addr_type)
{
switch (link_type) {
case LE_LINK:
switch (addr_type) {
case ADDR_LE_DEV_PUBLIC:
return MGMT_ADDR_LE_PUBLIC;
case ADDR_LE_DEV_RANDOM:
return MGMT_ADDR_LE_RANDOM;
return BDADDR_LE_PUBLIC;

default:
return MGMT_ADDR_INVALID;
/* Fallback to LE Random address type */
return BDADDR_LE_RANDOM;
}
case ACL_LINK:
return MGMT_ADDR_BREDR;

default:
return MGMT_ADDR_INVALID;
/* Fallback to BR/EDR type */
return BDADDR_BREDR;
}
}

@@ -1669,13 +1678,13 @@ static int get_connections(struct sock *sk, struct hci_dev *hdev, void *data,
if (!test_bit(HCI_CONN_MGMT_CONNECTED, &c->flags))
continue;
bacpy(&rp->addr[i].bdaddr, &c->dst);
rp->addr[i].type = link_to_mgmt(c->type, c->dst_type);
if (rp->addr[i].type == MGMT_ADDR_INVALID)
rp->addr[i].type = link_to_bdaddr(c->type, c->dst_type);
if (c->type == SCO_LINK || c->type == ESCO_LINK)
continue;
i++;
}

put_unaligned_le16(i, &rp->conn_count);
rp->conn_count = cpu_to_le16(i);

/* Recalculate length in case of filtered SCO connections, etc */
rp_len = sizeof(*rp) + (i * sizeof(struct mgmt_addr_info));
@@ -1836,7 +1845,7 @@ static void pairing_complete(struct pending_cmd *cmd, u8 status)
struct hci_conn *conn = cmd->user_data;

bacpy(&rp.addr.bdaddr, &conn->dst);
rp.addr.type = link_to_mgmt(conn->type, conn->dst_type);
rp.addr.type = link_to_bdaddr(conn->type, conn->dst_type);

cmd_complete(cmd->sk, cmd->index, MGMT_OP_PAIR_DEVICE, status,
&rp, sizeof(rp));
@@ -1890,12 +1899,12 @@ static int pair_device(struct sock *sk, struct hci_dev *hdev, void *data,
else
auth_type = HCI_AT_DEDICATED_BONDING_MITM;

if (cp->addr.type == MGMT_ADDR_BREDR)
conn = hci_connect(hdev, ACL_LINK, &cp->addr.bdaddr, sec_level,
auth_type);
if (cp->addr.type == BDADDR_BREDR)
conn = hci_connect(hdev, ACL_LINK, &cp->addr.bdaddr,
cp->addr.type, sec_level, auth_type);
else
conn = hci_connect(hdev, LE_LINK, &cp->addr.bdaddr, sec_level,
auth_type);
conn = hci_connect(hdev, LE_LINK, &cp->addr.bdaddr,
cp->addr.type, sec_level, auth_type);

memset(&rp, 0, sizeof(rp));
bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr);
@@ -1923,7 +1932,7 @@ static int pair_device(struct sock *sk, struct hci_dev *hdev, void *data,
}

/* For LE, just connecting isn't a proof that the pairing finished */
if (cp->addr.type == MGMT_ADDR_BREDR)
if (cp->addr.type == BDADDR_BREDR)
conn->connect_cfm_cb = pairing_complete_cb;

conn->security_cfm_cb = pairing_complete_cb;
@@ -2000,7 +2009,7 @@ static int user_pairing_resp(struct sock *sk, struct hci_dev *hdev,
goto done;
}

if (type == MGMT_ADDR_BREDR)
if (type == BDADDR_BREDR)
conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, bdaddr);
else
conn = hci_conn_hash_lookup_ba(hdev, LE_LINK, bdaddr);
@@ -2011,7 +2020,7 @@ static int user_pairing_resp(struct sock *sk, struct hci_dev *hdev,
goto done;
}

if (type == MGMT_ADDR_LE_PUBLIC || type == MGMT_ADDR_LE_RANDOM) {
if (type == BDADDR_LE_PUBLIC || type == BDADDR_LE_RANDOM) {
/* Continue with pairing via SMP */
err = smp_user_confirm_reply(conn, mgmt_op, passkey);

@@ -2295,6 +2304,12 @@ static int start_discovery(struct sock *sk, struct hci_dev *hdev,
goto failed;
}

if (test_bit(HCI_PERIODIC_INQ, &hdev->dev_flags)) {
err = cmd_status(sk, hdev->id, MGMT_OP_START_DISCOVERY,
MGMT_STATUS_BUSY);
goto failed;
}

if (hdev->discovery.state != DISCOVERY_STOPPED) {
err = cmd_status(sk, hdev->id, MGMT_OP_START_DISCOVERY,
MGMT_STATUS_BUSY);
@@ -2381,27 +2396,39 @@ static int stop_discovery(struct sock *sk, struct hci_dev *hdev, void *data,
goto unlock;
}

if (hdev->discovery.state == DISCOVERY_FINDING) {
err = hci_cancel_inquiry(hdev);
if (err < 0)
mgmt_pending_remove(cmd);
switch (hdev->discovery.state) {
case DISCOVERY_FINDING:
if (test_bit(HCI_INQUIRY, &hdev->flags))
err = hci_cancel_inquiry(hdev);
else
hci_discovery_set_state(hdev, DISCOVERY_STOPPING);
goto unlock;
err = hci_cancel_le_scan(hdev);

break;

case DISCOVERY_RESOLVING:
e = hci_inquiry_cache_lookup_resolve(hdev, BDADDR_ANY,
NAME_PENDING);
if (!e) {
mgmt_pending_remove(cmd);
err = cmd_complete(sk, hdev->id,
MGMT_OP_STOP_DISCOVERY, 0,
&mgmt_cp->type,
sizeof(mgmt_cp->type));
hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
goto unlock;
}

bacpy(&cp.bdaddr, &e->data.bdaddr);
err = hci_send_cmd(hdev, HCI_OP_REMOTE_NAME_REQ_CANCEL,
sizeof(cp), &cp);

break;

default:
BT_DBG("unknown discovery state %u", hdev->discovery.state);
err = -EFAULT;
}

e = hci_inquiry_cache_lookup_resolve(hdev, BDADDR_ANY, NAME_PENDING);
if (!e) {
mgmt_pending_remove(cmd);
err = cmd_complete(sk, hdev->id, MGMT_OP_STOP_DISCOVERY, 0,
&mgmt_cp->type, sizeof(mgmt_cp->type));
hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
goto unlock;
}

bacpy(&cp.bdaddr, &e->data.bdaddr);
err = hci_send_cmd(hdev, HCI_OP_REMOTE_NAME_REQ_CANCEL, sizeof(cp),
&cp);
if (err < 0)
mgmt_pending_remove(cmd);
else
@@ -2501,6 +2528,37 @@ static int unblock_device(struct sock *sk, struct hci_dev *hdev, void *data,
return err;
}

static int set_device_id(struct sock *sk, struct hci_dev *hdev, void *data,
u16 len)
{
struct mgmt_cp_set_device_id *cp = data;
int err;
__u16 source;

BT_DBG("%s", hdev->name);

source = __le16_to_cpu(cp->source);

if (source > 0x0002)
return cmd_status(sk, hdev->id, MGMT_OP_SET_DEVICE_ID,
MGMT_STATUS_INVALID_PARAMS);

hci_dev_lock(hdev);

hdev->devid_source = source;
hdev->devid_vendor = __le16_to_cpu(cp->vendor);
hdev->devid_product = __le16_to_cpu(cp->product);
hdev->devid_version = __le16_to_cpu(cp->version);

err = cmd_complete(sk, hdev->id, MGMT_OP_SET_DEVICE_ID, 0, NULL, 0);

update_eir(hdev);

hci_dev_unlock(hdev);

return err;
}

static int set_fast_connectable(struct sock *sk, struct hci_dev *hdev,
void *data, u16 len)
{
@@ -2565,7 +2623,7 @@ static int load_long_term_keys(struct sock *sk, struct hci_dev *hdev,
u16 key_count, expected_len;
int i;

key_count = get_unaligned_le16(&cp->key_count);
key_count = __le16_to_cpu(cp->key_count);

expected_len = sizeof(*cp) + key_count *
sizeof(struct mgmt_ltk_info);
@@ -2591,7 +2649,8 @@ static int load_long_term_keys(struct sock *sk, struct hci_dev *hdev,
else
type = HCI_SMP_LTK_SLAVE;

hci_add_ltk(hdev, &key->addr.bdaddr, key->addr.type,
hci_add_ltk(hdev, &key->addr.bdaddr,
bdaddr_to_le(key->addr.type),
type, 0, key->authenticated, key->val,
key->enc_size, key->ediv, key->rand);
}
@@ -2601,7 +2660,7 @@ static int load_long_term_keys(struct sock *sk, struct hci_dev *hdev,
return 0;
}

struct mgmt_handler {
static const struct mgmt_handler {
int (*func) (struct sock *sk, struct hci_dev *hdev, void *data,
u16 data_len);
bool var_len;
@@ -2647,6 +2706,7 @@ struct mgmt_handler {
{ confirm_name, false, MGMT_CONFIRM_NAME_SIZE },
{ block_device, false, MGMT_BLOCK_DEVICE_SIZE },
{ unblock_device, false, MGMT_UNBLOCK_DEVICE_SIZE },
{ set_device_id, false, MGMT_SET_DEVICE_ID_SIZE },
};

@@ -2657,7 +2717,7 @@ int mgmt_control(struct sock *sk, struct msghdr *msg, size_t msglen)
struct mgmt_hdr *hdr;
u16 opcode, index, len;
struct hci_dev *hdev = NULL;
struct mgmt_handler *handler;
const struct mgmt_handler *handler;
int err;

BT_DBG("got %zu bytes", msglen);
@@ -2675,9 +2735,9 @@ int mgmt_control(struct sock *sk, struct msghdr *msg, size_t msglen)
}

hdr = buf;
opcode = get_unaligned_le16(&hdr->opcode);
index = get_unaligned_le16(&hdr->index);
len = get_unaligned_le16(&hdr->len);
opcode = __le16_to_cpu(hdr->opcode);
index = __le16_to_cpu(hdr->index);
len = __le16_to_cpu(hdr->len);

if (len != msglen - sizeof(*hdr)) {
err = -EINVAL;
@@ -2884,7 +2944,8 @@ int mgmt_write_scan_failed(struct hci_dev *hdev, u8 scan, u8 status)
return 0;
}

int mgmt_new_link_key(struct hci_dev *hdev, struct link_key *key, bool persistent)
int mgmt_new_link_key(struct hci_dev *hdev, struct link_key *key,
bool persistent)
{
struct mgmt_ev_new_link_key ev;

@@ -2892,7 +2953,7 @@ int mgmt_new_link_key(struct hci_dev *hdev, struct link_key *key, bool persisten

ev.store_hint = persistent;
bacpy(&ev.key.addr.bdaddr, &key->bdaddr);
ev.key.addr.type = MGMT_ADDR_BREDR;
ev.key.addr.type = BDADDR_BREDR;
ev.key.type = key->type;
memcpy(ev.key.val, key->val, 16);
ev.key.pin_len = key->pin_len;
@@ -2908,7 +2969,7 @@ int mgmt_new_ltk(struct hci_dev *hdev, struct smp_ltk *key, u8 persistent)

ev.store_hint = persistent;
bacpy(&ev.key.addr.bdaddr, &key->bdaddr);
ev.key.addr.type = key->bdaddr_type;
ev.key.addr.type = link_to_bdaddr(LE_LINK, key->bdaddr_type);
ev.key.authenticated = key->authenticated;
ev.key.enc_size = key->enc_size;
ev.key.ediv = key->ediv;
@@ -2932,7 +2993,7 @@ int mgmt_device_connected(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 link_type,
u16 eir_len = 0;

bacpy(&ev->addr.bdaddr, bdaddr);
ev->addr.type = link_to_mgmt(link_type, addr_type);
ev->addr.type = link_to_bdaddr(link_type, addr_type);

ev->flags = __cpu_to_le32(flags);

@@ -2944,7 +3005,7 @@ int mgmt_device_connected(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 link_type,
eir_len = eir_append_data(ev->eir, eir_len,
EIR_CLASS_OF_DEV, dev_class, 3);

put_unaligned_le16(eir_len, &ev->eir_len);
ev->eir_len = cpu_to_le16(eir_len);

return mgmt_event(MGMT_EV_DEVICE_CONNECTED, hdev, buf,
sizeof(*ev) + eir_len, NULL);
@@ -2995,13 +3056,13 @@ int mgmt_device_disconnected(struct hci_dev *hdev, bdaddr_t *bdaddr,
mgmt_pending_foreach(MGMT_OP_DISCONNECT, hdev, disconnect_rsp, &sk);

bacpy(&ev.bdaddr, bdaddr);
ev.type = link_to_mgmt(link_type, addr_type);
ev.type = link_to_bdaddr(link_type, addr_type);

err = mgmt_event(MGMT_EV_DEVICE_DISCONNECTED, hdev, &ev, sizeof(ev),
sk);

if (sk)
sock_put(sk);
sock_put(sk);

mgmt_pending_foreach(MGMT_OP_UNPAIR_DEVICE, hdev, unpair_device_rsp,
hdev);
@@ -3021,7 +3082,7 @@ int mgmt_disconnect_failed(struct hci_dev *hdev, bdaddr_t *bdaddr,
return -ENOENT;

bacpy(&rp.addr.bdaddr, bdaddr);
rp.addr.type = link_to_mgmt(link_type, addr_type);
rp.addr.type = link_to_bdaddr(link_type, addr_type);

err = cmd_complete(cmd->sk, cmd->index, MGMT_OP_DISCONNECT,
mgmt_status(status), &rp, sizeof(rp));
@@ -3039,7 +3100,7 @@ int mgmt_connect_failed(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 link_type,
struct mgmt_ev_connect_failed ev;

bacpy(&ev.addr.bdaddr, bdaddr);
ev.addr.type = link_to_mgmt(link_type, addr_type);
ev.addr.type = link_to_bdaddr(link_type, addr_type);
ev.status = mgmt_status(status);

return mgmt_event(MGMT_EV_CONNECT_FAILED, hdev, &ev, sizeof(ev), NULL);
@@ -3050,7 +3111,7 @@ int mgmt_pin_code_request(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 secure)
struct mgmt_ev_pin_code_request ev;

bacpy(&ev.addr.bdaddr, bdaddr);
ev.addr.type = MGMT_ADDR_BREDR;
ev.addr.type = BDADDR_BREDR;
ev.secure = secure;

return mgmt_event(MGMT_EV_PIN_CODE_REQUEST, hdev, &ev, sizeof(ev),
@@ -3069,7 +3130,7 @@ int mgmt_pin_code_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
return -ENOENT;

bacpy(&rp.addr.bdaddr, bdaddr);
rp.addr.type = MGMT_ADDR_BREDR;
rp.addr.type = BDADDR_BREDR;

err = cmd_complete(cmd->sk, hdev->id, MGMT_OP_PIN_CODE_REPLY,
mgmt_status(status), &rp, sizeof(rp));
@@ -3091,7 +3152,7 @@ int mgmt_pin_code_neg_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
return -ENOENT;

bacpy(&rp.addr.bdaddr, bdaddr);
rp.addr.type = MGMT_ADDR_BREDR;
rp.addr.type = BDADDR_BREDR;

err = cmd_complete(cmd->sk, hdev->id, MGMT_OP_PIN_CODE_NEG_REPLY,
mgmt_status(status), &rp, sizeof(rp));
@@ -3110,9 +3171,9 @@ int mgmt_user_confirm_request(struct hci_dev *hdev, bdaddr_t *bdaddr,
BT_DBG("%s", hdev->name);

bacpy(&ev.addr.bdaddr, bdaddr);
ev.addr.type = link_to_mgmt(link_type, addr_type);
ev.addr.type = link_to_bdaddr(link_type, addr_type);
ev.confirm_hint = confirm_hint;
put_unaligned_le32(value, &ev.value);
ev.value = value;

return mgmt_event(MGMT_EV_USER_CONFIRM_REQUEST, hdev, &ev, sizeof(ev),
NULL);
@@ -3126,7 +3187,7 @@ int mgmt_user_passkey_request(struct hci_dev *hdev, bdaddr_t *bdaddr,
BT_DBG("%s", hdev->name);

bacpy(&ev.addr.bdaddr, bdaddr);
ev.addr.type = link_to_mgmt(link_type, addr_type);
ev.addr.type = link_to_bdaddr(link_type, addr_type);

return mgmt_event(MGMT_EV_USER_PASSKEY_REQUEST, hdev, &ev, sizeof(ev),
NULL);
@@ -3145,7 +3206,7 @@ static int user_pairing_resp_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
return -ENOENT;

bacpy(&rp.addr.bdaddr, bdaddr);
rp.addr.type = link_to_mgmt(link_type, addr_type);
rp.addr.type = link_to_bdaddr(link_type, addr_type);
err = cmd_complete(cmd->sk, hdev->id, opcode, mgmt_status(status),
&rp, sizeof(rp));

@@ -3188,7 +3249,7 @@ int mgmt_auth_failed(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 link_type,
struct mgmt_ev_auth_failed ev;

bacpy(&ev.addr.bdaddr, bdaddr);
ev.addr.type = link_to_mgmt(link_type, addr_type);
ev.addr.type = link_to_bdaddr(link_type, addr_type);
ev.status = mgmt_status(status);

return mgmt_event(MGMT_EV_AUTH_FAILED, hdev, &ev, sizeof(ev), NULL);
@@ -3413,10 +3474,10 @@ int mgmt_le_enable_complete(struct hci_dev *hdev, u8 enable, u8 status)

if (enable && test_and_clear_bit(HCI_LE_ENABLED,
&hdev->dev_flags))
err = new_settings(hdev, NULL);
err = new_settings(hdev, NULL);

mgmt_pending_foreach(MGMT_OP_SET_LE, hdev,
cmd_status_rsp, &mgmt_err);
mgmt_pending_foreach(MGMT_OP_SET_LE, hdev, cmd_status_rsp,
&mgmt_err);

return err;
}
@@ -3455,7 +3516,7 @@ int mgmt_device_found(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 link_type,
memset(buf, 0, sizeof(buf));

bacpy(&ev->addr.bdaddr, bdaddr);
ev->addr.type = link_to_mgmt(link_type, addr_type);
ev->addr.type = link_to_bdaddr(link_type, addr_type);
ev->rssi = rssi;
if (cfm_name)
ev->flags[0] |= MGMT_DEV_FOUND_CONFIRM_NAME;
@@ -3469,7 +3530,7 @@ int mgmt_device_found(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 link_type,
eir_len = eir_append_data(ev->eir, eir_len, EIR_CLASS_OF_DEV,
dev_class, 3);

put_unaligned_le16(eir_len, &ev->eir_len);
ev->eir_len = cpu_to_le16(eir_len);

ev_size = sizeof(*ev) + eir_len;

@@ -3488,13 +3549,13 @@ int mgmt_remote_name(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 link_type,
memset(buf, 0, sizeof(buf));

bacpy(&ev->addr.bdaddr, bdaddr);
ev->addr.type = link_to_mgmt(link_type, addr_type);
ev->addr.type = link_to_bdaddr(link_type, addr_type);
ev->rssi = rssi;

eir_len = eir_append_data(ev->eir, 0, EIR_NAME_COMPLETE, name,
name_len);

put_unaligned_le16(eir_len, &ev->eir_len);
ev->eir_len = cpu_to_le16(eir_len);

return mgmt_event(MGMT_EV_DEVICE_FOUND, hdev, ev,
sizeof(*ev) + eir_len, NULL);
@@ -3594,6 +3655,3 @@ int mgmt_device_unblocked(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 type)

module_param(enable_hs, bool, 0644);
MODULE_PARM_DESC(enable_hs, "Enable High Speed support");

module_param(enable_le, bool, 0644);
MODULE_PARM_DESC(enable_le, "Enable Low Energy support");
@@ -260,7 +260,8 @@ static void rfcomm_sock_init(struct sock *sk, struct sock *parent)

if (parent) {
sk->sk_type = parent->sk_type;
pi->dlc->defer_setup = bt_sk(parent)->defer_setup;
pi->dlc->defer_setup = test_bit(BT_SK_DEFER_SETUP,
&bt_sk(parent)->flags);

pi->sec_level = rfcomm_pi(parent)->sec_level;
pi->role_switch = rfcomm_pi(parent)->role_switch;

@@ -731,7 +732,11 @@ static int rfcomm_sock_setsockopt(struct socket *sock, int level, int optname, c
break;
}

bt_sk(sk)->defer_setup = opt;
if (opt)
set_bit(BT_SK_DEFER_SETUP, &bt_sk(sk)->flags);
else
clear_bit(BT_SK_DEFER_SETUP, &bt_sk(sk)->flags);

break;

default:

@@ -849,7 +854,8 @@ static int rfcomm_sock_getsockopt(struct socket *sock, int level, int optname, c
break;
}

if (put_user(bt_sk(sk)->defer_setup, (u32 __user *) optval))
if (put_user(test_bit(BT_SK_DEFER_SETUP, &bt_sk(sk)->flags),
(u32 __user *) optval))
err = -EFAULT;

break;

@@ -972,7 +978,7 @@ int rfcomm_connect_ind(struct rfcomm_session *s, u8 channel, struct rfcomm_dlc *
done:
bh_unlock_sock(parent);

if (bt_sk(parent)->defer_setup)
if (test_bit(BT_SK_DEFER_SETUP, &bt_sk(parent)->flags))
parent->sk_state_change(parent);

return result;
@@ -61,8 +61,6 @@ static struct bt_sock_list sco_sk_list = {
static void __sco_chan_add(struct sco_conn *conn, struct sock *sk, struct sock *parent);
static void sco_chan_del(struct sock *sk, int err);

static int sco_conn_del(struct hci_conn *conn, int err);

static void sco_sock_close(struct sock *sk);
static void sco_sock_kill(struct sock *sk);

@@ -95,12 +93,12 @@ static void sco_sock_clear_timer(struct sock *sk)
}

/* ---- SCO connections ---- */
static struct sco_conn *sco_conn_add(struct hci_conn *hcon, __u8 status)
static struct sco_conn *sco_conn_add(struct hci_conn *hcon)
{
struct hci_dev *hdev = hcon->hdev;
struct sco_conn *conn = hcon->sco_data;

if (conn || status)
if (conn)
return conn;

conn = kzalloc(sizeof(struct sco_conn), GFP_ATOMIC);

@@ -195,13 +193,14 @@ static int sco_connect(struct sock *sk)
else
type = SCO_LINK;

hcon = hci_connect(hdev, type, dst, BT_SECURITY_LOW, HCI_AT_NO_BONDING);
hcon = hci_connect(hdev, type, dst, BDADDR_BREDR, BT_SECURITY_LOW,
HCI_AT_NO_BONDING);
if (IS_ERR(hcon)) {
err = PTR_ERR(hcon);
goto done;
}

conn = sco_conn_add(hcon, 0);
conn = sco_conn_add(hcon);
if (!conn) {
hci_conn_put(hcon);
err = -ENOMEM;

@@ -233,7 +232,7 @@ static inline int sco_send_frame(struct sock *sk, struct msghdr *msg, int len)
{
struct sco_conn *conn = sco_pi(sk)->conn;
struct sk_buff *skb;
int err, count;
int err;

/* Check outgoing MTU */
if (len > conn->mtu)

@@ -241,20 +240,18 @@ static inline int sco_send_frame(struct sock *sk, struct msghdr *msg, int len)

BT_DBG("sk %p len %d", sk, len);

count = min_t(unsigned int, conn->mtu, len);
skb = bt_skb_send_alloc(sk, count,
msg->msg_flags & MSG_DONTWAIT, &err);
skb = bt_skb_send_alloc(sk, len, msg->msg_flags & MSG_DONTWAIT, &err);
if (!skb)
return err;

if (memcpy_fromiovec(skb_put(skb, count), msg->msg_iov, count)) {
if (memcpy_fromiovec(skb_put(skb, len), msg->msg_iov, len)) {
kfree_skb(skb);
return -EFAULT;
}

hci_send_sco(conn->hcon, skb);

return count;
return len;
}

static inline void sco_recv_frame(struct sco_conn *conn, struct sk_buff *skb)

@@ -277,17 +274,20 @@ drop:
}

/* -------- Socket interface ---------- */
static struct sock *__sco_get_sock_by_addr(bdaddr_t *ba)
static struct sock *__sco_get_sock_listen_by_addr(bdaddr_t *ba)
{
struct sock *sk;
struct hlist_node *node;
struct sock *sk;

sk_for_each(sk, node, &sco_sk_list.head) {
if (sk->sk_state != BT_LISTEN)
continue;

sk_for_each(sk, node, &sco_sk_list.head)
if (!bacmp(&bt_sk(sk)->src, ba))
goto found;
sk = NULL;
found:
return sk;
return sk;
}

return NULL;
}

/* Find socket listening on source bdaddr.

@@ -466,7 +466,6 @@ static int sco_sock_bind(struct socket *sock, struct sockaddr *addr, int addr_le
{
struct sockaddr_sco *sa = (struct sockaddr_sco *) addr;
struct sock *sk = sock->sk;
bdaddr_t *src = &sa->sco_bdaddr;
int err = 0;

BT_DBG("sk %p %s", sk, batostr(&sa->sco_bdaddr));

@@ -481,17 +480,14 @@ static int sco_sock_bind(struct socket *sock, struct sockaddr *addr, int addr_le
goto done;
}

write_lock(&sco_sk_list.lock);

if (bacmp(src, BDADDR_ANY) && __sco_get_sock_by_addr(src)) {
err = -EADDRINUSE;
} else {
/* Save source address */
bacpy(&bt_sk(sk)->src, &sa->sco_bdaddr);
sk->sk_state = BT_BOUND;
if (sk->sk_type != SOCK_SEQPACKET) {
err = -EINVAL;
goto done;
}

write_unlock(&sco_sk_list.lock);
bacpy(&bt_sk(sk)->src, &sa->sco_bdaddr);

sk->sk_state = BT_BOUND;

done:
release_sock(sk);

@@ -537,21 +533,38 @@ done:
static int sco_sock_listen(struct socket *sock, int backlog)
{
struct sock *sk = sock->sk;
bdaddr_t *src = &bt_sk(sk)->src;
int err = 0;

BT_DBG("sk %p backlog %d", sk, backlog);

lock_sock(sk);

if (sk->sk_state != BT_BOUND || sock->type != SOCK_SEQPACKET) {
if (sk->sk_state != BT_BOUND) {
err = -EBADFD;
goto done;
}

if (sk->sk_type != SOCK_SEQPACKET) {
err = -EINVAL;
goto done;
}

write_lock(&sco_sk_list.lock);

if (__sco_get_sock_listen_by_addr(src)) {
err = -EADDRINUSE;
goto unlock;
}

sk->sk_max_ack_backlog = backlog;
sk->sk_ack_backlog = 0;

sk->sk_state = BT_LISTEN;

unlock:
write_unlock(&sco_sk_list.lock);

done:
release_sock(sk);
return err;

@@ -923,7 +936,7 @@ int sco_connect_cfm(struct hci_conn *hcon, __u8 status)
if (!status) {
struct sco_conn *conn;

conn = sco_conn_add(hcon, status);
conn = sco_conn_add(hcon);
if (conn)
sco_conn_ready(conn);
} else
@@ -956,7 +956,7 @@ int smp_distribute_keys(struct l2cap_conn *conn, __u8 force)
HCI_SMP_LTK_SLAVE, 1, authenticated,
enc.ltk, smp->enc_key_size, ediv, ident.rand);

ident.ediv = cpu_to_le16(ediv);
ident.ediv = ediv;

smp_send_cmd(conn, SMP_CMD_MASTER_IDENT, sizeof(ident), &ident);
@@ -421,16 +421,22 @@ static void sta_tx_agg_session_timer_expired(unsigned long data)
struct tid_ampdu_tx *tid_tx;
unsigned long timeout;

tid_tx = rcu_dereference_protected_tid_tx(sta, *ptid);
if (!tid_tx)
rcu_read_lock();
tid_tx = rcu_dereference(sta->ampdu_mlme.tid_tx[*ptid]);
if (!tid_tx || test_bit(HT_AGG_STATE_STOPPING, &tid_tx->state)) {
rcu_read_unlock();
return;
}

timeout = tid_tx->last_tx + TU_TO_JIFFIES(tid_tx->timeout);
if (time_is_after_jiffies(timeout)) {
mod_timer(&tid_tx->session_timer, timeout);
rcu_read_unlock();
return;
}

rcu_read_unlock();

#ifdef CONFIG_MAC80211_HT_DEBUG
printk(KERN_DEBUG "tx session timer expired on tid %d\n", (u16)*ptid);
#endif
@@ -509,6 +509,7 @@ IEEE80211_IF_FILE(dot11MeshHWMPRannInterval,
u.mesh.mshcfg.dot11MeshHWMPRannInterval, DEC);
IEEE80211_IF_FILE(dot11MeshForwarding, u.mesh.mshcfg.dot11MeshForwarding, DEC);
IEEE80211_IF_FILE(rssi_threshold, u.mesh.mshcfg.rssi_threshold, DEC);
IEEE80211_IF_FILE(ht_opmode, u.mesh.mshcfg.ht_opmode, DEC);
#endif

#define DEBUGFS_ADD_MODE(name, mode) \

@@ -608,6 +609,7 @@ static void add_mesh_config(struct ieee80211_sub_if_data *sdata)
MESHPARAMS_ADD(dot11MeshHWMPRannInterval);
MESHPARAMS_ADD(dot11MeshGateAnnouncementProtocol);
MESHPARAMS_ADD(rssi_threshold);
MESHPARAMS_ADD(ht_opmode);
#undef MESHPARAMS_ADD
}
#endif
@@ -163,6 +163,11 @@ static void __ieee80211_sta_join_ibss(struct ieee80211_sub_if_data *sdata,
sizeof(struct ieee80211_ht_operation));
pos = ieee80211_ie_build_ht_cap(pos, &sband->ht_cap,
sband->ht_cap.cap);
/*
* Note: According to 802.11n-2009 9.13.3.1, HT Protection
* field and RIFS Mode are reserved in IBSS mode, therefore
* keep them at 0
*/
pos = ieee80211_ie_build_ht_oper(pos, &sband->ht_cap,
chan, channel_type, 0);
}
@@ -206,8 +206,10 @@ static void ieee80211_set_default_queues(struct ieee80211_sub_if_data *sdata)
for (i = 0; i < IEEE80211_NUM_ACS; i++) {
if (local->hw.flags & IEEE80211_HW_QUEUE_CONTROL)
sdata->vif.hw_queue[i] = IEEE80211_INVAL_HW_QUEUE;
else
else if (local->hw.queues >= IEEE80211_NUM_ACS)
sdata->vif.hw_queue[i] = i;
else
sdata->vif.hw_queue[i] = 0;
}
sdata->vif.cab_queue = IEEE80211_INVAL_HW_QUEUE;
}
@@ -596,6 +596,9 @@ struct ieee80211_hw *ieee80211_alloc_hw(size_t priv_data_len,
local->hw.offchannel_tx_hw_queue = IEEE80211_INVAL_HW_QUEUE;
local->hw.conf.long_frame_max_tx_count = wiphy->retry_long;
local->hw.conf.short_frame_max_tx_count = wiphy->retry_short;
local->hw.radiotap_mcs_details = IEEE80211_RADIOTAP_MCS_HAVE_MCS |
IEEE80211_RADIOTAP_MCS_HAVE_GI |
IEEE80211_RADIOTAP_MCS_HAVE_BW;
local->user_power_level = -1;
wiphy->ht_capa_mod_mask = &mac80211_ht_capa_mod_mask;
@@ -109,8 +109,10 @@ bool mesh_matches_local(struct ieee80211_sub_if_data *sdata,

/* Disallow HT40+/- mismatch */
if (ie->ht_operation &&
local->_oper_channel_type > NL80211_CHAN_HT20 &&
sta_channel_type > NL80211_CHAN_HT20 &&
(local->_oper_channel_type == NL80211_CHAN_HT40MINUS ||
local->_oper_channel_type == NL80211_CHAN_HT40PLUS) &&
(sta_channel_type == NL80211_CHAN_HT40MINUS ||
sta_channel_type == NL80211_CHAN_HT40PLUS) &&
local->_oper_channel_type != sta_channel_type)
goto mismatch;
@@ -603,7 +603,10 @@ static void hwmp_preq_frame_process(struct ieee80211_sub_if_data *sdata,
hopcount, ttl, cpu_to_le32(lifetime),
cpu_to_le32(metric), cpu_to_le32(preq_id),
sdata);
ifmsh->mshstats.fwded_mcast++;
if (!is_multicast_ether_addr(da))
ifmsh->mshstats.fwded_unicast++;
else
ifmsh->mshstats.fwded_mcast++;
ifmsh->mshstats.fwded_frames++;
}
}
@@ -105,15 +105,15 @@ static struct sta_info *mesh_plink_alloc(struct ieee80211_sub_if_data *sdata,
return sta;
}

/** mesh_set_ht_prot_mode - set correct HT protection mode
/*
* mesh_set_ht_prot_mode - set correct HT protection mode
*
* Section 9.23.3.5 of IEEE 80211s standard describes the protection rules for
* HT mesh STA in a MBSS. Three HT protection modes are supported for now,
* non-HT mixed mode, 20MHz-protection and no-protection mode. non-HT mixed
* mode is selected if any non-HT peers are present in our MBSS.
* 20MHz-protection mode is selected if all peers in our 20/40MHz MBSS support
* HT and atleast one HT20 peer is present. Otherwise no-protection mode is
* selected.
* Section 9.23.3.5 of IEEE 80211-2012 describes the protection rules for HT
* mesh STA in a MBSS. Three HT protection modes are supported for now, non-HT
* mixed mode, 20MHz-protection and no-protection mode. non-HT mixed mode is
* selected if any non-HT peers are present in our MBSS. 20MHz-protection mode
* is selected if all peers in our 20/40MHz MBSS support HT and atleast one
* HT20 peer is present. Otherwise no-protection mode is selected.
*/
static u32 mesh_set_ht_prot_mode(struct ieee80211_sub_if_data *sdata)
{

@@ -128,21 +128,22 @@ static u32 mesh_set_ht_prot_mode(struct ieee80211_sub_if_data *sdata)

rcu_read_lock();
list_for_each_entry_rcu(sta, &local->sta_list, list) {
if (sdata == sta->sdata &&
sta->plink_state == NL80211_PLINK_ESTAB) {
switch (sta->ch_type) {
case NL80211_CHAN_NO_HT:
mpl_dbg("mesh_plink %pM: nonHT sta (%pM) is present",
sdata->vif.addr, sta->sta.addr);
non_ht_sta = true;
goto out;
case NL80211_CHAN_HT20:
mpl_dbg("mesh_plink %pM: HT20 sta (%pM) is present",
sdata->vif.addr, sta->sta.addr);
ht20_sta = true;
default:
break;
}
if (sdata != sta->sdata ||
sta->plink_state != NL80211_PLINK_ESTAB)
continue;

switch (sta->ch_type) {
case NL80211_CHAN_NO_HT:
mpl_dbg("mesh_plink %pM: nonHT sta (%pM) is present",
sdata->vif.addr, sta->sta.addr);
non_ht_sta = true;
goto out;
case NL80211_CHAN_HT20:
mpl_dbg("mesh_plink %pM: HT20 sta (%pM) is present",
sdata->vif.addr, sta->sta.addr);
ht20_sta = true;
default:
break;
}
}
out:

@@ -346,6 +347,15 @@ static struct sta_info *mesh_peer_init(struct ieee80211_sub_if_data *sdata,

sta = sta_info_get(sdata, addr);
if (!sta) {
/* Userspace handles peer allocation when security is enabled */
if (sdata->u.mesh.security & IEEE80211_MESH_SEC_AUTHED) {
cfg80211_notify_new_peer_candidate(sdata->dev, addr,
elems->ie_start,
elems->total_len,
GFP_ATOMIC);
return NULL;
}

sta = mesh_plink_alloc(sdata, addr);
if (!sta)
return NULL;

@@ -387,15 +397,6 @@ void mesh_neighbour_update(struct ieee80211_sub_if_data *sdata,
{
struct sta_info *sta;

/* Userspace handles peer allocation when security is enabled */
if (sdata->u.mesh.security & IEEE80211_MESH_SEC_AUTHED) {
cfg80211_notify_new_peer_candidate(sdata->dev, hw_addr,
elems->ie_start,
elems->total_len,
GFP_KERNEL);
return;
}

rcu_read_lock();
sta = mesh_peer_init(sdata, hw_addr, elems);
if (!sta)
@@ -204,14 +204,14 @@ ieee80211_add_rx_radiotap_header(struct ieee80211_local *local,

if (status->flag & RX_FLAG_HT) {
rthdr->it_present |= cpu_to_le32(1 << IEEE80211_RADIOTAP_MCS);
*pos++ = IEEE80211_RADIOTAP_MCS_HAVE_MCS |
IEEE80211_RADIOTAP_MCS_HAVE_GI |
IEEE80211_RADIOTAP_MCS_HAVE_BW;
*pos++ = local->hw.radiotap_mcs_details;
*pos = 0;
if (status->flag & RX_FLAG_SHORT_GI)
*pos |= IEEE80211_RADIOTAP_MCS_SGI;
if (status->flag & RX_FLAG_40MHZ)
*pos |= IEEE80211_RADIOTAP_MCS_BW_40;
if (status->flag & RX_FLAG_HT_GF)
*pos |= IEEE80211_RADIOTAP_MCS_FMT_GF;
pos++;
*pos++ = status->rate_idx;
}
@@ -92,6 +92,7 @@ static u8 *ieee80211_wep_add_iv(struct ieee80211_local *local,
int keylen, int keyidx)
{
struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
unsigned int hdrlen;
u8 *newhdr;

@@ -104,6 +105,13 @@ static u8 *ieee80211_wep_add_iv(struct ieee80211_local *local,
hdrlen = ieee80211_hdrlen(hdr->frame_control);
newhdr = skb_push(skb, WEP_IV_LEN);
memmove(newhdr, newhdr + WEP_IV_LEN, hdrlen);

/* the HW only needs room for the IV, but not the actual IV */
if (info->control.hw_key &&
(info->control.hw_key->flags & IEEE80211_KEY_FLAG_PUT_IV_SPACE))
return newhdr + hdrlen;

skb_set_network_header(skb, skb_network_offset(skb) + WEP_IV_LEN);
ieee80211_wep_get_iv(local, keylen, keyidx, newhdr + hdrlen);
return newhdr + hdrlen;
}

@@ -313,14 +321,15 @@ ieee80211_crypto_wep_decrypt(struct ieee80211_rx_data *rx)
static int wep_encrypt_skb(struct ieee80211_tx_data *tx, struct sk_buff *skb)
{
struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
struct ieee80211_key_conf *hw_key = info->control.hw_key;

if (!info->control.hw_key) {
if (!hw_key) {
if (ieee80211_wep_encrypt(tx->local, skb, tx->key->conf.key,
tx->key->conf.keylen,
tx->key->conf.keyidx))
return -1;
} else if (info->control.hw_key->flags &
IEEE80211_KEY_FLAG_GENERATE_IV) {
} else if ((hw_key->flags & IEEE80211_KEY_FLAG_GENERATE_IV) ||
(hw_key->flags & IEEE80211_KEY_FLAG_PUT_IV_SPACE)) {
if (!ieee80211_wep_add_iv(tx->local, skb,
tx->key->conf.keylen,
tx->key->conf.keyidx))
@@ -183,7 +183,8 @@ static int tkip_encrypt_skb(struct ieee80211_tx_data *tx, struct sk_buff *skb)
u8 *pos;

if (info->control.hw_key &&
!(info->control.hw_key->flags & IEEE80211_KEY_FLAG_GENERATE_IV)) {
!(info->control.hw_key->flags & IEEE80211_KEY_FLAG_GENERATE_IV) &&
!(info->control.hw_key->flags & IEEE80211_KEY_FLAG_PUT_IV_SPACE)) {
/* hwaccel - with no need for software-generated IV */
return 0;
}

@@ -202,8 +203,14 @@ static int tkip_encrypt_skb(struct ieee80211_tx_data *tx, struct sk_buff *skb)

pos = skb_push(skb, TKIP_IV_LEN);
memmove(pos, pos + TKIP_IV_LEN, hdrlen);
skb_set_network_header(skb, skb_network_offset(skb) + TKIP_IV_LEN);
pos += hdrlen;

/* the HW only needs room for the IV, but not the actual IV */
if (info->control.hw_key &&
(info->control.hw_key->flags & IEEE80211_KEY_FLAG_PUT_IV_SPACE))
return 0;

/* Increase IV for the frame */
spin_lock_irqsave(&key->u.tkip.txlock, flags);
key->u.tkip.tx.iv16++;

@@ -422,6 +429,7 @@ static int ccmp_encrypt_skb(struct ieee80211_tx_data *tx, struct sk_buff *skb)

pos = skb_push(skb, CCMP_HDR_LEN);
memmove(pos, pos + CCMP_HDR_LEN, hdrlen);
skb_set_network_header(skb, skb_network_offset(skb) + CCMP_HDR_LEN);

/* the HW only needs room for the IV, but not the actual IV */
if (info->control.hw_key &&
net/nfc/core.c
@@ -97,7 +97,7 @@ int nfc_dev_down(struct nfc_dev *dev)
goto error;
}

if (dev->polling || dev->activated_target_idx != NFC_TARGET_IDX_NONE) {
if (dev->polling || dev->active_target) {
rc = -EBUSY;
goto error;
}

@@ -183,11 +183,27 @@ error:
return rc;
}

static struct nfc_target *nfc_find_target(struct nfc_dev *dev, u32 target_idx)
{
int i;

if (dev->n_targets == 0)
return NULL;

for (i = 0; i < dev->n_targets ; i++) {
if (dev->targets[i].idx == target_idx)
return &dev->targets[i];
}

return NULL;
}

int nfc_dep_link_up(struct nfc_dev *dev, int target_index, u8 comm_mode)
{
int rc = 0;
u8 *gb;
size_t gb_len;
struct nfc_target *target;

pr_debug("dev_name=%s comm %d\n", dev_name(&dev->dev), comm_mode);

@@ -212,9 +228,15 @@ int nfc_dep_link_up(struct nfc_dev *dev, int target_index, u8 comm_mode)
goto error;
}

rc = dev->ops->dep_link_up(dev, target_index, comm_mode, gb, gb_len);
target = nfc_find_target(dev, target_index);
if (target == NULL) {
rc = -ENOTCONN;
goto error;
}

rc = dev->ops->dep_link_up(dev, target, comm_mode, gb, gb_len);
if (!rc)
dev->activated_target_idx = target_index;
dev->active_target = target;

error:
device_unlock(&dev->dev);

@@ -250,7 +272,7 @@ int nfc_dep_link_down(struct nfc_dev *dev)
rc = dev->ops->dep_link_down(dev);
if (!rc) {
dev->dep_link_up = false;
dev->activated_target_idx = NFC_TARGET_IDX_NONE;
dev->active_target = NULL;
nfc_llcp_mac_is_down(dev);
nfc_genl_dep_link_down_event(dev);
}

@@ -282,6 +304,7 @@ EXPORT_SYMBOL(nfc_dep_link_is_up);
int nfc_activate_target(struct nfc_dev *dev, u32 target_idx, u32 protocol)
{
int rc;
struct nfc_target *target;

pr_debug("dev_name=%s target_idx=%u protocol=%u\n",
dev_name(&dev->dev), target_idx, protocol);

@@ -293,9 +316,20 @@ int nfc_activate_target(struct nfc_dev *dev, u32 target_idx, u32 protocol)
goto error;
}

rc = dev->ops->activate_target(dev, target_idx, protocol);
if (dev->active_target) {
rc = -EBUSY;
goto error;
}

target = nfc_find_target(dev, target_idx);
if (target == NULL) {
rc = -ENOTCONN;
goto error;
}

rc = dev->ops->activate_target(dev, target, protocol);
if (!rc) {
dev->activated_target_idx = target_idx;
dev->active_target = target;

if (dev->ops->check_presence)
mod_timer(&dev->check_pres_timer, jiffies +

@@ -327,11 +361,21 @@ int nfc_deactivate_target(struct nfc_dev *dev, u32 target_idx)
goto error;
}

if (dev->active_target == NULL) {
rc = -ENOTCONN;
goto error;
}

if (dev->active_target->idx != target_idx) {
rc = -ENOTCONN;
goto error;
}

if (dev->ops->check_presence)
del_timer_sync(&dev->check_pres_timer);

dev->ops->deactivate_target(dev, target_idx);
dev->activated_target_idx = NFC_TARGET_IDX_NONE;
dev->ops->deactivate_target(dev, dev->active_target);
dev->active_target = NULL;

error:
device_unlock(&dev->dev);

@@ -365,13 +409,13 @@ int nfc_data_exchange(struct nfc_dev *dev, u32 target_idx, struct sk_buff *skb,
goto error;
}

if (dev->activated_target_idx == NFC_TARGET_IDX_NONE) {
if (dev->active_target == NULL) {
rc = -ENOTCONN;
kfree_skb(skb);
goto error;
}

if (target_idx != dev->activated_target_idx) {
if (dev->active_target->idx != target_idx) {
rc = -EADDRNOTAVAIL;
kfree_skb(skb);
goto error;

@@ -380,7 +424,8 @@ int nfc_data_exchange(struct nfc_dev *dev, u32 target_idx, struct sk_buff *skb,
if (dev->ops->check_presence)
del_timer_sync(&dev->check_pres_timer);

rc = dev->ops->data_exchange(dev, target_idx, skb, cb, cb_context);
rc = dev->ops->data_exchange(dev, dev->active_target, skb, cb,
cb_context);

if (!rc && dev->ops->check_presence)
mod_timer(&dev->check_pres_timer, jiffies +

@@ -456,6 +501,9 @@ EXPORT_SYMBOL(nfc_alloc_recv_skb);
* The device driver must call this function when one or many nfc targets
* are found. After calling this function, the device driver must stop
* polling for targets.
* IMPORTANT: this function must not be called from an atomic context.
* In addition, it must also not be called from a context that would prevent
* the NFC Core to call other nfc ops entry point concurrently.
*/
int nfc_targets_found(struct nfc_dev *dev,
struct nfc_target *targets, int n_targets)

@@ -469,7 +517,7 @@ int nfc_targets_found(struct nfc_dev *dev,
for (i = 0; i < n_targets; i++)
targets[i].idx = dev->target_next_idx++;

spin_lock_bh(&dev->targets_lock);
device_lock(&dev->dev);

dev->targets_generation++;

@@ -479,12 +527,12 @@ int nfc_targets_found(struct nfc_dev *dev,

if (!dev->targets) {
dev->n_targets = 0;
spin_unlock_bh(&dev->targets_lock);
device_unlock(&dev->dev);
return -ENOMEM;
}

dev->n_targets = n_targets;
spin_unlock_bh(&dev->targets_lock);
device_unlock(&dev->dev);

nfc_genl_targets_found(dev);

@@ -492,6 +540,18 @@ int nfc_targets_found(struct nfc_dev *dev,
}
EXPORT_SYMBOL(nfc_targets_found);

/**
* nfc_target_lost - inform that an activated target went out of field
*
* @dev: The nfc device that had the activated target in field
* @target_idx: the nfc index of the target
*
* The device driver must call this function when the activated target
* goes out of the field.
* IMPORTANT: this function must not be called from an atomic context.
* In addition, it must also not be called from a context that would prevent
* the NFC Core to call other nfc ops entry point concurrently.
*/
int nfc_target_lost(struct nfc_dev *dev, u32 target_idx)
{
struct nfc_target *tg;

@@ -499,7 +559,7 @@ int nfc_target_lost(struct nfc_dev *dev, u32 target_idx)

pr_debug("dev_name %s n_target %d\n", dev_name(&dev->dev), target_idx);

spin_lock_bh(&dev->targets_lock);
device_lock(&dev->dev);

for (i = 0; i < dev->n_targets; i++) {
tg = &dev->targets[i];

@@ -508,13 +568,13 @@ int nfc_target_lost(struct nfc_dev *dev, u32 target_idx)
}

if (i == dev->n_targets) {
spin_unlock_bh(&dev->targets_lock);
device_unlock(&dev->dev);
return -EINVAL;
}

dev->targets_generation++;
dev->n_targets--;
dev->activated_target_idx = NFC_TARGET_IDX_NONE;
dev->active_target = NULL;

if (dev->n_targets) {
memcpy(&dev->targets[i], &dev->targets[i + 1],

@@ -524,7 +584,7 @@ int nfc_target_lost(struct nfc_dev *dev, u32 target_idx)
dev->targets = NULL;
}

spin_unlock_bh(&dev->targets_lock);
device_unlock(&dev->dev);

nfc_genl_target_lost(dev, target_idx);

@@ -556,15 +616,16 @@ static void nfc_check_pres_work(struct work_struct *work)

device_lock(&dev->dev);

if (dev->activated_target_idx != NFC_TARGET_IDX_NONE &&
timer_pending(&dev->check_pres_timer) == 0) {
rc = dev->ops->check_presence(dev, dev->activated_target_idx);
if (dev->active_target && timer_pending(&dev->check_pres_timer) == 0) {
rc = dev->ops->check_presence(dev, dev->active_target);
if (!rc) {
mod_timer(&dev->check_pres_timer, jiffies +
msecs_to_jiffies(NFC_CHECK_PRES_FREQ_MS));
} else {
nfc_target_lost(dev, dev->activated_target_idx);
dev->activated_target_idx = NFC_TARGET_IDX_NONE;
u32 active_target_idx = dev->active_target->idx;
device_unlock(&dev->dev);
nfc_target_lost(dev, active_target_idx);
return;
}
}

@@ -637,14 +698,12 @@ struct nfc_dev *nfc_allocate_device(struct nfc_ops *ops,
dev->tx_headroom = tx_headroom;
dev->tx_tailroom = tx_tailroom;

spin_lock_init(&dev->targets_lock);
nfc_genl_data_init(&dev->genl_data);

/* first generation must not be 0 */
dev->targets_generation = 1;

dev->activated_target_idx = NFC_TARGET_IDX_NONE;

if (ops->check_presence) {
char name[32];
init_timer(&dev->check_pres_timer);

@@ -662,7 +721,6 @@ struct nfc_dev *nfc_allocate_device(struct nfc_ops *ops,
}
}

return dev;
}
EXPORT_SYMBOL(nfc_allocate_device);
@@ -9,6 +9,7 @@ config NFC_HCI

config NFC_SHDLC
depends on NFC_HCI
select CRC_CCITT
bool "SHDLC link layer for HCI based NFC drivers"
default n
---help---
@@ -235,13 +235,6 @@ static int nfc_hci_target_discovered(struct nfc_hci_dev *hdev, u8 gate)
targets->hci_reader_gate = gate;

r = nfc_targets_found(hdev->ndev, targets, 1);
if (r < 0)
goto exit;

kfree(hdev->targets);
hdev->targets = targets;
targets = NULL;
hdev->target_count = 1;

exit:
kfree(targets);

@@ -258,11 +251,6 @@ void nfc_hci_event_received(struct nfc_hci_dev *hdev, u8 pipe, u8 event,

switch (event) {
case NFC_HCI_EVT_TARGET_DISCOVERED:
if (hdev->poll_started == false) {
r = -EPROTO;
goto exit;
}

if (skb->len < 1) { /* no status data? */
r = -EPROTO;
goto exit;

@@ -496,74 +484,42 @@ static int hci_dev_down(struct nfc_dev *nfc_dev)
static int hci_start_poll(struct nfc_dev *nfc_dev, u32 protocols)
{
struct nfc_hci_dev *hdev = nfc_get_drvdata(nfc_dev);
int r;

if (hdev->ops->start_poll)
r = hdev->ops->start_poll(hdev, protocols);
return hdev->ops->start_poll(hdev, protocols);
else
r = nfc_hci_send_event(hdev, NFC_HCI_RF_READER_A_GATE,
return nfc_hci_send_event(hdev, NFC_HCI_RF_READER_A_GATE,
NFC_HCI_EVT_READER_REQUESTED, NULL, 0);
if (r == 0)
hdev->poll_started = true;

return r;
}

static void hci_stop_poll(struct nfc_dev *nfc_dev)
{
struct nfc_hci_dev *hdev = nfc_get_drvdata(nfc_dev);

if (hdev->poll_started) {
nfc_hci_send_event(hdev, NFC_HCI_RF_READER_A_GATE,
NFC_HCI_EVT_END_OPERATION, NULL, 0);
hdev->poll_started = false;
}
nfc_hci_send_event(hdev, NFC_HCI_RF_READER_A_GATE,
NFC_HCI_EVT_END_OPERATION, NULL, 0);
}

static struct nfc_target *hci_find_target(struct nfc_hci_dev *hdev,
u32 target_idx)
static int hci_activate_target(struct nfc_dev *nfc_dev,
struct nfc_target *target, u32 protocol)
{
int i;
if (hdev->poll_started == false || hdev->targets == NULL)
return NULL;

for (i = 0; i < hdev->target_count; i++) {
if (hdev->targets[i].idx == target_idx)
return &hdev->targets[i];
}

return NULL;
}

static int hci_activate_target(struct nfc_dev *nfc_dev, u32 target_idx,
u32 protocol)
{
struct nfc_hci_dev *hdev = nfc_get_drvdata(nfc_dev);

if (hci_find_target(hdev, target_idx) == NULL)
return -ENOMEDIUM;

return 0;
}

static void hci_deactivate_target(struct nfc_dev *nfc_dev, u32 target_idx)
static void hci_deactivate_target(struct nfc_dev *nfc_dev,
struct nfc_target *target)
{
}

static int hci_data_exchange(struct nfc_dev *nfc_dev, u32 target_idx,
static int hci_data_exchange(struct nfc_dev *nfc_dev, struct nfc_target *target,
struct sk_buff *skb, data_exchange_cb_t cb,
void *cb_context)
{
struct nfc_hci_dev *hdev = nfc_get_drvdata(nfc_dev);
int r;
struct nfc_target *target;
struct sk_buff *res_skb = NULL;

pr_debug("target_idx=%d\n", target_idx);

target = hci_find_target(hdev, target_idx);
if (target == NULL)
return -ENOMEDIUM;
pr_debug("target_idx=%d\n", target->idx);

switch (target->hci_reader_gate) {
case NFC_HCI_RF_READER_A_GATE:

@@ -605,7 +561,18 @@ static int hci_data_exchange(struct nfc_dev *nfc_dev, u32 target_idx,
return 0;
}

struct nfc_ops hci_nfc_ops = {
static int hci_check_presence(struct nfc_dev *nfc_dev,
struct nfc_target *target)
{
struct nfc_hci_dev *hdev = nfc_get_drvdata(nfc_dev);

if (hdev->ops->check_presence)
return hdev->ops->check_presence(hdev, target);

return 0;
}

static struct nfc_ops hci_nfc_ops = {
.dev_up = hci_dev_up,
.dev_down = hci_dev_down,
.start_poll = hci_start_poll,

@@ -613,6 +580,7 @@ struct nfc_ops hci_nfc_ops = {
.activate_target = hci_activate_target,
.deactivate_target = hci_deactivate_target,
.data_exchange = hci_data_exchange,
.check_presence = hci_check_presence,
};

struct nfc_hci_dev *nfc_hci_allocate_device(struct nfc_hci_ops *ops,
@@ -816,6 +816,17 @@ static int nfc_shdlc_data_exchange(struct nfc_hci_dev *hdev,
return -EPERM;
}

static int nfc_shdlc_check_presence(struct nfc_hci_dev *hdev,
struct nfc_target *target)
{
struct nfc_shdlc *shdlc = nfc_hci_get_clientdata(hdev);

if (shdlc->ops->check_presence)
return shdlc->ops->check_presence(shdlc, target);

return 0;
}

static struct nfc_hci_ops shdlc_ops = {
.open = nfc_shdlc_open,
.close = nfc_shdlc_close,

@@ -825,6 +836,7 @@ static struct nfc_hci_ops shdlc_ops = {
.target_from_gate = nfc_shdlc_target_from_gate,
.complete_target_discovered = nfc_shdlc_complete_target_discovered,
.data_exchange = nfc_shdlc_data_exchange,
.check_presence = nfc_shdlc_check_presence,
};

struct nfc_shdlc *nfc_shdlc_allocate(struct nfc_shdlc_ops *ops,
@@ -488,7 +488,7 @@ int nfc_llcp_send_i_frame(struct nfc_llcp_sock *sock,

memcpy(skb_put(pdu, frag_len), msg_ptr, frag_len);

skb_queue_head(&sock->tx_queue, pdu);
skb_queue_tail(&sock->tx_queue, pdu);

lock_sock(sk);

@@ -502,7 +502,7 @@ int nfc_llcp_send_i_frame(struct nfc_llcp_sock *sock,

kfree(msg_data);

return 0;
return len;
}

int nfc_llcp_send_rr(struct nfc_llcp_sock *sock)
@@ -448,6 +448,8 @@ static struct nfc_llcp_sock *nfc_llcp_sock_get(struct nfc_llcp_local *local,
{
struct nfc_llcp_sock *sock, *llcp_sock, *n;

pr_debug("ssap dsap %d %d\n", ssap, dsap);

if (ssap == 0 && dsap == 0)
return NULL;

@@ -783,6 +785,7 @@ static void nfc_llcp_recv_disc(struct nfc_llcp_local *local,
static void nfc_llcp_recv_cc(struct nfc_llcp_local *local, struct sk_buff *skb)
{
struct nfc_llcp_sock *llcp_sock;
struct sock *sk;
u8 dsap, ssap;

dsap = nfc_llcp_dsap(skb);

@@ -801,10 +804,14 @@ static void nfc_llcp_recv_cc(struct nfc_llcp_local *local, struct sk_buff *skb)
}

llcp_sock->dsap = ssap;
sk = &llcp_sock->sk;

nfc_llcp_parse_tlv(local, &skb->data[LLCP_HEADER_SIZE],
skb->len - LLCP_HEADER_SIZE);

sk->sk_state = LLCP_CONNECTED;
sk->sk_state_change(sk);

nfc_llcp_sock_put(llcp_sock);
}
@@ -27,6 +27,42 @@
#include "../nfc.h"
#include "llcp.h"

static int sock_wait_state(struct sock *sk, int state, unsigned long timeo)
{
DECLARE_WAITQUEUE(wait, current);
int err = 0;

pr_debug("sk %p", sk);

add_wait_queue(sk_sleep(sk), &wait);
set_current_state(TASK_INTERRUPTIBLE);

while (sk->sk_state != state) {
if (!timeo) {
err = -EINPROGRESS;
break;
}

if (signal_pending(current)) {
err = sock_intr_errno(timeo);
break;
}

release_sock(sk);
timeo = schedule_timeout(timeo);
lock_sock(sk);
set_current_state(TASK_INTERRUPTIBLE);

err = sock_error(sk);
if (err)
break;
}

__set_current_state(TASK_RUNNING);
remove_wait_queue(sk_sleep(sk), &wait);
return err;
}

static struct proto llcp_sock_proto = {
.name = "NFC_LLCP",
.owner = THIS_MODULE,

@@ -304,11 +340,24 @@ static unsigned int llcp_sock_poll(struct file *file, struct socket *sock,
mask |= POLLERR;

if (!skb_queue_empty(&sk->sk_receive_queue))
mask |= POLLIN;
mask |= POLLIN | POLLRDNORM;

if (sk->sk_state == LLCP_CLOSED)
mask |= POLLHUP;

if (sk->sk_shutdown & RCV_SHUTDOWN)
mask |= POLLRDHUP | POLLIN | POLLRDNORM;

if (sk->sk_shutdown == SHUTDOWN_MASK)
mask |= POLLHUP;

if (sock_writeable(sk))
mask |= POLLOUT | POLLWRNORM | POLLWRBAND;
else
set_bit(SOCK_ASYNC_NOSPACE, &sk->sk_socket->flags);

pr_debug("mask 0x%x\n", mask);

return mask;
}

@@ -462,9 +511,13 @@ static int llcp_sock_connect(struct socket *sock, struct sockaddr *_addr,
if (ret)
goto put_dev;

sk->sk_state = LLCP_CONNECTED;
ret = sock_wait_state(sk, LLCP_CONNECTED,
sock_sndtimeo(sk, flags & O_NONBLOCK));
if (ret)
goto put_dev;

release_sock(sk);

return 0;

put_dev:
@@ -436,16 +436,16 @@ static void nci_stop_poll(struct nfc_dev *nfc_dev)
msecs_to_jiffies(NCI_RF_DEACTIVATE_TIMEOUT));
}

static int nci_activate_target(struct nfc_dev *nfc_dev, __u32 target_idx,
__u32 protocol)
static int nci_activate_target(struct nfc_dev *nfc_dev,
struct nfc_target *target, __u32 protocol)
{
struct nci_dev *ndev = nfc_get_drvdata(nfc_dev);
struct nci_rf_discover_select_param param;
struct nfc_target *target = NULL;
struct nfc_target *nci_target = NULL;
int i;
int rc = 0;

pr_debug("target_idx %d, protocol 0x%x\n", target_idx, protocol);
pr_debug("target_idx %d, protocol 0x%x\n", target->idx, protocol);

if ((atomic_read(&ndev->state) != NCI_W4_HOST_SELECT) &&
(atomic_read(&ndev->state) != NCI_POLL_ACTIVE)) {

@@ -459,25 +459,25 @@ static int nci_activate_target(struct nfc_dev *nfc_dev, __u32 target_idx,
}

for (i = 0; i < ndev->n_targets; i++) {
if (ndev->targets[i].idx == target_idx) {
target = &ndev->targets[i];
if (ndev->targets[i].idx == target->idx) {
nci_target = &ndev->targets[i];
break;
}
}

if (!target) {
if (!nci_target) {
pr_err("unable to find the selected target\n");
return -EINVAL;
}

if (!(target->supported_protocols & (1 << protocol))) {
if (!(nci_target->supported_protocols & (1 << protocol))) {
pr_err("target does not support the requested protocol 0x%x\n",
protocol);
return -EINVAL;
}

if (atomic_read(&ndev->state) == NCI_W4_HOST_SELECT) {
param.rf_discovery_id = target->logical_idx;
param.rf_discovery_id = nci_target->logical_idx;

if (protocol == NFC_PROTO_JEWEL)
param.rf_protocol = NCI_RF_PROTOCOL_T1T;

@@ -501,11 +501,12 @@ static int nci_activate_target(struct nfc_dev *nfc_dev, __u32 target_idx,
return rc;
}

static void nci_deactivate_target(struct nfc_dev *nfc_dev, __u32 target_idx)
static void nci_deactivate_target(struct nfc_dev *nfc_dev,
struct nfc_target *target)
{
struct nci_dev *ndev = nfc_get_drvdata(nfc_dev);

pr_debug("target_idx %d\n", target_idx);
pr_debug("target_idx %d\n", target->idx);

if (!ndev->target_active_prot) {
pr_err("unable to deactivate target, no active target\n");

@@ -520,14 +521,14 @@ static void nci_deactivate_target(struct nfc_dev *nfc_dev, __u32 target_idx)
}
}

static int nci_data_exchange(struct nfc_dev *nfc_dev, __u32 target_idx,
static int nci_data_exchange(struct nfc_dev *nfc_dev, struct nfc_target *target,
struct sk_buff *skb,
data_exchange_cb_t cb, void *cb_context)
{
struct nci_dev *ndev = nfc_get_drvdata(nfc_dev);
int rc;

pr_debug("target_idx %d, len %d\n", target_idx, skb->len);
pr_debug("target_idx %d, len %d\n", target->idx, skb->len);

if (!ndev->target_active_prot) {
pr_err("unable to exchange data, no active target\n");
@@ -49,7 +49,7 @@ void nci_data_exchange_complete(struct nci_dev *ndev, struct sk_buff *skb,

if (cb) {
ndev->data_exchange_cb = NULL;
ndev->data_exchange_cb_context = 0;
ndev->data_exchange_cb_context = NULL;

/* forward skb to nfc core */
cb(cb_context, skb, err);

@@ -200,10 +200,10 @@ static void nci_add_rx_data_frag(struct nci_dev *ndev,
pr_err("error adding room for accumulated rx data\n");

kfree_skb(skb);
skb = 0;
skb = NULL;

kfree_skb(ndev->rx_data_reassembly);
ndev->rx_data_reassembly = 0;
ndev->rx_data_reassembly = NULL;

err = -ENOMEM;
goto exit;

@@ -216,7 +216,7 @@ static void nci_add_rx_data_frag(struct nci_dev *ndev,

/* third, free old reassembly */
kfree_skb(ndev->rx_data_reassembly);
ndev->rx_data_reassembly = 0;
ndev->rx_data_reassembly = NULL;
}

if (pbf == NCI_PBF_CONT) {
@@ -31,6 +31,7 @@
#include <linux/errno.h>

#include <net/nfc/nci.h>
#include <net/nfc/nci_core.h>

/* NCI status codes to Unix errno mapping */
int nci_to_errno(__u8 code)
@@ -497,7 +497,7 @@ static void nci_rf_deactivate_ntf_packet(struct nci_dev *ndev,
/* drop partial rx data packet */
if (ndev->rx_data_reassembly) {
kfree_skb(ndev->rx_data_reassembly);
ndev->rx_data_reassembly = 0;
ndev->rx_data_reassembly = NULL;
}

/* complete the data exchange transaction, if exists */
@@ -33,7 +33,7 @@ static struct genl_multicast_group nfc_genl_event_mcgrp = {
.name = NFC_GENL_MCAST_EVENT_NAME,
};

struct genl_family nfc_genl_family = {
static struct genl_family nfc_genl_family = {
.id = GENL_ID_GENERATE,
.hdrsize = 0,
.name = NFC_GENL_NAME,

@@ -128,7 +128,7 @@ static int nfc_genl_dump_targets(struct sk_buff *skb,
cb->args[1] = (long) dev;
}

spin_lock_bh(&dev->targets_lock);
device_lock(&dev->dev);

cb->seq = dev->targets_generation;

@@ -141,7 +141,7 @@ static int nfc_genl_dump_targets(struct sk_buff *skb,
i++;
}

spin_unlock_bh(&dev->targets_lock);
device_unlock(&dev->dev);

cb->args[0] = i;
@@ -84,7 +84,7 @@ static inline int nfc_llcp_set_remote_gb(struct nfc_dev *dev,
return 0;
}

static inline u8 *nfc_llcp_general_bytes(struct nfc_dev *dev, u8 *gb_len)
static inline u8 *nfc_llcp_general_bytes(struct nfc_dev *dev, size_t *gb_len)
{
*gb_len = 0;
return NULL;
@@ -45,7 +45,7 @@ rdev_freq_to_chan(struct cfg80211_registered_device *rdev,
return chan;
}

int cfg80211_can_beacon_sec_chan(struct wiphy *wiphy,
bool cfg80211_can_beacon_sec_chan(struct wiphy *wiphy,
struct ieee80211_channel *chan,
enum nl80211_channel_type channel_type)
{
@@ -664,7 +664,7 @@ void wiphy_unregister(struct wiphy *wiphy)
mutex_lock(&rdev->devlist_mtx);
__count = rdev->opencount;
mutex_unlock(&rdev->devlist_mtx);
__count == 0;}));
__count == 0; }));

mutex_lock(&rdev->devlist_mtx);
BUG_ON(!list_empty(&rdev->netdev_list));

@@ -776,7 +776,7 @@ static struct device_type wiphy_type = {
.name = "wlan",
};

static int cfg80211_netdev_notifier_call(struct notifier_block * nb,
static int cfg80211_netdev_notifier_call(struct notifier_block *nb,
unsigned long state,
void *ndev)
{
@@ -445,8 +445,6 @@ int cfg80211_set_freq(struct cfg80211_registered_device *rdev,
struct wireless_dev *wdev, int freq,
enum nl80211_channel_type channel_type);

u16 cfg80211_calculate_bitrate(struct rate_info *rate);

int ieee80211_get_ratemask(struct ieee80211_supported_band *sband,
const u8 *rates, unsigned int n_rates,
u32 *mask);
@@ -1179,6 +1179,27 @@ static bool nl80211_can_set_dev_channel(struct wireless_dev *wdev)
wdev->iftype == NL80211_IFTYPE_P2P_GO;
}

static bool nl80211_valid_channel_type(struct genl_info *info,
enum nl80211_channel_type *channel_type)
{
enum nl80211_channel_type tmp;

if (!info->attrs[NL80211_ATTR_WIPHY_CHANNEL_TYPE])
return false;

tmp = nla_get_u32(info->attrs[NL80211_ATTR_WIPHY_CHANNEL_TYPE]);
if (tmp != NL80211_CHAN_NO_HT &&
tmp != NL80211_CHAN_HT20 &&
tmp != NL80211_CHAN_HT40PLUS &&
tmp != NL80211_CHAN_HT40MINUS)
return false;

if (channel_type)
*channel_type = tmp;

return true;
}

static int __nl80211_set_channel(struct cfg80211_registered_device *rdev,
struct wireless_dev *wdev,
struct genl_info *info)

@@ -1193,15 +1214,9 @@ static int __nl80211_set_channel(struct cfg80211_registered_device *rdev,
if (!nl80211_can_set_dev_channel(wdev))
return -EOPNOTSUPP;

if (info->attrs[NL80211_ATTR_WIPHY_CHANNEL_TYPE]) {
channel_type = nla_get_u32(info->attrs[
NL80211_ATTR_WIPHY_CHANNEL_TYPE]);
if (channel_type != NL80211_CHAN_NO_HT &&
channel_type != NL80211_CHAN_HT20 &&
channel_type != NL80211_CHAN_HT40PLUS &&
channel_type != NL80211_CHAN_HT40MINUS)
return -EINVAL;
}
if (info->attrs[NL80211_ATTR_WIPHY_CHANNEL_TYPE] &&
!nl80211_valid_channel_type(info, &channel_type))
return -EINVAL;

freq = nla_get_u32(info->attrs[NL80211_ATTR_WIPHY_FREQ]);

@@ -2410,10 +2425,16 @@ static int parse_station_flags(struct genl_info *info,
return -EINVAL;
}

for (flag = 1; flag <= NL80211_STA_FLAG_MAX; flag++)
if (flags[flag])
for (flag = 1; flag <= NL80211_STA_FLAG_MAX; flag++) {
if (flags[flag]) {
params->sta_flags_set |= (1<<flag);

/* no longer support new API additions in old API */
if (flag > NL80211_STA_FLAG_MAX_OLD_API)
return -EINVAL;
}
}

return 0;
}

@@ -4912,12 +4933,7 @@ static int nl80211_join_ibss(struct sk_buff *skb, struct genl_info *info)
if (info->attrs[NL80211_ATTR_WIPHY_CHANNEL_TYPE]) {
enum nl80211_channel_type channel_type;

channel_type = nla_get_u32(
info->attrs[NL80211_ATTR_WIPHY_CHANNEL_TYPE]);
if (channel_type != NL80211_CHAN_NO_HT &&
channel_type != NL80211_CHAN_HT20 &&
channel_type != NL80211_CHAN_HT40MINUS &&
channel_type != NL80211_CHAN_HT40PLUS)
if (!nl80211_valid_channel_type(info, &channel_type))
return -EINVAL;

if (channel_type != NL80211_CHAN_NO_HT &&

@@ -5485,15 +5501,9 @@ static int nl80211_remain_on_channel(struct sk_buff *skb,
!(rdev->wiphy.flags & WIPHY_FLAG_HAS_REMAIN_ON_CHANNEL))
return -EOPNOTSUPP;

if (info->attrs[NL80211_ATTR_WIPHY_CHANNEL_TYPE]) {
channel_type = nla_get_u32(
info->attrs[NL80211_ATTR_WIPHY_CHANNEL_TYPE]);
if (channel_type != NL80211_CHAN_NO_HT &&
channel_type != NL80211_CHAN_HT20 &&
channel_type != NL80211_CHAN_HT40PLUS &&
channel_type != NL80211_CHAN_HT40MINUS)
return -EINVAL;
}
if (info->attrs[NL80211_ATTR_WIPHY_CHANNEL_TYPE] &&
!nl80211_valid_channel_type(info, &channel_type))
return -EINVAL;

freq = nla_get_u32(info->attrs[NL80211_ATTR_WIPHY_FREQ]);
chan = rdev_freq_to_chan(rdev, freq, channel_type);

@@ -5764,12 +5774,7 @@ static int nl80211_tx_mgmt(struct sk_buff *skb, struct genl_info *info)
}

if (info->attrs[NL80211_ATTR_WIPHY_CHANNEL_TYPE]) {
channel_type = nla_get_u32(
info->attrs[NL80211_ATTR_WIPHY_CHANNEL_TYPE]);
if (channel_type != NL80211_CHAN_NO_HT &&
channel_type != NL80211_CHAN_HT20 &&
channel_type != NL80211_CHAN_HT40PLUS &&
channel_type != NL80211_CHAN_HT40MINUS)
if (!nl80211_valid_channel_type(info, &channel_type))
return -EINVAL;
channel_type_valid = true;
}
@@ -879,7 +879,7 @@ u16 cfg80211_calculate_bitrate(struct rate_info *rate)
return rate->legacy;

/* the formula below does only work for MCS values smaller than 32 */
if (rate->mcs >= 32)
if (WARN_ON_ONCE(rate->mcs >= 32))
return 0;

modulation = rate->mcs & 7;
|
Reference in New Issue
Block a user