Diffstat (limited to 'net/iucv')
-rw-r--r--  net/iucv/af_iucv.c | 381
1 file changed, 220 insertions, 161 deletions
diff --git a/net/iucv/af_iucv.c b/net/iucv/af_iucv.c
index d5c5b8f..07d7d55 100644
--- a/net/iucv/af_iucv.c
+++ b/net/iucv/af_iucv.c
@@ -90,6 +90,7 @@ do { \
static void iucv_sock_kill(struct sock *sk);
static void iucv_sock_close(struct sock *sk);
+static void iucv_sever_path(struct sock *, int);
static int afiucv_hs_rcv(struct sk_buff *skb, struct net_device *dev,
struct packet_type *pt, struct net_device *orig_dev);
@@ -130,17 +131,6 @@ static inline void low_nmcpy(unsigned char *dst, char *src)
memcpy(&dst[8], src, 8);
}
-static void iucv_skb_queue_purge(struct sk_buff_head *list)
-{
- struct sk_buff *skb;
-
- while ((skb = skb_dequeue(list)) != NULL) {
- if (skb->dev)
- dev_put(skb->dev);
- kfree_skb(skb);
- }
-}
-
static int afiucv_pm_prepare(struct device *dev)
{
#ifdef CONFIG_PM_DEBUG
@@ -175,17 +165,11 @@ static int afiucv_pm_freeze(struct device *dev)
read_lock(&iucv_sk_list.lock);
sk_for_each(sk, node, &iucv_sk_list.head) {
iucv = iucv_sk(sk);
- iucv_skb_queue_purge(&iucv->send_skb_q);
- skb_queue_purge(&iucv->backlog_skb_q);
switch (sk->sk_state) {
case IUCV_DISCONN:
case IUCV_CLOSING:
case IUCV_CONNECTED:
- if (iucv->path) {
- err = pr_iucv->path_sever(iucv->path, NULL);
- iucv_path_free(iucv->path);
- iucv->path = NULL;
- }
+ iucv_sever_path(sk, 0);
break;
case IUCV_OPEN:
case IUCV_BOUND:
@@ -194,6 +178,8 @@ static int afiucv_pm_freeze(struct device *dev)
default:
break;
}
+ skb_queue_purge(&iucv->send_skb_q);
+ skb_queue_purge(&iucv->backlog_skb_q);
}
read_unlock(&iucv_sk_list.lock);
return err;
@@ -338,7 +324,6 @@ static void iucv_sock_wake_msglim(struct sock *sk)
static int afiucv_hs_send(struct iucv_message *imsg, struct sock *sock,
struct sk_buff *skb, u8 flags)
{
- struct net *net = sock_net(sock);
struct iucv_sock *iucv = iucv_sk(sock);
struct af_iucv_trans_hdr *phs_hdr;
struct sk_buff *nskb;
@@ -375,10 +360,10 @@ static int afiucv_hs_send(struct iucv_message *imsg, struct sock *sock,
if (imsg)
memcpy(&phs_hdr->iucv_hdr, imsg, sizeof(struct iucv_message));
- skb->dev = dev_get_by_index(net, sock->sk_bound_dev_if);
+ skb->dev = iucv->hs_dev;
if (!skb->dev)
return -ENODEV;
- if (!(skb->dev->flags & IFF_UP))
+ if (!(skb->dev->flags & IFF_UP) || !netif_carrier_ok(skb->dev))
return -ENETDOWN;
if (skb->len > skb->dev->mtu) {
if (sock->sk_type == SOCK_SEQPACKET)
@@ -393,15 +378,14 @@ static int afiucv_hs_send(struct iucv_message *imsg, struct sock *sock,
return -ENOMEM;
skb_queue_tail(&iucv->send_skb_q, nskb);
err = dev_queue_xmit(skb);
- if (err) {
+ if (net_xmit_eval(err)) {
skb_unlink(nskb, &iucv->send_skb_q);
- dev_put(nskb->dev);
kfree_skb(nskb);
} else {
atomic_sub(confirm_recv, &iucv->msg_recv);
WARN_ON(atomic_read(&iucv->msg_recv) < 0);
}
- return err;
+ return net_xmit_eval(err);
}
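For reference — dev_queue_xmit() may return NET_XMIT_CN (congestion notification), which is not a transmit failure. net_xmit_eval() folds that case into success, which is why the unlink-and-free path above now runs only on real errors. A one-line sketch, assuming the mainline definition in linux/netdevice.h:

    /* #define net_xmit_eval(e)  ((e) == NET_XMIT_CN ? 0 : (e))
     * NET_XMIT_CN is reported to the caller as 0: the skb was queued,
     * so the shadow copy must stay on send_skb_q awaiting tx-done. */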
static struct sock *__iucv_get_sock_by_name(char *nm)
@@ -419,7 +403,19 @@ static struct sock *__iucv_get_sock_by_name(char *nm)
static void iucv_sock_destruct(struct sock *sk)
{
skb_queue_purge(&sk->sk_receive_queue);
- skb_queue_purge(&sk->sk_write_queue);
+ skb_queue_purge(&sk->sk_error_queue);
+
+ sk_mem_reclaim(sk);
+
+ if (!sock_flag(sk, SOCK_DEAD)) {
+ pr_err("Attempt to release alive iucv socket %p\n", sk);
+ return;
+ }
+
+ WARN_ON(atomic_read(&sk->sk_rmem_alloc));
+ WARN_ON(atomic_read(&sk->sk_wmem_alloc));
+ WARN_ON(sk->sk_wmem_queued);
+ WARN_ON(sk->sk_forward_alloc);
}
/* Cleanup Listen */
@@ -447,14 +443,48 @@ static void iucv_sock_kill(struct sock *sk)
sock_put(sk);
}
+/* Terminate an IUCV path */
+static void iucv_sever_path(struct sock *sk, int with_user_data)
+{
+ unsigned char user_data[16];
+ struct iucv_sock *iucv = iucv_sk(sk);
+ struct iucv_path *path = iucv->path;
+
+ if (iucv->path) {
+ iucv->path = NULL;
+ if (with_user_data) {
+ low_nmcpy(user_data, iucv->src_name);
+ high_nmcpy(user_data, iucv->dst_name);
+ ASCEBC(user_data, sizeof(user_data));
+ pr_iucv->path_sever(path, user_data);
+ } else
+ pr_iucv->path_sever(path, NULL);
+ iucv_path_free(path);
+ }
+}
+
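iucv_sever_path() uses the snapshot-and-unpublish teardown idiom: iucv->path is cleared before the (potentially slow) sever call, so any later caller sees NULL and returns without severing or freeing twice. A generic sketch of the idiom — obj, owner and destroy are hypothetical names, not part of this patch:

    struct obj *o = owner->obj;     /* snapshot the pointer            */
    if (o) {
            owner->obj = NULL;      /* unpublish before teardown       */
            destroy(o);             /* re-entrant callers now see NULL */
    }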
+/* Send controlling flags through an IUCV socket for HIPER transport */
+static int iucv_send_ctrl(struct sock *sk, u8 flags)
+{
+ int err = 0;
+ int blen;
+ struct sk_buff *skb;
+
+ blen = sizeof(struct af_iucv_trans_hdr) + ETH_HLEN;
+ skb = sock_alloc_send_skb(sk, blen, 1, &err);
+ if (skb) {
+ skb_reserve(skb, blen);
+ err = afiucv_hs_send(NULL, sk, skb, flags);
+ }
+ return err;
+}
+
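iucv_send_ctrl() builds a header-only control frame: sock_alloc_send_skb() allocates blen bytes and skb_reserve(skb, blen) turns all of it into headroom, leaving skb->len == 0. afiucv_hs_send() can then push the transport header into that reserved space; the flags argument (SYN, FIN, WIN or SHT) selects the frame type. A hedged sketch of the buffer layout, assuming afiucv_hs_send() fills the header via skb_push() as the mainline code does:

    /* after skb_reserve(skb, blen):
     *   |<--------- headroom = blen --------->|    len = 0
     * after skb_push(skb, sizeof(struct af_iucv_trans_hdr)):
     *   |<-- ETH_HLEN -->|<--- trans_hdr ---->|    len = hdr size
     * (ETH_HLEN stays as headroom for the device's ethernet header) */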
/* Close an IUCV socket */
static void iucv_sock_close(struct sock *sk)
{
- unsigned char user_data[16];
struct iucv_sock *iucv = iucv_sk(sk);
unsigned long timeo;
- int err, blen;
- struct sk_buff *skb;
+ int err = 0;
lock_sock(sk);
@@ -465,14 +495,7 @@ static void iucv_sock_close(struct sock *sk)
case IUCV_CONNECTED:
if (iucv->transport == AF_IUCV_TRANS_HIPER) {
- /* send fin */
- blen = sizeof(struct af_iucv_trans_hdr) + ETH_HLEN;
- skb = sock_alloc_send_skb(sk, blen, 1, &err);
- if (skb) {
- skb_reserve(skb, blen);
- err = afiucv_hs_send(NULL, sk, skb,
- AF_IUCV_FLAG_FIN);
- }
+ err = iucv_send_ctrl(sk, AF_IUCV_FLAG_FIN);
sk->sk_state = IUCV_DISCONN;
sk->sk_state_change(sk);
}
@@ -480,7 +503,7 @@ static void iucv_sock_close(struct sock *sk)
sk->sk_state = IUCV_CLOSING;
sk->sk_state_change(sk);
- if (!skb_queue_empty(&iucv->send_skb_q)) {
+ if (!err && !skb_queue_empty(&iucv->send_skb_q)) {
if (sock_flag(sk, SOCK_LINGER) && sk->sk_lingertime)
timeo = sk->sk_lingertime;
else
@@ -494,25 +517,20 @@ static void iucv_sock_close(struct sock *sk)
sk->sk_state = IUCV_CLOSED;
sk->sk_state_change(sk);
- if (iucv->path) {
- low_nmcpy(user_data, iucv->src_name);
- high_nmcpy(user_data, iucv->dst_name);
- ASCEBC(user_data, sizeof(user_data));
- pr_iucv->path_sever(iucv->path, user_data);
- iucv_path_free(iucv->path);
- iucv->path = NULL;
- }
-
sk->sk_err = ECONNRESET;
sk->sk_state_change(sk);
- iucv_skb_queue_purge(&iucv->send_skb_q);
+ skb_queue_purge(&iucv->send_skb_q);
skb_queue_purge(&iucv->backlog_skb_q);
- break;
- default:
- /* nothing to do here */
- break;
+ default: /* fall through */
+ iucv_sever_path(sk, 1);
+ }
+
+ if (iucv->hs_dev) {
+ dev_put(iucv->hs_dev);
+ iucv->hs_dev = NULL;
+ sk->sk_bound_dev_if = 0;
}
/* mark socket for deletion by iucv_sock_kill() */
@@ -706,7 +724,6 @@ static int iucv_sock_bind(struct socket *sock, struct sockaddr *addr,
goto done_unlock;
/* Bind the socket */
-
if (pr_iucv)
if (!memcmp(sa->siucv_user_id, iucv_userid, 8))
goto vm_bind; /* VM IUCV transport */
@@ -720,6 +737,8 @@ static int iucv_sock_bind(struct socket *sock, struct sockaddr *addr,
memcpy(iucv->src_name, sa->siucv_name, 8);
memcpy(iucv->src_user_id, sa->siucv_user_id, 8);
sk->sk_bound_dev_if = dev->ifindex;
+ iucv->hs_dev = dev;
+ dev_hold(dev);
sk->sk_state = IUCV_BOUND;
iucv->transport = AF_IUCV_TRANS_HIPER;
if (!iucv->msglimit)
@@ -780,26 +799,6 @@ static int iucv_sock_autobind(struct sock *sk)
return err;
}
-static int afiucv_hs_connect(struct socket *sock)
-{
- struct sock *sk = sock->sk;
- struct sk_buff *skb;
- int blen = sizeof(struct af_iucv_trans_hdr) + ETH_HLEN;
- int err = 0;
-
- /* send syn */
- skb = sock_alloc_send_skb(sk, blen, 1, &err);
- if (!skb) {
- err = -ENOMEM;
- goto done;
- }
- skb->dev = NULL;
- skb_reserve(skb, blen);
- err = afiucv_hs_send(NULL, sk, skb, AF_IUCV_FLAG_SYN);
-done:
- return err;
-}
-
static int afiucv_path_connect(struct socket *sock, struct sockaddr *addr)
{
struct sockaddr_iucv *sa = (struct sockaddr_iucv *) addr;
@@ -880,7 +879,7 @@ static int iucv_sock_connect(struct socket *sock, struct sockaddr *addr,
memcpy(iucv->dst_name, sa->siucv_name, 8);
if (iucv->transport == AF_IUCV_TRANS_HIPER)
- err = afiucv_hs_connect(sock);
+ err = iucv_send_ctrl(sock->sk, AF_IUCV_FLAG_SYN);
else
err = afiucv_path_connect(sock, addr);
if (err)
@@ -894,11 +893,8 @@ static int iucv_sock_connect(struct socket *sock, struct sockaddr *addr,
if (sk->sk_state == IUCV_DISCONN || sk->sk_state == IUCV_CLOSED)
err = -ECONNREFUSED;
- if (err && iucv->transport == AF_IUCV_TRANS_IUCV) {
- pr_iucv->path_sever(iucv->path, NULL);
- iucv_path_free(iucv->path);
- iucv->path = NULL;
- }
+ if (err && iucv->transport == AF_IUCV_TRANS_IUCV)
+ iucv_sever_path(sk, 0);
done:
release_sock(sk);
@@ -1124,8 +1120,10 @@ static int iucv_sock_sendmsg(struct kiocb *iocb, struct socket *sock,
noblock, &err);
else
skb = sock_alloc_send_skb(sk, len, noblock, &err);
- if (!skb)
+ if (!skb) {
+ err = -ENOMEM;
goto out;
+ }
if (iucv->transport == AF_IUCV_TRANS_HIPER)
skb_reserve(skb, sizeof(struct af_iucv_trans_hdr) + ETH_HLEN);
if (memcpy_fromiovec(skb_put(skb, len), msg->msg_iov, len)) {
@@ -1148,6 +1146,7 @@ static int iucv_sock_sendmsg(struct kiocb *iocb, struct socket *sock,
/* increment and save iucv message tag for msg_completion cbk */
txmsg.tag = iucv->send_tag++;
memcpy(CB_TAG(skb), &txmsg.tag, CB_TAG_LEN);
+
if (iucv->transport == AF_IUCV_TRANS_HIPER) {
atomic_inc(&iucv->msg_sent);
err = afiucv_hs_send(&txmsg, sk, skb, 0);
@@ -1202,8 +1201,6 @@ release:
return len;
fail:
- if (skb->dev)
- dev_put(skb->dev);
kfree_skb(skb);
out:
release_sock(sk);
@@ -1332,8 +1329,7 @@ static int iucv_sock_recvmsg(struct kiocb *iocb, struct socket *sock,
struct sock *sk = sock->sk;
struct iucv_sock *iucv = iucv_sk(sk);
unsigned int copied, rlen;
- struct sk_buff *skb, *rskb, *cskb, *sskb;
- int blen;
+ struct sk_buff *skb, *rskb, *cskb;
int err = 0;
if ((sk->sk_state == IUCV_DISCONN) &&
@@ -1356,6 +1352,8 @@ static int iucv_sock_recvmsg(struct kiocb *iocb, struct socket *sock,
rlen = skb->len; /* real length of skb */
copied = min_t(unsigned int, rlen, len);
+ if (!rlen)
+ sk->sk_shutdown = sk->sk_shutdown | RCV_SHUTDOWN;
cskb = skb;
if (skb_copy_datagram_iovec(cskb, 0, msg->msg_iov, copied)) {
@@ -1396,7 +1394,14 @@ static int iucv_sock_recvmsg(struct kiocb *iocb, struct socket *sock,
}
kfree_skb(skb);
- atomic_inc(&iucv->msg_recv);
+ if (iucv->transport == AF_IUCV_TRANS_HIPER) {
+ atomic_inc(&iucv->msg_recv);
+ if (atomic_read(&iucv->msg_recv) > iucv->msglimit) {
+ WARN_ON(1);
+ iucv_sock_close(sk);
+ return -EFAULT;
+ }
+ }
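This guard closes the loop on the HIPER transport's credit-style window, whose pieces all appear in this patch: the sender counts messages in flight in msg_sent, the receiver counts consumed messages in msg_recv and grants credits back with AF_IUCV_FLAG_WIN once half the window is used, and afiucv_hs_send() subtracts the confirmed credits from msg_recv. A condensed sketch of the invariant (field names from the surrounding code):

    /* sender:   atomic_inc(&iucv->msg_sent) per data frame;
     *           sending blocks while msg_sent >= msglimit
     * receiver: atomic_inc(&iucv->msg_recv) per consumed frame;
     *           sends AF_IUCV_FLAG_WIN once msg_recv >= msglimit / 2
     * so 0 <= msg_recv <= msglimit holds for a well-behaved peer;
     * msg_recv > msglimit signals a protocol violation -> close. */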
/* Queue backlog skbs */
spin_lock_bh(&iucv->message_q.lock);
@@ -1415,15 +1420,7 @@ static int iucv_sock_recvmsg(struct kiocb *iocb, struct socket *sock,
iucv_process_message_q(sk);
if (atomic_read(&iucv->msg_recv) >=
iucv->msglimit / 2) {
- /* send WIN to peer */
- blen = sizeof(struct af_iucv_trans_hdr) +
- ETH_HLEN;
- sskb = sock_alloc_send_skb(sk, blen, 1, &err);
- if (sskb) {
- skb_reserve(sskb, blen);
- err = afiucv_hs_send(NULL, sk, sskb,
- AF_IUCV_FLAG_WIN);
- }
+ err = iucv_send_ctrl(sk, AF_IUCV_FLAG_WIN);
if (err) {
sk->sk_state = IUCV_DISCONN;
sk->sk_state_change(sk);
@@ -1486,7 +1483,7 @@ unsigned int iucv_sock_poll(struct file *file, struct socket *sock,
if (sk->sk_state == IUCV_DISCONN)
mask |= POLLIN;
- if (sock_writeable(sk))
+ if (sock_writeable(sk) && iucv_below_msglim(sk))
mask |= POLLOUT | POLLWRNORM | POLLWRBAND;
else
set_bit(SOCK_ASYNC_NOSPACE, &sk->sk_socket->flags);
@@ -1508,42 +1505,47 @@ static int iucv_sock_shutdown(struct socket *sock, int how)
lock_sock(sk);
switch (sk->sk_state) {
+ case IUCV_LISTEN:
case IUCV_DISCONN:
case IUCV_CLOSING:
case IUCV_CLOSED:
err = -ENOTCONN;
goto fail;
-
default:
- sk->sk_shutdown |= how;
break;
}
if (how == SEND_SHUTDOWN || how == SHUTDOWN_MASK) {
- txmsg.class = 0;
- txmsg.tag = 0;
- err = pr_iucv->message_send(iucv->path, &txmsg, IUCV_IPRMDATA,
- 0, (void *) iprm_shutdown, 8);
- if (err) {
- switch (err) {
- case 1:
- err = -ENOTCONN;
- break;
- case 2:
- err = -ECONNRESET;
- break;
- default:
- err = -ENOTCONN;
- break;
+ if (iucv->transport == AF_IUCV_TRANS_IUCV) {
+ txmsg.class = 0;
+ txmsg.tag = 0;
+ err = pr_iucv->message_send(iucv->path, &txmsg,
+ IUCV_IPRMDATA, 0, (void *) iprm_shutdown, 8);
+ if (err) {
+ switch (err) {
+ case 1:
+ err = -ENOTCONN;
+ break;
+ case 2:
+ err = -ECONNRESET;
+ break;
+ default:
+ err = -ENOTCONN;
+ break;
+ }
}
- }
+ } else
+ iucv_send_ctrl(sk, AF_IUCV_FLAG_SHT);
}
+ sk->sk_shutdown |= how;
if (how == RCV_SHUTDOWN || how == SHUTDOWN_MASK) {
- err = pr_iucv->path_quiesce(iucv->path, NULL);
- if (err)
- err = -ENOTCONN;
-
+ if (iucv->transport == AF_IUCV_TRANS_IUCV) {
+ err = pr_iucv->path_quiesce(iucv->path, NULL);
+ if (err)
+ err = -ENOTCONN;
+/* skb_queue_purge(&sk->sk_receive_queue); */
+ }
skb_queue_purge(&sk->sk_receive_queue);
}
@@ -1565,13 +1567,6 @@ static int iucv_sock_release(struct socket *sock)
iucv_sock_close(sk);
- /* Unregister with IUCV base support */
- if (iucv_sk(sk)->path) {
- pr_iucv->path_sever(iucv_sk(sk)->path, NULL);
- iucv_path_free(iucv_sk(sk)->path);
- iucv_sk(sk)->path = NULL;
- }
-
sock_orphan(sk);
iucv_sock_kill(sk);
return err;
@@ -1633,7 +1628,8 @@ static int iucv_sock_getsockopt(struct socket *sock, int level, int optname,
{
struct sock *sk = sock->sk;
struct iucv_sock *iucv = iucv_sk(sk);
- int val, len;
+ unsigned int val;
+ int len;
if (level != SOL_IUCV)
return -ENOPROTOOPT;
@@ -1656,6 +1652,13 @@ static int iucv_sock_getsockopt(struct socket *sock, int level, int optname,
: iucv->msglimit; /* default */
release_sock(sk);
break;
+ case SO_MSGSIZE:
+ if (sk->sk_state == IUCV_OPEN)
+ return -EBADFD;
+ val = (iucv->hs_dev) ? iucv->hs_dev->mtu -
+ sizeof(struct af_iucv_trans_hdr) - ETH_HLEN :
+ 0x7fffffff;
+ break;
default:
return -ENOPROTOOPT;
}
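A worked example of the new SO_MSGSIZE option, using an illustrative MTU (real HiperSockets MTUs depend on configuration; the header sizes are left symbolic, not measured):

    /* HIPER transport, hypothetical hs_dev->mtu of 32768:
     *   val = 32768 - sizeof(struct af_iucv_trans_hdr) - ETH_HLEN
     * VM IUCV transport (hs_dev == NULL):
     *   val = 0x7fffffff, i.e. effectively no per-message size cap */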
@@ -1750,8 +1753,7 @@ static int iucv_callback_connreq(struct iucv_path *path,
path->msglim = iucv->msglimit;
err = pr_iucv->path_accept(path, &af_iucv_handler, nuser_data, nsk);
if (err) {
- err = pr_iucv->path_sever(path, user_data);
- iucv_path_free(path);
+ iucv_sever_path(nsk, 1);
iucv_sock_kill(nsk);
goto fail;
}
@@ -1828,6 +1830,7 @@ static void iucv_callback_txdone(struct iucv_path *path,
struct sk_buff *list_skb = list->next;
unsigned long flags;
+ bh_lock_sock(sk);
if (!skb_queue_empty(list)) {
spin_lock_irqsave(&list->lock, flags);
@@ -1849,7 +1852,6 @@ static void iucv_callback_txdone(struct iucv_path *path,
iucv_sock_wake_msglim(sk);
}
}
- BUG_ON(!this);
if (sk->sk_state == IUCV_CLOSING) {
if (skb_queue_empty(&iucv_sk(sk)->send_skb_q)) {
@@ -1857,6 +1859,7 @@ static void iucv_callback_txdone(struct iucv_path *path,
sk->sk_state_change(sk);
}
}
+ bh_unlock_sock(sk);
}
@@ -1864,9 +1867,15 @@ static void iucv_callback_connrej(struct iucv_path *path, u8 ipuser[16])
{
struct sock *sk = path->private;
+ if (sk->sk_state == IUCV_CLOSED)
+ return;
+
+ bh_lock_sock(sk);
+ iucv_sever_path(sk, 1);
sk->sk_state = IUCV_DISCONN;
sk->sk_state_change(sk);
+ bh_unlock_sock(sk);
}
/* called if the other communication side shuts down its RECV direction;
@@ -1954,6 +1963,8 @@ static int afiucv_hs_callback_syn(struct sock *sk, struct sk_buff *skb)
memcpy(niucv->src_name, iucv->src_name, 8);
memcpy(niucv->src_user_id, iucv->src_user_id, 8);
nsk->sk_bound_dev_if = sk->sk_bound_dev_if;
+ niucv->hs_dev = iucv->hs_dev;
+ dev_hold(niucv->hs_dev);
afiucv_swap_src_dest(skb);
trans_hdr->flags = AF_IUCV_FLAG_SYN | AF_IUCV_FLAG_ACK;
trans_hdr->window = niucv->msglimit;
@@ -2022,12 +2033,15 @@ static int afiucv_hs_callback_fin(struct sock *sk, struct sk_buff *skb)
struct iucv_sock *iucv = iucv_sk(sk);
/* other end of connection closed */
- if (iucv) {
- bh_lock_sock(sk);
+ if (!iucv)
+ goto out;
+ bh_lock_sock(sk);
+ if (sk->sk_state == IUCV_CONNECTED) {
sk->sk_state = IUCV_DISCONN;
sk->sk_state_change(sk);
- bh_unlock_sock(sk);
}
+ bh_unlock_sock(sk);
+out:
kfree_skb(skb);
return NET_RX_SUCCESS;
}
@@ -2069,8 +2083,13 @@ static int afiucv_hs_callback_rx(struct sock *sk, struct sk_buff *skb)
return NET_RX_SUCCESS;
}
+ if (sk->sk_shutdown & RCV_SHUTDOWN) {
+ kfree_skb(skb);
+ return NET_RX_SUCCESS;
+ }
+
/* write stuff from iucv_msg to skb cb */
- if (skb->len <= sizeof(struct af_iucv_trans_hdr)) {
+ if (skb->len < sizeof(struct af_iucv_trans_hdr)) {
kfree_skb(skb);
return NET_RX_SUCCESS;
}
@@ -2172,11 +2191,14 @@ static int afiucv_hs_rcv(struct sk_buff *skb, struct net_device *dev,
break;
case (AF_IUCV_FLAG_WIN):
err = afiucv_hs_callback_win(sk, skb);
- if (skb->len > sizeof(struct af_iucv_trans_hdr))
- err = afiucv_hs_callback_rx(sk, skb);
- else
- kfree(skb);
- break;
+ if (skb->len == sizeof(struct af_iucv_trans_hdr)) {
+ kfree_skb(skb);
+ break;
+ }
+ /* fall through and receive non-zero length data */
+ case (AF_IUCV_FLAG_SHT):
+ /* shutdown request */
+ /* fall through and receive zero length data */
case 0:
/* plain data frame */
memcpy(CB_TRGCLS(skb), &trans_hdr->iucv_hdr.class,
@@ -2202,65 +2224,64 @@ static void afiucv_hs_callback_txnotify(struct sk_buff *skb,
struct iucv_sock *iucv = NULL;
struct sk_buff_head *list;
struct sk_buff *list_skb;
- struct sk_buff *this = NULL;
+ struct sk_buff *nskb;
unsigned long flags;
struct hlist_node *node;
- read_lock(&iucv_sk_list.lock);
+ read_lock_irqsave(&iucv_sk_list.lock, flags);
sk_for_each(sk, node, &iucv_sk_list.head)
if (sk == isk) {
iucv = iucv_sk(sk);
break;
}
- read_unlock(&iucv_sk_list.lock);
+ read_unlock_irqrestore(&iucv_sk_list.lock, flags);
- if (!iucv)
+ if (!iucv || sock_flag(sk, SOCK_ZAPPED))
return;
- bh_lock_sock(sk);
list = &iucv->send_skb_q;
- list_skb = list->next;
+ spin_lock_irqsave(&list->lock, flags);
if (skb_queue_empty(list))
goto out_unlock;
-
- spin_lock_irqsave(&list->lock, flags);
+ list_skb = list->next;
+ nskb = list_skb->next;
while (list_skb != (struct sk_buff *)list) {
if (skb_shinfo(list_skb) == skb_shinfo(skb)) {
- this = list_skb;
switch (n) {
case TX_NOTIFY_OK:
- __skb_unlink(this, list);
+ __skb_unlink(list_skb, list);
+ kfree_skb(list_skb);
iucv_sock_wake_msglim(sk);
- dev_put(this->dev);
- kfree_skb(this);
break;
case TX_NOTIFY_PENDING:
atomic_inc(&iucv->pendings);
break;
case TX_NOTIFY_DELAYED_OK:
- __skb_unlink(this, list);
+ __skb_unlink(list_skb, list);
atomic_dec(&iucv->pendings);
if (atomic_read(&iucv->pendings) <= 0)
iucv_sock_wake_msglim(sk);
- dev_put(this->dev);
- kfree_skb(this);
+ kfree_skb(list_skb);
break;
case TX_NOTIFY_UNREACHABLE:
case TX_NOTIFY_DELAYED_UNREACHABLE:
case TX_NOTIFY_TPQFULL: /* not yet used */
case TX_NOTIFY_GENERALERROR:
case TX_NOTIFY_DELAYED_GENERALERROR:
- __skb_unlink(this, list);
- dev_put(this->dev);
- kfree_skb(this);
- sk->sk_state = IUCV_DISCONN;
- sk->sk_state_change(sk);
+ __skb_unlink(list_skb, list);
+ kfree_skb(list_skb);
+ if (sk->sk_state == IUCV_CONNECTED) {
+ sk->sk_state = IUCV_DISCONN;
+ sk->sk_state_change(sk);
+ }
break;
}
break;
}
- list_skb = list_skb->next;
+ list_skb = nskb;
+ nskb = nskb->next;
}
+out_unlock:
spin_unlock_irqrestore(&list->lock, flags);
if (sk->sk_state == IUCV_CLOSING) {
@@ -2270,9 +2291,45 @@ static void afiucv_hs_callback_txnotify(struct sk_buff *skb,
}
}
-out_unlock:
- bh_unlock_sock(sk);
}
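The rewritten queue walk caches the successor (nskb) before the loop body can unlink and free list_skb — the same discipline as list_for_each_entry_safe(), open-coded here because sk_buff queues carry their own next/prev links rather than a list_head. A generic sketch (cur and nxt are illustrative names):

    cur = list->next;
    nxt = cur->next;                       /* save before possible free */
    while (cur != (struct sk_buff *)list) {
            /* body may __skb_unlink(cur, list) and kfree_skb(cur) */
            cur = nxt;
            nxt = nxt->next;
    }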
+
+/*
+ * afiucv_netdev_event: handle netdev notifier chain events
+ */
+static int afiucv_netdev_event(struct notifier_block *this,
+ unsigned long event, void *ptr)
+{
+ struct net_device *event_dev = (struct net_device *)ptr;
+ struct hlist_node *node;
+ struct sock *sk;
+ struct iucv_sock *iucv;
+
+ switch (event) {
+ case NETDEV_REBOOT:
+ case NETDEV_GOING_DOWN:
+ sk_for_each(sk, node, &iucv_sk_list.head) {
+ iucv = iucv_sk(sk);
+ if ((iucv->hs_dev == event_dev) &&
+ (sk->sk_state == IUCV_CONNECTED)) {
+ if (event == NETDEV_GOING_DOWN)
+ iucv_send_ctrl(sk, AF_IUCV_FLAG_FIN);
+ sk->sk_state = IUCV_DISCONN;
+ sk->sk_state_change(sk);
+ }
+ }
+ break;
+ case NETDEV_DOWN:
+ case NETDEV_UNREGISTER:
+ default:
+ break;
+ }
+ return NOTIFY_DONE;
+}
+
+static struct notifier_block afiucv_netdev_notifier = {
+ .notifier_call = afiucv_netdev_event,
+};
+
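For context, the netdev notifier chain used above can be exercised from any module. A minimal, self-contained sketch for this kernel vintage — where the callback's ptr argument is the net_device itself; later kernels pass a struct netdev_notifier_info instead — with all demo_* names being illustrative:

    #include <linux/module.h>
    #include <linux/netdevice.h>
    #include <linux/notifier.h>

    static int demo_netdev_event(struct notifier_block *this,
                                 unsigned long event, void *ptr)
    {
            struct net_device *dev = (struct net_device *)ptr;

            if (event == NETDEV_GOING_DOWN)
                    pr_info("demo: %s is going down\n", dev->name);
            return NOTIFY_DONE;            /* observe, never veto */
    }

    static struct notifier_block demo_notifier = {
            .notifier_call = demo_netdev_event,
    };

    static int __init demo_init(void)
    {
            return register_netdevice_notifier(&demo_notifier);
    }

    static void __exit demo_exit(void)
    {
            unregister_netdevice_notifier(&demo_notifier);
    }

    module_init(demo_init);
    module_exit(demo_exit);
    MODULE_LICENSE("GPL");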
static const struct proto_ops iucv_sock_ops = {
.family = PF_IUCV,
.owner = THIS_MODULE,
@@ -2372,7 +2429,8 @@ static int __init afiucv_init(void)
err = afiucv_iucv_init();
if (err)
goto out_sock;
- }
+ } else
+ register_netdevice_notifier(&afiucv_netdev_notifier);
dev_add_pack(&iucv_packet_type);
return 0;
@@ -2393,7 +2451,8 @@ static void __exit afiucv_exit(void)
driver_unregister(&af_iucv_driver);
pr_iucv->iucv_unregister(&af_iucv_handler, 0);
symbol_put(iucv_if);
- }
+ } else
+ unregister_netdevice_notifier(&afiucv_netdev_notifier);
dev_remove_pack(&iucv_packet_type);
sock_unregister(PF_IUCV);
proto_unregister(&iucv_proto);