author     Russell King <rmk+kernel@arm.linux.org.uk>   2009-09-19 13:47:57 +0100
committer  Russell King <rmk+kernel@arm.linux.org.uk>   2009-09-19 13:47:57 +0100
commit     40d743b8c16a8cf6e30c1d941aa6147f9550ea75 (patch)
tree       9fcdf9a06b18a275253048d1ea7c9803cec38845 /net/iucv
parent     7da18afa423f167e7ef3c9728e584d8bf05bd55a (diff)
parent     83e686ea0291ee93b87dcdc00b96443b80de56c9 (diff)
Merge branch 'for-rmk' of git://linux-arm.org/linux-2.6
Diffstat (limited to 'net/iucv')
-rw-r--r--  net/iucv/af_iucv.c  33
-rw-r--r--  net/iucv/iucv.c     38
2 files changed, 49 insertions, 22 deletions
diff --git a/net/iucv/af_iucv.c b/net/iucv/af_iucv.c
index 49c15b48408..d985d163dcf 100644
--- a/net/iucv/af_iucv.c
+++ b/net/iucv/af_iucv.c
@@ -34,7 +34,7 @@
 static char iucv_userid[80];
 
-static struct proto_ops iucv_sock_ops;
+static const struct proto_ops iucv_sock_ops;
 
 static struct proto iucv_proto = {
 	.name		= "AF_IUCV",
@@ -59,8 +59,8 @@ do {									\
 	DEFINE_WAIT(__wait);						\
 	long __timeo = timeo;						\
 	ret = 0;							\
+	prepare_to_wait(sk->sk_sleep, &__wait, TASK_INTERRUPTIBLE);	\
 	while (!(condition)) {						\
-		prepare_to_wait(sk->sk_sleep, &__wait, TASK_INTERRUPTIBLE); \
 		if (!__timeo) {						\
 			ret = -EAGAIN;					\
 			break;						\
@@ -361,10 +361,9 @@ static void iucv_sock_cleanup_listen(struct sock *parent)
 	}
 
 	parent->sk_state = IUCV_CLOSED;
-	sock_set_flag(parent, SOCK_ZAPPED);
 }
 
-/* Kill socket */
+/* Kill socket (only if zapped and orphaned) */
 static void iucv_sock_kill(struct sock *sk)
 {
 	if (!sock_flag(sk, SOCK_ZAPPED) || sk->sk_socket)
@@ -426,17 +425,18 @@ static void iucv_sock_close(struct sock *sk)
 		skb_queue_purge(&iucv->send_skb_q);
 		skb_queue_purge(&iucv->backlog_skb_q);
-
-		sock_set_flag(sk, SOCK_ZAPPED);
 		break;
 
 	default:
 		sock_set_flag(sk, SOCK_ZAPPED);
+		/* nothing to do here */
 		break;
 	}
 
+	/* mark socket for deletion by iucv_sock_kill() */
+	sock_set_flag(sk, SOCK_ZAPPED);
+
 	release_sock(sk);
-	iucv_sock_kill(sk);
 }
 
 static void iucv_sock_init(struct sock *sk, struct sock *parent)
@@ -569,6 +569,7 @@ struct sock *iucv_accept_dequeue(struct sock *parent, struct socket *newsock)
 
 		if (sk->sk_state == IUCV_CONNECTED ||
 		    sk->sk_state == IUCV_SEVERED ||
+		    sk->sk_state == IUCV_DISCONN ||	/* due to PM restore */
 		    !newsock) {
 			iucv_accept_unlink(sk);
 			if (newsock)
@@ -1035,6 +1036,10 @@ out:
 	return err;
 }
 
+/* iucv_fragment_skb() - Fragment a single IUCV message into multiple skb's
+ *
+ * Locking: must be called with message_q.lock held
+ */
 static int iucv_fragment_skb(struct sock *sk, struct sk_buff *skb, int len)
 {
 	int dataleft, size, copied = 0;
@@ -1069,6 +1074,10 @@ static int iucv_fragment_skb(struct sock *sk, struct sk_buff *skb, int len)
 	return 0;
 }
 
+/* iucv_process_message() - Receive a single outstanding IUCV message
+ *
+ * Locking: must be called with message_q.lock held
+ */
 static void iucv_process_message(struct sock *sk, struct sk_buff *skb,
 				 struct iucv_path *path,
 				 struct iucv_message *msg)
@@ -1119,6 +1128,10 @@ static void iucv_process_message(struct sock *sk, struct sk_buff *skb,
 		skb_queue_head(&iucv_sk(sk)->backlog_skb_q, skb);
 }
 
+/* iucv_process_message_q() - Process outstanding IUCV messages
+ *
+ * Locking: must be called with message_q.lock held
+ */
 static void iucv_process_message_q(struct sock *sk)
 {
 	struct iucv_sock *iucv = iucv_sk(sk);
@@ -1209,6 +1222,7 @@ static int iucv_sock_recvmsg(struct kiocb *iocb, struct socket *sock,
 		kfree_skb(skb);
 
 		/* Queue backlog skbs */
+		spin_lock_bh(&iucv->message_q.lock);
 		rskb = skb_dequeue(&iucv->backlog_skb_q);
 		while (rskb) {
 			if (sock_queue_rcv_skb(sk, rskb)) {
@@ -1220,11 +1234,10 @@ static int iucv_sock_recvmsg(struct kiocb *iocb, struct socket *sock,
 			}
 		}
 		if (skb_queue_empty(&iucv->backlog_skb_q)) {
-			spin_lock_bh(&iucv->message_q.lock);
 			if (!list_empty(&iucv->message_q.list))
 				iucv_process_message_q(sk);
-			spin_unlock_bh(&iucv->message_q.lock);
 		}
+		spin_unlock_bh(&iucv->message_q.lock);
 	}
 
 done:
@@ -1682,7 +1695,7 @@ static void iucv_callback_shutdown(struct iucv_path *path, u8 ipuser[16])
 	bh_unlock_sock(sk);
 }
 
-static struct proto_ops iucv_sock_ops = {
+static const struct proto_ops iucv_sock_ops = {
 	.family		= PF_IUCV,
 	.owner		= THIS_MODULE,
 	.release	= iucv_sock_release,
diff --git a/net/iucv/iucv.c b/net/iucv/iucv.c
index c833481d32e..3973d0e61e5 100644
--- a/net/iucv/iucv.c
+++ b/net/iucv/iucv.c
@@ -79,6 +79,14 @@ static int iucv_bus_match(struct device *dev, struct device_driver *drv)
 	return 0;
 }
 
+enum iucv_pm_states {
+	IUCV_PM_INITIAL = 0,
+	IUCV_PM_FREEZING = 1,
+	IUCV_PM_THAWING = 2,
+	IUCV_PM_RESTORING = 3,
+};
+static enum iucv_pm_states iucv_pm_state;
+
 static int iucv_pm_prepare(struct device *);
 static void iucv_pm_complete(struct device *);
 static int iucv_pm_freeze(struct device *);
@@ -354,7 +362,7 @@ static int iucv_query_maxconn(void)
 		"	srl	%0,28\n"
 		: "=d" (ccode), "+d" (reg0), "+d" (reg1) : : "cc");
 	if (ccode == 0)
-		iucv_max_pathid = reg0;
+		iucv_max_pathid = reg1;
 	kfree(param);
 	return ccode ? -EPERM : 0;
 }
@@ -856,7 +864,7 @@ int iucv_path_accept(struct iucv_path *path, struct iucv_handler *handler,
 	int rc;
 
 	local_bh_disable();
-	if (!cpu_isset(smp_processor_id(), iucv_buffer_cpumask)) {
+	if (cpus_empty(iucv_buffer_cpumask)) {
 		rc = -EIO;
 		goto out;
 	}
@@ -905,7 +913,7 @@ int iucv_path_connect(struct iucv_path *path, struct iucv_handler *handler,
 
 	spin_lock_bh(&iucv_table_lock);
 	iucv_cleanup_queue();
-	if (!cpu_isset(smp_processor_id(), iucv_buffer_cpumask)) {
+	if (cpus_empty(iucv_buffer_cpumask)) {
 		rc = -EIO;
 		goto out;
 	}
@@ -965,7 +973,7 @@ int iucv_path_quiesce(struct iucv_path *path, u8 userdata[16])
 	int rc;
 
 	local_bh_disable();
-	if (!cpu_isset(smp_processor_id(), iucv_buffer_cpumask)) {
+	if (cpus_empty(iucv_buffer_cpumask)) {
 		rc = -EIO;
 		goto out;
 	}
@@ -997,7 +1005,7 @@ int iucv_path_resume(struct iucv_path *path, u8 userdata[16])
 	int rc;
 
 	local_bh_disable();
-	if (!cpu_isset(smp_processor_id(), iucv_buffer_cpumask)) {
+	if (cpus_empty(iucv_buffer_cpumask)) {
 		rc = -EIO;
 		goto out;
 	}
@@ -1026,7 +1034,7 @@ int iucv_path_sever(struct iucv_path *path, u8 userdata[16])
 	int rc;
 
 	preempt_disable();
-	if (!cpu_isset(smp_processor_id(), iucv_buffer_cpumask)) {
+	if (cpus_empty(iucv_buffer_cpumask)) {
 		rc = -EIO;
 		goto out;
 	}
@@ -1060,7 +1068,7 @@ int iucv_message_purge(struct iucv_path *path, struct iucv_message *msg,
 	int rc;
 
 	local_bh_disable();
-	if (!cpu_isset(smp_processor_id(), iucv_buffer_cpumask)) {
+	if (cpus_empty(iucv_buffer_cpumask)) {
 		rc = -EIO;
 		goto out;
 	}
@@ -1152,7 +1160,7 @@ int __iucv_message_receive(struct iucv_path *path, struct iucv_message *msg,
 	if (msg->flags & IUCV_IPRMDATA)
 		return iucv_message_receive_iprmdata(path, msg, flags,
 						     buffer, size, residual);
-	if (!cpu_isset(smp_processor_id(), iucv_buffer_cpumask)) {
+	if (cpus_empty(iucv_buffer_cpumask)) {
 		rc = -EIO;
 		goto out;
 	}
@@ -1225,7 +1233,7 @@ int iucv_message_reject(struct iucv_path *path, struct iucv_message *msg)
 	int rc;
 
 	local_bh_disable();
-	if (!cpu_isset(smp_processor_id(), iucv_buffer_cpumask)) {
+	if (cpus_empty(iucv_buffer_cpumask)) {
 		rc = -EIO;
 		goto out;
 	}
@@ -1264,7 +1272,7 @@ int iucv_message_reply(struct iucv_path *path, struct iucv_message *msg,
 	int rc;
 
 	local_bh_disable();
-	if (!cpu_isset(smp_processor_id(), iucv_buffer_cpumask)) {
+	if (cpus_empty(iucv_buffer_cpumask)) {
 		rc = -EIO;
 		goto out;
 	}
@@ -1314,7 +1322,7 @@ int __iucv_message_send(struct iucv_path *path, struct iucv_message *msg,
 	union iucv_param *parm;
 	int rc;
 
-	if (!cpu_isset(smp_processor_id(), iucv_buffer_cpumask)) {
+	if (cpus_empty(iucv_buffer_cpumask)) {
 		rc = -EIO;
 		goto out;
 	}
@@ -1401,7 +1409,7 @@ int iucv_message_send2way(struct iucv_path *path, struct iucv_message *msg,
 	int rc;
 
 	local_bh_disable();
-	if (!cpu_isset(smp_processor_id(), iucv_buffer_cpumask)) {
+	if (cpus_empty(iucv_buffer_cpumask)) {
 		rc = -EIO;
 		goto out;
 	}
@@ -1875,6 +1883,7 @@ static int iucv_pm_freeze(struct device *dev)
 #ifdef CONFIG_PM_DEBUG
 	printk(KERN_WARNING "iucv_pm_freeze\n");
 #endif
+	iucv_pm_state = IUCV_PM_FREEZING;
 	for_each_cpu_mask_nr(cpu, iucv_irq_cpumask)
 		smp_call_function_single(cpu, iucv_block_cpu_almost, NULL, 1);
 	if (dev->driver && dev->driver->pm && dev->driver->pm->freeze)
@@ -1899,6 +1908,7 @@ static int iucv_pm_thaw(struct device *dev)
 #ifdef CONFIG_PM_DEBUG
 	printk(KERN_WARNING "iucv_pm_thaw\n");
 #endif
+	iucv_pm_state = IUCV_PM_THAWING;
 	if (!iucv_path_table) {
 		rc = iucv_enable();
 		if (rc)
@@ -1933,6 +1943,10 @@ static int iucv_pm_restore(struct device *dev)
 #ifdef CONFIG_PM_DEBUG
 	printk(KERN_WARNING "iucv_pm_restore %p\n", iucv_path_table);
 #endif
+	if ((iucv_pm_state != IUCV_PM_RESTORING) && iucv_path_table)
+		pr_warning("Suspending Linux did not completely close all IUCV "
+			"connections\n");
+	iucv_pm_state = IUCV_PM_RESTORING;
 	if (cpus_empty(iucv_irq_cpumask)) {
 		rc = iucv_query_maxconn();
 		rc = iucv_enable();