|
116 | 116 | static int __vsock_bind(struct sock *sk, struct sockaddr_vm *addr);
|
117 | 117 | static void vsock_sk_destruct(struct sock *sk);
|
118 | 118 | static int vsock_queue_rcv_skb(struct sock *sk, struct sk_buff *skb);
|
| 119 | +static void vsock_close(struct sock *sk, long timeout); |
119 | 120 |
|
120 | 121 | /* Protocol family. */
|
121 | 122 | struct proto vsock_proto = {
|
122 | 123 | .name = "AF_VSOCK",
|
123 | 124 | .owner = THIS_MODULE,
|
124 | 125 | .obj_size = sizeof(struct vsock_sock),
|
| 126 | + .close = vsock_close, |
125 | 127 | #ifdef CONFIG_BPF_SYSCALL
|
126 | 128 | .psock_update_sk_prot = vsock_bpf_update_proto,
|
127 | 129 | #endif
|
@@ -334,7 +336,10 @@ EXPORT_SYMBOL_GPL(vsock_find_connected_socket);
|
334 | 336 |
|
335 | 337 | void vsock_remove_sock(struct vsock_sock *vsk)
|
336 | 338 | {
|
337 |
| - vsock_remove_bound(vsk); |
| 339 | + /* Transport reassignment must not remove the binding. */ |
| 340 | + if (sock_flag(sk_vsock(vsk), SOCK_DEAD)) |
| 341 | + vsock_remove_bound(vsk); |
| 342 | + |
338 | 343 | vsock_remove_connected(vsk);
|
339 | 344 | }
|
340 | 345 | EXPORT_SYMBOL_GPL(vsock_remove_sock);
|
@@ -797,39 +802,44 @@ static bool sock_type_connectible(u16 type)
|
797 | 802 |
|
/* Tear down @sk: mark it dead, release the transport state, orphan it,
 * purge queued skbs and recursively release any children that were never
 * accept()ed.
 *
 * @level is the lockdep nesting level: 0 for the top-level call,
 * SINGLE_DEPTH_NESTING when recursing into pending child sockets.
 * The reference the caller holds on @sk is dropped here via sock_put().
 */
static void __vsock_release(struct sock *sk, int level)
{
	struct vsock_sock *vsk;
	struct sock *pending;

	vsk = vsock_sk(sk);
	pending = NULL;	/* Compiler warning. */

	/* When "level" is SINGLE_DEPTH_NESTING, use the nested
	 * version to avoid the warning "possible recursive locking
	 * detected". When "level" is 0, lock_sock_nested(sk, level)
	 * is the same as lock_sock(sk).
	 */
	lock_sock_nested(sk, level);

	/* Indicate to vsock_remove_sock() that the socket is being released and
	 * can be removed from the bound_table. Unlike transport reassignment
	 * case, where the socket must remain bound despite vsock_remove_sock()
	 * being called from the transport release() callback.
	 */
	sock_set_flag(sk, SOCK_DEAD);

	/* The transport's release() callback ends up in vsock_remove_sock();
	 * with no transport assigned, do the removal directly for
	 * connection-oriented sockets.
	 */
	if (vsk->transport)
		vsk->transport->release(vsk);
	else if (sock_type_connectible(sk->sk_type))
		vsock_remove_sock(vsk);

	sock_orphan(sk);
	sk->sk_shutdown = SHUTDOWN_MASK;

	skb_queue_purge(&sk->sk_receive_queue);

	/* Clean up any sockets that never were accepted. */
	while ((pending = vsock_dequeue_accept(sk)) != NULL) {
		__vsock_release(pending, SINGLE_DEPTH_NESTING);
		sock_put(pending);
	}

	release_sock(sk);
	sock_put(sk);
}
|
834 | 844 |
|
835 | 845 | static void vsock_sk_destruct(struct sock *sk)
|
@@ -910,9 +920,22 @@ void vsock_data_ready(struct sock *sk)
|
910 | 920 | }
|
911 | 921 | EXPORT_SYMBOL_GPL(vsock_data_ready);
|
912 | 922 |
|
/* Dummy callback required by sockmap.
 * See unconditional call of saved_close() in sock_map_close().
 * Intentionally empty: the real teardown happens in __vsock_release(),
 * so both @sk and @timeout are unused here.
 */
static void vsock_close(struct sock *sk, long timeout)
{
}
913 | 930 | static int vsock_release(struct socket *sock)
|
914 | 931 | {
|
915 |
| - __vsock_release(sock->sk, 0); |
| 932 | + struct sock *sk = sock->sk; |
| 933 | + |
| 934 | + if (!sk) |
| 935 | + return 0; |
| 936 | + |
| 937 | + sk->sk_prot->close(sk, 0); |
| 938 | + __vsock_release(sk, 0); |
916 | 939 | sock->sk = NULL;
|
917 | 940 | sock->state = SS_FREE;
|
918 | 941 |
|
|
0 commit comments