author    Arnaldo Carvalho de Melo <acme@mandriva.com>  2005-08-24 08:50:06 +0400
committer David S. Miller <davem@sunset.davemloft.net>  2005-08-30 03:04:31 +0400
commit    7ad07e7cf343181002c10c39d3f57a88e4903d4f (patch)
tree      c22067f3f443faebdcd3403fa8ce7c5c89662c60 /net/dccp
parent    58e45131dc269eff0983c6d44494f9e687686900 (diff)
download  linux-7ad07e7cf343181002c10c39d3f57a88e4903d4f.tar.xz
[DCCP]: Implement the CLOSING timer
So that we retransmit CLOSE/CLOSEREQ packets till they elicit an answer or we hit a timeout. Most of the machinery uses TCP approaches; this code has to be polished and audited, but it is better than what we had before.

Signed-off-by: Arnaldo Carvalho de Melo <acme@mandriva.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
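For reference, a minimal user-space sketch of the behaviour this timer provides: keep resending CLOSE/CLOSEREQ with a TCP-style exponentially backed-off RTO until the peer answers or an upper bound is reached. This is purely illustrative, not kernel code; rto_ms, RTO_MAX_MS and peer_answered() are made-up names standing in for icsk_rto, DCCP_RTO_MAX and the arrival of the peer's reply.

/*
 * Illustrative-only simulation of "retransmit CLOSE until answered
 * or we give up". Not kernel code; names are hypothetical.
 */
#include <stdbool.h>
#include <stdio.h>
#include <stdlib.h>

#define RTO_MAX_MS 64000	/* cap on the backed-off timeout, like DCCP_RTO_MAX */

/* Stand-in for "did the peer's RESET (or CLOSE, for CLOSEREQ) arrive?" */
static bool peer_answered(void)
{
	return rand() % 8 == 0;	/* pretend an answer eventually shows up */
}

int main(void)
{
	unsigned int rto_ms = 1000;	/* the patch notes this should really start at 2 * RTT */
	unsigned int waited_ms = 0;
	unsigned int retransmits = 0;

	printf("send CLOSE\n");
	while (!peer_answered()) {
		if (waited_ms >= RTO_MAX_MS) {
			printf("gave up after %u retransmits\n", retransmits);
			return 1;
		}
		/* timer expired: retransmit and back the timer off, TCP-style */
		waited_ms += rto_ms;
		rto_ms *= 2;
		if (rto_ms > RTO_MAX_MS)
			rto_ms = RTO_MAX_MS;
		retransmits++;
		printf("retransmit CLOSE (rto now %u ms)\n", rto_ms);
	}
	printf("peer answered, socket can move to CLOSED\n");
	return 0;
}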
Diffstat (limited to 'net/dccp')
-rw-r--r--  net/dccp/dccp.h    |  2
-rw-r--r--  net/dccp/input.c   | 26
-rw-r--r--  net/dccp/output.c  | 23
-rw-r--r--  net/dccp/proto.c   | 26
4 files changed, 46 insertions, 31 deletions
diff --git a/net/dccp/dccp.h b/net/dccp/dccp.h
index 33968a9422f2..53994f10ced5 100644
--- a/net/dccp/dccp.h
+++ b/net/dccp/dccp.h
@@ -255,7 +255,7 @@ extern int dccp_v4_checksum(const struct sk_buff *skb,
extern int dccp_v4_send_reset(struct sock *sk,
enum dccp_reset_codes code);
-extern void dccp_send_close(struct sock *sk);
+extern void dccp_send_close(struct sock *sk, const int active);
struct dccp_skb_cb {
__u8 dccpd_type;
diff --git a/net/dccp/input.c b/net/dccp/input.c
index 85402532e4e9..02af05ec23a2 100644
--- a/net/dccp/input.c
+++ b/net/dccp/input.c
@@ -31,14 +31,9 @@ static void dccp_fin(struct sock *sk, struct sk_buff *skb)
static void dccp_rcv_close(struct sock *sk, struct sk_buff *skb)
{
- switch (sk->sk_state) {
- case DCCP_PARTOPEN:
- case DCCP_OPEN:
- dccp_v4_send_reset(sk, DCCP_RESET_CODE_CLOSED);
- dccp_fin(sk, skb);
- dccp_set_state(sk, DCCP_CLOSED);
- break;
- }
+ dccp_v4_send_reset(sk, DCCP_RESET_CODE_CLOSED);
+ dccp_fin(sk, skb);
+ dccp_set_state(sk, DCCP_CLOSED);
}
static void dccp_rcv_closereq(struct sock *sk, struct sk_buff *skb)
@@ -54,13 +49,8 @@ static void dccp_rcv_closereq(struct sock *sk, struct sk_buff *skb)
return;
}
- switch (sk->sk_state) {
- case DCCP_PARTOPEN:
- case DCCP_OPEN:
- dccp_set_state(sk, DCCP_CLOSING);
- dccp_send_close(sk);
- break;
- }
+ dccp_set_state(sk, DCCP_CLOSING);
+ dccp_send_close(sk, 0);
}
static inline void dccp_event_ack_recv(struct sock *sk, struct sk_buff *skb)
@@ -562,6 +552,12 @@ int dccp_rcv_state_process(struct sock *sk, struct sk_buff *skb,
dccp_send_sync(sk, DCCP_SKB_CB(skb)->dccpd_seq,
DCCP_PKT_SYNC);
goto discard;
+ } else if (dh->dccph_type == DCCP_PKT_CLOSEREQ) {
+ dccp_rcv_closereq(sk, skb);
+ goto discard;
+ } else if (dh->dccph_type == DCCP_PKT_CLOSE) {
+ dccp_rcv_close(sk, skb);
+ return 0;
}
switch (sk->sk_state) {
diff --git a/net/dccp/output.c b/net/dccp/output.c
index 708fc3c0a969..630ca7741022 100644
--- a/net/dccp/output.c
+++ b/net/dccp/output.c
@@ -96,8 +96,7 @@ int dccp_transmit_skb(struct sock *sk, struct sk_buff *skb)
dh->dccph_checksum = dccp_v4_checksum(skb, inet->saddr,
inet->daddr);
- if (dcb->dccpd_type == DCCP_PKT_ACK ||
- dcb->dccpd_type == DCCP_PKT_DATAACK)
+ if (set_ack)
dccp_event_ack_sent(sk);
DCCP_INC_STATS(DCCP_MIB_OUTSEGS);
@@ -429,18 +428,15 @@ void dccp_send_sync(struct sock *sk, const u64 seq,
* cannot be allowed to fail queueing a DCCP_PKT_CLOSE/CLOSEREQ frame under
* any circumstances.
*/
-void dccp_send_close(struct sock *sk)
+void dccp_send_close(struct sock *sk, const int active)
{
struct dccp_sock *dp = dccp_sk(sk);
struct sk_buff *skb;
+ const unsigned int prio = active ? GFP_KERNEL : GFP_ATOMIC;
- /* Socket is locked, keep trying until memory is available. */
- for (;;) {
- skb = alloc_skb(sk->sk_prot->max_header, GFP_KERNEL);
- if (skb != NULL)
- break;
- yield();
- }
+ skb = alloc_skb(sk->sk_prot->max_header, prio);
+ if (skb == NULL)
+ return;
/* Reserve space for headers and prepare control bits. */
skb_reserve(skb, sk->sk_prot->max_header);
@@ -449,7 +445,12 @@ void dccp_send_close(struct sock *sk)
DCCP_PKT_CLOSE : DCCP_PKT_CLOSEREQ;
skb_set_owner_w(skb, sk);
- dccp_transmit_skb(sk, skb);
+ if (active) {
+ BUG_TRAP(sk->sk_send_head == NULL);
+ sk->sk_send_head = skb;
+ dccp_transmit_skb(sk, skb_clone(skb, prio));
+ } else
+ dccp_transmit_skb(sk, skb);
ccid_hc_rx_exit(dp->dccps_hc_rx_ccid, sk);
ccid_hc_tx_exit(dp->dccps_hc_tx_ccid, sk);
diff --git a/net/dccp/proto.c b/net/dccp/proto.c
index 8b613c3017c5..a3f8a8095f81 100644
--- a/net/dccp/proto.c
+++ b/net/dccp/proto.c
@@ -402,12 +402,15 @@ void dccp_close(struct sock *sk, long timeout)
/* Check zero linger _after_ checking for unread data. */
sk->sk_prot->disconnect(sk, 0);
} else if (dccp_close_state(sk)) {
- dccp_send_close(sk);
+ dccp_send_close(sk, 1);
}
sk_stream_wait_close(sk, timeout);
adjudge_to_death:
+ /*
+ * It is the last release_sock in its life. It will remove backlog.
+ */
release_sock(sk);
/*
* Now socket is owned by kernel and we acquire BH lock
@@ -419,11 +422,26 @@ adjudge_to_death:
sock_hold(sk);
sock_orphan(sk);
-
- if (sk->sk_state != DCCP_CLOSED)
+
+ /*
+ * The last release_sock may have processed the CLOSE or RESET
+ * packet moving sock to CLOSED state, if not we have to fire
+ * the CLOSE/CLOSEREQ retransmission timer, see "8.3. Termination"
+ * in draft-ietf-dccp-spec-11. -acme
+ */
+ if (sk->sk_state == DCCP_CLOSING) {
+ /* FIXME: should start at 2 * RTT */
+ /* Timer for repeating the CLOSE/CLOSEREQ until an answer. */
+ inet_csk_reset_xmit_timer(sk, ICSK_TIME_RETRANS,
+ inet_csk(sk)->icsk_rto,
+ DCCP_RTO_MAX);
+#if 0
+ /* Yeah, we should use sk->sk_prot->orphan_count, etc */
dccp_set_state(sk, DCCP_CLOSED);
+#endif
+ }
- atomic_inc(&dccp_orphan_count);
+ atomic_inc(sk->sk_prot->orphan_count);
if (sk->sk_state == DCCP_CLOSED)
inet_csk_destroy_sock(sk);