/* DataCenter TCP (DCTCP) congestion control.
 *
 * http://simula.stanford.edu/~alizade/Site/DCTCP.html
 *
 * This is an implementation of DCTCP over Reno, an enhancement to the
 * TCP congestion control algorithm designed for data centers. DCTCP
 * leverages Explicit Congestion Notification (ECN) in the network to
 * provide multi-bit feedback to the end hosts. DCTCP's goal is to meet
 * the following three data center transport requirements:
 *
 *  - High burst tolerance (incast due to partition/aggregate)
 *  - Low latency (short flows, queries)
 *  - High throughput (continuous data updates, large file transfers)
 *    with commodity shallow buffered switches
 *
 * The algorithm is described in detail in the following two papers:
 *
 * 1) Mohammad Alizadeh, Albert Greenberg, David A. Maltz, Jitendra Padhye,
 *    Parveen Patel, Balaji Prabhakar, Sudipta Sengupta, and Murari Sridharan:
 *      "Data Center TCP (DCTCP)", Data Center Networks session
 *      Proc. ACM SIGCOMM, New Delhi, 2010.
 *    http://simula.stanford.edu/~alizade/Site/DCTCP_files/dctcp-final.pdf
 *
 * 2) Mohammad Alizadeh, Adel Javanmard, and Balaji Prabhakar:
 *      "Analysis of DCTCP: Stability, Convergence, and Fairness"
 *      Proc. ACM SIGMETRICS, San Jose, 2011.
 *    http://simula.stanford.edu/~alizade/Site/DCTCP_files/dctcp_analysis-full.pdf
 *
 * Initial prototype from Abdul Kabbani, Masato Yasuda and Mohammad Alizadeh.
 *
 * Authors:
 *
 *	Daniel Borkmann <dborkman@redhat.com>
 *	Florian Westphal <fw@strlen.de>
 *	Glenn Judd <glenn.judd@morganstanley.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or (at
 * your option) any later version.
 */

#include <linux/module.h>
#include <linux/mm.h>
#include <net/tcp.h>
#include <linux/inet_diag.h>

#define DCTCP_MAX_ALPHA	1024U

struct dctcp {
	u32 acked_bytes_ecn;	/* bytes acked with ECE set, current window */
	u32 acked_bytes_total;	/* all bytes acked in the current window */
	u32 prior_snd_una;	/* snd_una when the previous ACK was processed */
	u32 prior_rcv_nxt;	/* rcv_nxt at the last CE state change */
	u32 dctcp_alpha;	/* EWMA of marked fraction, scaled by 1024 */
	u32 next_seq;		/* end of the current observation window */
	u32 ce_state;		/* 1 if the last received packet was CE marked */
	u32 delayed_ack_reserved; /* a delayed ACK is pending */
	u32 loss_cwnd;		/* cwnd saved at ssthresh, for undo */
};

static unsigned int dctcp_shift_g __read_mostly = 4; /* g = 1/2^4 */
module_param(dctcp_shift_g, uint, 0644);
MODULE_PARM_DESC(dctcp_shift_g, "parameter g for updating dctcp_alpha");

static unsigned int dctcp_alpha_on_init __read_mostly = DCTCP_MAX_ALPHA;
module_param(dctcp_alpha_on_init, uint, 0644);
MODULE_PARM_DESC(dctcp_alpha_on_init, "parameter for initial alpha value");

static unsigned int dctcp_clamp_alpha_on_loss __read_mostly;
module_param(dctcp_clamp_alpha_on_loss, uint, 0644);
MODULE_PARM_DESC(dctcp_clamp_alpha_on_loss,
		 "parameter for clamping alpha on loss");

static struct tcp_congestion_ops dctcp_reno;

static void dctcp_reset(const struct tcp_sock *tp, struct dctcp *ca)
{
	ca->next_seq = tp->snd_nxt;

	ca->acked_bytes_ecn = 0;
	ca->acked_bytes_total = 0;
}

static void dctcp_init(struct sock *sk)
{
	const struct tcp_sock *tp = tcp_sk(sk);

	if ((tp->ecn_flags & TCP_ECN_OK) ||
	    (sk->sk_state == TCP_LISTEN ||
	     sk->sk_state == TCP_CLOSE)) {
		struct dctcp *ca = inet_csk_ca(sk);

		ca->prior_snd_una = tp->snd_una;
		ca->prior_rcv_nxt = tp->rcv_nxt;

		ca->dctcp_alpha = min(dctcp_alpha_on_init, DCTCP_MAX_ALPHA);

		ca->delayed_ack_reserved = 0;
		ca->loss_cwnd = 0;
		ca->ce_state = 0;

		dctcp_reset(tp, ca);
		return;
	}

	/* No ECN support? Fall back to Reno. Also need to clear
	 * ECT from sk since it is set during 3WHS for DCTCP.
	 */
	inet_csk(sk)->icsk_ca_ops = &dctcp_reno;
	INET_ECN_dontxmit(sk);
}

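/* On congestion, reduce cwnd in proportion to the fraction of marked
 * bytes: cwnd <- cwnd * (1 - alpha/2). Since alpha is kept in fixed
 * point scaled by 1024 (DCTCP_MAX_ALPHA), the shift by 11 divides by
 * 1024 * 2. The result is floored at two segments.
 */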
static u32 dctcp_ssthresh(struct sock *sk)
{
	struct dctcp *ca = inet_csk_ca(sk);
	struct tcp_sock *tp = tcp_sk(sk);

	ca->loss_cwnd = tp->snd_cwnd;
	return max(tp->snd_cwnd - ((tp->snd_cwnd * ca->dctcp_alpha) >> 11U), 2U);
}

/* Minimal DCTCP CE state machine:
 *
 * S:	0 <- last pkt was non-CE
 *	1 <- last pkt was CE
 */
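/* When the CE state flips while a delayed ACK is still pending, the
 * already-received data is acknowledged immediately, carrying the
 * previous CE state, so that the sender's count of marked bytes stays
 * accurate despite delayed ACKs.
 */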
static void dctcp_ce_state_0_to_1(struct sock *sk)
{
	struct dctcp *ca = inet_csk_ca(sk);
	struct tcp_sock *tp = tcp_sk(sk);

	/* State has changed from CE=0 to CE=1 and delayed
	 * ACK has not been sent yet.
	 */
	if (!ca->ce_state && ca->delayed_ack_reserved) {
		u32 tmp_rcv_nxt;

		/* Save current rcv_nxt. */
		tmp_rcv_nxt = tp->rcv_nxt;

		/* Generate previous ack with CE=0. */
		tp->ecn_flags &= ~TCP_ECN_DEMAND_CWR;
		tp->rcv_nxt = ca->prior_rcv_nxt;

		tcp_send_ack(sk);

		/* Recover current rcv_nxt. */
		tp->rcv_nxt = tmp_rcv_nxt;
	}

	ca->prior_rcv_nxt = tp->rcv_nxt;
	ca->ce_state = 1;

	tp->ecn_flags |= TCP_ECN_DEMAND_CWR;
}

static void dctcp_ce_state_1_to_0(struct sock *sk)
{
	struct dctcp *ca = inet_csk_ca(sk);
	struct tcp_sock *tp = tcp_sk(sk);

	/* State has changed from CE=1 to CE=0 and delayed
	 * ACK has not been sent yet.
	 */
	if (ca->ce_state && ca->delayed_ack_reserved) {
		u32 tmp_rcv_nxt;

		/* Save current rcv_nxt. */
		tmp_rcv_nxt = tp->rcv_nxt;

		/* Generate previous ack with CE=1. */
		tp->ecn_flags |= TCP_ECN_DEMAND_CWR;
		tp->rcv_nxt = ca->prior_rcv_nxt;

		tcp_send_ack(sk);

		/* Recover current rcv_nxt. */
		tp->rcv_nxt = tmp_rcv_nxt;
	}

	ca->prior_rcv_nxt = tp->rcv_nxt;
	ca->ce_state = 0;

	tp->ecn_flags &= ~TCP_ECN_DEMAND_CWR;
}

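/* Once per observation window (delimited by ca->next_seq, roughly one
 * RTT of data), update the EWMA of the marked-byte fraction F:
 *
 *	alpha <- (1 - g) * alpha + g * F,	g = 1/2^dctcp_shift_g
 *
 * with alpha kept in fixed point, scaled by DCTCP_MAX_ALPHA (1024).
 */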
static void dctcp_update_alpha(struct sock *sk, u32 flags)
{
	const struct tcp_sock *tp = tcp_sk(sk);
	struct dctcp *ca = inet_csk_ca(sk);
	u32 acked_bytes = tp->snd_una - ca->prior_snd_una;

	/* If ack did not advance snd_una, count dupack as MSS size.
	 * If ack did update window, do not count it at all.
	 */
	if (acked_bytes == 0 && !(flags & CA_ACK_WIN_UPDATE))
		acked_bytes = inet_csk(sk)->icsk_ack.rcv_mss;
	if (acked_bytes) {
		ca->acked_bytes_total += acked_bytes;
		ca->prior_snd_una = tp->snd_una;

		if (flags & CA_ACK_ECE)
			ca->acked_bytes_ecn += acked_bytes;
	}

	/* Expired RTT */
	if (!before(tp->snd_una, ca->next_seq)) {
		u64 bytes_ecn = ca->acked_bytes_ecn;
		u32 alpha = ca->dctcp_alpha;

		/* alpha = (1 - g) * alpha + g * F */

		alpha -= min_not_zero(alpha, alpha >> dctcp_shift_g);
		if (bytes_ecn) {
			/* If dctcp_shift_g == 1, a 32bit value would overflow
			 * after 8 Mbytes.
			 */
			bytes_ecn <<= (10 - dctcp_shift_g);
			do_div(bytes_ecn, max(1U, ca->acked_bytes_total));

			alpha = min(alpha + (u32)bytes_ecn, DCTCP_MAX_ALPHA);
		}
		/* dctcp_alpha can be read from dctcp_get_info() without
		 * synchro, so we ask compiler to not use dctcp_alpha
		 * as a temporary variable in prior operations.
		 */
		WRITE_ONCE(ca->dctcp_alpha, alpha);
		dctcp_reset(tp, ca);
	}
}

static void dctcp_state(struct sock *sk, u8 new_state)
{
	if (dctcp_clamp_alpha_on_loss && new_state == TCP_CA_Loss) {
		struct dctcp *ca = inet_csk_ca(sk);

		/* If this extension is enabled, we clamp dctcp_alpha to
		 * max on packet loss; the motivation is that dctcp_alpha
		 * is an indicator of the extent of congestion and packet
		 * loss is an indicator of extreme congestion; setting
		 * this in practice turned out to be beneficial, and
		 * effectively assumes total congestion which reduces the
		 * window by half.
		 */
		ca->dctcp_alpha = DCTCP_MAX_ALPHA;
	}
}

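/* Track whether a delayed ACK is outstanding, so that the CE state
 * machine above knows when it must emit an immediate ACK carrying the
 * previous CE state.
 */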
static void dctcp_update_ack_reserved(struct sock *sk, enum tcp_ca_event ev)
{
	struct dctcp *ca = inet_csk_ca(sk);

	switch (ev) {
	case CA_EVENT_DELAYED_ACK:
		if (!ca->delayed_ack_reserved)
			ca->delayed_ack_reserved = 1;
		break;
	case CA_EVENT_NON_DELAYED_ACK:
		if (ca->delayed_ack_reserved)
			ca->delayed_ack_reserved = 0;
		break;
	default:
		/* Don't care for the rest. */
		break;
	}
}

static void dctcp_cwnd_event(struct sock *sk, enum tcp_ca_event ev)
{
	switch (ev) {
	case CA_EVENT_ECN_IS_CE:
		dctcp_ce_state_0_to_1(sk);
		break;
	case CA_EVENT_ECN_NO_CE:
		dctcp_ce_state_1_to_0(sk);
		break;
	case CA_EVENT_DELAYED_ACK:
	case CA_EVENT_NON_DELAYED_ACK:
		dctcp_update_ack_reserved(sk, ev);
		break;
	default:
		/* Don't care for the rest. */
		break;
	}
}

static size_t dctcp_get_info(struct sock *sk, u32 ext, int *attr,
			     union tcp_cc_info *info)
{
	const struct dctcp *ca = inet_csk_ca(sk);

	/* Fill it also in case of VEGASINFO due to req struct limits.
	 * We can still correctly retrieve it later.
	 */
	if (ext & (1 << (INET_DIAG_DCTCPINFO - 1)) ||
	    ext & (1 << (INET_DIAG_VEGASINFO - 1))) {
		memset(&info->dctcp, 0, sizeof(info->dctcp));
		if (inet_csk(sk)->icsk_ca_ops != &dctcp_reno) {
			info->dctcp.dctcp_enabled = 1;
			info->dctcp.dctcp_ce_state = (u16) ca->ce_state;
			info->dctcp.dctcp_alpha = ca->dctcp_alpha;
			info->dctcp.dctcp_ab_ecn = ca->acked_bytes_ecn;
			info->dctcp.dctcp_ab_tot = ca->acked_bytes_total;
		}

		*attr = INET_DIAG_DCTCPINFO;
		return sizeof(info->dctcp);
	}
	return 0;
}

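/* On a spurious cwnd reduction, restore at least the cwnd that was
 * saved in dctcp_ssthresh() before the reduction took place.
 */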
static u32 dctcp_cwnd_undo(struct sock *sk)
{
	const struct dctcp *ca = inet_csk_ca(sk);

	return max(tcp_sk(sk)->snd_cwnd, ca->loss_cwnd);
}

static struct tcp_congestion_ops dctcp __read_mostly = {
	.init		= dctcp_init,
	.in_ack_event	= dctcp_update_alpha,
	.cwnd_event	= dctcp_cwnd_event,
	.ssthresh	= dctcp_ssthresh,
	.cong_avoid	= tcp_reno_cong_avoid,
	.undo_cwnd	= dctcp_cwnd_undo,
	.set_state	= dctcp_state,
	.get_info	= dctcp_get_info,
	.flags		= TCP_CONG_NEEDS_ECN,
	.owner		= THIS_MODULE,
	.name		= "dctcp",
};

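/* Fallback ops installed by dctcp_init() when the peer did not
 * negotiate ECN: plain Reno behaviour, but still wired to
 * dctcp_get_info() so diagnostics report dctcp_enabled = 0.
 */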
static struct tcp_congestion_ops dctcp_reno __read_mostly = {
	.ssthresh	= tcp_reno_ssthresh,
	.cong_avoid	= tcp_reno_cong_avoid,
	.get_info	= dctcp_get_info,
	.owner		= THIS_MODULE,
	.name		= "dctcp-reno",
};

static int __init dctcp_register(void)
{
	BUILD_BUG_ON(sizeof(struct dctcp) > ICSK_CA_PRIV_SIZE);
	return tcp_register_congestion_control(&dctcp);
}

static void __exit dctcp_unregister(void)
{
	tcp_unregister_congestion_control(&dctcp);
}

module_init(dctcp_register);
module_exit(dctcp_unregister);

MODULE_AUTHOR("Daniel Borkmann <dborkman@redhat.com>");
MODULE_AUTHOR("Florian Westphal <fw@strlen.de>");
MODULE_AUTHOR("Glenn Judd <glenn.judd@morganstanley.com>");

MODULE_LICENSE("GPL v2");
MODULE_DESCRIPTION("DataCenter TCP (DCTCP)");