Diffstat (limited to 'net')
44 files changed, 95 insertions, 96 deletions
diff --git a/net/802/garp.c b/net/802/garp.c
index f6012f8e59f0..fc9eb02a912f 100644
--- a/net/802/garp.c
+++ b/net/802/garp.c
@@ -407,7 +407,7 @@ static void garp_join_timer_arm(struct garp_applicant *app)
 {
 	unsigned long delay;
 
-	delay = (u64)msecs_to_jiffies(garp_join_time) * prandom_u32() >> 32;
+	delay = prandom_u32_max(msecs_to_jiffies(garp_join_time));
 	mod_timer(&app->join_timer, jiffies + delay);
 }
diff --git a/net/802/mrp.c b/net/802/mrp.c
index 35e04cc5390c..155f74d8b14f 100644
--- a/net/802/mrp.c
+++ b/net/802/mrp.c
@@ -592,7 +592,7 @@ static void mrp_join_timer_arm(struct mrp_applicant *app)
 {
 	unsigned long delay;
 
-	delay = (u64)msecs_to_jiffies(mrp_join_time) * prandom_u32() >> 32;
+	delay = prandom_u32_max(msecs_to_jiffies(mrp_join_time));
 	mod_timer(&app->join_timer, jiffies + delay);
 }
diff --git a/net/ceph/mon_client.c b/net/ceph/mon_client.c
index 6a6898ee4049..db60217f911b 100644
--- a/net/ceph/mon_client.c
+++ b/net/ceph/mon_client.c
@@ -222,7 +222,7 @@ static void pick_new_mon(struct ceph_mon_client *monc)
 		max--;
 	}
 
-	n = prandom_u32() % max;
+	n = prandom_u32_max(max);
 	if (o >= 0 && n >= o)
 		n++;
diff --git a/net/ceph/osd_client.c b/net/ceph/osd_client.c
index 87b883c7bfd6..4e4f1e4bc265 100644
--- a/net/ceph/osd_client.c
+++ b/net/ceph/osd_client.c
@@ -1479,7 +1479,7 @@ static bool target_should_be_paused(struct ceph_osd_client *osdc,
 
 static int pick_random_replica(const struct ceph_osds *acting)
 {
-	int i = prandom_u32() % acting->size;
+	int i = prandom_u32_max(acting->size);
 
 	dout("%s picked osd%d, primary osd%d\n", __func__,
 	     acting->osds[i], acting->primary);
diff --git a/net/core/neighbour.c b/net/core/neighbour.c
index e93edb810103..3c4786b99907 100644
--- a/net/core/neighbour.c
+++ b/net/core/neighbour.c
@@ -111,7 +111,7 @@ static void neigh_cleanup_and_release(struct neighbour *neigh)
 
 unsigned long neigh_rand_reach_time(unsigned long base)
 {
-	return base ? (prandom_u32() % base) + (base >> 1) : 0;
+	return base ? prandom_u32_max(base) + (base >> 1) : 0;
 }
 EXPORT_SYMBOL(neigh_rand_reach_time);
diff --git a/net/core/pktgen.c b/net/core/pktgen.c
index 88906ba6d9a7..c3763056c554 100644
--- a/net/core/pktgen.c
+++ b/net/core/pktgen.c
@@ -2324,7 +2324,7 @@ static inline int f_pick(struct pktgen_dev *pkt_dev)
 			pkt_dev->curfl = 0; /*reset */
 		}
 	} else {
-		flow = prandom_u32() % pkt_dev->cflows;
+		flow = prandom_u32_max(pkt_dev->cflows);
 		pkt_dev->curfl = flow;
 
 		if (pkt_dev->flows[flow].count > pkt_dev->lflow) {
@@ -2380,10 +2380,9 @@ static void set_cur_queue_map(struct pktgen_dev *pkt_dev)
 	else if (pkt_dev->queue_map_min <= pkt_dev->queue_map_max) {
 		__u16 t;
 		if (pkt_dev->flags & F_QUEUE_MAP_RND) {
-			t = prandom_u32() %
-				(pkt_dev->queue_map_max -
-				 pkt_dev->queue_map_min + 1)
-				+ pkt_dev->queue_map_min;
+			t = prandom_u32_max(pkt_dev->queue_map_max -
+					    pkt_dev->queue_map_min + 1) +
+			    pkt_dev->queue_map_min;
 		} else {
 			t = pkt_dev->cur_queue_map + 1;
 			if (t > pkt_dev->queue_map_max)
@@ -2412,7 +2411,7 @@ static void mod_cur_headers(struct pktgen_dev *pkt_dev)
 		__u32 tmp;
 
 		if (pkt_dev->flags & F_MACSRC_RND)
-			mc = prandom_u32() % pkt_dev->src_mac_count;
+			mc = prandom_u32_max(pkt_dev->src_mac_count);
 		else {
 			mc = pkt_dev->cur_src_mac_offset++;
 			if (pkt_dev->cur_src_mac_offset >=
@@ -2438,7 +2437,7 @@ static void mod_cur_headers(struct pktgen_dev *pkt_dev)
 		__u32 tmp;
 
 		if (pkt_dev->flags & F_MACDST_RND)
-			mc = prandom_u32() % pkt_dev->dst_mac_count;
+			mc = prandom_u32_max(pkt_dev->dst_mac_count);
 		else {
 			mc = pkt_dev->cur_dst_mac_offset++;
@@ -2465,23 +2464,23 @@ static void mod_cur_headers(struct pktgen_dev *pkt_dev)
 		for (i = 0; i < pkt_dev->nr_labels; i++)
 			if (pkt_dev->labels[i] & MPLS_STACK_BOTTOM)
 				pkt_dev->labels[i] = MPLS_STACK_BOTTOM |
-					     ((__force __be32)prandom_u32() &
+					     ((__force __be32)get_random_u32() &
 						      htonl(0x000fffff));
 	}
 
 	if ((pkt_dev->flags & F_VID_RND) && (pkt_dev->vlan_id != 0xffff)) {
-		pkt_dev->vlan_id = prandom_u32() & (4096 - 1);
+		pkt_dev->vlan_id = prandom_u32_max(4096);
 	}
 
 	if ((pkt_dev->flags & F_SVID_RND) && (pkt_dev->svlan_id != 0xffff)) {
-		pkt_dev->svlan_id = prandom_u32() & (4096 - 1);
+		pkt_dev->svlan_id = prandom_u32_max(4096);
 	}
 
 	if (pkt_dev->udp_src_min < pkt_dev->udp_src_max) {
 		if (pkt_dev->flags & F_UDPSRC_RND)
-			pkt_dev->cur_udp_src = prandom_u32() %
-				(pkt_dev->udp_src_max - pkt_dev->udp_src_min)
-				+ pkt_dev->udp_src_min;
+			pkt_dev->cur_udp_src = prandom_u32_max(
+				pkt_dev->udp_src_max - pkt_dev->udp_src_min) +
+				pkt_dev->udp_src_min;
 
 		else {
 			pkt_dev->cur_udp_src++;
@@ -2492,9 +2491,9 @@ static void mod_cur_headers(struct pktgen_dev *pkt_dev)
 
 	if (pkt_dev->udp_dst_min < pkt_dev->udp_dst_max) {
 		if (pkt_dev->flags & F_UDPDST_RND) {
-			pkt_dev->cur_udp_dst = prandom_u32() %
-				(pkt_dev->udp_dst_max - pkt_dev->udp_dst_min)
-				+ pkt_dev->udp_dst_min;
+			pkt_dev->cur_udp_dst = prandom_u32_max(
+				pkt_dev->udp_dst_max - pkt_dev->udp_dst_min) +
+				pkt_dev->udp_dst_min;
 		} else {
 			pkt_dev->cur_udp_dst++;
 			if (pkt_dev->cur_udp_dst >= pkt_dev->udp_dst_max)
@@ -2509,7 +2508,7 @@ static void mod_cur_headers(struct pktgen_dev *pkt_dev)
 		if (imn < imx) {
 			__u32 t;
 			if (pkt_dev->flags & F_IPSRC_RND)
-				t = prandom_u32() % (imx - imn) + imn;
+				t = prandom_u32_max(imx - imn) + imn;
 			else {
 				t = ntohl(pkt_dev->cur_saddr);
 				t++;
@@ -2531,8 +2530,8 @@ static void mod_cur_headers(struct pktgen_dev *pkt_dev)
 
 			if (pkt_dev->flags & F_IPDST_RND) {
 				do {
-					t = prandom_u32() %
-						(imx - imn) + imn;
+					t = prandom_u32_max(imx - imn) +
+					    imn;
 					s = htonl(t);
 				} while (ipv4_is_loopback(s) ||
 					 ipv4_is_multicast(s) ||
@@ -2569,7 +2568,7 @@ static void mod_cur_headers(struct pktgen_dev *pkt_dev)
 
 			for (i = 0; i < 4; i++) {
 				pkt_dev->cur_in6_daddr.s6_addr32[i] =
-				    (((__force __be32)prandom_u32() |
+				    (((__force __be32)get_random_u32() |
 				      pkt_dev->min_in6_daddr.s6_addr32[i]) &
 				     pkt_dev->max_in6_daddr.s6_addr32[i]);
 			}
@@ -2579,9 +2578,9 @@ static void mod_cur_headers(struct pktgen_dev *pkt_dev)
 	if (pkt_dev->min_pkt_size < pkt_dev->max_pkt_size) {
 		__u32 t;
 		if (pkt_dev->flags & F_TXSIZE_RND) {
-			t = prandom_u32() %
-				(pkt_dev->max_pkt_size - pkt_dev->min_pkt_size)
-				+ pkt_dev->min_pkt_size;
+			t = prandom_u32_max(pkt_dev->max_pkt_size -
+					    pkt_dev->min_pkt_size) +
+			    pkt_dev->min_pkt_size;
 		} else {
 			t = pkt_dev->cur_pkt_size + 1;
 			if (t > pkt_dev->max_pkt_size)
@@ -2590,7 +2589,7 @@ static void mod_cur_headers(struct pktgen_dev *pkt_dev)
 		pkt_dev->cur_pkt_size = t;
 	} else if (pkt_dev->n_imix_entries > 0) {
 		struct imix_pkt *entry;
-		__u32 t = prandom_u32() % IMIX_PRECISION;
+		__u32 t = prandom_u32_max(IMIX_PRECISION);
 		__u8 entry_index = pkt_dev->imix_distribution[t];
 
 		entry = &pkt_dev->imix_entries[entry_index];
diff --git a/net/core/stream.c b/net/core/stream.c
index 1105057ce00a..75fded8495f5 100644
--- a/net/core/stream.c
+++ b/net/core/stream.c
@@ -123,7 +123,7 @@ int sk_stream_wait_memory(struct sock *sk, long *timeo_p)
 	DEFINE_WAIT_FUNC(wait, woken_wake_function);
 
 	if (sk_stream_memory_free(sk))
-		current_timeo = vm_wait = (prandom_u32() % (HZ / 5)) + 2;
+		current_timeo = vm_wait = prandom_u32_max(HZ / 5) + 2;
 
 	add_wait_queue(sk_sleep(sk), &wait);
diff --git a/net/dccp/ipv4.c b/net/dccp/ipv4.c
index 6a6e121dc00c..713b7b8dad7e 100644
--- a/net/dccp/ipv4.c
+++ b/net/dccp/ipv4.c
@@ -144,7 +144,7 @@ int dccp_v4_connect(struct sock *sk, struct sockaddr *uaddr, int addr_len)
 						    inet->inet_daddr,
 						    inet->inet_sport,
 						    inet->inet_dport);
-	inet->inet_id = prandom_u32();
+	inet->inet_id = get_random_u16();
 
 	err = dccp_connect(sk);
 	rt = NULL;
@@ -443,7 +443,7 @@ struct sock *dccp_v4_request_recv_sock(const struct sock *sk,
 	RCU_INIT_POINTER(newinet->inet_opt, rcu_dereference(ireq->ireq_opt));
 	newinet->mc_index = inet_iif(skb);
 	newinet->mc_ttl = ip_hdr(skb)->ttl;
-	newinet->inet_id = prandom_u32();
+	newinet->inet_id = get_random_u16();
 
 	if (dst == NULL && (dst = inet_csk_route_child_sock(sk, newsk, req)) == NULL)
 		goto put_and_exit;
diff --git a/net/ipv4/datagram.c b/net/ipv4/datagram.c
index 405a8c2aea64..0ee7fd259730 100644
--- a/net/ipv4/datagram.c
+++ b/net/ipv4/datagram.c
@@ -73,7 +73,7 @@ int __ip4_datagram_connect(struct sock *sk, struct sockaddr *uaddr, int addr_len
 		reuseport_has_conns(sk, true);
 	sk->sk_state = TCP_ESTABLISHED;
 	sk_set_txhash(sk);
-	inet->inet_id = prandom_u32();
+	inet->inet_id = get_random_u16();
 
 	sk_dst_set(sk, &rt->dst);
 	err = 0;
diff --git a/net/ipv4/igmp.c b/net/ipv4/igmp.c
index df0660d818ac..81be3e0f0e70 100644
--- a/net/ipv4/igmp.c
+++ b/net/ipv4/igmp.c
@@ -213,7 +213,7 @@ static void igmp_stop_timer(struct ip_mc_list *im)
 /* It must be called with locked im->lock */
 static void igmp_start_timer(struct ip_mc_list *im, int max_delay)
 {
-	int tv = prandom_u32() % max_delay;
+	int tv = prandom_u32_max(max_delay);
 
 	im->tm_running = 1;
 	if (!mod_timer(&im->timer, jiffies+tv+2))
@@ -222,7 +222,7 @@ static void igmp_start_timer(struct ip_mc_list *im, int max_delay)
 
 static void igmp_gq_start_timer(struct in_device *in_dev)
 {
-	int tv = prandom_u32() % in_dev->mr_maxdelay;
+	int tv = prandom_u32_max(in_dev->mr_maxdelay);
 	unsigned long exp = jiffies + tv + 2;
 
 	if (in_dev->mr_gq_running &&
@@ -236,7 +236,7 @@ static void igmp_gq_start_timer(struct in_device *in_dev)
 
 static void igmp_ifc_start_timer(struct in_device *in_dev, int delay)
 {
-	int tv = prandom_u32() % delay;
+	int tv = prandom_u32_max(delay);
 
 	if (!mod_timer(&in_dev->mr_ifc_timer, jiffies+tv+2))
 		in_dev_hold(in_dev);
diff --git a/net/ipv4/inet_connection_sock.c b/net/ipv4/inet_connection_sock.c
index ebca860e113f..4e84ed21d16f 100644
--- a/net/ipv4/inet_connection_sock.c
+++ b/net/ipv4/inet_connection_sock.c
@@ -314,7 +314,7 @@ other_half_scan:
 	if (likely(remaining > 1))
 		remaining &= ~1U;
 
-	offset = prandom_u32() % remaining;
+	offset = prandom_u32_max(remaining);
 	/* __inet_hash_connect() favors ports having @low parity
 	 * We do the opposite to not pollute connect() users.
 	 */
diff --git a/net/ipv4/inet_hashtables.c b/net/ipv4/inet_hashtables.c
index a0ad34e4f044..d3dc28156622 100644
--- a/net/ipv4/inet_hashtables.c
+++ b/net/ipv4/inet_hashtables.c
@@ -1037,7 +1037,7 @@ ok:
 	 * on low contention the randomness is maximal and on high contention
 	 * it may be inexistent.
 	 */
-	i = max_t(int, i, (prandom_u32() & 7) * 2);
+	i = max_t(int, i, prandom_u32_max(8) * 2);
 	WRITE_ONCE(table_perturb[index], READ_ONCE(table_perturb[index]) + i + 2);
 
 	/* Head lock still held and bh's disabled */
diff --git a/net/ipv4/ip_output.c b/net/ipv4/ip_output.c
index 1ae83ad629b2..922c87ef1ab5 100644
--- a/net/ipv4/ip_output.c
+++ b/net/ipv4/ip_output.c
@@ -172,7 +172,7 @@ int ip_build_and_send_pkt(struct sk_buff *skb, const struct sock *sk,
 		 * Avoid using the hashed IP ident generator.
 		 */
 		if (sk->sk_protocol == IPPROTO_TCP)
-			iph->id = (__force __be16)prandom_u32();
+			iph->id = (__force __be16)get_random_u16();
 		else
 			__ip_select_ident(net, iph, 1);
 	}
diff --git a/net/ipv4/route.c b/net/ipv4/route.c
index 795cbe1de912..cd1fa9f70f1a 100644
--- a/net/ipv4/route.c
+++ b/net/ipv4/route.c
@@ -3664,7 +3664,7 @@ static __net_init int rt_genid_init(struct net *net)
 {
 	atomic_set(&net->ipv4.rt_genid, 0);
 	atomic_set(&net->fnhe_genid, 0);
-	atomic_set(&net->ipv4.dev_addr_genid, get_random_int());
+	atomic_set(&net->ipv4.dev_addr_genid, get_random_u32());
 	return 0;
 }
@@ -3719,7 +3719,7 @@ int __init ip_rt_init(void)
 
 	ip_idents = idents_hash;
 
-	prandom_bytes(ip_idents, (ip_idents_mask + 1) * sizeof(*ip_idents));
+	get_random_bytes(ip_idents, (ip_idents_mask + 1) * sizeof(*ip_idents));
 
 	ip_tstamps = idents_hash + (ip_idents_mask + 1) * sizeof(*ip_idents);
diff --git a/net/ipv4/tcp_cdg.c b/net/ipv4/tcp_cdg.c
index 112f28f93693..ba4d98e510e0 100644
--- a/net/ipv4/tcp_cdg.c
+++ b/net/ipv4/tcp_cdg.c
@@ -243,7 +243,7 @@ static bool tcp_cdg_backoff(struct sock *sk, u32 grad)
 	struct cdg *ca = inet_csk_ca(sk);
 	struct tcp_sock *tp = tcp_sk(sk);
 
-	if (prandom_u32() <= nexp_u32(grad * backoff_factor))
+	if (get_random_u32() <= nexp_u32(grad * backoff_factor))
 		return false;
 
 	if (use_ineff) {
diff --git a/net/ipv4/tcp_ipv4.c b/net/ipv4/tcp_ipv4.c
index 6376ad915765..7a250ef9d1b7 100644
--- a/net/ipv4/tcp_ipv4.c
+++ b/net/ipv4/tcp_ipv4.c
@@ -323,7 +323,7 @@ int tcp_v4_connect(struct sock *sk, struct sockaddr *uaddr, int addr_len)
 						 inet->inet_daddr);
 	}
 
-	inet->inet_id = prandom_u32();
+	inet->inet_id = get_random_u16();
 
 	if (tcp_fastopen_defer_connect(sk, &err))
 		return err;
@@ -1543,7 +1543,7 @@ struct sock *tcp_v4_syn_recv_sock(const struct sock *sk, struct sk_buff *skb,
 	inet_csk(newsk)->icsk_ext_hdr_len = 0;
 	if (inet_opt)
 		inet_csk(newsk)->icsk_ext_hdr_len = inet_opt->opt.optlen;
-	newinet->inet_id = prandom_u32();
+	newinet->inet_id = get_random_u16();
 
 	/* Set ToS of the new socket based upon the value of incoming SYN.
 	 * ECT bits are set later in tcp_init_transfer().
diff --git a/net/ipv4/udp.c b/net/ipv4/udp.c
index 8126f67d18b3..662d717d5123 100644
--- a/net/ipv4/udp.c
+++ b/net/ipv4/udp.c
@@ -246,7 +246,7 @@ int udp_lib_get_port(struct sock *sk, unsigned short snum,
 		inet_get_local_port_range(net, &low, &high);
 		remaining = (high - low) + 1;
 
-		rand = prandom_u32();
+		rand = get_random_u32();
 		first = reciprocal_scale(rand, remaining) + low;
 		/*
 		 * force rand to be an odd multiple of UDP_HTABLE_SIZE
diff --git a/net/ipv6/addrconf.c b/net/ipv6/addrconf.c
index 10ce86bf228e..417834b7169d 100644
--- a/net/ipv6/addrconf.c
+++ b/net/ipv6/addrconf.c
@@ -104,7 +104,7 @@ static inline u32 cstamp_delta(unsigned long cstamp)
 static inline s32 rfc3315_s14_backoff_init(s32 irt)
 {
 	/* multiply 'initial retransmission time' by 0.9 .. 1.1 */
-	u64 tmp = (900000 + prandom_u32() % 200001) * (u64)irt;
+	u64 tmp = (900000 + prandom_u32_max(200001)) * (u64)irt;
 	do_div(tmp, 1000000);
 	return (s32)tmp;
 }
@@ -112,11 +112,11 @@ static inline s32 rfc3315_s14_backoff_init(s32 irt)
 static inline s32 rfc3315_s14_backoff_update(s32 rt, s32 mrt)
 {
 	/* multiply 'retransmission timeout' by 1.9 .. 2.1 */
-	u64 tmp = (1900000 + prandom_u32() % 200001) * (u64)rt;
+	u64 tmp = (1900000 + prandom_u32_max(200001)) * (u64)rt;
 	do_div(tmp, 1000000);
 	if ((s32)tmp > mrt) {
 		/* multiply 'maximum retransmission time' by 0.9 .. 1.1 */
-		tmp = (900000 + prandom_u32() % 200001) * (u64)mrt;
+		tmp = (900000 + prandom_u32_max(200001)) * (u64)mrt;
 		do_div(tmp, 1000000);
 	}
 	return (s32)tmp;
@@ -3967,7 +3967,7 @@ static void addrconf_dad_kick(struct inet6_ifaddr *ifp)
 	if (ifp->flags & IFA_F_OPTIMISTIC)
 		rand_num = 0;
 	else
-		rand_num = prandom_u32() % (idev->cnf.rtr_solicit_delay ? : 1);
+		rand_num = prandom_u32_max(idev->cnf.rtr_solicit_delay ?: 1);
 
 	nonce = 0;
 	if (idev->cnf.enhanced_dad ||
diff --git a/net/ipv6/ip6_flowlabel.c b/net/ipv6/ip6_flowlabel.c
index ceb85c67ce39..18481eb76a0a 100644
--- a/net/ipv6/ip6_flowlabel.c
+++ b/net/ipv6/ip6_flowlabel.c
@@ -220,7 +220,7 @@ static struct ip6_flowlabel *fl_intern(struct net *net,
 	spin_lock_bh(&ip6_fl_lock);
 	if (label == 0) {
 		for (;;) {
-			fl->label = htonl(prandom_u32())&IPV6_FLOWLABEL_MASK;
+			fl->label = htonl(get_random_u32())&IPV6_FLOWLABEL_MASK;
 			if (fl->label) {
 				lfl = __fl_lookup(net, fl->label);
 				if (!lfl)
diff --git a/net/ipv6/mcast.c b/net/ipv6/mcast.c
index 0566ab03ddbe..7860383295d8 100644
--- a/net/ipv6/mcast.c
+++ b/net/ipv6/mcast.c
@@ -1050,7 +1050,7 @@ bool ipv6_chk_mcast_addr(struct net_device *dev, const struct in6_addr *group,
 /* called with mc_lock */
 static void mld_gq_start_work(struct inet6_dev *idev)
 {
-	unsigned long tv = prandom_u32() % idev->mc_maxdelay;
+	unsigned long tv = prandom_u32_max(idev->mc_maxdelay);
 
 	idev->mc_gq_running = 1;
 	if (!mod_delayed_work(mld_wq, &idev->mc_gq_work, tv + 2))
@@ -1068,7 +1068,7 @@ static void mld_gq_stop_work(struct inet6_dev *idev)
 /* called with mc_lock */
 static void mld_ifc_start_work(struct inet6_dev *idev, unsigned long delay)
 {
-	unsigned long tv = prandom_u32() % delay;
+	unsigned long tv = prandom_u32_max(delay);
 
 	if (!mod_delayed_work(mld_wq, &idev->mc_ifc_work, tv + 2))
 		in6_dev_hold(idev);
@@ -1085,7 +1085,7 @@ static void mld_ifc_stop_work(struct inet6_dev *idev)
 /* called with mc_lock */
 static void mld_dad_start_work(struct inet6_dev *idev, unsigned long delay)
 {
-	unsigned long tv = prandom_u32() % delay;
+	unsigned long tv = prandom_u32_max(delay);
 
 	if (!mod_delayed_work(mld_wq, &idev->mc_dad_work, tv + 2))
 		in6_dev_hold(idev);
@@ -1130,7 +1130,7 @@ static void igmp6_group_queried(struct ifmcaddr6 *ma, unsigned long resptime)
 	}
 
 	if (delay >= resptime)
-		delay = prandom_u32() % resptime;
+		delay = prandom_u32_max(resptime);
 
 	if (!mod_delayed_work(mld_wq, &ma->mca_work, delay))
 		refcount_inc(&ma->mca_refcnt);
@@ -2574,7 +2574,7 @@ static void igmp6_join_group(struct ifmcaddr6 *ma)
 
 	igmp6_send(&ma->mca_addr, ma->idev->dev, ICMPV6_MGM_REPORT);
 
-	delay = prandom_u32() % unsolicited_report_interval(ma->idev);
+	delay = prandom_u32_max(unsolicited_report_interval(ma->idev));
 
 	if (cancel_delayed_work(&ma->mca_work)) {
 		refcount_dec(&ma->mca_refcnt);
diff --git a/net/ipv6/output_core.c b/net/ipv6/output_core.c
index 2880dc7d9a49..2685c3f15e9d 100644
--- a/net/ipv6/output_core.c
+++ b/net/ipv6/output_core.c
@@ -18,7 +18,7 @@ static u32 __ipv6_select_ident(struct net *net,
 	u32 id;
 
 	do {
-		id = prandom_u32();
+		id = get_random_u32();
 	} while (!id);
 
 	return id;
diff --git a/net/mac80211/rc80211_minstrel_ht.c b/net/mac80211/rc80211_minstrel_ht.c
index 7f3f5f51081d..3d91b98db099 100644
--- a/net/mac80211/rc80211_minstrel_ht.c
+++ b/net/mac80211/rc80211_minstrel_ht.c
@@ -2036,7 +2036,7 @@ static void __init init_sample_table(void)
 	memset(sample_table, 0xff, sizeof(sample_table));
 	for (col = 0; col < SAMPLE_COLUMNS; col++) {
-		prandom_bytes(rnd, sizeof(rnd));
+		get_random_bytes(rnd, sizeof(rnd));
 		for (i = 0; i < MCS_GROUP_RATES; i++) {
 			new_idx = (i + rnd[i]) % MCS_GROUP_RATES;
 			while (sample_table[col][new_idx] != 0xff)
diff --git a/net/mac80211/scan.c b/net/mac80211/scan.c
index 0e8c4f48c36d..dc3cdee51e66 100644
--- a/net/mac80211/scan.c
+++ b/net/mac80211/scan.c
@@ -641,7 +641,7 @@ static void ieee80211_send_scan_probe_req(struct ieee80211_sub_if_data *sdata,
 	if (flags & IEEE80211_PROBE_FLAG_RANDOM_SN) {
 		struct ieee80211_hdr *hdr = (void *)skb->data;
 		struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
-		u16 sn = get_random_u32();
+		u16 sn = get_random_u16();
 
 		info->control.flags |= IEEE80211_TX_CTRL_NO_SEQNO;
 		hdr->seq_ctrl =
diff --git a/net/netfilter/ipvs/ip_vs_conn.c b/net/netfilter/ipvs/ip_vs_conn.c
index fb67f1ca2495..8c04bb57dd6f 100644
--- a/net/netfilter/ipvs/ip_vs_conn.c
+++ b/net/netfilter/ipvs/ip_vs_conn.c
@@ -1308,7 +1308,7 @@ void ip_vs_random_dropentry(struct netns_ipvs *ipvs)
 	 * Randomly scan 1/32 of the whole table every second
 	 */
 	for (idx = 0; idx < (ip_vs_conn_tab_size>>5); idx++) {
-		unsigned int hash = prandom_u32() & ip_vs_conn_tab_mask;
+		unsigned int hash = get_random_u32() & ip_vs_conn_tab_mask;
 
 		hlist_for_each_entry_rcu(cp, &ip_vs_conn_tab[hash], c_list) {
 			if (cp->ipvs != ipvs)
diff --git a/net/netfilter/ipvs/ip_vs_twos.c b/net/netfilter/ipvs/ip_vs_twos.c
index acb55d8393ef..f2579fc9c75b 100644
--- a/net/netfilter/ipvs/ip_vs_twos.c
+++ b/net/netfilter/ipvs/ip_vs_twos.c
@@ -71,8 +71,8 @@ static struct ip_vs_dest *ip_vs_twos_schedule(struct ip_vs_service *svc,
 	 * from 0 to total_weight
 	 */
 	total_weight += 1;
-	rweight1 = prandom_u32() % total_weight;
-	rweight2 = prandom_u32() % total_weight;
+	rweight1 = prandom_u32_max(total_weight);
+	rweight2 = prandom_u32_max(total_weight);
 
 	/* Pick two weighted servers */
 	list_for_each_entry_rcu(dest, &svc->destinations, n_list) {
diff --git a/net/netfilter/nf_nat_core.c b/net/netfilter/nf_nat_core.c
index d8e6380f6337..18319a6e6806 100644
--- a/net/netfilter/nf_nat_core.c
+++ b/net/netfilter/nf_nat_core.c
@@ -468,7 +468,7 @@ find_free_id:
 	if (range->flags & NF_NAT_RANGE_PROTO_OFFSET)
 		off = (ntohs(*keyptr) - ntohs(range->base_proto.all));
 	else
-		off = prandom_u32();
+		off = get_random_u16();
 
 	attempts = range_size;
 	if (attempts > max_attempts)
@@ -490,7 +490,7 @@ another_round:
 	if (attempts >= range_size || attempts < 16)
 		return;
 	attempts /= 2;
-	off = prandom_u32();
+	off = get_random_u16();
 	goto another_round;
 }
diff --git a/net/netfilter/xt_statistic.c b/net/netfilter/xt_statistic.c
index 203e24ae472c..b26c1dcfc27b 100644
--- a/net/netfilter/xt_statistic.c
+++ b/net/netfilter/xt_statistic.c
@@ -34,7 +34,7 @@ statistic_mt(const struct sk_buff *skb, struct xt_action_param *par)
 
 	switch (info->mode) {
 	case XT_STATISTIC_MODE_RANDOM:
-		if ((prandom_u32() & 0x7FFFFFFF) < info->u.random.probability)
+		if ((get_random_u32() & 0x7FFFFFFF) < info->u.random.probability)
 			ret = !ret;
 		break;
 	case XT_STATISTIC_MODE_NTH:
diff --git a/net/openvswitch/actions.c b/net/openvswitch/actions.c
index 868db4669a29..ca3ebfdb3023 100644
--- a/net/openvswitch/actions.c
+++ b/net/openvswitch/actions.c
@@ -1033,7 +1033,7 @@ static int sample(struct datapath *dp, struct sk_buff *skb,
 	actions = nla_next(sample_arg, &rem);
 
 	if ((arg->probability != U32_MAX) &&
-	    (!arg->probability || prandom_u32() > arg->probability)) {
+	    (!arg->probability || get_random_u32() > arg->probability)) {
 		if (last)
 			consume_skb(skb);
 		return 0;
diff --git a/net/packet/af_packet.c b/net/packet/af_packet.c
index d3f6db350de7..6ce8dd19f33c 100644
--- a/net/packet/af_packet.c
+++ b/net/packet/af_packet.c
@@ -1350,7 +1350,7 @@ static bool fanout_flow_is_huge(struct packet_sock *po, struct sk_buff *skb)
 		if (READ_ONCE(history[i]) == rxhash)
 			count++;
 
-	victim = prandom_u32() % ROLLOVER_HLEN;
+	victim = prandom_u32_max(ROLLOVER_HLEN);
 
 	/* Avoid dirtying the cache line if possible */
 	if (READ_ONCE(history[victim]) != rxhash)
diff --git a/net/rds/bind.c b/net/rds/bind.c
index 5b5fb4ca8d3e..97a29172a8ee 100644
--- a/net/rds/bind.c
+++ b/net/rds/bind.c
@@ -104,7 +104,7 @@ static int rds_add_bound(struct rds_sock *rs, const struct in6_addr *addr,
 			return -EINVAL;
 		last = rover;
 	} else {
-		rover = max_t(u16, prandom_u32(), 2);
+		rover = max_t(u16, get_random_u16(), 2);
 		last = rover - 1;
 	}
diff --git a/net/sched/act_gact.c b/net/sched/act_gact.c
index abe1bcc5c797..62d682b96b88 100644
--- a/net/sched/act_gact.c
+++ b/net/sched/act_gact.c
@@ -25,7 +25,7 @@ static struct tc_action_ops act_gact_ops;
 static int gact_net_rand(struct tcf_gact *gact)
 {
 	smp_rmb(); /* coupled with smp_wmb() in tcf_gact_init() */
-	if (prandom_u32() % gact->tcfg_pval)
+	if (prandom_u32_max(gact->tcfg_pval))
 		return gact->tcf_action;
 	return gact->tcfg_paction;
 }
diff --git a/net/sched/act_sample.c b/net/sched/act_sample.c
index 5ba36f70e3a1..7a25477f5d99 100644
--- a/net/sched/act_sample.c
+++ b/net/sched/act_sample.c
@@ -168,7 +168,7 @@ static int tcf_sample_act(struct sk_buff *skb, const struct tc_action *a,
 	psample_group = rcu_dereference_bh(s->psample_group);
 
 	/* randomly sample packets according to rate */
-	if (psample_group && (prandom_u32() % s->rate == 0)) {
+	if (psample_group && (prandom_u32_max(s->rate) == 0)) {
 		if (!skb_at_tc_ingress(skb)) {
 			md.in_ifindex = skb->skb_iif;
 			md.out_ifindex = skb->dev->ifindex;
diff --git a/net/sched/sch_cake.c b/net/sched/sch_cake.c
index 55c6879d2c7e..817cd0695b35 100644
--- a/net/sched/sch_cake.c
+++ b/net/sched/sch_cake.c
@@ -573,7 +573,7 @@ static bool cobalt_should_drop(struct cobalt_vars *vars,
 	/* Simple BLUE implementation. Lack of ECN is deliberate.
 	 */
 	if (vars->p_drop)
-		drop |= (prandom_u32() < vars->p_drop);
+		drop |= (get_random_u32() < vars->p_drop);
 
 	/* Overload the drop_next field as an activity timeout */
 	if (!vars->count)
@@ -2092,11 +2092,11 @@ retry:
 
 	WARN_ON(host_load > CAKE_QUEUES);
 
-	/* The shifted prandom_u32() is a way to apply dithering to
-	 * avoid accumulating roundoff errors
+	/* The get_random_u16() is a way to apply dithering to avoid
+	 * accumulating roundoff errors
 	 */
 	flow->deficit += (b->flow_quantum * quantum_div[host_load] +
-			  (prandom_u32() >> 16)) >> 16;
+			  get_random_u16()) >> 16;
 	list_move_tail(&flow->flowchain, &b->old_flows);
 
 	goto retry;
diff --git a/net/sched/sch_netem.c b/net/sched/sch_netem.c
index 18f4273a835b..fb00ac40ecb7 100644
--- a/net/sched/sch_netem.c
+++ b/net/sched/sch_netem.c
@@ -171,7 +171,7 @@ static inline struct netem_skb_cb *netem_skb_cb(struct sk_buff *skb)
 static void init_crandom(struct crndstate *state, unsigned long rho)
 {
 	state->rho = rho;
-	state->last = prandom_u32();
+	state->last = get_random_u32();
 }
 
 /* get_crandom - correlated random number generator
@@ -184,9 +184,9 @@ static u32 get_crandom(struct crndstate *state)
 	unsigned long answer;
 
 	if (!state || state->rho == 0)	/* no correlation */
-		return prandom_u32();
+		return get_random_u32();
 
-	value = prandom_u32();
+	value = get_random_u32();
 	rho = (u64)state->rho + 1;
 	answer = (value * ((1ull<<32) - rho) + state->last * rho) >> 32;
 	state->last = answer;
@@ -200,7 +200,7 @@ static u32 get_crandom(struct crndstate *state)
 static bool loss_4state(struct netem_sched_data *q)
 {
 	struct clgstate *clg = &q->clg;
-	u32 rnd = prandom_u32();
+	u32 rnd = get_random_u32();
 
 	/*
 	 * Makes a comparison between rnd and the transition
@@ -268,15 +268,15 @@ static bool loss_gilb_ell(struct netem_sched_data *q)
 
 	switch (clg->state) {
 	case GOOD_STATE:
-		if (prandom_u32() < clg->a1)
+		if (get_random_u32() < clg->a1)
 			clg->state = BAD_STATE;
-		if (prandom_u32() < clg->a4)
+		if (get_random_u32() < clg->a4)
 			return true;
 		break;
 	case BAD_STATE:
-		if (prandom_u32() < clg->a2)
+		if (get_random_u32() < clg->a2)
 			clg->state = GOOD_STATE;
-		if (prandom_u32() > clg->a3)
+		if (get_random_u32() > clg->a3)
 			return true;
 	}
@@ -513,8 +513,8 @@ static int netem_enqueue(struct sk_buff *skb, struct Qdisc *sch,
 			goto finish_segs;
 		}
 
-		skb->data[prandom_u32() % skb_headlen(skb)] ^=
-			1<<(prandom_u32() % 8);
+		skb->data[prandom_u32_max(skb_headlen(skb))] ^=
+			1<<prandom_u32_max(8);
 	}
 
 	if (unlikely(sch->q.qlen >= sch->limit)) {
@@ -632,7 +632,7 @@ static void get_slot_next(struct netem_sched_data *q, u64 now)
 	if (!q->slot_dist)
 		next_delay = q->slot_config.min_delay +
-				(prandom_u32() *
+				(get_random_u32() *
 				 (q->slot_config.max_delay -
 				  q->slot_config.min_delay) >> 32);
 	else
diff --git a/net/sched/sch_pie.c b/net/sched/sch_pie.c
index 974038ba6c7b..265c238047a4 100644
--- a/net/sched/sch_pie.c
+++ b/net/sched/sch_pie.c
@@ -72,7 +72,7 @@ bool pie_drop_early(struct Qdisc *sch, struct pie_params *params,
 	if (vars->accu_prob >= (MAX_PROB / 2) * 17)
 		return true;
 
-	prandom_bytes(&rnd, 8);
+	get_random_bytes(&rnd, 8);
 	if ((rnd >> BITS_PER_BYTE) < local_prob) {
 		vars->accu_prob = 0;
 		return true;
diff --git a/net/sched/sch_sfb.c b/net/sched/sch_sfb.c
index e2389fa3cff8..0366a1a029a9 100644
--- a/net/sched/sch_sfb.c
+++ b/net/sched/sch_sfb.c
@@ -379,7 +379,7 @@ static int sfb_enqueue(struct sk_buff *skb, struct Qdisc *sch,
 		goto enqueue;
 	}
 
-	r = prandom_u32() & SFB_MAX_PROB;
+	r = get_random_u16() & SFB_MAX_PROB;
 
 	if (unlikely(r < p_min)) {
 		if (unlikely(p_min > SFB_MAX_PROB / 2)) {
diff --git a/net/sctp/socket.c b/net/sctp/socket.c
index 171f1a35d205..83628c347744 100644
--- a/net/sctp/socket.c
+++ b/net/sctp/socket.c
@@ -8319,7 +8319,7 @@ static int sctp_get_port_local(struct sock *sk, union sctp_addr *addr)
 		inet_get_local_port_range(net, &low, &high);
 		remaining = (high - low) + 1;
 
-		rover = prandom_u32() % remaining + low;
+		rover = prandom_u32_max(remaining) + low;
 
 		do {
 			rover++;
@@ -9448,7 +9448,7 @@ void sctp_copy_sock(struct sock *newsk, struct sock *sk,
 	newinet->inet_rcv_saddr = inet->inet_rcv_saddr;
 	newinet->inet_dport = htons(asoc->peer.port);
 	newinet->pmtudisc = inet->pmtudisc;
-	newinet->inet_id = prandom_u32();
+	newinet->inet_id = get_random_u16();
 
 	newinet->uc_ttl = inet->uc_ttl;
 	newinet->mc_loop = 1;
diff --git a/net/sunrpc/auth_gss/gss_krb5_wrap.c b/net/sunrpc/auth_gss/gss_krb5_wrap.c
index 5f96e75f9eec..48337687848c 100644
--- a/net/sunrpc/auth_gss/gss_krb5_wrap.c
+++ b/net/sunrpc/auth_gss/gss_krb5_wrap.c
@@ -130,8 +130,8 @@ gss_krb5_make_confounder(char *p, u32 conflen)
 
 	/* initialize to random value */
 	if (i == 0) {
-		i = prandom_u32();
-		i = (i << 32) | prandom_u32();
+		i = get_random_u32();
+		i = (i << 32) | get_random_u32();
 	}
 
 	switch (conflen) {
diff --git a/net/sunrpc/cache.c b/net/sunrpc/cache.c
index c3c693b51c94..f075a9fb5ccc 100644
--- a/net/sunrpc/cache.c
+++ b/net/sunrpc/cache.c
@@ -677,7 +677,7 @@ static void cache_limit_defers(void)
 
 	/* Consider removing either the first or the last */
 	if (cache_defer_cnt > DFR_MAX) {
-		if (prandom_u32() & 1)
+		if (prandom_u32_max(2))
 			discard = list_entry(cache_defer_list.next,
 					     struct cache_deferred_req, recent);
 		else
diff --git a/net/sunrpc/xprt.c b/net/sunrpc/xprt.c
index 71dc26373444..656cec208371 100644
--- a/net/sunrpc/xprt.c
+++ b/net/sunrpc/xprt.c
@@ -1865,7 +1865,7 @@ xprt_alloc_xid(struct rpc_xprt *xprt)
 static void
 xprt_init_xid(struct rpc_xprt *xprt)
 {
-	xprt->xid = prandom_u32();
+	xprt->xid = get_random_u32();
 }
 
 static void
diff --git a/net/sunrpc/xprtsock.c b/net/sunrpc/xprtsock.c
index f34d5427b66c..915b9902f673 100644
--- a/net/sunrpc/xprtsock.c
+++ b/net/sunrpc/xprtsock.c
@@ -1619,7 +1619,7 @@ static int xs_get_random_port(void)
 	if (max < min)
 		return -EADDRINUSE;
 	range = max - min + 1;
-	rand = (unsigned short) prandom_u32() % range;
+	rand = prandom_u32_max(range);
 	return rand + min;
 }
diff --git a/net/tipc/socket.c b/net/tipc/socket.c
index f1c3b8eb4b3d..e902b01ea3cb 100644
--- a/net/tipc/socket.c
+++ b/net/tipc/socket.c
@@ -3010,7 +3010,7 @@ static int tipc_sk_insert(struct tipc_sock *tsk)
 	struct net *net = sock_net(sk);
 	struct tipc_net *tn = net_generic(net, tipc_net_id);
 	u32 remaining = (TIPC_MAX_PORT - TIPC_MIN_PORT) + 1;
-	u32 portid = prandom_u32() % remaining + TIPC_MIN_PORT;
+	u32 portid = prandom_u32_max(remaining) + TIPC_MIN_PORT;
 
 	while (remaining--) {
 		portid++;
diff --git a/net/unix/af_unix.c b/net/unix/af_unix.c
index 15dbb392c875..b3545fc68097 100644
--- a/net/unix/af_unix.c
+++ b/net/unix/af_unix.c
@@ -1147,7 +1147,7 @@ static int unix_autobind(struct sock *sk)
 	addr->name->sun_family = AF_UNIX;
 	refcount_set(&addr->refcnt, 1);
 
-	ordernum = prandom_u32();
+	ordernum = get_random_u32();
 	lastnum = ordernum & 0xFFFFF;
 retry:
 	ordernum = (ordernum + 1) & 0xFFFFF;
diff --git a/net/xfrm/xfrm_state.c b/net/xfrm/xfrm_state.c
index 81df34b3da6e..3d2fe7712ac5 100644
--- a/net/xfrm/xfrm_state.c
+++ b/net/xfrm/xfrm_state.c
@@ -2072,7 +2072,7 @@ int xfrm_alloc_spi(struct xfrm_state *x, u32 low, u32 high)
 	} else {
 		u32 spi = 0;
 		for (h = 0; h < high-low+1; h++) {
-			spi = low + prandom_u32()%(high-low+1);
+			spi = low + prandom_u32_max(high - low + 1);
 			x0 = xfrm_state_lookup(net, mark, &x->id.daddr, htonl(spi), x->id.proto, x->props.family);
 			if (x0 == NULL) {
 				newspi = htonl(spi);
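Two conversion patterns account for most of the hunks above. Bounded draws, whether spelled as the modulo idiom prandom_u32() % range or as the open-coded multiply-high idiom (u64)range * prandom_u32() >> 32 (garp.c, mrp.c), become prandom_u32_max(range), which keeps the multiply-high form: no division, and the same negligible bias as modulo when range does not evenly divide 2^32. Draws that feed a field narrower than 32 bits (inet_id, NAT port offsets, 802.11 sequence numbers) become get_random_u16() of matching width. The following is a minimal userspace sketch of the bounded-draw equivalence, not kernel code; rand32() is a hypothetical stand-in for the kernel's generator.

/* Userspace sketch: why prandom_u32_max(range) can replace both the
 * modulo idiom and the open-coded multiply-high idiom seen above.
 * rand32() is a stand-in for the kernel generator (assumption). */
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

static uint32_t rand32(void)
{
	/* crude stand-in only; the kernel uses its own RNG here */
	return ((uint32_t)rand() << 16) ^ (uint32_t)rand();
}

/* Multiply-high: maps [0, 2^32) onto [0, range) without a division. */
static uint32_t u32_max(uint32_t range)
{
	return (uint32_t)(((uint64_t)rand32() * range) >> 32);
}

int main(void)
{
	uint32_t range = 1000;
	uint32_t a = (uint64_t)range * rand32() >> 32; /* old garp.c/mrp.c idiom */
	uint32_t b = rand32() % range;                 /* old modulo idiom; costs a div */
	uint32_t c = u32_max(range);                   /* new prandom_u32_max() spelling */

	printf("%u %u %u all lie in [0, %u)\n", a, b, c, range);
	return 0;
}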
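The sch_cake.c hunk also illustrates the dithering idiom the new helper preserves: before a 16.16 fixed-point quantity is truncated by >> 16, a uniform 16-bit random term is added, so the result rounds up with probability equal to the discarded fraction, and no rounding error accumulates in flow->deficit. Below is a self-contained sketch of that idiom under the same caveat: rand16() is a hypothetical stand-in for get_random_u16(), and the numbers are illustrative.

#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

static uint16_t rand16(void)
{
	/* stand-in only; the kernel uses get_random_u16() here */
	return (uint16_t)(((unsigned)rand() << 8) ^ (unsigned)rand());
}

int main(void)
{
	uint32_t fixed = (3u << 16) | 0x4000;	/* 3.25 in 16.16 fixed point */
	uint64_t plain = 0, dithered = 0;
	int i;

	for (i = 0; i < 1000000; i++) {
		plain += fixed >> 16;                 /* always 3: drifts low */
		dithered += (fixed + rand16()) >> 16; /* 3 or 4, P(4) = 0.25 */
	}
	printf("plain avg %.3f, dithered avg %.3f (true value 3.25)\n",
	       plain / 1e6, dithered / 1e6);
	return 0;
}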