1 --- a/drivers/char/random.c
2 +++ b/drivers/char/random.c
4 * The current exported interfaces for gathering environmental noise
5 * from the devices are:
7 + * void add_device_randomness(const void *buf, unsigned int size);
8 * void add_input_randomness(unsigned int type, unsigned int code,
10 - * void add_interrupt_randomness(int irq);
11 + * void add_interrupt_randomness(int irq, int irq_flags);
12 * void add_disk_randomness(struct gendisk *disk);
14 * add_input_randomness() uses the input layer interrupt timing, as well as
15 * the event type information from the hardware.
17 - * add_interrupt_randomness() uses the inter-interrupt timing as random
18 - * inputs to the entropy pool. Note that not all interrupts are good
19 - * sources of randomness! For example, the timer interrupts is not a
20 - * good choice, because the periodicity of the interrupts is too
21 - * regular, and hence predictable to an attacker. Network Interface
22 - * Controller interrupts are a better measure, since the timing of the
23 - * NIC interrupts are more unpredictable.
24 + * add_interrupt_randomness() uses the interrupt timing as random
25 + * inputs to the entropy pool. Using the cycle counters and the irq source
26 + * as inputs, it feeds the randomness roughly once a second.
28 + * add_device_randomness() is for adding data to the random pool that
29 + * is likely to differ between two devices (or possibly even per boot).
30 + * This would be things like MAC addresses or serial numbers, or the
31 + * read-out of the RTC. This does *not* add any actual entropy to the
32 + * pool, but it initializes the pool to different values for devices
33 + * that might otherwise be identical and have very little entropy
34 + * available to them (particularly common in the embedded world).
36 * add_disk_randomness() uses what amounts to the seek time of block
37 * layer request events, on a per-disk_devt basis, as input to the
39 #include <linux/percpu.h>
40 #include <linux/cryptohash.h>
41 #include <linux/fips.h>
42 +#include <linux/ptrace.h>
44 #ifdef CONFIG_GENERIC_HARDIRQS
45 # include <linux/irq.h>
47 #include <asm/processor.h>
48 #include <asm/uaccess.h>
50 +#include <asm/irq_regs.h>
53 +#define CREATE_TRACE_POINTS
54 +#include <trace/events/random.h>
57 * Configuration information
59 @@ -420,8 +430,10 @@ struct entropy_store {
60 /* read-write data: */
63 + unsigned input_rotate;
67 + unsigned int initialized:1;
68 __u8 last_data[EXTRACT_SIZE];
71 @@ -454,6 +466,10 @@ static struct entropy_store nonblocking_
72 .pool = nonblocking_pool_data
75 +static __u32 const twist_table[8] = {
76 + 0x00000000, 0x3b6e20c8, 0x76dc4190, 0x4db26158,
77 + 0xedb88320, 0xd6d6a3e8, 0x9b64c2b0, 0xa00ae278 };
80 * This function adds bytes into the entropy "pool". It does not
81 * update the entropy estimate. The caller should call
82 @@ -464,29 +480,24 @@ static struct entropy_store nonblocking_
83 * it's cheap to do so and helps slightly in the expected case where
84 * the entropy is concentrated in the low-order bits.
86 -static void mix_pool_bytes_extract(struct entropy_store *r, const void *in,
87 - int nbytes, __u8 out[64])
88 +static void _mix_pool_bytes(struct entropy_store *r, const void *in,
89 + int nbytes, __u8 out[64])
91 - static __u32 const twist_table[8] = {
92 - 0x00000000, 0x3b6e20c8, 0x76dc4190, 0x4db26158,
93 - 0xedb88320, 0xd6d6a3e8, 0x9b64c2b0, 0xa00ae278 };
94 unsigned long i, j, tap1, tap2, tap3, tap4, tap5;
96 int wordmask = r->poolinfo->poolwords - 1;
97 const char *bytes = in;
99 - unsigned long flags;
101 - /* Taps are constant, so we can load them without holding r->lock. */
102 tap1 = r->poolinfo->tap1;
103 tap2 = r->poolinfo->tap2;
104 tap3 = r->poolinfo->tap3;
105 tap4 = r->poolinfo->tap4;
106 tap5 = r->poolinfo->tap5;
108 - spin_lock_irqsave(&r->lock, flags);
109 - input_rotate = r->input_rotate;
112 + input_rotate = ACCESS_ONCE(r->input_rotate);
113 + i = ACCESS_ONCE(r->add_ptr);
115 /* mix one byte at a time to simplify size handling and churn faster */
117 @@ -513,19 +524,61 @@ static void mix_pool_bytes_extract(struc
118 input_rotate += i ? 7 : 14;
121 - r->input_rotate = input_rotate;
123 + ACCESS_ONCE(r->input_rotate) = input_rotate;
124 + ACCESS_ONCE(r->add_ptr) = i;
128 for (j = 0; j < 16; j++)
129 ((__u32 *)out)[j] = r->pool[(i - j) & wordmask];
132 +static void __mix_pool_bytes(struct entropy_store *r, const void *in,
133 + int nbytes, __u8 out[64])
135 + trace_mix_pool_bytes_nolock(r->name, nbytes, _RET_IP_);
136 + _mix_pool_bytes(r, in, nbytes, out);
139 +static void mix_pool_bytes(struct entropy_store *r, const void *in,
140 + int nbytes, __u8 out[64])
142 + unsigned long flags;
144 + trace_mix_pool_bytes(r->name, nbytes, _RET_IP_);
145 + spin_lock_irqsave(&r->lock, flags);
146 + _mix_pool_bytes(r, in, nbytes, out);
147 spin_unlock_irqrestore(&r->lock, flags);
150 -static void mix_pool_bytes(struct entropy_store *r, const void *in, int bytes)
153 + unsigned long last;
154 + unsigned short count;
155 + unsigned char rotate;
156 + unsigned char last_timer_intr;
160 + * This is a fast mixing routine used by the interrupt randomness
161 + * collector. It's hardcoded for a 128-bit pool and assumes that any
162 + * locks that might be needed are taken by the caller.
164 +static void fast_mix(struct fast_pool *f, const void *in, int nbytes)
166 - mix_pool_bytes_extract(r, in, bytes, NULL);
167 + const char *bytes = in;
169 + unsigned i = f->count;
170 + unsigned input_rotate = f->rotate;
173 + w = rol32(*bytes++, input_rotate & 31) ^ f->pool[i & 3] ^
174 + f->pool[(i + 1) & 3];
175 + f->pool[i & 3] = (w >> 3) ^ twist_table[w & 7];
176 + input_rotate += (i++ & 3) ? 7 : 14;
179 + f->rotate = input_rotate;
183 @@ -533,30 +586,38 @@ static void mix_pool_bytes(struct entrop
185 static void credit_entropy_bits(struct entropy_store *r, int nbits)
187 - unsigned long flags;
189 + int entropy_count, orig;
194 - spin_lock_irqsave(&r->lock, flags);
196 DEBUG_ENT("added %d entropy credits to %s\n", nbits, r->name);
197 - entropy_count = r->entropy_count;
199 + entropy_count = orig = ACCESS_ONCE(r->entropy_count);
200 entropy_count += nbits;
202 if (entropy_count < 0) {
203 DEBUG_ENT("negative entropy/overflow\n");
205 } else if (entropy_count > r->poolinfo->POOLBITS)
206 entropy_count = r->poolinfo->POOLBITS;
207 - r->entropy_count = entropy_count;
208 + if (cmpxchg(&r->entropy_count, orig, entropy_count) != orig)
211 + if (!r->initialized && nbits > 0) {
212 + r->entropy_total += nbits;
213 + if (r->entropy_total > 128)
214 + r->initialized = 1;
217 + trace_credit_entropy_bits(r->name, nbits, entropy_count,
218 + r->entropy_total, _RET_IP_);
220 /* should we wake readers? */
221 if (r == &input_pool && entropy_count >= random_read_wakeup_thresh) {
222 wake_up_interruptible(&random_read_wait);
223 kill_fasync(&fasync, SIGIO, POLL_IN);
225 - spin_unlock_irqrestore(&r->lock, flags);
228 /*********************************************************************
229 @@ -609,6 +670,25 @@ static void set_timer_rand_state(unsigne
234 + * Add device- or boot-specific data to the input and nonblocking
235 + * pools to help initialize them to unique values.
237 + * None of this adds any entropy; it is meant to avoid the
238 + * problem of the nonblocking pool having similar initial state
239 + * across largely identical devices.
241 +void add_device_randomness(const void *buf, unsigned int size)
243 + unsigned long time = get_cycles() ^ jiffies;
245 + mix_pool_bytes(&input_pool, buf, size, NULL);
246 + mix_pool_bytes(&input_pool, &time, sizeof(time), NULL);
247 + mix_pool_bytes(&nonblocking_pool, buf, size, NULL);
248 + mix_pool_bytes(&nonblocking_pool, &time, sizeof(time), NULL);
250 +EXPORT_SYMBOL(add_device_randomness);
252 static struct timer_rand_state input_timer_state;
255 @@ -637,13 +717,9 @@ static void add_timer_randomness(struct
258 sample.jiffies = jiffies;
260 - /* Use arch random value, fall back to cycles */
261 - if (!arch_get_random_int(&sample.cycles))
262 - sample.cycles = get_cycles();
264 + sample.cycles = get_cycles();
266 - mix_pool_bytes(&input_pool, &sample, sizeof(sample));
267 + mix_pool_bytes(&input_pool, &sample, sizeof(sample), NULL);
270 * Calculate number of bits of randomness we probably added.
271 @@ -700,17 +776,48 @@ void add_input_randomness(unsigned int t
273 EXPORT_SYMBOL_GPL(add_input_randomness);
275 -void add_interrupt_randomness(int irq)
276 +static DEFINE_PER_CPU(struct fast_pool, irq_randomness);
278 +void add_interrupt_randomness(int irq, int irq_flags)
280 - struct timer_rand_state *state;
281 + struct entropy_store *r;
282 + struct fast_pool *fast_pool = &__get_cpu_var(irq_randomness);
283 + struct pt_regs *regs = get_irq_regs();
284 + unsigned long now = jiffies;
285 + __u32 input[4], cycles = get_cycles();
287 + input[0] = cycles ^ jiffies;
290 + __u64 ip = instruction_pointer(regs);
292 + input[3] = ip >> 32;
295 - state = get_timer_rand_state(irq);
296 + fast_mix(fast_pool, input, sizeof(input));
299 + if ((fast_pool->count & 1023) &&
300 + !time_after(now, fast_pool->last + HZ))
303 - DEBUG_ENT("irq event %d\n", irq);
304 - add_timer_randomness(state, 0x100 + irq);
305 + fast_pool->last = now;
307 + r = nonblocking_pool.initialized ? &input_pool : &nonblocking_pool;
308 + __mix_pool_bytes(r, &fast_pool->pool, sizeof(fast_pool->pool), NULL);
310 + * If we don't have a valid cycle counter, and we see
311 + * back-to-back timer interrupts, then skip giving credit for
315 + if (irq_flags & __IRQF_TIMER) {
316 + if (fast_pool->last_timer_intr)
318 + fast_pool->last_timer_intr = 1;
320 + fast_pool->last_timer_intr = 0;
322 + credit_entropy_bits(r, 1);
326 @@ -742,7 +849,11 @@ static ssize_t extract_entropy(struct en
328 static void xfer_secondary_pool(struct entropy_store *r, size_t nbytes)
330 - __u32 tmp[OUTPUT_POOL_WORDS];
332 + __u32 tmp[OUTPUT_POOL_WORDS];
337 if (r->pull && r->entropy_count < nbytes * 8 &&
338 r->entropy_count < r->poolinfo->POOLBITS) {
339 @@ -753,17 +864,22 @@ static void xfer_secondary_pool(struct e
340 /* pull at least as many as BYTES as wakeup BITS */
341 bytes = max_t(int, bytes, random_read_wakeup_thresh / 8);
342 /* but never more than the buffer size */
343 - bytes = min_t(int, bytes, sizeof(tmp));
344 + bytes = min_t(int, bytes, sizeof(u.tmp));
346 DEBUG_ENT("going to reseed %s with %d bits "
347 "(%d of %d requested)\n",
348 r->name, bytes * 8, nbytes * 8, r->entropy_count);
350 - bytes = extract_entropy(r->pull, tmp, bytes,
351 + bytes = extract_entropy(r->pull, u.tmp, bytes,
352 random_read_wakeup_thresh / 8, rsvd);
353 - mix_pool_bytes(r, tmp, bytes);
354 + mix_pool_bytes(r, u.tmp, bytes, NULL);
355 credit_entropy_bits(r, bytes*8);
357 + for (i = 0; i < 4; i++)
358 + if (arch_get_random_long(&u.hwrand[i]))
361 + mix_pool_bytes(r, &u.hwrand, i * sizeof(u.hwrand[0]), 0);
365 @@ -822,9 +938,11 @@ static void extract_buf(struct entropy_s
367 __u32 hash[5], workspace[SHA_WORKSPACE_WORDS];
369 + unsigned long flags;
371 /* Generate a hash across the pool, 16 words (512 bits) at a time */
373 + spin_lock_irqsave(&r->lock, flags);
374 for (i = 0; i < r->poolinfo->poolwords; i += 16)
375 sha_transform(hash, (__u8 *)(r->pool + i), workspace);
377 @@ -837,7 +955,8 @@ static void extract_buf(struct entropy_s
378 * brute-forcing the feedback as hard as brute-forcing the
381 - mix_pool_bytes_extract(r, hash, sizeof(hash), extract);
382 + __mix_pool_bytes(r, hash, sizeof(hash), extract);
383 + spin_unlock_irqrestore(&r->lock, flags);
386 * To avoid duplicates, we atomically extract a portion of the
387 @@ -860,12 +979,12 @@ static void extract_buf(struct entropy_s
390 static ssize_t extract_entropy(struct entropy_store *r, void *buf,
391 - size_t nbytes, int min, int reserved)
392 + size_t nbytes, int min, int reserved)
395 __u8 tmp[EXTRACT_SIZE];
396 - unsigned long flags;
398 + trace_extract_entropy(r->name, nbytes, r->entropy_count, _RET_IP_);
399 xfer_secondary_pool(r, nbytes);
400 nbytes = account(r, nbytes, min, reserved);
402 @@ -873,6 +992,8 @@ static ssize_t extract_entropy(struct en
406 + unsigned long flags;
408 spin_lock_irqsave(&r->lock, flags);
409 if (!memcmp(tmp, r->last_data, EXTRACT_SIZE))
410 panic("Hardware RNG duplicated output!\n");
411 @@ -898,6 +1019,7 @@ static ssize_t extract_entropy_user(stru
413 __u8 tmp[EXTRACT_SIZE];
415 + trace_extract_entropy_user(r->name, nbytes, r->entropy_count, _RET_IP_);
416 xfer_secondary_pool(r, nbytes);
417 nbytes = account(r, nbytes, 0, 0);
419 @@ -931,17 +1053,35 @@ static ssize_t extract_entropy_user(stru
422 * This function is the exported kernel interface. It returns some
423 - * number of good random numbers, suitable for seeding TCP sequence
425 + * number of good random numbers, suitable for key generation, seeding
426 + * TCP sequence numbers, etc. It does not use the hw random number
427 + * generator, if available; use get_random_bytes_arch() for that.
429 void get_random_bytes(void *buf, int nbytes)
431 + extract_entropy(&nonblocking_pool, buf, nbytes, 0, 0);
433 +EXPORT_SYMBOL(get_random_bytes);
436 + * This function will use the architecture-specific hardware random
437 + * number generator if it is available. The arch-specific hw RNG will
438 + * almost certainly be faster than what we can do in software, but it
439 + * is impossible to verify that it is implemented securely (as
440 + * opposed to, say, the AES encryption of a sequence number using a
441 + * key known by the NSA). So it's useful if we need the speed, but
442 + * only if we're willing to trust the hardware manufacturer not to
443 + * have put in a back door.
445 +void get_random_bytes_arch(void *buf, int nbytes)
449 + trace_get_random_bytes(nbytes, _RET_IP_);
452 int chunk = min(nbytes, (int)sizeof(unsigned long));
455 if (!arch_get_random_long(&v))
458 @@ -950,9 +1090,11 @@ void get_random_bytes(void *buf, int nby
462 - extract_entropy(&nonblocking_pool, p, nbytes, 0, 0);
464 + extract_entropy(&nonblocking_pool, p, nbytes, 0, 0);
466 -EXPORT_SYMBOL(get_random_bytes);
467 +EXPORT_SYMBOL(get_random_bytes_arch);
471 * init_std_data - initialize pool with system data
472 @@ -966,21 +1108,18 @@ EXPORT_SYMBOL(get_random_bytes);
473 static void init_std_data(struct entropy_store *r)
477 - unsigned long flags;
478 + ktime_t now = ktime_get_real();
481 - spin_lock_irqsave(&r->lock, flags);
482 r->entropy_count = 0;
483 - spin_unlock_irqrestore(&r->lock, flags);
485 - now = ktime_get_real();
486 - mix_pool_bytes(r, &now, sizeof(now));
487 - for (i = r->poolinfo->POOLBYTES; i > 0; i -= sizeof flags) {
488 - if (!arch_get_random_long(&flags))
489 + r->entropy_total = 0;
490 + mix_pool_bytes(r, &now, sizeof(now), NULL);
491 + for (i = r->poolinfo->POOLBYTES; i > 0; i -= sizeof(rv)) {
492 + if (!arch_get_random_long(&rv))
494 - mix_pool_bytes(r, &flags, sizeof(flags));
495 + mix_pool_bytes(r, &rv, sizeof(rv), NULL);
497 - mix_pool_bytes(r, utsname(), sizeof(*(utsname())));
498 + mix_pool_bytes(r, utsname(), sizeof(*(utsname())), NULL);
501 static int rand_initialize(void)
502 @@ -1117,7 +1256,7 @@ write_pool(struct entropy_store *r, cons
506 - mix_pool_bytes(r, buf, bytes);
507 + mix_pool_bytes(r, buf, bytes, NULL);
511 @@ -1274,6 +1413,7 @@ static int proc_do_uuid(ctl_table *table
514 static int sysctl_poolsize = INPUT_POOL_WORDS * 32;
515 +extern ctl_table random_table[];
516 ctl_table random_table[] = {
518 .procname = "poolsize",
519 @@ -1339,7 +1479,7 @@ late_initcall(random_int_secret_init);
520 * value is not cryptographically secure but for several uses the cost of
521 * depleting entropy is too high
523 -DEFINE_PER_CPU(__u32 [MD5_DIGEST_WORDS], get_random_int_hash);
524 +static DEFINE_PER_CPU(__u32 [MD5_DIGEST_WORDS], get_random_int_hash);
525 unsigned int get_random_int(void)
528 --- a/drivers/mfd/ab3100-core.c
529 +++ b/drivers/mfd/ab3100-core.c
530 @@ -409,8 +409,6 @@ static irqreturn_t ab3100_irq_handler(in
534 - add_interrupt_randomness(irq);
536 err = ab3100_get_register_page_interruptible(ab3100, AB3100_EVENTA1,
539 --- a/drivers/usb/core/hub.c
540 +++ b/drivers/usb/core/hub.c
542 #include <linux/kthread.h>
543 #include <linux/mutex.h>
544 #include <linux/freezer.h>
545 +#include <linux/random.h>
547 #include <asm/uaccess.h>
548 #include <asm/byteorder.h>
549 @@ -1896,6 +1897,14 @@ int usb_new_device(struct usb_device *ud
550 /* Tell the world! */
551 announce_device(udev);
554 + add_device_randomness(udev->serial, strlen(udev->serial));
556 + add_device_randomness(udev->product, strlen(udev->product));
557 + if (udev->manufacturer)
558 + add_device_randomness(udev->manufacturer,
559 + strlen(udev->manufacturer));
561 device_enable_async_suspend(&udev->dev);
562 /* Register the device. The device driver is responsible
563 * for configuring the device and invoking the add-device
564 --- a/include/linux/random.h
565 +++ b/include/linux/random.h
566 @@ -50,11 +50,13 @@ struct rnd_state {
568 extern void rand_initialize_irq(int irq);
570 +extern void add_device_randomness(const void *, unsigned int);
571 extern void add_input_randomness(unsigned int type, unsigned int code,
573 -extern void add_interrupt_randomness(int irq);
574 +extern void add_interrupt_randomness(int irq, int irq_flags);
576 extern void get_random_bytes(void *buf, int nbytes);
577 +extern void get_random_bytes_arch(void *buf, int nbytes);
578 void generate_random_uuid(unsigned char uuid_out[16]);
582 +++ b/include/trace/events/random.h
585 +#define TRACE_SYSTEM random
587 +#if !defined(_TRACE_RANDOM_H) || defined(TRACE_HEADER_MULTI_READ)
588 +#define _TRACE_RANDOM_H
590 +#include <linux/writeback.h>
591 +#include <linux/tracepoint.h>
593 +DECLARE_EVENT_CLASS(random__mix_pool_bytes,
594 + TP_PROTO(const char *pool_name, int bytes, unsigned long IP),
596 + TP_ARGS(pool_name, bytes, IP),
599 + __field( const char *, pool_name )
600 + __field( int, bytes )
601 + __field(unsigned long, IP )
605 + __entry->pool_name = pool_name;
606 + __entry->bytes = bytes;
610 + TP_printk("%s pool: bytes %d caller %pF",
611 + __entry->pool_name, __entry->bytes, (void *)__entry->IP)
614 +DEFINE_EVENT(random__mix_pool_bytes, mix_pool_bytes,
615 + TP_PROTO(const char *pool_name, int bytes, unsigned long IP),
617 + TP_ARGS(pool_name, bytes, IP)
620 +DEFINE_EVENT(random__mix_pool_bytes, mix_pool_bytes_nolock,
621 + TP_PROTO(const char *pool_name, int bytes, unsigned long IP),
623 + TP_ARGS(pool_name, bytes, IP)
626 +TRACE_EVENT(credit_entropy_bits,
627 + TP_PROTO(const char *pool_name, int bits, int entropy_count,
628 + int entropy_total, unsigned long IP),
630 + TP_ARGS(pool_name, bits, entropy_count, entropy_total, IP),
633 + __field( const char *, pool_name )
634 + __field( int, bits )
635 + __field( int, entropy_count )
636 + __field( int, entropy_total )
637 + __field(unsigned long, IP )
641 + __entry->pool_name = pool_name;
642 + __entry->bits = bits;
643 + __entry->entropy_count = entropy_count;
644 + __entry->entropy_total = entropy_total;
648 + TP_printk("%s pool: bits %d entropy_count %d entropy_total %d "
649 + "caller %pF", __entry->pool_name, __entry->bits,
650 + __entry->entropy_count, __entry->entropy_total,
651 + (void *)__entry->IP)
654 +TRACE_EVENT(get_random_bytes,
655 + TP_PROTO(int nbytes, unsigned long IP),
657 + TP_ARGS(nbytes, IP),
660 + __field( int, nbytes )
661 + __field(unsigned long, IP )
665 + __entry->nbytes = nbytes;
669 + TP_printk("nbytes %d caller %pF", __entry->nbytes, (void *)__entry->IP)
672 +DECLARE_EVENT_CLASS(random__extract_entropy,
673 + TP_PROTO(const char *pool_name, int nbytes, int entropy_count,
676 + TP_ARGS(pool_name, nbytes, entropy_count, IP),
679 + __field( const char *, pool_name )
680 + __field( int, nbytes )
681 + __field( int, entropy_count )
682 + __field(unsigned long, IP )
686 + __entry->pool_name = pool_name;
687 + __entry->nbytes = nbytes;
688 + __entry->entropy_count = entropy_count;
692 + TP_printk("%s pool: nbytes %d entropy_count %d caller %pF",
693 + __entry->pool_name, __entry->nbytes, __entry->entropy_count,
694 + (void *)__entry->IP)
698 +DEFINE_EVENT(random__extract_entropy, extract_entropy,
699 + TP_PROTO(const char *pool_name, int nbytes, int entropy_count,
702 + TP_ARGS(pool_name, nbytes, entropy_count, IP)
705 +DEFINE_EVENT(random__extract_entropy, extract_entropy_user,
706 + TP_PROTO(const char *pool_name, int nbytes, int entropy_count,
709 + TP_ARGS(pool_name, nbytes, entropy_count, IP)
714 +#endif /* _TRACE_RANDOM_H */
716 +/* This part must be outside protection */
717 +#include <trace/define_trace.h>
718 --- a/kernel/irq/handle.c
719 +++ b/kernel/irq/handle.c
720 @@ -117,7 +117,7 @@ irqreturn_t
721 handle_irq_event_percpu(struct irq_desc *desc, struct irqaction *action)
723 irqreturn_t retval = IRQ_NONE;
724 - unsigned int random = 0, irq = desc->irq_data.irq;
725 + unsigned int flags = 0, irq = desc->irq_data.irq;
729 @@ -145,7 +145,7 @@ handle_irq_event_percpu(struct irq_desc
731 /* Fall through to add to randomness */
733 - random |= action->flags;
734 + flags |= action->flags;
738 @@ -156,8 +156,7 @@ handle_irq_event_percpu(struct irq_desc
739 action = action->next;
742 - if (random & IRQF_SAMPLE_RANDOM)
743 - add_interrupt_randomness(irq);
744 + add_interrupt_randomness(irq, flags);
747 note_interrupt(irq, desc, retval);
750 @@ -1176,6 +1176,7 @@ static int __dev_open(struct net_device
752 dev_set_rx_mode(dev);
754 + add_device_randomness(dev->dev_addr, dev->addr_len);
758 @@ -4823,6 +4824,7 @@ int dev_set_mac_address(struct net_devic
759 err = ops->ndo_set_mac_address(dev, sa);
761 call_netdevice_notifiers(NETDEV_CHANGEADDR, dev);
762 + add_device_randomness(dev->dev_addr, dev->addr_len);
765 EXPORT_SYMBOL(dev_set_mac_address);
766 @@ -5602,6 +5604,7 @@ int register_netdevice(struct net_device
767 dev_init_scheduler(dev);
770 + add_device_randomness(dev->dev_addr, dev->addr_len);
772 /* Notify protocols, that a new device appeared. */
773 ret = call_netdevice_notifiers(NETDEV_REGISTER, dev);
774 --- a/net/core/rtnetlink.c
775 +++ b/net/core/rtnetlink.c
776 @@ -1371,6 +1371,7 @@ static int do_setlink(struct net_device
778 send_addr_notify = 1;
780 + add_device_randomness(dev->dev_addr, dev->addr_len);