diff --git a/CHANGES b/CHANGES
index 9ae334fad..f284da369 100644
--- a/CHANGES
+++ b/CHANGES
@@ -11,6 +11,19 @@
      *) applies to 0.9.6a (/0.9.6b) and 0.9.7
      +) applies to 0.9.7 only
 
+  *) Move 'if (!initialized) RAND_poll()' into regions protected by
+     CRYPTO_LOCK_RAND.  This is not strictly necessary, but avoids
+     having multiple threads calling RAND_poll() concurrently.
+     [Bodo Moeller]
+
+  *) In crypto/rand/md_rand.c, replace the 'add_do_not_lock' flag by a
+     combination of a flag and a thread ID variable.
+     Otherwise while one thread is in ssleay_rand_bytes (which sets the
+     flag), *other* threads can enter ssleay_rand_add without obeying
+     the CRYPTO_LOCK_RAND lock (and may even illegally release the lock
+     that they do not hold after the first thread unsets add_do_not_lock).
+     [Bodo Moeller]
+
   +) Implement binary inversion algorithm for BN_mod_inverse in addition
      to the algorithm using long division.  The binary algorithm can be
      used only if the modulus is odd.  On 32-bit systems, it is faster
diff --git a/crypto/rand/md_rand.c b/crypto/rand/md_rand.c
index 98c6a39f5..c8728a07b 100644
--- a/crypto/rand/md_rand.c
+++ b/crypto/rand/md_rand.c
@@ -141,10 +141,11 @@ static long md_count[2]={0,0};
 static double entropy=0;
 static int initialized=0;
 
-/* This should be set to 1 only when ssleay_rand_add() is called inside
-   an already locked state, so it doesn't try to lock and thereby cause
-   a hang.  And it should always be reset back to 0 before unlocking. */
-static int add_do_not_lock=0;
+static unsigned int crypto_lock_rand = 0; /* may be set only when a thread
+                                           * holds CRYPTO_LOCK_RAND
+                                           * (to prevent double locking) */
+static unsigned long locking_thread = 0; /* valid iff crypto_lock_rand is set */
+
 
 #ifdef PREDICT
 int rand_predictable=0;
@@ -191,6 +192,7 @@ static void ssleay_rand_add(const void *buf, int num, double add)
 	long md_c[2];
 	unsigned char local_md[MD_DIGEST_LENGTH];
 	MD_CTX m;
+	int do_not_lock;
 
 	/*
 	 * (Based on the rand(3) manpage)
@@ -207,7 +209,10 @@ static void ssleay_rand_add(const void *buf, int num, double add)
 	 * hash function.
 	 */
 
-	if (!add_do_not_lock) CRYPTO_w_lock(CRYPTO_LOCK_RAND);
+	/* check if we already have the lock */
+	do_not_lock = crypto_lock_rand && (locking_thread == CRYPTO_thread_id());
+
+	if (!do_not_lock) CRYPTO_w_lock(CRYPTO_LOCK_RAND);
 	st_idx=state_index;
 
 	/* use our own copies of the counters so that even
@@ -239,7 +244,7 @@ static void ssleay_rand_add(const void *buf, int num, double add)
 	md_count[1] += (num / MD_DIGEST_LENGTH) +
 		(num % MD_DIGEST_LENGTH > 0);
 
-	if (!add_do_not_lock) CRYPTO_w_unlock(CRYPTO_LOCK_RAND);
+	if (!do_not_lock) CRYPTO_w_unlock(CRYPTO_LOCK_RAND);
 
 	for (i=0; i<num; i+=MD_DIGEST_LENGTH)
 		{
@@ -498,14 +508,37 @@ static int ssleay_rand_pseudo_bytes(unsigned char *buf, int num)
 static int ssleay_rand_status(void)
 	{
 	int ret;
+	int do_not_lock;
 
+	/* check if we already have the lock
+	 * (could happen if a RAND_poll() implementation calls RAND_status()) */
+	do_not_lock = crypto_lock_rand && (locking_thread == CRYPTO_thread_id());
+
+	if (!do_not_lock)
+		{
+		CRYPTO_w_lock(CRYPTO_LOCK_RAND);
+
+		/* prevent ssleay_rand_bytes() from trying to obtain the lock again */
+		crypto_lock_rand = 1;
+		locking_thread = CRYPTO_thread_id();
+		}
+
 	if (!initialized)
+		{
 		RAND_poll();
+		initialized = 1;
+		}
 
-	CRYPTO_w_lock(CRYPTO_LOCK_RAND);
-	initialized = 1;
 	ret = entropy >= ENTROPY_NEEDED;
-	CRYPTO_w_unlock(CRYPTO_LOCK_RAND);
+
+	if (!do_not_lock)
+		{
+		/* before unlocking, we must clear 'crypto_lock_rand' */
+		crypto_lock_rand = 0;
+		locking_thread = 0;
+
+		CRYPTO_w_unlock(CRYPTO_LOCK_RAND);
+		}
+
 	return ret;
 	}
diff --git a/crypto/rand/rand_unix.c b/crypto/rand/rand_unix.c
index 63b5efc32..c491280b1 100644
--- a/crypto/rand/rand_unix.c
+++ b/crypto/rand/rand_unix.c
@@ -228,8 +228,9 @@ int RAND_poll(void)
 
 #if defined(DEVRANDOM) || defined(DEVRANDOM_EGD)
 	return 1;
-#endif
+#else
 	return 0;
+#endif
 
 }
 #endif
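
Note on the locking protocol (an illustration, not part of the patch):
per the CHANGES entry and the "prevent ssleay_rand_bytes() from trying
to obtain the lock again" comment above, ssleay_rand_bytes() takes
CRYPTO_LOCK_RAND and then records itself in 'crypto_lock_rand' and
'locking_thread', so that a nested call into ssleay_rand_add() skips
the lock in that thread only.  The sketch below shows the same
flag-plus-thread-ID pattern in isolation, using POSIX threads in place
of CRYPTO_w_lock()/CRYPTO_thread_id(); every name in it (rand_lock,
lock_held, holder, pool_add, pool_bytes) is made up for the example.

#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t rand_lock = PTHREAD_MUTEX_INITIALIZER;

static int lock_held = 0;	/* may be set only while rand_lock is held */
static pthread_t holder;	/* valid iff lock_held is set */

/* analogue of ssleay_rand_add(): lock, unless this thread already holds
 * the lock -- detected by the flag *and* the thread ID, never the flag
 * alone */
static void pool_add(int sample)
	{
	int do_not_lock = lock_held && pthread_equal(holder, pthread_self());

	if (!do_not_lock) pthread_mutex_lock(&rand_lock);
	printf("mixing %d into the pool\n", sample);	/* stands in for the MD update */
	if (!do_not_lock) pthread_mutex_unlock(&rand_lock);
	}

/* analogue of ssleay_rand_bytes(): take the lock, then mark this thread
 * as the holder so that nested pool_add() calls do not deadlock */
static void pool_bytes(void)
	{
	pthread_mutex_lock(&rand_lock);
	lock_held = 1;
	holder = pthread_self();

	pool_add(42);	/* nested call: skips locking in this thread only */

	/* before unlocking, we must clear 'lock_held' (cf. the patch) */
	lock_held = 0;
	pthread_mutex_unlock(&rand_lock);
	}

int main(void)
	{
	pool_bytes();	/* prints once, then exits; no deadlock */
	return 0;
	}

A bare flag is not enough here: any *other* thread that runs while the
flag is set would skip the lock as well, and could later release a lock
it never acquired, which is exactly the failure mode the CHANGES entry
describes for 'add_do_not_lock'.  Pairing the flag with the holder's
thread ID limits the bypass to the one thread that actually owns the
lock.  (Other threads may read 'lock_held' without the mutex, as in the
patch; they can never match 'holder' against their own thread ID.)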