fix md_rand.c locking bugs

CHANGES
@@ -4,6 +4,19 @@
 Changes between 0.9.6a and 0.9.6b  [XX xxx XXXX]

+  *) Move 'if (!initialized) RAND_poll()' into regions protected by
+     CRYPTO_LOCK_RAND.  This is not strictly necessary, but avoids
+     having multiple threads calling RAND_poll() concurrently.
+     [Bodo Moeller]
+
+  *) In crypto/rand/md_rand.c, replace 'add_do_not_lock' flag by a
+     combination of a flag and a thread ID variable.
+     Otherwise while one thread is in ssleay_rand_bytes (which sets the
+     flag), *other* threads can enter ssleay_rand_add without obeying
+     the CRYPTO_LOCK_RAND lock (and may even illegally release the lock
+     that they do not hold after the first thread unsets add_do_not_lock).
+     [Bodo Moeller]
+
   *) Change bctest again: '-x' expressions are not available in all
      versions of 'test'.
      [Bodo Moeller]
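
The fix rests on a small re-entrancy pattern: a lock-ownership flag plus the
owning thread's ID. A function that may be re-entered while its caller already
holds the lock checks both before locking, so only the thread that actually
holds CRYPTO_LOCK_RAND skips it; every other thread still contends for the
lock. Below is a minimal standalone sketch of that pattern, using POSIX
threads in place of OpenSSL's CRYPTO_w_lock()/CRYPTO_thread_id() wrappers.
All names in it are illustrative, not from this commit, and, like the commit
itself, it reads the flag outside the lock and relies on that being benign.

    #include <pthread.h>

    static pthread_mutex_t pool_lock = PTHREAD_MUTEX_INITIALIZER;
    static int lock_is_held = 0;  /* set only while pool_lock is held */
    static pthread_t lock_owner;  /* valid iff lock_is_held is set */

    static void seed_pool(const void *buf, int num)
        {
        /* Skip locking only if *this* thread already holds the lock.
         * A plain boolean (the old 'add_do_not_lock') is not enough:
         * it would let other threads skip the lock as well. */
        int do_not_lock = lock_is_held
                          && pthread_equal(lock_owner, pthread_self());

        if (!do_not_lock) pthread_mutex_lock(&pool_lock);
        /* ... mix 'buf' into the shared pool here ... */
        if (!do_not_lock) pthread_mutex_unlock(&pool_lock);
        }

    static void get_bytes(unsigned char *buf, int num)
        {
        pthread_mutex_lock(&pool_lock);
        lock_owner = pthread_self();  /* record ownership before re-entry */
        lock_is_held = 1;

        seed_pool(buf, num);          /* re-enters without double locking */

        lock_is_held = 0;             /* clear before unlocking */
        pthread_mutex_unlock(&pool_lock);
        }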
crypto/rand/md_rand.c
@@ -141,10 +141,11 @@ static long md_count[2]={0,0};
 static double entropy=0;
 static int initialized=0;

-/* This should be set to 1 only when ssleay_rand_add() is called inside
-   an already locked state, so it doesn't try to lock and thereby cause
-   a hang.  And it should always be reset back to 0 before unlocking. */
-static int add_do_not_lock=0;
+static unsigned int crypto_lock_rand = 0; /* may be set only when a thread
+                                           * holds CRYPTO_LOCK_RAND
+                                           * (to prevent double locking) */
+static unsigned long locking_thread = 0; /* valid iff crypto_lock_rand is set */
+

 #ifdef PREDICT
 int rand_predictable=0;
@@ -191,6 +192,7 @@ static void ssleay_rand_add(const void *buf, int num, double add)
 	long md_c[2];
 	unsigned char local_md[MD_DIGEST_LENGTH];
 	MD_CTX m;
+	int do_not_lock;

 	/*
 	 * (Based on the rand(3) manpage)
@@ -207,7 +209,10 @@ static void ssleay_rand_add(const void *buf, int num, double add)
 	 * hash function.
 	 */

-	if (!add_do_not_lock) CRYPTO_w_lock(CRYPTO_LOCK_RAND);
+	/* check if we already have the lock */
+	do_not_lock = crypto_lock_rand && (locking_thread == CRYPTO_thread_id());
+
+	if (!do_not_lock) CRYPTO_w_lock(CRYPTO_LOCK_RAND);
 	st_idx=state_index;

 	/* use our own copies of the counters so that even
@@ -239,7 +244,7 @@ static void ssleay_rand_add(const void *buf, int num, double add)

 	md_count[1] += (num / MD_DIGEST_LENGTH) + (num % MD_DIGEST_LENGTH > 0);

-	if (!add_do_not_lock) CRYPTO_w_unlock(CRYPTO_LOCK_RAND);
+	if (!do_not_lock) CRYPTO_w_unlock(CRYPTO_LOCK_RAND);

 	for (i=0; i<num; i+=MD_DIGEST_LENGTH)
 		{
@@ -281,7 +286,7 @@ static void ssleay_rand_add(const void *buf, int num, double add)
 		}
 	memset((char *)&m,0,sizeof(m));

-	if (!add_do_not_lock) CRYPTO_w_lock(CRYPTO_LOCK_RAND);
+	if (!do_not_lock) CRYPTO_w_lock(CRYPTO_LOCK_RAND);
 	/* Don't just copy back local_md into md -- this could mean that
 	 * other thread's seeding remains without effect (except for
 	 * the incremented counter).  By XORing it we keep at least as
@@ -292,7 +297,7 @@ static void ssleay_rand_add(const void *buf, int num, double add)
 		}
 	if (entropy < ENTROPY_NEEDED) /* stop counting when we have enough */
 		entropy += add;
-	if (!add_do_not_lock) CRYPTO_w_unlock(CRYPTO_LOCK_RAND);
+	if (!do_not_lock) CRYPTO_w_unlock(CRYPTO_LOCK_RAND);

 #if !defined(THREADS) && !defined(WIN32)
 	assert(md_c[1] == md_count[1]);
@@ -347,14 +352,18 @@ static int ssleay_rand_bytes(unsigned char *buf, int num)
 	 * global 'md'.
 	 */

-	if (!initialized)
-		RAND_poll();
-
 	CRYPTO_w_lock(CRYPTO_LOCK_RAND);
-	add_do_not_lock = 1; /* Since we call ssleay_rand_add while in
-	                        this locked state. */

-	initialized = 1;
+	/* prevent ssleay_rand_bytes() from trying to obtain the lock again */
+	crypto_lock_rand = 1;
+	locking_thread = CRYPTO_thread_id();
+
+	if (!initialized)
+		{
+		RAND_poll();
+		initialized = 1;
+		}

 	if (!stirred_pool)
 		do_stir_pool = 1;
@@ -418,8 +427,9 @@ static int ssleay_rand_bytes(unsigned char *buf, int num)

 	md_count[0] += 1;

-	add_do_not_lock = 0; /* If this would ever be forgotten, we can
-	                        expect any evil god to eat our souls. */
+	/* before unlocking, we must clear 'crypto_lock_rand' */
+	crypto_lock_rand = 0;
+	locking_thread = 0;
 	CRYPTO_w_unlock(CRYPTO_LOCK_RAND);

 	while (num > 0)
@@ -498,14 +508,37 @@ static int ssleay_rand_pseudo_bytes(unsigned char *buf, int num)
 static int ssleay_rand_status(void)
 	{
 	int ret;
+	int do_not_lock;

+	/* check if we already have the lock
+	 * (could happen if a RAND_poll() implementation calls RAND_status()) */
+	do_not_lock = crypto_lock_rand && (locking_thread == CRYPTO_thread_id());
+
-	if (!initialized)
-		RAND_poll();
-
+	if (!do_not_lock)
+		{
+		CRYPTO_w_lock(CRYPTO_LOCK_RAND);
+
+		/* prevent ssleay_rand_bytes() from trying to obtain the lock again */
+		crypto_lock_rand = 1;
+		locking_thread = CRYPTO_thread_id();
+		}
+
+	if (!initialized)
+		{
+		RAND_poll();
+		initialized = 1;
+		}
+
-	CRYPTO_w_lock(CRYPTO_LOCK_RAND);
-	initialized = 1;
 	ret = entropy >= ENTROPY_NEEDED;
-	CRYPTO_w_unlock(CRYPTO_LOCK_RAND);
+
+	if (!do_not_lock)
+		{
+		/* before unlocking, we must clear 'crypto_lock_rand' */
+		crypto_lock_rand = 0;
+		locking_thread = 0;
+
+		CRYPTO_w_unlock(CRYPTO_LOCK_RAND);
+		}

 	return ret;
 	}
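
For reference, the race the second CHANGES entry describes, shown as a
hypothetical interleaving under the old single 'add_do_not_lock' flag
(thread numbering illustrative):

    thread 1                              thread 2
    --------                              --------
    ssleay_rand_bytes():
      CRYPTO_w_lock(CRYPTO_LOCK_RAND)
      add_do_not_lock = 1
      ...                                 ssleay_rand_add():
                                            sees add_do_not_lock != 0,
                                            skips the lock, mutates the
                                            shared pool unprotected
      add_do_not_lock = 0
      CRYPTO_w_unlock(CRYPTO_LOCK_RAND)
                                            sees add_do_not_lock == 0 on
                                            exit, calls CRYPTO_w_unlock on
                                            a lock it never acquired

The new check, do_not_lock = crypto_lock_rand && (locking_thread ==
CRYPTO_thread_id()), closes both holes: only the thread that actually holds
CRYPTO_LOCK_RAND skips locking, and because the result is cached in the
local 'do_not_lock', each call makes the same decision at its lock and
unlock sites.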