ec_cvt.c: performance update from HEAD.
parent d807d4c21f
commit 17674bfdf7
@@ -78,7 +78,32 @@ EC_GROUP *EC_GROUP_new_curve_GFp(const BIGNUM *p, const BIGNUM *a, const BIGNUM
     const EC_METHOD *meth;
     EC_GROUP *ret;
 
+#if defined(OPENSSL_BN_ASM_MONT)
+    /*
+     * This might appear controversial, but the fact is that generic
+     * prime method was observed to deliver better performance even
+     * for NIST primes on a range of platforms, e.g.: 60%-15%
+     * improvement on IA-64, ~25% on ARM, 30%-90% on P4, 20%-25%
+     * in 32-bit build and 35%--12% in 64-bit build on Core2...
+     * Coefficients are relative to optimized bn_nist.c for most
+     * intensive ECDSA verify and ECDH operations for 192- and 521-
+     * bit keys respectively. Choice of these boundary values is
+     * arguable, because the dependency of improvement coefficient
+     * from key length is not a "monotone" curve. For example while
+     * 571-bit result is 23% on ARM, 384-bit one is -1%. But it's
+     * generally faster, sometimes "respectfully" faster, sometimes
+     * "tolerably" slower... What effectively happens is that loop
+     * with bn_mul_add_words is put against bn_mul_mont, and the
+     * latter "wins" on short vectors. Correct solution should be
+     * implementing dedicated NxN multiplication subroutines for
+     * small N. But till it materializes, let's stick to generic
+     * prime method...
+     *                              <appro>
+     */
+    meth = EC_GFp_mont_method();
+#else
     meth = EC_GFp_nist_method();
+#endif
 
     ret = EC_GROUP_new(meth);
     if (ret == NULL)
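The method picked here only selects the underlying field arithmetic; callers who want to pin one implementation regardless of OPENSSL_BN_ASM_MONT can build the group by hand with EC_GROUP_new() and EC_GROUP_set_curve_GFp(). A minimal sketch of that, assuming the NIST P-256 parameters purely for illustration (error handling abbreviated, values hard-coded):

/* Hypothetical sketch: construct an EC_GROUP over the P-256 prime with an
 * explicitly chosen field method, instead of letting EC_GROUP_new_curve_GFp
 * decide via the #if above. Curve choice and hex constants are illustrative. */
#include <openssl/bn.h>
#include <openssl/ec.h>
#include <stdio.h>

int main(void)
{
    BN_CTX *ctx = BN_CTX_new();
    BIGNUM *p = NULL, *a = NULL, *b = NULL;
    EC_GROUP *grp = NULL;
    int ok = 0;

    /* NIST P-256 field prime and curve coefficients (y^2 = x^3 + ax + b mod p) */
    BN_hex2bn(&p, "FFFFFFFF00000001000000000000000000000000"
                  "FFFFFFFFFFFFFFFFFFFFFFFF");
    BN_hex2bn(&a, "FFFFFFFF00000001000000000000000000000000"
                  "FFFFFFFFFFFFFFFFFFFFFFFC");
    BN_hex2bn(&b, "5AC635D8AA3A93E7B3EBBD55769886BC"
                  "651D06B0CC53B0F63BCE3C3E27D2604B");

    /* Pin the generic Montgomery-based method; EC_GFp_nist_method() would
     * pin the bn_nist.c path instead (it only accepts the NIST primes). */
    grp = EC_GROUP_new(EC_GFp_mont_method());
    if (grp != NULL && ctx != NULL && p != NULL && a != NULL && b != NULL)
        ok = EC_GROUP_set_curve_GFp(grp, p, a, b, ctx);

    printf("group setup %s\n", ok ? "succeeded" : "failed");

    EC_GROUP_free(grp);
    BN_free(p);
    BN_free(a);
    BN_free(b);
    BN_CTX_free(ctx);
    return ok ? 0 : 1;
}

A generator and order would still have to be attached with EC_GROUP_set_generator() before such a group is usable for ECDSA or ECDH; the sketch only shows where the mont/nist choice is made.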