#ifndef CRYPTOPP_MISC_H
#define CRYPTOPP_MISC_H

#include "cryptlib.h"
#include "smartptr.h"
#include <string.h>

#ifdef _MSC_VER
#include <stdlib.h>
#if _MSC_VER >= 1400

#define _interlockedbittestandset CRYPTOPP_DISABLED_INTRINSIC_1
#define _interlockedbittestandreset CRYPTOPP_DISABLED_INTRINSIC_2
#define _interlockedbittestandset64 CRYPTOPP_DISABLED_INTRINSIC_3
#define _interlockedbittestandreset64 CRYPTOPP_DISABLED_INTRINSIC_4
#include <intrin.h>
#undef _interlockedbittestandset
#undef _interlockedbittestandreset
#undef _interlockedbittestandset64
#undef _interlockedbittestandreset64
#define CRYPTOPP_FAST_ROTATE(x) 1
#elif _MSC_VER >= 1300
#define CRYPTOPP_FAST_ROTATE(x) ((x) == 32 || (x) == 64)
#else
#define CRYPTOPP_FAST_ROTATE(x) ((x) == 32)
#endif
#elif (defined(__MWERKS__) && TARGET_CPU_PPC) || \
    (defined(__GNUC__) && (defined(_ARCH_PWR2) || defined(_ARCH_PWR) || defined(_ARCH_PPC) || defined(_ARCH_PPC64) || defined(_ARCH_COM)))
#define CRYPTOPP_FAST_ROTATE(x) ((x) == 32)
#elif defined(__GNUC__) && (CRYPTOPP_BOOL_X64 || CRYPTOPP_BOOL_X86) // depend on GCC's peephole optimization to generate rotate instructions
#define CRYPTOPP_FAST_ROTATE(x) 1
#else
#define CRYPTOPP_FAST_ROTATE(x) 0
#endif
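
// CRYPTOPP_FAST_ROTATE(x) evaluates to 1 when a rotate of an x-bit word is expected
// to compile down to a single rotate instruction on this platform (via compiler
// intrinsics or the compiler's own peephole optimization), and to 0 otherwise.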

#ifdef __BORLANDC__
#include <mem.h>
#endif

#if defined(__GNUC__) && defined(__linux__)
#define CRYPTOPP_BYTESWAP_AVAILABLE
#include <byteswap.h>
#endif

NAMESPACE_BEGIN(CryptoPP)

template <bool b>
struct CompileAssert
{
    static char dummy[2*b-1];
};

#define CRYPTOPP_COMPILE_ASSERT(assertion) CRYPTOPP_COMPILE_ASSERT_INSTANCE(assertion, __LINE__)
#if defined(CRYPTOPP_EXPORTS) || defined(CRYPTOPP_IMPORTS)
#define CRYPTOPP_COMPILE_ASSERT_INSTANCE(assertion, instance)
#else
#define CRYPTOPP_COMPILE_ASSERT_INSTANCE(assertion, instance) static CompileAssert<(assertion)> CRYPTOPP_ASSERT_JOIN(cryptopp_assert_, instance)
#endif
#define CRYPTOPP_ASSERT_JOIN(X, Y) CRYPTOPP_DO_ASSERT_JOIN(X, Y)
#define CRYPTOPP_DO_ASSERT_JOIN(X, Y) X##Y
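
// CRYPTOPP_COMPILE_ASSERT(expr) forces a compile-time error when expr is false:
// a false expression gives CompileAssert::dummy a negative array size.
// A typical use, for example:
//   CRYPTOPP_COMPILE_ASSERT(sizeof(word32) == 4);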

class CRYPTOPP_DLL Empty
{
};

template <class BASE1, class BASE2>
class CRYPTOPP_NO_VTABLE TwoBases : public BASE1, public BASE2
{
};

template <class BASE1, class BASE2, class BASE3>
class CRYPTOPP_NO_VTABLE ThreeBases : public BASE1, public BASE2, public BASE3
{
};

template <class T>
class ObjectHolder
{
protected:
    T m_object;
};

class NotCopyable
{
public:
    NotCopyable() {}
private:
    NotCopyable(const NotCopyable &);
    void operator=(const NotCopyable &);
};

template <class T>
struct NewObject
{
    T* operator()() const {return new T;}
};

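// Singleton<T, F, instance> lazily creates a single shared T using the factory F
// (NewObject<T> by default) the first time Ref() is called, and returns the same
// object on every later call. For example, one might write:
//   const MyTables &tables = Singleton<MyTables>().Ref();
// where MyTables is a hypothetical default-constructible type. Note that the lazy
// initialization in Ref() below spins on a simple state flag and is not guaranteed
// to be thread-safe on every platform.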
template <class T, class F = NewObject<T>, int instance=0>
class Singleton
{
public:
    Singleton(F objectFactory = F()) : m_objectFactory(objectFactory) {}

    CRYPTOPP_NOINLINE const T & Ref(CRYPTOPP_NOINLINE_DOTDOTDOT) const;

private:
    F m_objectFactory;
};

template <class T, class F, int instance>
const T & Singleton<T, F, instance>::Ref(CRYPTOPP_NOINLINE_DOTDOTDOT) const
{
    static simple_ptr<T> s_pObject;
    static volatile char s_objectState = 0;

retry:
    switch (s_objectState)
    {
    case 0:
        s_objectState = 1;
        try
        {
            s_pObject.m_p = m_objectFactory();
        }
        catch(...)
        {
            s_objectState = 0;
            throw;
        }
        s_objectState = 2;
        break;
    case 1:
        goto retry;
    default:
        break;
    }
    return *s_pObject.m_p;
}

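// Bounds-checked memcpy/memmove fallbacks for platforms that do not provide the
// "secure" CRT functions; they throw InvalidArgument instead of overrunning dest.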
#if (!__STDC_WANT_SECURE_LIB__)
inline void memcpy_s(void *dest, size_t sizeInBytes, const void *src, size_t count)
{
    if (count > sizeInBytes)
        throw InvalidArgument("memcpy_s: buffer overflow");
    memcpy(dest, src, count);
}

inline void memmove_s(void *dest, size_t sizeInBytes, const void *src, size_t count)
{
    if (count > sizeInBytes)
        throw InvalidArgument("memmove_s: buffer overflow");
    memmove(dest, src, count);
}
#endif

inline void * memset_z(void *ptr, int value, size_t num)
{
#if CRYPTOPP_GCC_VERSION >= 30001
    if (__builtin_constant_p(num) && num==0)
        return ptr;
#endif
    return memset(ptr, value, num);
}

template <class T> inline const T& STDMIN(const T& a, const T& b)
{
    return b < a ? b : a;
}

template <class T1, class T2> inline const T1 UnsignedMin(const T1& a, const T2& b)
{
    CRYPTOPP_COMPILE_ASSERT((sizeof(T1)<=sizeof(T2) && T2(-1)>0) || (sizeof(T1)>sizeof(T2) && T1(-1)>0));
    assert(a==0 || a>0);
    assert(b>=0);

    if (sizeof(T1)<=sizeof(T2))
        return b < (T2)a ? (T1)b : a;
    else
        return (T1)b < a ? (T1)b : a;
}

template <class T> inline const T& STDMAX(const T& a, const T& b)
{
    return a < b ? b : a;
}

#define RETURN_IF_NONZERO(x) size_t returnedValue = x; if (returnedValue) return returnedValue

#define GETBYTE(x, y) (unsigned int)byte((x)>>(8*(y)))

#define CRYPTOPP_GET_BYTE_AS_BYTE(x, y) byte((x)>>(8*(y)))

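// Parity returns 1 when an odd number of bits are set in value and 0 otherwise,
// by repeatedly folding the upper half of the value onto the lower half with XOR.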
template <class T>
unsigned int Parity(T value)
{
    for (unsigned int i=8*sizeof(value)/2; i>0; i/=2)
        value ^= value >> i;
    return (unsigned int)value&1;
}

template <class T>
unsigned int BytePrecision(const T &value)
{
    if (!value)
        return 0;

    unsigned int l=0, h=8*sizeof(value);

    while (h-l > 8)
    {
        unsigned int t = (l+h)/2;
        if (value >> t)
            l = t;
        else
            h = t;
    }

    return h/8;
}

template <class T>
unsigned int BitPrecision(const T &value)
{
    if (!value)
        return 0;

    unsigned int l=0, h=8*sizeof(value);

    while (h-l > 1)
    {
        unsigned int t = (l+h)/2;
        if (value >> t)
            l = t;
        else
            h = t;
    }

    return h;
}

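// TrailingZeros returns the number of consecutive zero bits starting at the least
// significant bit. It uses compiler intrinsics where available and otherwise falls
// back to a De Bruijn multiplication lookup. The result is undefined when v is 0.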
inline unsigned int TrailingZeros(word32 v)
{
#if defined(__GNUC__) && CRYPTOPP_GCC_VERSION >= 30400
    return __builtin_ctz(v);
#elif defined(_MSC_VER) && _MSC_VER >= 1400
    unsigned long result;
    _BitScanForward(&result, v);
    return result;
#else
    static const int MultiplyDeBruijnBitPosition[32] =
    {
        0, 1, 28, 2, 29, 14, 24, 3, 30, 22, 20, 15, 25, 17, 4, 8,
        31, 27, 13, 23, 21, 19, 16, 7, 26, 12, 18, 6, 11, 5, 10, 9
    };
    return MultiplyDeBruijnBitPosition[((word32)((v & -v) * 0x077CB531U)) >> 27];
#endif
}

inline unsigned int TrailingZeros(word64 v)
{
#if defined(__GNUC__) && CRYPTOPP_GCC_VERSION >= 30400
    return __builtin_ctzll(v);
#elif defined(_MSC_VER) && _MSC_VER >= 1400 && (defined(_M_X64) || defined(_M_IA64))
    unsigned long result;
    _BitScanForward64(&result, v);
    return result;
#else
    return word32(v) ? TrailingZeros(word32(v)) : 32 + TrailingZeros(word32(v>>32));
#endif
}

template <class T>
inline T Crop(T value, size_t size)
{
    if (size < 8*sizeof(value))
        return T(value & ((T(1) << size) - 1));
    else
        return value;
}

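// SafeConvert assigns from to to and returns false if the value was truncated or
// changed sign during the conversion, true otherwise.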
template <class T1, class T2>
inline bool SafeConvert(T1 from, T2 &to)
{
    to = (T2)from;
    if (from != to || (from > 0) != (to > 0))
        return false;
    return true;
}

inline size_t BitsToBytes(size_t bitCount)
{
    return ((bitCount+7)/(8));
}

inline size_t BytesToWords(size_t byteCount)
{
    return ((byteCount+WORD_SIZE-1)/WORD_SIZE);
}

inline size_t BitsToWords(size_t bitCount)
{
    return ((bitCount+WORD_BITS-1)/(WORD_BITS));
}

inline size_t BitsToDwords(size_t bitCount)
{
    return ((bitCount+2*WORD_BITS-1)/(2*WORD_BITS));
}

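// xorbuf XORs mask into buf (or XORs input with mask into output). VerifyBufsEqual
// compares two equal-length buffers and is intended to take the same amount of time
// whether or not the buffers match, for use in security-sensitive comparisons.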
CRYPTOPP_DLL void CRYPTOPP_API xorbuf(byte *buf, const byte *mask, size_t count);
CRYPTOPP_DLL void CRYPTOPP_API xorbuf(byte *output, const byte *input, const byte *mask, size_t count);

CRYPTOPP_DLL bool CRYPTOPP_API VerifyBufsEqual(const byte *buf1, const byte *buf2, size_t count);

template <class T>
inline bool IsPowerOf2(const T &n)
{
    return n > 0 && (n & (n-1)) == 0;
}

template <class T1, class T2>
inline T2 ModPowerOf2(const T1 &a, const T2 &b)
{
    assert(IsPowerOf2(b));
    return T2(a) & (b-1);
}

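// RoundDownToMultipleOf and RoundUpToMultipleOf round n down or up to the nearest
// multiple of m, using a mask when m is a power of 2. For example,
// RoundUpToMultipleOf(10, 16) yields 16; RoundUpToMultipleOf throws if n+m-1 overflows.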
template <class T1, class T2>
inline T1 RoundDownToMultipleOf(const T1 &n, const T2 &m)
{
    if (IsPowerOf2(m))
        return n - ModPowerOf2(n, m);
    else
        return n - n%m;
}

template <class T1, class T2>
inline T1 RoundUpToMultipleOf(const T1 &n, const T2 &m)
{
    if (n+m-1 < n)
        throw InvalidArgument("RoundUpToMultipleOf: integer overflow");
    return RoundDownToMultipleOf(n+m-1, m);
}

template <class T>
inline unsigned int GetAlignmentOf(T *dummy=NULL)
{
#ifdef CRYPTOPP_ALLOW_UNALIGNED_DATA_ACCESS
    if (sizeof(T) < 16)
        return 1;
#endif

#if (_MSC_VER >= 1300)
    return __alignof(T);
#elif defined(__GNUC__)
    return __alignof__(T);
#elif CRYPTOPP_BOOL_SLOW_WORD64
    return UnsignedMin(4U, sizeof(T));
#else
    return sizeof(T);
#endif
}

inline bool IsAlignedOn(const void *p, unsigned int alignment)
{
    return alignment==1 || (IsPowerOf2(alignment) ? ModPowerOf2((size_t)p, alignment) == 0 : (size_t)p % alignment == 0);
}

template <class T>
inline bool IsAligned(const void *p, T *dummy=NULL)
{
    return IsAlignedOn(p, GetAlignmentOf<T>());
}

#ifdef IS_LITTLE_ENDIAN
typedef LittleEndian NativeByteOrder;
#else
typedef BigEndian NativeByteOrder;
#endif

inline ByteOrder GetNativeByteOrder()
{
    return NativeByteOrder::ToEnum();
}

inline bool NativeByteOrderIs(ByteOrder order)
{
    return order == GetNativeByteOrder();
}

template <class T>
std::string IntToString(T a, unsigned int base = 10)
{
    if (a == 0)
        return "0";
    bool negate = false;
    if (a < 0)
    {
        negate = true;
        a = 0-a;
    }
    std::string result;
    while (a > 0)
    {
        T digit = a % base;
        result = char((digit < 10 ? '0' : ('a' - 10)) + digit) + result;
        a /= base;
    }
    if (negate)
        result = "-" + result;
    return result;
}

template <class T1, class T2>
inline T1 SaturatingSubtract(const T1 &a, const T2 &b)
{
    return T1((a > b) ? (a - b) : 0);
}

template <class T>
inline CipherDir GetCipherDir(const T &obj)
{
    return obj.IsForwardTransformation() ? ENCRYPTION : DECRYPTION;
}

CRYPTOPP_DLL void CRYPTOPP_API CallNewHandler();

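// IncrementCounterByOne treats the buffer as an s-byte big-endian counter and adds
// one to it, propagating the carry from the last byte toward the first. The second
// overload writes the incremented value of input into output.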
inline void IncrementCounterByOne(byte *inout, unsigned int s)
{
    for (int i=s-1, carry=1; i>=0 && carry; i--)
        carry = !++inout[i];
}

inline void IncrementCounterByOne(byte *output, const byte *input, unsigned int s)
{
    int i, carry;
    for (i=s-1, carry=1; i>=0 && carry; i--)
        carry = ((output[i] = input[i]+1) == 0);
    memcpy_s(output, s, input, i+1);
}

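// ConditionalSwap and ConditionalSwapPointers swap a and b when c is true without
// using a data-dependent branch, which helps keep timing independent of c.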
template <class T>
inline void ConditionalSwap(bool c, T &a, T &b)
{
    T t = c * (a ^ b);
    a ^= t;
    b ^= t;
}

template <class T>
inline void ConditionalSwapPointers(bool c, T &a, T &b)
{
    ptrdiff_t t = c * (a - b);
    a -= t;
    b += t;
}

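// SecureWipeBuffer zeroizes a buffer of n elements through a volatile pointer so the
// stores are not optimized away. The specializations below use rep-stos string
// operations (inline asm on GCC, __stos* intrinsics on MSVC) on x86 and x64.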
template <class T>
void SecureWipeBuffer(T *buf, size_t n)
{
    volatile T *p = buf+n;
    while (n--)
        *(--p) = 0;
}

#if (_MSC_VER >= 1400 || defined(__GNUC__)) && (CRYPTOPP_BOOL_X64 || CRYPTOPP_BOOL_X86)

template<> inline void SecureWipeBuffer(byte *buf, size_t n)
{
    volatile byte *p = buf;
#ifdef __GNUC__
    asm volatile("rep stosb" : "+c"(n), "+D"(p) : "a"(0) : "memory");
#else
    __stosb((byte *)(size_t)p, 0, n);
#endif
}

template<> inline void SecureWipeBuffer(word16 *buf, size_t n)
{
    volatile word16 *p = buf;
#ifdef __GNUC__
    asm volatile("rep stosw" : "+c"(n), "+D"(p) : "a"(0) : "memory");
#else
    __stosw((word16 *)(size_t)p, 0, n);
#endif
}

template<> inline void SecureWipeBuffer(word32 *buf, size_t n)
{
    volatile word32 *p = buf;
#ifdef __GNUC__
    asm volatile("rep stosl" : "+c"(n), "+D"(p) : "a"(0) : "memory");
#else
    __stosd((unsigned long *)(size_t)p, 0, n);
#endif
}

template<> inline void SecureWipeBuffer(word64 *buf, size_t n)
{
#if CRYPTOPP_BOOL_X64
    volatile word64 *p = buf;
#ifdef __GNUC__
    asm volatile("rep stosq" : "+c"(n), "+D"(p) : "a"(0) : "memory");
#else
    __stosq((word64 *)(size_t)p, 0, n);
#endif
#else
    SecureWipeBuffer((word32 *)buf, 2*n);
#endif
}

#endif // #if (_MSC_VER >= 1400 || defined(__GNUC__)) && (CRYPTOPP_BOOL_X64 || CRYPTOPP_BOOL_X86)

template <class T>
inline void SecureWipeArray(T *buf, size_t n)
{
    if (sizeof(T) % 8 == 0 && GetAlignmentOf<T>() % GetAlignmentOf<word64>() == 0)
        SecureWipeBuffer((word64 *)buf, n * (sizeof(T)/8));
    else if (sizeof(T) % 4 == 0 && GetAlignmentOf<T>() % GetAlignmentOf<word32>() == 0)
        SecureWipeBuffer((word32 *)buf, n * (sizeof(T)/4));
    else if (sizeof(T) % 2 == 0 && GetAlignmentOf<T>() % GetAlignmentOf<word16>() == 0)
        SecureWipeBuffer((word16 *)buf, n * (sizeof(T)/2));
    else
        SecureWipeBuffer((byte *)buf, n * sizeof(T));
}

static std::string StringNarrow(const wchar_t *str, bool throwOnError = true)
{
#ifdef _MSC_VER
#pragma warning(push)
#pragma warning(disable: 4996) // 'wcstombs': This function or variable may be unsafe.
#endif
    size_t size = wcstombs(NULL, str, 0);
    if (size == size_t(0)-1)
    {
        if (throwOnError)
            throw InvalidArgument("StringNarrow: wcstombs() call failed");
        else
            return std::string();
    }
    std::string result(size, 0);
    wcstombs(&result[0], str, size);
    return result;
#ifdef _MSC_VER
#pragma warning(pop)
#endif
}

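// Rotate templates: rotlFixed/rotrFixed are meant for compile-time constant rotation
// counts and rotlVariable/rotrVariable for runtime counts; both assert y is less than
// the bit width, and the portable implementations additionally assume y is nonzero.
// rotlMod/rotrMod first reduce y modulo the bit width. The specializations that follow
// map these operations onto compiler rotate intrinsics on MSVC and Metrowerks/PowerPC.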
template <class T> inline T rotlFixed(T x, unsigned int y)
{
    assert(y < sizeof(T)*8);
    return T((x<<y) | (x>>(sizeof(T)*8-y)));
}

template <class T> inline T rotrFixed(T x, unsigned int y)
{
    assert(y < sizeof(T)*8);
    return T((x>>y) | (x<<(sizeof(T)*8-y)));
}

template <class T> inline T rotlVariable(T x, unsigned int y)
{
    assert(y < sizeof(T)*8);
    return T((x<<y) | (x>>(sizeof(T)*8-y)));
}

template <class T> inline T rotrVariable(T x, unsigned int y)
{
    assert(y < sizeof(T)*8);
    return T((x>>y) | (x<<(sizeof(T)*8-y)));
}

template <class T> inline T rotlMod(T x, unsigned int y)
{
    y %= sizeof(T)*8;
    return T((x<<y) | (x>>(sizeof(T)*8-y)));
}

template <class T> inline T rotrMod(T x, unsigned int y)
{
    y %= sizeof(T)*8;
    return T((x>>y) | (x<<(sizeof(T)*8-y)));
}

#ifdef _MSC_VER

template<> inline word32 rotlFixed<word32>(word32 x, unsigned int y)
{
    assert(y < 8*sizeof(x));
    return y ? _lrotl(x, y) : x;
}

template<> inline word32 rotrFixed<word32>(word32 x, unsigned int y)
{
    assert(y < 8*sizeof(x));
    return y ? _lrotr(x, y) : x;
}

template<> inline word32 rotlVariable<word32>(word32 x, unsigned int y)
{
    assert(y < 8*sizeof(x));
    return _lrotl(x, y);
}

template<> inline word32 rotrVariable<word32>(word32 x, unsigned int y)
{
    assert(y < 8*sizeof(x));
    return _lrotr(x, y);
}

template<> inline word32 rotlMod<word32>(word32 x, unsigned int y)
{
    return _lrotl(x, y);
}

template<> inline word32 rotrMod<word32>(word32 x, unsigned int y)
{
    return _lrotr(x, y);
}

#endif // #ifdef _MSC_VER

#if _MSC_VER >= 1300 && !defined(__INTEL_COMPILER)

template<> inline word64 rotlFixed<word64>(word64 x, unsigned int y)
{
    assert(y < 8*sizeof(x));
    return y ? _rotl64(x, y) : x;
}

template<> inline word64 rotrFixed<word64>(word64 x, unsigned int y)
{
    assert(y < 8*sizeof(x));
    return y ? _rotr64(x, y) : x;
}

template<> inline word64 rotlVariable<word64>(word64 x, unsigned int y)
{
    assert(y < 8*sizeof(x));
    return _rotl64(x, y);
}

template<> inline word64 rotrVariable<word64>(word64 x, unsigned int y)
{
    assert(y < 8*sizeof(x));
    return _rotr64(x, y);
}

template<> inline word64 rotlMod<word64>(word64 x, unsigned int y)
{
    return _rotl64(x, y);
}

template<> inline word64 rotrMod<word64>(word64 x, unsigned int y)
{
    return _rotr64(x, y);
}

#endif // #if _MSC_VER >= 1300 && !defined(__INTEL_COMPILER)

#if _MSC_VER >= 1400 && !defined(__INTEL_COMPILER)

template<> inline word16 rotlFixed<word16>(word16 x, unsigned int y)
{
    assert(y < 8*sizeof(x));
    return y ? _rotl16(x, y) : x;
}

template<> inline word16 rotrFixed<word16>(word16 x, unsigned int y)
{
    assert(y < 8*sizeof(x));
    return y ? _rotr16(x, y) : x;
}

template<> inline word16 rotlVariable<word16>(word16 x, unsigned int y)
{
    assert(y < 8*sizeof(x));
    return _rotl16(x, y);
}

template<> inline word16 rotrVariable<word16>(word16 x, unsigned int y)
{
    assert(y < 8*sizeof(x));
    return _rotr16(x, y);
}

template<> inline word16 rotlMod<word16>(word16 x, unsigned int y)
{
    return _rotl16(x, y);
}

template<> inline word16 rotrMod<word16>(word16 x, unsigned int y)
{
    return _rotr16(x, y);
}

template<> inline byte rotlFixed<byte>(byte x, unsigned int y)
{
    assert(y < 8*sizeof(x));
    return y ? _rotl8(x, y) : x;
}

template<> inline byte rotrFixed<byte>(byte x, unsigned int y)
{
    assert(y < 8*sizeof(x));
    return y ? _rotr8(x, y) : x;
}

template<> inline byte rotlVariable<byte>(byte x, unsigned int y)
{
    assert(y < 8*sizeof(x));
    return _rotl8(x, y);
}

template<> inline byte rotrVariable<byte>(byte x, unsigned int y)
{
    assert(y < 8*sizeof(x));
    return _rotr8(x, y);
}

template<> inline byte rotlMod<byte>(byte x, unsigned int y)
{
    return _rotl8(x, y);
}

template<> inline byte rotrMod<byte>(byte x, unsigned int y)
{
    return _rotr8(x, y);
}

#endif // #if _MSC_VER >= 1400

#if (defined(__MWERKS__) && TARGET_CPU_PPC)

template<> inline word32 rotlFixed<word32>(word32 x, unsigned int y)
{
    assert(y < 32);
    return y ? __rlwinm(x,y,0,31) : x;
}

template<> inline word32 rotrFixed<word32>(word32 x, unsigned int y)
{
    assert(y < 32);
    return y ? __rlwinm(x,32-y,0,31) : x;
}

template<> inline word32 rotlVariable<word32>(word32 x, unsigned int y)
{
    assert(y < 32);
    return (__rlwnm(x,y,0,31));
}

template<> inline word32 rotrVariable<word32>(word32 x, unsigned int y)
{
    assert(y < 32);
    return (__rlwnm(x,32-y,0,31));
}

template<> inline word32 rotlMod<word32>(word32 x, unsigned int y)
{
    return (__rlwnm(x,y,0,31));
}

template<> inline word32 rotrMod<word32>(word32 x, unsigned int y)
{
    return (__rlwnm(x,32-y,0,31));
}

#endif // #if (defined(__MWERKS__) && TARGET_CPU_PPC)

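// GetByte extracts byte number 'index' of value with respect to the given byte order
// (index 0 is the least significant byte in little-endian order and the most
// significant byte in big-endian order).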
template <class T>
inline unsigned int GetByte(ByteOrder order, T value, unsigned int index)
{
    if (order == LITTLE_ENDIAN_ORDER)
        return GETBYTE(value, index);
    else
        return GETBYTE(value, sizeof(T)-index-1);
}

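// ByteReverse reverses the byte order of a word (using bswap instructions, byteswap.h,
// or MSVC intrinsics where available); BitReverse reverses the order of the bits.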
inline byte ByteReverse(byte value)
{
    return value;
}

inline word16 ByteReverse(word16 value)
{
#ifdef CRYPTOPP_BYTESWAP_AVAILABLE
    return bswap_16(value);
#elif defined(_MSC_VER) && _MSC_VER >= 1300
    return _byteswap_ushort(value);
#else
    return rotlFixed(value, 8U);
#endif
}

inline word32 ByteReverse(word32 value)
{
#if defined(__GNUC__) && defined(CRYPTOPP_X86_ASM_AVAILABLE)
    __asm__ ("bswap %0" : "=r" (value) : "0" (value));
    return value;
#elif defined(CRYPTOPP_BYTESWAP_AVAILABLE)
    return bswap_32(value);
#elif defined(__MWERKS__) && TARGET_CPU_PPC
    return (word32)__lwbrx(&value,0);
#elif _MSC_VER >= 1400 || (_MSC_VER >= 1300 && !defined(_DLL))
    return _byteswap_ulong(value);
#elif CRYPTOPP_FAST_ROTATE(32)
    return (rotrFixed(value, 8U) & 0xff00ff00) | (rotlFixed(value, 8U) & 0x00ff00ff);
#else
    value = ((value & 0xFF00FF00) >> 8) | ((value & 0x00FF00FF) << 8);
    return rotlFixed(value, 16U);
#endif
}

inline word64 ByteReverse(word64 value)
{
#if defined(__GNUC__) && defined(CRYPTOPP_X86_ASM_AVAILABLE) && defined(__x86_64__)
    __asm__ ("bswap %0" : "=r" (value) : "0" (value));
    return value;
#elif defined(CRYPTOPP_BYTESWAP_AVAILABLE)
    return bswap_64(value);
#elif defined(_MSC_VER) && _MSC_VER >= 1300
    return _byteswap_uint64(value);
#elif CRYPTOPP_BOOL_SLOW_WORD64
    return (word64(ByteReverse(word32(value))) << 32) | ByteReverse(word32(value>>32));
#else
    value = ((value & W64LIT(0xFF00FF00FF00FF00)) >> 8) | ((value & W64LIT(0x00FF00FF00FF00FF)) << 8);
    value = ((value & W64LIT(0xFFFF0000FFFF0000)) >> 16) | ((value & W64LIT(0x0000FFFF0000FFFF)) << 16);
    return rotlFixed(value, 32U);
#endif
}

inline byte BitReverse(byte value)
{
    value = ((value & 0xAA) >> 1) | ((value & 0x55) << 1);
    value = ((value & 0xCC) >> 2) | ((value & 0x33) << 2);
    return rotlFixed(value, 4U);
}

inline word16 BitReverse(word16 value)
{
    value = ((value & 0xAAAA) >> 1) | ((value & 0x5555) << 1);
    value = ((value & 0xCCCC) >> 2) | ((value & 0x3333) << 2);
    value = ((value & 0xF0F0) >> 4) | ((value & 0x0F0F) << 4);
    return ByteReverse(value);
}

inline word32 BitReverse(word32 value)
{
    value = ((value & 0xAAAAAAAA) >> 1) | ((value & 0x55555555) << 1);
    value = ((value & 0xCCCCCCCC) >> 2) | ((value & 0x33333333) << 2);
    value = ((value & 0xF0F0F0F0) >> 4) | ((value & 0x0F0F0F0F) << 4);
    return ByteReverse(value);
}

inline word64 BitReverse(word64 value)
{
#if CRYPTOPP_BOOL_SLOW_WORD64
    return (word64(BitReverse(word32(value))) << 32) | BitReverse(word32(value>>32));
#else
    value = ((value & W64LIT(0xAAAAAAAAAAAAAAAA)) >> 1) | ((value & W64LIT(0x5555555555555555)) << 1);
    value = ((value & W64LIT(0xCCCCCCCCCCCCCCCC)) >> 2) | ((value & W64LIT(0x3333333333333333)) << 2);
    value = ((value & W64LIT(0xF0F0F0F0F0F0F0F0)) >> 4) | ((value & W64LIT(0x0F0F0F0F0F0F0F0F)) << 4);
    return ByteReverse(value);
#endif
}

template <class T>
inline T BitReverse(T value)
{
    if (sizeof(T) == 1)
        return (T)BitReverse((byte)value);
    else if (sizeof(T) == 2)
        return (T)BitReverse((word16)value);
    else if (sizeof(T) == 4)
        return (T)BitReverse((word32)value);
    else
    {
        assert(sizeof(T) == 8);
        return (T)BitReverse((word64)value);
    }
}

template <class T>
inline T ConditionalByteReverse(ByteOrder order, T value)
{
    return NativeByteOrderIs(order) ? value : ByteReverse(value);
}

template <class T>
void ByteReverse(T *out, const T *in, size_t byteCount)
{
    assert(byteCount % sizeof(T) == 0);
    size_t count = byteCount/sizeof(T);
    for (size_t i=0; i<count; i++)
        out[i] = ByteReverse(in[i]);
}

template <class T>
inline void ConditionalByteReverse(ByteOrder order, T *out, const T *in, size_t byteCount)
{
    if (!NativeByteOrderIs(order))
        ByteReverse(out, in, byteCount);
    else if (in != out)
        memcpy_s(out, byteCount, in, byteCount);
}

template <class T>
inline void GetUserKey(ByteOrder order, T *out, size_t outlen, const byte *in, size_t inlen)
{
    const size_t U = sizeof(T);
    assert(inlen <= outlen*U);
    memcpy_s(out, outlen*U, in, inlen);
    memset_z((byte *)out+inlen, 0, outlen*U-inlen);
    ConditionalByteReverse(order, out, out, RoundUpToMultipleOf(inlen, U));
}

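// When unaligned data access is not allowed, the helpers below read and write words
// one byte at a time in the requested byte order; the Put variants optionally XOR the
// bytes of xorBlock into the result.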
#ifndef CRYPTOPP_ALLOW_UNALIGNED_DATA_ACCESS
inline byte UnalignedGetWordNonTemplate(ByteOrder order, const byte *block, const byte *)
{
    return block[0];
}

inline word16 UnalignedGetWordNonTemplate(ByteOrder order, const byte *block, const word16 *)
{
    return (order == BIG_ENDIAN_ORDER)
        ? block[1] | (block[0] << 8)
        : block[0] | (block[1] << 8);
}

inline word32 UnalignedGetWordNonTemplate(ByteOrder order, const byte *block, const word32 *)
{
    return (order == BIG_ENDIAN_ORDER)
        ? word32(block[3]) | (word32(block[2]) << 8) | (word32(block[1]) << 16) | (word32(block[0]) << 24)
        : word32(block[0]) | (word32(block[1]) << 8) | (word32(block[2]) << 16) | (word32(block[3]) << 24);
}

inline word64 UnalignedGetWordNonTemplate(ByteOrder order, const byte *block, const word64 *)
{
    return (order == BIG_ENDIAN_ORDER)
        ?
        (word64(block[7]) |
        (word64(block[6]) << 8) |
        (word64(block[5]) << 16) |
        (word64(block[4]) << 24) |
        (word64(block[3]) << 32) |
        (word64(block[2]) << 40) |
        (word64(block[1]) << 48) |
        (word64(block[0]) << 56))
        :
        (word64(block[0]) |
        (word64(block[1]) << 8) |
        (word64(block[2]) << 16) |
        (word64(block[3]) << 24) |
        (word64(block[4]) << 32) |
        (word64(block[5]) << 40) |
        (word64(block[6]) << 48) |
        (word64(block[7]) << 56));
}

inline void UnalignedPutWordNonTemplate(ByteOrder order, byte *block, byte value, const byte *xorBlock)
{
    block[0] = xorBlock ? (value ^ xorBlock[0]) : value;
}

inline void UnalignedPutWordNonTemplate(ByteOrder order, byte *block, word16 value, const byte *xorBlock)
{
    if (order == BIG_ENDIAN_ORDER)
    {
        if (xorBlock)
        {
            block[0] = xorBlock[0] ^ CRYPTOPP_GET_BYTE_AS_BYTE(value, 1);
            block[1] = xorBlock[1] ^ CRYPTOPP_GET_BYTE_AS_BYTE(value, 0);
        }
        else
        {
            block[0] = CRYPTOPP_GET_BYTE_AS_BYTE(value, 1);
            block[1] = CRYPTOPP_GET_BYTE_AS_BYTE(value, 0);
        }
    }
    else
    {
        if (xorBlock)
        {
            block[0] = xorBlock[0] ^ CRYPTOPP_GET_BYTE_AS_BYTE(value, 0);
            block[1] = xorBlock[1] ^ CRYPTOPP_GET_BYTE_AS_BYTE(value, 1);
        }
        else
        {
            block[0] = CRYPTOPP_GET_BYTE_AS_BYTE(value, 0);
            block[1] = CRYPTOPP_GET_BYTE_AS_BYTE(value, 1);
        }
    }
}

inline void UnalignedPutWordNonTemplate(ByteOrder order, byte *block, word32 value, const byte *xorBlock)
{
    if (order == BIG_ENDIAN_ORDER)
    {
        if (xorBlock)
        {
            block[0] = xorBlock[0] ^ CRYPTOPP_GET_BYTE_AS_BYTE(value, 3);
            block[1] = xorBlock[1] ^ CRYPTOPP_GET_BYTE_AS_BYTE(value, 2);
            block[2] = xorBlock[2] ^ CRYPTOPP_GET_BYTE_AS_BYTE(value, 1);
            block[3] = xorBlock[3] ^ CRYPTOPP_GET_BYTE_AS_BYTE(value, 0);
        }
        else
        {
            block[0] = CRYPTOPP_GET_BYTE_AS_BYTE(value, 3);
            block[1] = CRYPTOPP_GET_BYTE_AS_BYTE(value, 2);
            block[2] = CRYPTOPP_GET_BYTE_AS_BYTE(value, 1);
            block[3] = CRYPTOPP_GET_BYTE_AS_BYTE(value, 0);
        }
    }
    else
    {
        if (xorBlock)
        {
            block[0] = xorBlock[0] ^ CRYPTOPP_GET_BYTE_AS_BYTE(value, 0);
            block[1] = xorBlock[1] ^ CRYPTOPP_GET_BYTE_AS_BYTE(value, 1);
            block[2] = xorBlock[2] ^ CRYPTOPP_GET_BYTE_AS_BYTE(value, 2);
            block[3] = xorBlock[3] ^ CRYPTOPP_GET_BYTE_AS_BYTE(value, 3);
        }
        else
        {
            block[0] = CRYPTOPP_GET_BYTE_AS_BYTE(value, 0);
            block[1] = CRYPTOPP_GET_BYTE_AS_BYTE(value, 1);
            block[2] = CRYPTOPP_GET_BYTE_AS_BYTE(value, 2);
            block[3] = CRYPTOPP_GET_BYTE_AS_BYTE(value, 3);
        }
    }
}

inline void UnalignedPutWordNonTemplate(ByteOrder order, byte *block, word64 value, const byte *xorBlock)
{
    if (order == BIG_ENDIAN_ORDER)
    {
        if (xorBlock)
        {
            block[0] = xorBlock[0] ^ CRYPTOPP_GET_BYTE_AS_BYTE(value, 7);
            block[1] = xorBlock[1] ^ CRYPTOPP_GET_BYTE_AS_BYTE(value, 6);
            block[2] = xorBlock[2] ^ CRYPTOPP_GET_BYTE_AS_BYTE(value, 5);
            block[3] = xorBlock[3] ^ CRYPTOPP_GET_BYTE_AS_BYTE(value, 4);
            block[4] = xorBlock[4] ^ CRYPTOPP_GET_BYTE_AS_BYTE(value, 3);
            block[5] = xorBlock[5] ^ CRYPTOPP_GET_BYTE_AS_BYTE(value, 2);
            block[6] = xorBlock[6] ^ CRYPTOPP_GET_BYTE_AS_BYTE(value, 1);
            block[7] = xorBlock[7] ^ CRYPTOPP_GET_BYTE_AS_BYTE(value, 0);
        }
        else
        {
            block[0] = CRYPTOPP_GET_BYTE_AS_BYTE(value, 7);
            block[1] = CRYPTOPP_GET_BYTE_AS_BYTE(value, 6);
            block[2] = CRYPTOPP_GET_BYTE_AS_BYTE(value, 5);
            block[3] = CRYPTOPP_GET_BYTE_AS_BYTE(value, 4);
            block[4] = CRYPTOPP_GET_BYTE_AS_BYTE(value, 3);
            block[5] = CRYPTOPP_GET_BYTE_AS_BYTE(value, 2);
            block[6] = CRYPTOPP_GET_BYTE_AS_BYTE(value, 1);
            block[7] = CRYPTOPP_GET_BYTE_AS_BYTE(value, 0);
        }
    }
    else
    {
        if (xorBlock)
        {
            block[0] = xorBlock[0] ^ CRYPTOPP_GET_BYTE_AS_BYTE(value, 0);
            block[1] = xorBlock[1] ^ CRYPTOPP_GET_BYTE_AS_BYTE(value, 1);
            block[2] = xorBlock[2] ^ CRYPTOPP_GET_BYTE_AS_BYTE(value, 2);
            block[3] = xorBlock[3] ^ CRYPTOPP_GET_BYTE_AS_BYTE(value, 3);
            block[4] = xorBlock[4] ^ CRYPTOPP_GET_BYTE_AS_BYTE(value, 4);
            block[5] = xorBlock[5] ^ CRYPTOPP_GET_BYTE_AS_BYTE(value, 5);
            block[6] = xorBlock[6] ^ CRYPTOPP_GET_BYTE_AS_BYTE(value, 6);
            block[7] = xorBlock[7] ^ CRYPTOPP_GET_BYTE_AS_BYTE(value, 7);
        }
        else
        {
            block[0] = CRYPTOPP_GET_BYTE_AS_BYTE(value, 0);
            block[1] = CRYPTOPP_GET_BYTE_AS_BYTE(value, 1);
            block[2] = CRYPTOPP_GET_BYTE_AS_BYTE(value, 2);
            block[3] = CRYPTOPP_GET_BYTE_AS_BYTE(value, 3);
            block[4] = CRYPTOPP_GET_BYTE_AS_BYTE(value, 4);
            block[5] = CRYPTOPP_GET_BYTE_AS_BYTE(value, 5);
            block[6] = CRYPTOPP_GET_BYTE_AS_BYTE(value, 6);
            block[7] = CRYPTOPP_GET_BYTE_AS_BYTE(value, 7);
        }
    }
}
#endif // #ifndef CRYPTOPP_ALLOW_UNALIGNED_DATA_ACCESS

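// GetWord and PutWord read or write a word of type T from or to a byte buffer in the
// requested byte order. When assumeAligned is true the block must be suitably aligned
// for T; otherwise the byte-at-a-time helpers above are used when unaligned access is
// disallowed.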
template <class T>
inline T GetWord(bool assumeAligned, ByteOrder order, const byte *block)
{
#ifndef CRYPTOPP_ALLOW_UNALIGNED_DATA_ACCESS
    if (!assumeAligned)
        return UnalignedGetWordNonTemplate(order, block, (T*)NULL);
    assert(IsAligned<T>(block));
#endif
    return ConditionalByteReverse(order, *reinterpret_cast<const T *>(block));
}

template <class T>
inline void GetWord(bool assumeAligned, ByteOrder order, T &result, const byte *block)
{
    result = GetWord<T>(assumeAligned, order, block);
}

template <class T>
inline void PutWord(bool assumeAligned, ByteOrder order, byte *block, T value, const byte *xorBlock = NULL)
{
#ifndef CRYPTOPP_ALLOW_UNALIGNED_DATA_ACCESS
    if (!assumeAligned)
        return UnalignedPutWordNonTemplate(order, block, value, xorBlock);
    assert(IsAligned<T>(block));
    assert(IsAligned<T>(xorBlock));
#endif
    *reinterpret_cast<T *>(block) = ConditionalByteReverse(order, value) ^ (xorBlock ? *reinterpret_cast<const T *>(xorBlock) : 0);
}

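// GetBlock and PutBlock read or write a sequence of words of type T in byte order B,
// advancing through the buffer with each call; A selects the aligned fast path.
// BlockGetAndPut bundles the two. As a sketch of typical use (names are illustrative):
//   typedef BlockGetAndPut<word32, BigEndian> Block;
//   word32 a, b;
//   Block::Get(inBlock)(a)(b);            // read two big-endian 32-bit words
//   Block::Put(xorBlock, outBlock)(a)(b); // write them back, XORing with xorBlock if non-NULL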
template <class T, class B, bool A=false>
class GetBlock
{
public:
    GetBlock(const void *block)
        : m_block((const byte *)block) {}

    template <class U>
    inline GetBlock<T, B, A> & operator()(U &x)
    {
        CRYPTOPP_COMPILE_ASSERT(sizeof(U) >= sizeof(T));
        x = GetWord<T>(A, B::ToEnum(), m_block);
        m_block += sizeof(T);
        return *this;
    }

private:
    const byte *m_block;
};

template <class T, class B, bool A=false>
class PutBlock
{
public:
    PutBlock(const void *xorBlock, void *block)
        : m_xorBlock((const byte *)xorBlock), m_block((byte *)block) {}

    template <class U>
    inline PutBlock<T, B, A> & operator()(U x)
    {
        PutWord(A, B::ToEnum(), m_block, (T)x, m_xorBlock);
        m_block += sizeof(T);
        if (m_xorBlock)
            m_xorBlock += sizeof(T);
        return *this;
    }

private:
    const byte *m_xorBlock;
    byte *m_block;
};

template <class T, class B, bool GA=false, bool PA=false>
struct BlockGetAndPut
{
    static inline GetBlock<T, B, GA> Get(const void *block) {return GetBlock<T, B, GA>(block);}
    typedef PutBlock<T, B, PA> Put;
};

template <class T>
std::string WordToString(T value, ByteOrder order = BIG_ENDIAN_ORDER)
{
    if (!NativeByteOrderIs(order))
        value = ByteReverse(value);

    return std::string((char *)&value, sizeof(value));
}

template <class T>
T StringToWord(const std::string &str, ByteOrder order = BIG_ENDIAN_ORDER)
{
    T value = 0;
    memcpy_s(&value, sizeof(value), str.data(), UnsignedMin(str.size(), sizeof(value)));
    return NativeByteOrderIs(order) ? value : ByteReverse(value);
}

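// SafeShifter dispatches at compile time so that SafeRightShift<bits> and
// SafeLeftShift<bits> return 0 instead of invoking undefined behavior when the shift
// count is greater than or equal to the bit width of T.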
template <bool overflow> struct SafeShifter;

template<> struct SafeShifter<true>
{
    template <class T>
    static inline T RightShift(T value, unsigned int bits)
    {
        return 0;
    }

    template <class T>
    static inline T LeftShift(T value, unsigned int bits)
    {
        return 0;
    }
};

template<> struct SafeShifter<false>
{
    template <class T>
    static inline T RightShift(T value, unsigned int bits)
    {
        return value >> bits;
    }

    template <class T>
    static inline T LeftShift(T value, unsigned int bits)
    {
        return value << bits;
    }
};

template <unsigned int bits, class T>
inline T SafeRightShift(T value)
{
    return SafeShifter<(bits>=(8*sizeof(T)))>::RightShift(value, bits);
}

template <unsigned int bits, class T>
inline T SafeLeftShift(T value)
{
    return SafeShifter<(bits>=(8*sizeof(T)))>::LeftShift(value, bits);
}

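// The CRYPTOPP_BLOCK_n macros lay out up to eight named sub-arrays back to back inside
// a single AlignedSecByteBlock (m_aggregate); each macro defines an accessor m_name()
// and a size helper, and CRYPTOPP_BLOCKS_END(i) computes the total size and allocates it.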
#define CRYPTOPP_BLOCK_1(n, t, s) t* m_##n() {return (t *)(m_aggregate+0);} size_t SS1() {return sizeof(t)*(s);} size_t m_##n##Size() {return (s);}
#define CRYPTOPP_BLOCK_2(n, t, s) t* m_##n() {return (t *)(m_aggregate+SS1());} size_t SS2() {return SS1()+sizeof(t)*(s);} size_t m_##n##Size() {return (s);}
#define CRYPTOPP_BLOCK_3(n, t, s) t* m_##n() {return (t *)(m_aggregate+SS2());} size_t SS3() {return SS2()+sizeof(t)*(s);} size_t m_##n##Size() {return (s);}
#define CRYPTOPP_BLOCK_4(n, t, s) t* m_##n() {return (t *)(m_aggregate+SS3());} size_t SS4() {return SS3()+sizeof(t)*(s);} size_t m_##n##Size() {return (s);}
#define CRYPTOPP_BLOCK_5(n, t, s) t* m_##n() {return (t *)(m_aggregate+SS4());} size_t SS5() {return SS4()+sizeof(t)*(s);} size_t m_##n##Size() {return (s);}
#define CRYPTOPP_BLOCK_6(n, t, s) t* m_##n() {return (t *)(m_aggregate+SS5());} size_t SS6() {return SS5()+sizeof(t)*(s);} size_t m_##n##Size() {return (s);}
#define CRYPTOPP_BLOCK_7(n, t, s) t* m_##n() {return (t *)(m_aggregate+SS6());} size_t SS7() {return SS6()+sizeof(t)*(s);} size_t m_##n##Size() {return (s);}
#define CRYPTOPP_BLOCK_8(n, t, s) t* m_##n() {return (t *)(m_aggregate+SS7());} size_t SS8() {return SS7()+sizeof(t)*(s);} size_t m_##n##Size() {return (s);}
#define CRYPTOPP_BLOCKS_END(i) size_t SST() {return SS##i();} void AllocateBlocks() {m_aggregate.New(SST());} AlignedSecByteBlock m_aggregate;

NAMESPACE_END

#endif