Skip to content
New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

Upgrade rainforest algo to v2 #39

Open
wants to merge 3 commits into
base: linux
Choose a base branch
from
Open
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension


Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
2 changes: 1 addition & 1 deletion Makefile.am
Original file line number Diff line number Diff line change
Expand Up @@ -89,7 +89,7 @@ cpuminer_SOURCES = \
algo/nist5.c \
algo/pluck.c \
algo/qubit.c \
algo/rainforest.c \
algo/rfv2_cpuminer.c \
algo/scrypt.c \
algo/scrypt-jane.c \
algo/sha2.c \
Expand Down
2 changes: 1 addition & 1 deletion README.md
Original file line number Diff line number Diff line change
Expand Up @@ -51,6 +51,7 @@ Algorithms
* ✓ __pluck__ (Supcoin [SUP])
* ✓ __quark__ (Quarkcoin)
* ✓ __qubit__ (GeoCoin)
* ✓ __rfv2__ (MicroBitcoin [MBC])
* ✓ __skein__ (Skeincoin, Myriadcoin, Xedoscoin, ...)
* ✓ __skein2__ (Woodcoin)
* ✓ __s3__ (OneCoin)
Expand Down Expand Up @@ -79,7 +80,6 @@ Algorithms
* ? keccak (Maxcoin HelixCoin, CryptoMeth, Galleon, 365coin, Slothcoin, BitcointalkCoin)
* ? keccakc (Creativecoin)
* ? luffa (Joincoin, Doomcoin)
* ? rainforest
* ? shavite3 (INKcoin)

#### Planned support for
Expand Down
893 changes: 0 additions & 893 deletions algo/rainforest.c

This file was deleted.

205 changes: 205 additions & 0 deletions algo/rf_aes2r.c
Original file line number Diff line number Diff line change
@@ -0,0 +1,205 @@
#include <stdint.h>

// Two round implementation optimized for x86_64+AES-NI and ARMv8+crypto
// extensions. Test pattern :
//
// Plaintext:
// 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00
//
// Ciphertext (encryption result):
// 0x16, 0xcd, 0xb8, 0x7a, 0xc6, 0xae, 0xdb, 0x19, 0xe9, 0x32, 0x47, 0x85, 0x39, 0x51, 0x24, 0xe6
//
// Plaintext (decryption result):
// 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00

/* Rijndael's substitution box for the sub_bytes step and the key schedule.
 * Pure lookup data: declared const so it is placed in read-only storage and
 * cannot be corrupted at run time (it was previously writable). */
static const uint8_t SBOX[256] = {
	0x63, 0x7c, 0x77, 0x7b, 0xf2, 0x6b, 0x6f, 0xc5, 0x30, 0x01, 0x67, 0x2b, 0xfe, 0xd7, 0xab, 0x76,
	0xca, 0x82, 0xc9, 0x7d, 0xfa, 0x59, 0x47, 0xf0, 0xad, 0xd4, 0xa2, 0xaf, 0x9c, 0xa4, 0x72, 0xc0,
	0xb7, 0xfd, 0x93, 0x26, 0x36, 0x3f, 0xf7, 0xcc, 0x34, 0xa5, 0xe5, 0xf1, 0x71, 0xd8, 0x31, 0x15,
	0x04, 0xc7, 0x23, 0xc3, 0x18, 0x96, 0x05, 0x9a, 0x07, 0x12, 0x80, 0xe2, 0xeb, 0x27, 0xb2, 0x75,
	0x09, 0x83, 0x2c, 0x1a, 0x1b, 0x6e, 0x5a, 0xa0, 0x52, 0x3b, 0xd6, 0xb3, 0x29, 0xe3, 0x2f, 0x84,
	0x53, 0xd1, 0x00, 0xed, 0x20, 0xfc, 0xb1, 0x5b, 0x6a, 0xcb, 0xbe, 0x39, 0x4a, 0x4c, 0x58, 0xcf,
	0xd0, 0xef, 0xaa, 0xfb, 0x43, 0x4d, 0x33, 0x85, 0x45, 0xf9, 0x02, 0x7f, 0x50, 0x3c, 0x9f, 0xa8,
	0x51, 0xa3, 0x40, 0x8f, 0x92, 0x9d, 0x38, 0xf5, 0xbc, 0xb6, 0xda, 0x21, 0x10, 0xff, 0xf3, 0xd2,
	0xcd, 0x0c, 0x13, 0xec, 0x5f, 0x97, 0x44, 0x17, 0xc4, 0xa7, 0x7e, 0x3d, 0x64, 0x5d, 0x19, 0x73,
	0x60, 0x81, 0x4f, 0xdc, 0x22, 0x2a, 0x90, 0x88, 0x46, 0xee, 0xb8, 0x14, 0xde, 0x5e, 0x0b, 0xdb,
	0xe0, 0x32, 0x3a, 0x0a, 0x49, 0x06, 0x24, 0x5c, 0xc2, 0xd3, 0xac, 0x62, 0x91, 0x95, 0xe4, 0x79,
	0xe7, 0xc8, 0x37, 0x6d, 0x8d, 0xd5, 0x4e, 0xa9, 0x6c, 0x56, 0xf4, 0xea, 0x65, 0x7a, 0xae, 0x08,
	0xba, 0x78, 0x25, 0x2e, 0x1c, 0xa6, 0xb4, 0xc6, 0xe8, 0xdd, 0x74, 0x1f, 0x4b, 0xbd, 0x8b, 0x8a,
	0x70, 0x3e, 0xb5, 0x66, 0x48, 0x03, 0xf6, 0x0e, 0x61, 0x35, 0x57, 0xb9, 0x86, 0xc1, 0x1d, 0x9e,
	0xe1, 0xf8, 0x98, 0x11, 0x69, 0xd9, 0x8e, 0x94, 0x9b, 0x1e, 0x87, 0xe9, 0xce, 0x55, 0x28, 0xdf,
	0x8c, 0xa1, 0x89, 0x0d, 0xbf, 0xe6, 0x42, 0x68, 0x41, 0x99, 0x2d, 0x0f, 0xb0, 0x54, 0xbb, 0x16
};

/*--- The parts below are not used when crypto extensions are available ---*/
/* Use -march=armv8-a+crypto on ARMv8 to use crypto extensions */
/* Use -maes on x86_64 to use AES-NI */
#if defined(RF_NOASM) || (!defined(__aarch64__) || !defined(__ARM_FEATURE_CRYPTO)) && (!defined(__x86_64__) || !defined(__AES__))

/* Source index for each output byte in the shift_rows step: viewing the
 * 16-byte state as a column-major 4x4 grid, row r is rotated left by r
 * positions. Pure lookup data: declared const so it is read-only (it was
 * previously writable). */
static const uint8_t shifts[16] = {
	0, 5, 10, 15,
	4, 9, 14, 3,
	8, 13, 2, 7,
	12, 1, 6, 11
};

/* add_round_key step: XOR the 16-byte round key _rkey_ into _state_ in place */
static void add_round_key(uint8_t * state, const uint8_t * rkey)
{
	uint8_t * const end = state + 16;

	while (state < end)
		*state++ ^= *rkey++;
}

/* sub_bytes step: run every byte of _state_ through Rijndael's S-box */
static void sub_bytes(uint8_t * state)
{
	uint8_t *p;

	for (p = state; p != state + 16; p++)
		*p = SBOX[*p];
}

/* shift_rows step: viewing the 16-byte state as a column-major 4x4 grid,
 * rotate row r left by r positions. A snapshot of the state is taken first
 * because the permutation is applied in place. */
static void shift_rows(uint8_t * state)
{
	uint8_t snapshot[16];
	uint8_t i;

	for (i = 0; i < 16; i++)
		snapshot[i] = state[i];

	for (i = 0; i < 16; i++)
		state[i] = snapshot[shifts[i]];
}

/* GF(2^8) doubling ("xtime"): shift left and reduce by Rijndael's
 * irreducible polynomial (0x1b) when the high bit falls out */
static uint8_t gf_double(uint8_t v)
{
	return (uint8_t)((v << 1) ^ ((v & 0x80) ? 0x1b : 0x00));
}

/* mix_columns step: multiply each 4-byte column of the state by the fixed
 * AES matrix {02 03 01 01; 01 02 03 01; 01 01 02 03; 03 01 01 02} */
static void mix_columns(uint8_t * state)
{
	uint8_t col;

	for (col = 0; col < 4; col++) {
		uint8_t *c = state + 4 * col;
		uint8_t a0 = c[0], a1 = c[1], a2 = c[2], a3 = c[3];

		c[0] = gf_double(a0) ^ (gf_double(a1) ^ a1) ^ a2 ^ a3;
		c[1] = a0 ^ gf_double(a1) ^ (gf_double(a2) ^ a2) ^ a3;
		c[2] = a0 ^ a1 ^ gf_double(a2) ^ (gf_double(a3) ^ a3);
		c[3] = (gf_double(a0) ^ a0) ^ a1 ^ a2 ^ gf_double(a3);
	}
}
#endif // (!defined(__aarch64__) || !defined(__ARM_FEATURE_CRYPTO)) && (!defined(__x86_64__) || !defined(__AES__))


/* key schedule stuff */

/* rotate the four bytes of _in_ by one position in memory order
 * (b0 <- b1 <- b2 <- b3 <- b0), independent of host endianness */
static inline uint32_t rotate32(uint32_t in)
{
#if defined(__BYTE_ORDER__) && defined(__ORDER_LITTLE_ENDIAN__) && __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__
	return (in >> 8) | (in << 24);
#elif defined(__BYTE_ORDER__) && defined(__ORDER_BIG_ENDIAN__) && __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__
	return (in << 8) | (in >> 24);
#else
	uint8_t *b = (uint8_t *)&in;
	uint8_t first = b[0];

	b[0] = b[1];
	b[1] = b[2];
	b[2] = b[3];
	b[3] = first;
	return in;
#endif
}

/* Key schedule core operation: substitute each byte of _in_ through the
 * S-box, then XOR the round constant _n_ into the first byte of the word in
 * memory order (low byte on little endian, high byte on big endian).
 *
 * Fix: SBOX entries are promoted to (signed) int before shifting, so the
 * original "SBOX[(in >> 24) & 255] << 24" shifted values >= 0x80 into the
 * sign bit of a 32-bit int -- undefined behavior. Widen to uint32_t before
 * shifting; results are unchanged. */
static inline uint32_t sbox(uint32_t in, uint8_t n)
{
	in = ((uint32_t)SBOX[in & 255]) |
	     ((uint32_t)SBOX[(in >> 8) & 255] << 8) |
	     ((uint32_t)SBOX[(in >> 16) & 255] << 16) |
	     ((uint32_t)SBOX[(in >> 24) & 255] << 24);
#if defined(__BYTE_ORDER__) && defined(__ORDER_LITTLE_ENDIAN__) && __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__
	in ^= n;
#elif defined(__BYTE_ORDER__) && defined(__ORDER_BIG_ENDIAN__) && __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__
	in ^= (uint32_t)n << 24;
#else
	*(uint8_t *)&in ^= n;
#endif
	return in;
}

// this version is optimized for exactly two rounds.
// _state_ must be 16-byte aligned.
/* Encrypts the 16 bytes at _state_ in place with a reduced two-round AES
 * using the 16-byte _key_: the key is expanded into three round keys (round
 * constants 1 and 2), then one full round (with mix_columns) and one final
 * round (without) are applied. On AArch64 with crypto extensions or x86_64
 * with AES-NI the rounds run as inline assembly; otherwise the portable C
 * helpers above are used.
 * NOTE(review): _key_ is read through a uint32_t cast, which assumes it is
 * at least 4-byte aligned and relies on type punning -- confirm call sites
 * always pass suitably aligned keys. */
static inline void aes2r_encrypt(uint8_t * state, const uint8_t * key)
{
	uint32_t RF_ALIGN(16) key_schedule[12]; /* 3 x 4 words: round keys 0..2 */
	uint32_t t;

	/* initialize key schedule; its first 16 bytes are the key */
	key_schedule[0] = ((uint32_t *)key)[0];
	key_schedule[1] = ((uint32_t *)key)[1];
	key_schedule[2] = ((uint32_t *)key)[2];
	key_schedule[3] = ((uint32_t *)key)[3];
	t = key_schedule[3];

	/* derive round key 1 (words 4-7) with round constant 1 */
	t = rotate32(t);
	t = sbox(t, 1);
	t = key_schedule[4] = key_schedule[0] ^ t;
	t = key_schedule[5] = key_schedule[1] ^ t;
	t = key_schedule[6] = key_schedule[2] ^ t;
	t = key_schedule[7] = key_schedule[3] ^ t;

	/* derive round key 2 (words 8-11) with round constant 2 */
	t = rotate32(t);
	t = sbox(t, 2);
	t = key_schedule[8] = key_schedule[4] ^ t;
	t = key_schedule[9] = key_schedule[5] ^ t;
	t = key_schedule[10] = key_schedule[6] ^ t;
	t = key_schedule[11] = key_schedule[7] ^ t;

	// Use -march=armv8-a+crypto+crc to get this one
#if !defined(RF_NOASM) && defined(__aarch64__) && defined(__ARM_FEATURE_CRYPTO)
	/* AESE performs add_round_key + sub_bytes + shift_rows in one step,
	 * AESMC performs mix_columns; the trailing EOR is the final whitening */
	__asm__ volatile(
		"ld1 {v0.16b},[%0] \n"
		"ld1 {v1.16b,v2.16b,v3.16b},[%1] \n"
		"aese v0.16b,v1.16b \n" // round1: add_round_key,sub_bytes,shift_rows
		"aesmc v0.16b,v0.16b \n" // round1: mix_columns
		"aese v0.16b,v2.16b \n" // round2: add_round_key,sub_bytes,shift_rows
		"eor v0.16b,v0.16b,v3.16b \n" // finish: add_round_key
		"st1 {v0.16b},[%0] \n"
		: /* only output is in *state */
		: "r"(state), "r"(key_schedule)
		: "v0", "v1", "v2", "v3", "cc", "memory");

	// Use -maes to get this one
#elif !defined(RF_NOASM) && defined(__x86_64__) && defined(__AES__)
	/* AESENC applies a full round and xors the round key afterwards, so the
	 * initial whitening XOR is done explicitly with PXOR first */
	__asm__ volatile(
		"movups (%0), %%xmm0 \n"
		"movups (%1), %%xmm1 \n"
		"pxor %%xmm1,%%xmm0 \n" // add_round_key(state, key_schedule)
		"movups 16(%1),%%xmm2 \n"
		"movups 32(%1),%%xmm1 \n"
		"aesenc %%xmm2,%%xmm0 \n" // first round
		"aesenclast %%xmm1,%%xmm0 \n" // final round
		"movups %%xmm0, (%0) \n"
		: /* only output is in *state */
		: "r"(state), "r" (key_schedule)
		: "xmm0", "xmm1", "xmm2", "cc", "memory");
#else
	/* first round of the algorithm */
	add_round_key(state, (const uint8_t*)&key_schedule[0]);
	sub_bytes(state);
	shift_rows(state);
	mix_columns(state);
	add_round_key(state, (const uint8_t*)&key_schedule[4]);

	/* final round of the algorithm */
	sub_bytes(state);
	shift_rows(state);
	add_round_key(state, (const uint8_t*)&key_schedule[8]);
#endif
}
164 changes: 164 additions & 0 deletions algo/rf_crc32.c
Original file line number Diff line number Diff line change
@@ -0,0 +1,164 @@
// RainForest hash algorithm - CRC32 calculation
// Author: Bill Schneider
// Date: Feb 13th, 2018

#include <stddef.h> /* size_t, used by rf_crc32_mem() */
#include <stdint.h>

#if defined(RF_NOASM) || !defined(__aarch64__) || !defined(__ARM_FEATURE_CRC32)
// crc32 lookup tables
// Byte-at-a-time lookup table for the reflected CRC-32 polynomial
// 0xedb88320 (visible as entry 0x80 below). Only compiled in when the
// hardware CRC32 instructions are unavailable or disabled via RF_NOASM;
// the guard mirrors the #else branches of the rf_crc32_* helpers below.
static const uint32_t rf_crc32_table[256] = {
	/* 0x00 */ 0x00000000, 0x77073096, 0xee0e612c, 0x990951ba,
	/* 0x04 */ 0x076dc419, 0x706af48f, 0xe963a535, 0x9e6495a3,
	/* 0x08 */ 0x0edb8832, 0x79dcb8a4, 0xe0d5e91e, 0x97d2d988,
	/* 0x0c */ 0x09b64c2b, 0x7eb17cbd, 0xe7b82d07, 0x90bf1d91,
	/* 0x10 */ 0x1db71064, 0x6ab020f2, 0xf3b97148, 0x84be41de,
	/* 0x14 */ 0x1adad47d, 0x6ddde4eb, 0xf4d4b551, 0x83d385c7,
	/* 0x18 */ 0x136c9856, 0x646ba8c0, 0xfd62f97a, 0x8a65c9ec,
	/* 0x1c */ 0x14015c4f, 0x63066cd9, 0xfa0f3d63, 0x8d080df5,
	/* 0x20 */ 0x3b6e20c8, 0x4c69105e, 0xd56041e4, 0xa2677172,
	/* 0x24 */ 0x3c03e4d1, 0x4b04d447, 0xd20d85fd, 0xa50ab56b,
	/* 0x28 */ 0x35b5a8fa, 0x42b2986c, 0xdbbbc9d6, 0xacbcf940,
	/* 0x2c */ 0x32d86ce3, 0x45df5c75, 0xdcd60dcf, 0xabd13d59,
	/* 0x30 */ 0x26d930ac, 0x51de003a, 0xc8d75180, 0xbfd06116,
	/* 0x34 */ 0x21b4f4b5, 0x56b3c423, 0xcfba9599, 0xb8bda50f,
	/* 0x38 */ 0x2802b89e, 0x5f058808, 0xc60cd9b2, 0xb10be924,
	/* 0x3c */ 0x2f6f7c87, 0x58684c11, 0xc1611dab, 0xb6662d3d,
	/* 0x40 */ 0x76dc4190, 0x01db7106, 0x98d220bc, 0xefd5102a,
	/* 0x44 */ 0x71b18589, 0x06b6b51f, 0x9fbfe4a5, 0xe8b8d433,
	/* 0x48 */ 0x7807c9a2, 0x0f00f934, 0x9609a88e, 0xe10e9818,
	/* 0x4c */ 0x7f6a0dbb, 0x086d3d2d, 0x91646c97, 0xe6635c01,
	/* 0x50 */ 0x6b6b51f4, 0x1c6c6162, 0x856530d8, 0xf262004e,
	/* 0x54 */ 0x6c0695ed, 0x1b01a57b, 0x8208f4c1, 0xf50fc457,
	/* 0x58 */ 0x65b0d9c6, 0x12b7e950, 0x8bbeb8ea, 0xfcb9887c,
	/* 0x5c */ 0x62dd1ddf, 0x15da2d49, 0x8cd37cf3, 0xfbd44c65,
	/* 0x60 */ 0x4db26158, 0x3ab551ce, 0xa3bc0074, 0xd4bb30e2,
	/* 0x64 */ 0x4adfa541, 0x3dd895d7, 0xa4d1c46d, 0xd3d6f4fb,
	/* 0x68 */ 0x4369e96a, 0x346ed9fc, 0xad678846, 0xda60b8d0,
	/* 0x6c */ 0x44042d73, 0x33031de5, 0xaa0a4c5f, 0xdd0d7cc9,
	/* 0x70 */ 0x5005713c, 0x270241aa, 0xbe0b1010, 0xc90c2086,
	/* 0x74 */ 0x5768b525, 0x206f85b3, 0xb966d409, 0xce61e49f,
	/* 0x78 */ 0x5edef90e, 0x29d9c998, 0xb0d09822, 0xc7d7a8b4,
	/* 0x7c */ 0x59b33d17, 0x2eb40d81, 0xb7bd5c3b, 0xc0ba6cad,
	/* 0x80 */ 0xedb88320, 0x9abfb3b6, 0x03b6e20c, 0x74b1d29a,
	/* 0x84 */ 0xead54739, 0x9dd277af, 0x04db2615, 0x73dc1683,
	/* 0x88 */ 0xe3630b12, 0x94643b84, 0x0d6d6a3e, 0x7a6a5aa8,
	/* 0x8c */ 0xe40ecf0b, 0x9309ff9d, 0x0a00ae27, 0x7d079eb1,
	/* 0x90 */ 0xf00f9344, 0x8708a3d2, 0x1e01f268, 0x6906c2fe,
	/* 0x94 */ 0xf762575d, 0x806567cb, 0x196c3671, 0x6e6b06e7,
	/* 0x98 */ 0xfed41b76, 0x89d32be0, 0x10da7a5a, 0x67dd4acc,
	/* 0x9c */ 0xf9b9df6f, 0x8ebeeff9, 0x17b7be43, 0x60b08ed5,
	/* 0xa0 */ 0xd6d6a3e8, 0xa1d1937e, 0x38d8c2c4, 0x4fdff252,
	/* 0xa4 */ 0xd1bb67f1, 0xa6bc5767, 0x3fb506dd, 0x48b2364b,
	/* 0xa8 */ 0xd80d2bda, 0xaf0a1b4c, 0x36034af6, 0x41047a60,
	/* 0xac */ 0xdf60efc3, 0xa867df55, 0x316e8eef, 0x4669be79,
	/* 0xb0 */ 0xcb61b38c, 0xbc66831a, 0x256fd2a0, 0x5268e236,
	/* 0xb4 */ 0xcc0c7795, 0xbb0b4703, 0x220216b9, 0x5505262f,
	/* 0xb8 */ 0xc5ba3bbe, 0xb2bd0b28, 0x2bb45a92, 0x5cb36a04,
	/* 0xbc */ 0xc2d7ffa7, 0xb5d0cf31, 0x2cd99e8b, 0x5bdeae1d,
	/* 0xc0 */ 0x9b64c2b0, 0xec63f226, 0x756aa39c, 0x026d930a,
	/* 0xc4 */ 0x9c0906a9, 0xeb0e363f, 0x72076785, 0x05005713,
	/* 0xc8 */ 0x95bf4a82, 0xe2b87a14, 0x7bb12bae, 0x0cb61b38,
	/* 0xcc */ 0x92d28e9b, 0xe5d5be0d, 0x7cdcefb7, 0x0bdbdf21,
	/* 0xd0 */ 0x86d3d2d4, 0xf1d4e242, 0x68ddb3f8, 0x1fda836e,
	/* 0xd4 */ 0x81be16cd, 0xf6b9265b, 0x6fb077e1, 0x18b74777,
	/* 0xd8 */ 0x88085ae6, 0xff0f6a70, 0x66063bca, 0x11010b5c,
	/* 0xdc */ 0x8f659eff, 0xf862ae69, 0x616bffd3, 0x166ccf45,
	/* 0xe0 */ 0xa00ae278, 0xd70dd2ee, 0x4e048354, 0x3903b3c2,
	/* 0xe4 */ 0xa7672661, 0xd06016f7, 0x4969474d, 0x3e6e77db,
	/* 0xe8 */ 0xaed16a4a, 0xd9d65adc, 0x40df0b66, 0x37d83bf0,
	/* 0xec */ 0xa9bcae53, 0xdebb9ec5, 0x47b2cf7f, 0x30b5ffe9,
	/* 0xf0 */ 0xbdbdf21c, 0xcabac28a, 0x53b39330, 0x24b4a3a6,
	/* 0xf4 */ 0xbad03605, 0xcdd70693, 0x54de5729, 0x23d967bf,
	/* 0xf8 */ 0xb3667a2e, 0xc4614ab8, 0x5d681b02, 0x2a6f2b94,
	/* 0xfc */ 0xb40bbe37, 0xc30c8ea1, 0x5a05df1b, 0x2d02ef8d,
};
#endif

// compute the crc32 of 32-bit message _msg_ from previous crc _crc_.
// build with -mcpu=cortex-a53+crc to enable native CRC instruction on ARM
static inline uint32_t rf_crc32_32(uint32_t crc, uint32_t msg)
{
#if !defined(RF_NOASM) && defined(__aarch64__) && defined(__ARM_FEATURE_CRC32)
	__asm__("crc32w %w0,%w0,%w1\n" : "+r"(crc) : "r"(msg));
#else
	int step;

	/* fold the message in, then consume it one byte at a time */
	crc ^= msg;
	for (step = 0; step < 4; step++)
		crc = rf_crc32_table[crc & 0xff] ^ (crc >> 8);
#endif
	return crc;
}

/* compute the crc32 of the 24-bit message _msg_ from previous crc _crc_ */
static inline uint32_t rf_crc32_24(uint32_t crc, uint32_t msg)
{
#if !defined(RF_NOASM) && defined(__aarch64__) && defined(__ARM_FEATURE_CRC32)
	__asm__("crc32b %w0,%w0,%w1\n" : "+r"(crc) : "r"(msg));
	__asm__("crc32h %w0,%w0,%w1\n" : "+r"(crc) : "r"(msg >> 8));
#else
	int step;

	/* fold the message in, then consume three bytes */
	crc ^= msg;
	for (step = 0; step < 3; step++)
		crc = rf_crc32_table[crc & 0xff] ^ (crc >> 8);
#endif
	return crc;
}

/* compute the crc32 of the 16-bit message _msg_ from previous crc _crc_ */
static inline uint32_t rf_crc32_16(uint32_t crc, uint32_t msg)
{
#if !defined(RF_NOASM) && defined(__aarch64__) && defined(__ARM_FEATURE_CRC32)
	__asm__("crc32h %w0,%w0,%w1\n" : "+r"(crc) : "r"(msg));
#else
	int step;

	/* fold the message in, then consume two bytes */
	crc ^= msg;
	for (step = 0; step < 2; step++)
		crc = rf_crc32_table[crc & 0xff] ^ (crc >> 8);
#endif
	return crc;
}

/* compute the crc32 of the 8-bit message _msg_ from previous crc _crc_ */
static inline uint32_t rf_crc32_8(uint32_t crc, uint32_t msg)
{
#if !defined(RF_NOASM) && defined(__aarch64__) && defined(__ARM_FEATURE_CRC32)
	__asm__("crc32b %w0,%w0,%w1\n" : "+r"(crc) : "r"(msg));
	return crc;
#else
	uint32_t x = crc ^ msg;

	return rf_crc32_table[x & 0xff] ^ (x >> 8);
#endif
}

/* returns a CRC32 of message <msg> from previous <crc>; the 32 highest bits
 * are zeroed.
 */
static inline uint64_t rf_crc32_64(uint32_t crc, uint64_t msg)
{
#if !defined(RF_NOASM) && defined(__aarch64__) && defined(__ARM_FEATURE_CRC32)
	uint64_t crc64 = crc;

	__asm__("crc32x %w0,%w0,%x2\n" : "=r"(crc64) : "0"(crc), "r"(msg));
	return crc64;
#else
	int half, step;

	/* process the low then the high 32-bit half, four bytes each */
	for (half = 0; half < 2; half++) {
		crc ^= (uint32_t)(msg >> (32 * half));
		for (step = 0; step < 4; step++)
			crc = rf_crc32_table[crc & 0xff] ^ (crc >> 8);
	}
	return crc;
#endif
}

/* Performs a CRC32 on a memory area: returns the CRC of the _len_ bytes at
 * _msg_, continued from _crc_, one byte at a time via rf_crc32_8().
 * Fix: read the buffer through a const-qualified pointer instead of casting
 * const away from the const void * argument (the data is never written). */
static inline uint32_t rf_crc32_mem(uint32_t crc, const void *msg, size_t len)
{
	const uint8_t *msg8 = (const uint8_t *)msg;

	while (len--) {
		crc = rf_crc32_8(crc, *msg8++);
	}
	return crc;
}
Loading