@@ -20,11 +20,11 @@ asmlinkage u32 crc32_be_arm64(u32 crc, unsigned char const *p, size_t len);
asmlinkage u32 crc32_le_arm64_4way(u32 crc, unsigned char const *p, size_t len);
asmlinkage u32 crc32c_le_arm64_4way(u32 crc, unsigned char const *p, size_t len);
asmlinkage u32 crc32_be_arm64_4way(u32 crc, unsigned char const *p, size_t len);

-u32 __pure crc32_le_arch(u32 crc, const u8 *p, size_t len)
+u32 crc32_le_arch(u32 crc, const u8 *p, size_t len)
{
if (!alternative_has_cap_likely(ARM64_HAS_CRC32))
return crc32_le_base(crc, p, len);

if (len >= min_len && cpu_have_named_feature(PMULL) && crypto_simd_usable()) {
@@ -41,11 +41,11 @@ u32 __pure crc32_le_arch(u32 crc, const u8 *p, size_t len)
return crc32_le_arm64(crc, p, len);
}
EXPORT_SYMBOL(crc32_le_arch);

-u32 __pure crc32c_le_arch(u32 crc, const u8 *p, size_t len)
+u32 crc32c_le_arch(u32 crc, const u8 *p, size_t len)
{
if (!alternative_has_cap_likely(ARM64_HAS_CRC32))
return crc32c_le_base(crc, p, len);

if (len >= min_len && cpu_have_named_feature(PMULL) && crypto_simd_usable()) {
@@ -62,11 +62,11 @@ u32 __pure crc32c_le_arch(u32 crc, const u8 *p, size_t len)
return crc32c_le_arm64(crc, p, len);
}
EXPORT_SYMBOL(crc32c_le_arch);

-u32 __pure crc32_be_arch(u32 crc, const u8 *p, size_t len)
+u32 crc32_be_arch(u32 crc, const u8 *p, size_t len)
{
if (!alternative_has_cap_likely(ARM64_HAS_CRC32))
return crc32_be_base(crc, p, len);

if (len >= min_len && cpu_have_named_feature(PMULL) && crypto_simd_usable()) {
@@ -173,14 +173,13 @@ static inline u32 crc32_le_unaligned(u32 crc, unsigned char const *p,
crc ^= crc_low;
return crc;
}

-static inline u32 __pure crc32_le_generic(u32 crc, unsigned char const *p,
- size_t len, u32 poly,
- unsigned long poly_qt,
- fallback crc_fb)
+static inline u32 crc32_le_generic(u32 crc, unsigned char const *p, size_t len,
+ u32 poly, unsigned long poly_qt,
+ fallback crc_fb)
{
size_t offset, head_len, tail_len;
unsigned long const *p_ul;
unsigned long s;
@@ -216,18 +215,18 @@ static inline u32 __pure crc32_le_generic(u32 crc, unsigned char const *p,
legacy:
return crc_fb(crc, p, len);
}

-u32 __pure crc32_le_arch(u32 crc, const u8 *p, size_t len)
+u32 crc32_le_arch(u32 crc, const u8 *p, size_t len)
{
return crc32_le_generic(crc, p, len, CRC32_POLY_LE, CRC32_POLY_QT_LE,
crc32_le_base);
}
EXPORT_SYMBOL(crc32_le_arch);

-u32 __pure crc32c_le_arch(u32 crc, const u8 *p, size_t len)
+u32 crc32c_le_arch(u32 crc, const u8 *p, size_t len)
{
return crc32_le_generic(crc, p, len, CRC32C_POLY_LE,
CRC32C_POLY_QT_LE, crc32c_le_base);
}
EXPORT_SYMBOL(crc32c_le_arch);
@@ -254,11 +253,11 @@ static inline u32 crc32_be_unaligned(u32 crc, unsigned char const *p,
crc ^= crc_low;
return crc;
}

-u32 __pure crc32_be_arch(u32 crc, const u8 *p, size_t len)
+u32 crc32_be_arch(u32 crc, const u8 *p, size_t len)
{
size_t offset, head_len, tail_len;
unsigned long const *p_ul;
unsigned long s;
@@ -6,33 +6,33 @@
#define _LINUX_CRC32_H
#include <linux/types.h>
#include <linux/bitrev.h>

-u32 __pure crc32_le_arch(u32 crc, const u8 *p, size_t len);
-u32 __pure crc32_le_base(u32 crc, const u8 *p, size_t len);
-u32 __pure crc32_be_arch(u32 crc, const u8 *p, size_t len);
-u32 __pure crc32_be_base(u32 crc, const u8 *p, size_t len);
-u32 __pure crc32c_le_arch(u32 crc, const u8 *p, size_t len);
-u32 __pure crc32c_le_base(u32 crc, const u8 *p, size_t len);
+u32 crc32_le_arch(u32 crc, const u8 *p, size_t len);
+u32 crc32_le_base(u32 crc, const u8 *p, size_t len);
+u32 crc32_be_arch(u32 crc, const u8 *p, size_t len);
+u32 crc32_be_base(u32 crc, const u8 *p, size_t len);
+u32 crc32c_le_arch(u32 crc, const u8 *p, size_t len);
+u32 crc32c_le_base(u32 crc, const u8 *p, size_t len);

-static inline u32 __pure crc32_le(u32 crc, const void *p, size_t len)
+static inline u32 crc32_le(u32 crc, const void *p, size_t len)
{
if (IS_ENABLED(CONFIG_CRC32_ARCH))
return crc32_le_arch(crc, p, len);
return crc32_le_base(crc, p, len);
}

-static inline u32 __pure crc32_be(u32 crc, const void *p, size_t len)
+static inline u32 crc32_be(u32 crc, const void *p, size_t len)
{
if (IS_ENABLED(CONFIG_CRC32_ARCH))
return crc32_be_arch(crc, p, len);
return crc32_be_base(crc, p, len);
}

/* TODO: leading underscores should be dropped once callers have been updated */
-static inline u32 __pure __crc32c_le(u32 crc, const void *p, size_t len)
+static inline u32 __crc32c_le(u32 crc, const void *p, size_t len)
{
if (IS_ENABLED(CONFIG_CRC32_ARCH))
return crc32c_le_arch(crc, p, len);
return crc32c_le_base(crc, p, len);
}
@@ -68,11 +68,11 @@ static inline u32 crc32_optimizations(void) { return 0; }
* the crc32_le() value of seq_full, then crc_full ==
* crc32_le_combine(crc1, crc2, len2) when crc_full was seeded
* with the same initializer as crc1, and crc2 seed was 0. See
* also crc32_combine_test().
*/
-u32 __attribute_const__ crc32_le_shift(u32 crc, size_t len);
+u32 crc32_le_shift(u32 crc, size_t len);

static inline u32 crc32_le_combine(u32 crc1, u32 crc2, size_t len2)
{
return crc32_le_shift(crc1, len2) ^ crc2;
}
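The identity documented above is easy to sanity-check outside the kernel. The following is a standalone userspace sketch, not kernel code: crc32_le() here is a naive bit-at-a-time implementation over the reflected polynomial 0xedb88320 (the value of CRC32_POLY_LE), and the hypothetical helper crc32_le_shift_slow() stands in for crc32_le_shift() by simply feeding len zero bytes. It checks that crc_full == crc32_le_shift(crc1, len2) ^ crc2 when crc2 was seeded with 0, as the comment states.

/*
 * Standalone check of the combine identity documented above.
 * Naive bit-at-a-time CRC over the reflected polynomial 0xedb88320
 * (CRC32_POLY_LE); the shift is done the slow way, via zero bytes.
 */
#include <stdint.h>
#include <stdio.h>
#include <stddef.h>

static uint32_t crc32_le(uint32_t crc, const uint8_t *p, size_t len)
{
	while (len--) {
		crc ^= *p++;
		for (int i = 0; i < 8; i++)
			crc = (crc >> 1) ^ (crc & 1 ? 0xedb88320u : 0);
	}
	return crc;
}

/* The O(len) equivalent of crc32_le_shift(): append len zero bytes. */
static uint32_t crc32_le_shift_slow(uint32_t crc, size_t len)
{
	static const uint8_t zero;

	while (len--)
		crc = crc32_le(crc, &zero, 1);
	return crc;
}

int main(void)
{
	static const uint8_t seq[] = "combine test vector";
	size_t len1 = 7, len2 = sizeof(seq) - 1 - len1;
	uint32_t seed = 0xffffffffu;
	uint32_t crc_full = crc32_le(seed, seq, sizeof(seq) - 1);
	uint32_t crc1 = crc32_le(seed, seq, len1);
	uint32_t crc2 = crc32_le(0, seq + len1, len2);
	uint32_t combined = crc32_le_shift_slow(crc1, len2) ^ crc2;

	printf("full=%08x combined=%08x %s\n", (unsigned)crc_full,
	       (unsigned)combined, crc_full == combined ? "match" : "MISMATCH");
	return crc_full != combined;
}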
@@ -93,11 +93,11 @@ static inline u32 crc32_le_combine(u32 crc1, u32 crc2, size_t len2)
* the __crc32c_le() value of seq_full, then crc_full ==
* __crc32c_le_combine(crc1, crc2, len2) when crc_full was
* seeded with the same initializer as crc1, and crc2 seed
* was 0. See also crc32c_combine_test().
*/
-u32 __attribute_const__ __crc32c_le_shift(u32 crc, size_t len);
+u32 __crc32c_le_shift(u32 crc, size_t len);

static inline u32 __crc32c_le_combine(u32 crc1, u32 crc2, size_t len2)
{
return __crc32c_le_shift(crc1, len2) ^ crc2;
}
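The same check carries over to the Castagnoli variant: swapping the reflected polynomial 0x82f63b78 (the value of CRC32C_POLY_LE) for 0xedb88320 in the sketch above exercises the __crc32c_le_combine() identity with no other changes.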
@@ -35,19 +35,19 @@
MODULE_AUTHOR("Matt Domsch <Matt_Domsch@dell.com>");
MODULE_DESCRIPTION("Various CRC32 calculations");
MODULE_LICENSE("GPL");

-u32 __pure crc32_le_base(u32 crc, const u8 *p, size_t len)
+u32 crc32_le_base(u32 crc, const u8 *p, size_t len)
{
while (len--)
crc = (crc >> 8) ^ crc32table_le[(crc & 255) ^ *p++];
return crc;
}
EXPORT_SYMBOL(crc32_le_base);

-u32 __pure crc32c_le_base(u32 crc, const u8 *p, size_t len)
+u32 crc32c_le_base(u32 crc, const u8 *p, size_t len)
{
while (len--)
crc = (crc >> 8) ^ crc32ctable_le[(crc & 255) ^ *p++];
return crc;
}
@@ -56,11 +56,11 @@ EXPORT_SYMBOL(crc32c_le_base);
/*
* This multiplies the polynomials x and y modulo the given modulus.
* This follows the "little-endian" CRC convention that the lsbit
* represents the highest power of x, and the msbit represents x^0.
*/
-static u32 __attribute_const__ gf2_multiply(u32 x, u32 y, u32 modulus)
+static u32 gf2_multiply(u32 x, u32 y, u32 modulus)
{
u32 product = x & 1 ? y : 0;
int i;

for (i = 0; i < 31; i++) {
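The diff context cuts gf2_multiply() off mid-loop. For reference, below is a standalone sketch of the complete lsbit-first Horner multiply together with two illustrative property checks that are not part of the kernel sources. In this reflected convention the msbit represents x^0, so 0x80000000 acts as the multiplicative identity.

/*
 * Standalone sketch of the lsbit-first GF(2) multiply, with two
 * illustrative checks. Because the msbit represents x^0, 0x80000000
 * is the multiplicative identity here.
 */
#include <assert.h>
#include <stdint.h>

#define CRC32_POLY_LE 0xedb88320u

static uint32_t gf2_multiply(uint32_t x, uint32_t y, uint32_t modulus)
{
	uint32_t product = x & 1 ? y : 0;
	int i;

	for (i = 0; i < 31; i++) {
		/* Multiply the accumulator by x: the lsbit (coefficient of
		 * x^31) shifts out as an x^32 term, reduced by the modulus. */
		product = (product >> 1) ^ (product & 1 ? modulus : 0);
		x >>= 1;
		product ^= x & 1 ? y : 0;
	}
	return product;
}

int main(void)
{
	uint32_t a = 0xdeadbeefu, b = 0x12345678u;

	/* x^0 (the msbit) is the identity element. */
	assert(gf2_multiply(a, 0x80000000u, CRC32_POLY_LE) == a);
	/* Multiplication modulo the polynomial is commutative. */
	assert(gf2_multiply(a, b, CRC32_POLY_LE) ==
	       gf2_multiply(b, a, CRC32_POLY_LE));
	return 0;
}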
@@ -82,12 +82,11 @@ static u32 __attribute_const__ gf2_multiply(u32 x, u32 y, u32 modulus)
* over separate ranges of a buffer, then summing them.
* This shifts the given CRC by 8*len bits (i.e. produces the same effect
* as appending len bytes of zero to the data), in time proportional
* to log(len).
*/
-static u32 __attribute_const__ crc32_generic_shift(u32 crc, size_t len,
- u32 polynomial)
+static u32 crc32_generic_shift(u32 crc, size_t len, u32 polynomial)
{
u32 power = polynomial; /* CRC of x^32 */
int i;

/* Shift up to 32 bits in the simple linear way */
@@ -112,23 +111,23 @@ static u32 __attribute_const__ crc32_generic_shift(u32 crc, size_t len,
}
return crc;
}
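crc32_generic_shift() is likewise only partially visible here. The standalone sketch below, a userspace illustration rather than the kernel's exact code, recreates the strategy the comment describes: shift up to three leftover bytes one bit at a time, then handle the remaining 32-bit words by square-and-multiply, with a running power stepping through x^32, x^64, x^128, and so on. The hypothetical crc32_shift_fast() is checked against a bit-at-a-time reference for lengths 0 through 99.

/*
 * Standalone check of the square-and-multiply shift: shifting a CRC by
 * 8*len bits must match feeding len zero bytes through a bitwise CRC.
 */
#include <assert.h>
#include <stddef.h>
#include <stdint.h>

#define CRC32_POLY_LE 0xedb88320u

static uint32_t gf2_multiply(uint32_t x, uint32_t y, uint32_t modulus)
{
	uint32_t product = x & 1 ? y : 0;
	int i;

	for (i = 0; i < 31; i++) {
		product = (product >> 1) ^ (product & 1 ? modulus : 0);
		x >>= 1;
		product ^= x & 1 ? y : 0;
	}
	return product;
}

/* Shift crc by 8*len bits in O(log(len)) multiplies. */
static uint32_t crc32_shift_fast(uint32_t crc, size_t len)
{
	uint32_t power = CRC32_POLY_LE;	/* x^32 reduced mod the polynomial */
	int i;

	/* Up to three leftover bytes the simple linear way, bit by bit. */
	for (i = 0; i < 8 * (int)(len & 3); i++)
		crc = (crc >> 1) ^ (crc & 1 ? CRC32_POLY_LE : 0);

	/* Remaining 32-bit words by square-and-multiply; "power" steps
	 * through x^32, x^64, x^128, ... */
	for (len >>= 2; len; len >>= 1) {
		if (len & 1)
			crc = gf2_multiply(crc, power, CRC32_POLY_LE);
		power = gf2_multiply(power, power, CRC32_POLY_LE);
	}
	return crc;
}

int main(void)
{
	uint32_t crc = 0x1b24f83cu;	/* arbitrary starting value */
	size_t len;

	for (len = 0; len < 100; len++) {
		uint32_t slow = crc;
		size_t i;

		/* Reference: feed len zero bytes, one bit at a time. */
		for (i = 0; i < 8 * len; i++)
			slow = (slow >> 1) ^ (slow & 1 ? CRC32_POLY_LE : 0);
		assert(crc32_shift_fast(crc, len) == slow);
	}
	return 0;
}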
-u32 __attribute_const__ crc32_le_shift(u32 crc, size_t len)
+u32 crc32_le_shift(u32 crc, size_t len)
{
return crc32_generic_shift(crc, len, CRC32_POLY_LE);
}

-u32 __attribute_const__ __crc32c_le_shift(u32 crc, size_t len)
+u32 __crc32c_le_shift(u32 crc, size_t len)
{
return crc32_generic_shift(crc, len, CRC32C_POLY_LE);
}
EXPORT_SYMBOL(crc32_le_shift);
EXPORT_SYMBOL(__crc32c_le_shift);

-u32 __pure crc32_be_base(u32 crc, const u8 *p, size_t len)
+u32 crc32_be_base(u32 crc, const u8 *p, size_t len)
{
while (len--)
crc = (crc << 8) ^ crc32table_be[(crc >> 24) ^ *p++];
return crc;
}