Browse Source

Generic wear-leveling algorithm (#16996)

* Initial import of wear-leveling algorithm.

* Alignment.

* Docs tweaks.

* Lock/unlock.

* Update quantum/wear_leveling/wear_leveling_internal.h

Co-authored-by: Stefan Kerkmann <karlk90@pm.me>

* More tests, fix issue with consolidation when unlocked.

* More tests.

* Review comments.

* Add plumbing for FNV1a.

* Another test checking that checksum mismatch clears the cache.

* Check that the write log still gets played back.

Co-authored-by: Stefan Kerkmann <karlk90@pm.me>
pull/17488/head
Nick Brassel 1 year ago
committed by GitHub
parent
commit
01ecf332ff
No known key found for this signature in database GPG Key ID: 4AEE18F83AFDEB23
28 changed files with 7519 additions and 0 deletions
  1. +1
    -0
      builddefs/build_test.mk
  2. +6
    -0
      builddefs/common_features.mk
  3. +1
    -0
      builddefs/testlist.mk
  4. +304
    -0
      lib/fnv/Makefile
  5. +158
    -0
      lib/fnv/README
  6. +249
    -0
      lib/fnv/fnv.h
  7. +467
    -0
      lib/fnv/fnv32.c
  8. +591
    -0
      lib/fnv/fnv64.c
  9. +156
    -0
      lib/fnv/hash_32.c
  10. +144
    -0
      lib/fnv/hash_32a.c
  11. +312
    -0
      lib/fnv/hash_64.c
  12. +291
    -0
      lib/fnv/hash_64a.c
  13. +58
    -0
      lib/fnv/have_ulong64.c
  14. +18
    -0
      lib/fnv/longlong.h
  15. +14
    -0
      lib/fnv/qmk_fnv_type_validation.c
  16. +2237
    -0
      lib/fnv/test_fnv.c
  17. +154
    -0
      quantum/wear_leveling/tests/backing_mocks.cpp
  18. +210
    -0
      quantum/wear_leveling/tests/backing_mocks.hpp
  19. +66
    -0
      quantum/wear_leveling/tests/rules.mk
  20. +6
    -0
      quantum/wear_leveling/tests/testlist.mk
  21. +228
    -0
      quantum/wear_leveling/tests/wear_leveling_2byte.cpp
  22. +295
    -0
      quantum/wear_leveling/tests/wear_leveling_2byte_optimized_writes.cpp
  23. +193
    -0
      quantum/wear_leveling/tests/wear_leveling_4byte.cpp
  24. +178
    -0
      quantum/wear_leveling/tests/wear_leveling_8byte.cpp
  25. +204
    -0
      quantum/wear_leveling/tests/wear_leveling_general.cpp
  26. +779
    -0
      quantum/wear_leveling/wear_leveling.c
  27. +54
    -0
      quantum/wear_leveling/wear_leveling.h
  28. +145
    -0
      quantum/wear_leveling/wear_leveling_internal.h

+ 1
- 0
builddefs/build_test.mk View File

@ -63,6 +63,7 @@ include $(TMK_PATH)/protocol.mk
include $(QUANTUM_PATH)/debounce/tests/rules.mk
include $(QUANTUM_PATH)/encoder/tests/rules.mk
include $(QUANTUM_PATH)/sequencer/tests/rules.mk
include $(QUANTUM_PATH)/wear_leveling/tests/rules.mk
include $(PLATFORM_PATH)/test/rules.mk
ifneq ($(filter $(FULL_TESTS),$(TEST)),)
include $(BUILDDEFS_PATH)/build_full_test.mk


+ 6
- 0
builddefs/common_features.mk View File

@ -650,6 +650,12 @@ ifeq ($(strip $(CRC_ENABLE)), yes)
SRC += crc.c
endif
ifeq ($(strip $(FNV_ENABLE)), yes)
OPT_DEFS += -DFNV_ENABLE
VPATH += $(LIB_PATH)/fnv
SRC += qmk_fnv_type_validation.c hash_32a.c hash_64a.c
endif
ifeq ($(strip $(HAPTIC_ENABLE)),yes)
COMMON_VPATH += $(DRIVER_PATH)/haptic


+ 1
- 0
builddefs/testlist.mk View File

@ -4,6 +4,7 @@ FULL_TESTS := $(notdir $(TEST_LIST))
include $(QUANTUM_PATH)/debounce/tests/testlist.mk
include $(QUANTUM_PATH)/encoder/tests/testlist.mk
include $(QUANTUM_PATH)/sequencer/tests/testlist.mk
include $(QUANTUM_PATH)/wear_leveling/tests/testlist.mk
include $(PLATFORM_PATH)/test/testlist.mk
define VALIDATE_TEST_LIST


+ 304
- 0
lib/fnv/Makefile View File

@ -0,0 +1,304 @@
#!/bin/make
#
# hash - makefile for FNV hash tools
#
# @(#) $Revision: 5.2 $
# @(#) $Id: Makefile,v 5.2 2012/03/21 01:42:15 chongo Exp $
# @(#) $Source: /usr/local/src/cmd/fnv/RCS/Makefile,v $
#
# See:
# http://www.isthe.com/chongo/tech/comp/fnv/index.html
#
# for the most up to date copy of this code and the FNV hash home page.
#
# Please do not copyright this code. This code is in the public domain.
#
# LANDON CURT NOLL DISCLAIMS ALL WARRANTIES WITH REGARD TO THIS SOFTWARE,
# INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO
# EVENT SHALL LANDON CURT NOLL BE LIABLE FOR ANY SPECIAL, INDIRECT OR
# CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF
# USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR
# OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
# PERFORMANCE OF THIS SOFTWARE.
#
# By:
# chongo <Landon Curt Noll> /\oo/\
# http://www.isthe.com/chongo/
#
# Share and Enjoy! :-)
# make tools
#
# Shell used for all recipe lines.
SHELL= /bin/sh
# Compiler flags; swap in the commented alternative to lower optimization.
CFLAGS= -O3 -g3
#CFLAGS= -O2 -g3
#CC= cc
# Tools used by the build, packaging and install rules.
AR= ar
TAR= tar
EGREP= egrep
GZIP_BIN= gzip
INSTALL= install
# If your system needs ranlib use:
# RANLIB= ranlib
# otherwise use:
# RANLIB= :
#
#RANLIB= ranlib
RANLIB= :
# where to install things
#
DESTBIN= /usr/local/bin
DESTLIB= /usr/local/lib
DESTINC= /usr/local/include
# what to build
#
# SRC/HSRC are the shipped sources; NO64BIT_SRC are generated copies of the
# 64 bit sources compiled with -DNO64BIT_LONG_LONG (see rules below).
SRC= hash_32.c hash_32a.c hash_64.c hash_64a.c \
fnv32.c fnv64.c \
have_ulong64.c test_fnv.c
NO64BIT_SRC= no64bit_fnv64.c no64bit_hash_64.c \
no64bit_hash_64a.c no64bit_test_fnv.c
HSRC= fnv.h \
longlong.h
ALL= ${SRC} ${HSRC} \
README Makefile
# PROGS are built; OBSOLETE_PROGS are old install names removed on install.
PROGS= fnv032 fnv064 fnv132 fnv164 fnv1a32 fnv1a64
OBSOLETE_PROGS= fnv0_32 fnv0_64 fnv1_32 fnv1_64 fnv1a_32 fnv1a_64
NO64BIT_PROGS= no64bit_fnv064 no64bit_fnv164 no64bit_fnv1a64
LIBS= libfnv.a
LIBOBJ= hash_32.o hash_64.o hash_32a.o hash_64a.o test_fnv.o
NO64BIT_OBJ= no64bit_fnv64.o no64bit_hash_64.o \
no64bit_hash_64a.o no64bit_test_fnv.o
OTHEROBJ= fnv32.o fnv64.o
TARGETS= ${LIBOBJ} ${LIBS} ${PROGS}
# default rule
#
all: ${TARGETS}
# things to build
#
# Object files: every object depends on the generated longlong.h plus the
# public fnv.h header.  (Recipe lines below carry the mandatory hard tab,
# which had been lost in transit.)
hash_32.o: hash_32.c longlong.h fnv.h
	${CC} ${CFLAGS} hash_32.c -c

hash_64.o: hash_64.c longlong.h fnv.h
	${CC} ${CFLAGS} hash_64.c -c

hash_32a.o: hash_32a.c longlong.h fnv.h
	${CC} ${CFLAGS} hash_32a.c -c

hash_64a.o: hash_64a.c longlong.h fnv.h
	${CC} ${CFLAGS} hash_64a.c -c

test_fnv.o: test_fnv.c longlong.h fnv.h
	${CC} ${CFLAGS} test_fnv.c -c

fnv32.o: fnv32.c longlong.h fnv.h
	${CC} ${CFLAGS} fnv32.c -c

fnv032: fnv32.o libfnv.a
	${CC} fnv32.o libfnv.a -o fnv032

fnv64.o: fnv64.c longlong.h fnv.h
	${CC} ${CFLAGS} fnv64.c -c

fnv064: fnv64.o libfnv.a
	${CC} fnv64.o libfnv.a -o fnv064

libfnv.a: ${LIBOBJ}
	rm -f $@
	${AR} rv $@ ${LIBOBJ}
	${RANLIB} $@

# The FNV-1/FNV-1a front ends are byte-for-byte copies of the FNV-0
# binaries; the utility selects its hash variant from argv[0] at run time.
fnv132: fnv032
	-rm -f $@
	-cp -f $? $@

fnv1a32: fnv032
	-rm -f $@
	-cp -f $? $@

fnv164: fnv064
	-rm -f $@
	-cp -f $? $@

fnv1a64: fnv064
	-rm -f $@
	-cp -f $? $@

# Generate longlong.h by probing (via the have_ulong64 helper program)
# whether the compiler provides a usable unsigned 64 bit long long type.
# Compile/run failures are deliberately ignored; an empty probe output
# falls through to the "#undef HAVE_64BIT_LONG_LONG" branch.
longlong.h: have_ulong64.c Makefile
	-@rm -f have_ulong64 have_ulong64.o ll_tmp longlong.h
	@echo 'forming longlong.h'
	@echo '/*' > longlong.h
	@echo ' * DO NOT EDIT -- generated by the Makefile' >> longlong.h
	@echo ' */' >> longlong.h
	@echo '' >> longlong.h
	@echo '#if !defined(__LONGLONG_H__)' >> longlong.h
	@echo '#define __LONGLONG_H__' >> longlong.h
	@echo '' >> longlong.h
	@echo '/* do we have/want to use a long long type? */' >> longlong.h
	-@rm -f have_ulong64.o have_ulong64
	-@${CC} ${CFLAGS} have_ulong64.c -c 2>/dev/null; true
	-@${CC} ${CFLAGS} have_ulong64.o -o have_ulong64 2>/dev/null; true
	-@${SHELL} -c "./have_ulong64 > ll_tmp 2>/dev/null" \
	    >/dev/null 2>&1; true
	-@if [ -s ll_tmp ]; then \
	    cat ll_tmp >> longlong.h; \
	else \
	    echo '#undef HAVE_64BIT_LONG_LONG /* no */' >> longlong.h; \
	fi
	@echo '' >> longlong.h
	@echo '/*' >> longlong.h
	@echo ' * NO64BIT_LONG_LONG undef HAVE_64BIT_LONG_LONG' >> longlong.h
	@echo ' */' >> longlong.h
	@echo '#if defined(NO64BIT_LONG_LONG)' >> longlong.h
	@echo '#undef HAVE_64BIT_LONG_LONG' >> longlong.h
	@echo '#endif /* NO64BIT_LONG_LONG */' >> longlong.h
	@echo '' >> longlong.h
	@echo '#endif /* !__LONGLONG_H__ */' >> longlong.h
	-@rm -f have_ulong64 have_ulong64.o ll_tmp
	@echo 'longlong.h formed'
# utilities
#
# Install programs, library and headers under the DEST* prefixes, then
# remove program names from older releases.
#
# BUGFIX: the directory guards tested `[ -d dir ]` before mkdir, which
# only ran `mkdir -p` when the directory already existed (a no-op) and
# skipped creation when it was missing.  Test for absence instead.
install: all
	-@if [ ! -d "${DESTBIN}" ]; then \
	    echo " mkdir -p ${DESTBIN}"; \
	    mkdir -p "${DESTBIN}"; \
	fi
	-@if [ ! -d "${DESTLIB}" ]; then \
	    echo " mkdir -p ${DESTLIB}"; \
	    mkdir -p "${DESTLIB}"; \
	fi
	-@if [ ! -d "${DESTINC}" ]; then \
	    echo " mkdir -p ${DESTINC}"; \
	    mkdir -p "${DESTINC}"; \
	fi
	${INSTALL} -m 0755 ${PROGS} ${DESTBIN}
	${INSTALL} -m 0644 ${LIBS} ${DESTLIB}
	${RANLIB} ${DESTLIB}/libfnv.a
	${INSTALL} -m 0644 ${HSRC} ${DESTINC}
	@# remove obsolete programs installed by earlier releases
	for i in ${OBSOLETE_PROGS}; do \
	    if [ -f "${DESTBIN}/$$i" ]; then \
		echo "rm -f ${DESTBIN}/$$i"; \
		rm -f "${DESTBIN}/$$i"; \
	    fi; \
	done
# clean: remove intermediate build products (objects, generated header,
# probe scratch files).  Recipe tabs restored; commands unchanged.
clean:
	-rm -f have_ulong64 have_ulong64.o ll_tmp ll_tmp2 longlong.h
	-rm -f ${LIBOBJ}
	-rm -f ${OTHEROBJ}

# clobber: everything clean removes, plus final programs and the
# generated no64bit_* sources and vector.c.
clobber: clean
	-rm -f ${TARGETS}
	-rm -f ${OBSOLETE_PROGS} lltmp lltmp2 ll_tmp
	-rm -f ${NO64BIT_SRC}
	-rm -f ${NO64BIT_OBJ}
	-rm -f ${NO64BIT_PROGS}
	-rm -f vector.c
# Run the self-test (-t 1 -v) of every built hash utility; each exits
# non-zero on a vector mismatch, failing the check.
# NOTE(review): `echo -n` is not portable across /bin/sh implementations
# (printf would be safer); kept as-is to preserve the exact output.
check: ${PROGS}
	@echo -n "FNV-0 32 bit tests: "
	@./fnv032 -t 1 -v
	@echo -n "FNV-1 32 bit tests: "
	@./fnv132 -t 1 -v
	@echo -n "FNV-1a 32 bit tests: "
	@./fnv1a32 -t 1 -v
	@echo -n "FNV-0 64 bit tests: "
	@./fnv064 -t 1 -v
	@echo -n "FNV-1 64 bit tests: "
	@./fnv164 -t 1 -v
	@echo -n "FNV-1a 64 bit tests: "
	@./fnv1a64 -t 1 -v
###############################
# generate test vector source #
###############################

# The no64bit_* sources are copies of the 64 bit sources that get compiled
# with -DNO64BIT_LONG_LONG, exercising the two-word Fnv64_t fallback path.
no64bit_fnv64.c: fnv64.c
	-rm -f $@
	-cp -f $? $@

no64bit_hash_64.c: hash_64.c
	-rm -f $@
	-cp -f $? $@

no64bit_hash_64a.c: hash_64a.c
	-rm -f $@
	-cp -f $? $@

no64bit_test_fnv.c: test_fnv.c
	-rm -f $@
	-cp -f $? $@

no64bit_fnv64.o: no64bit_fnv64.c longlong.h fnv.h
	${CC} ${CFLAGS} -DNO64BIT_LONG_LONG no64bit_fnv64.c -c

no64bit_hash_64.o: no64bit_hash_64.c longlong.h fnv.h
	${CC} ${CFLAGS} -DNO64BIT_LONG_LONG no64bit_hash_64.c -c

no64bit_hash_64a.o: no64bit_hash_64a.c longlong.h fnv.h
	${CC} ${CFLAGS} -DNO64BIT_LONG_LONG no64bit_hash_64a.c -c

no64bit_test_fnv.o: no64bit_test_fnv.c longlong.h fnv.h
	${CC} ${CFLAGS} -DNO64BIT_LONG_LONG no64bit_test_fnv.c -c

no64bit_fnv064: no64bit_fnv64.o no64bit_hash_64.o \
  no64bit_hash_64a.o no64bit_test_fnv.o
	${CC} ${CFLAGS} no64bit_fnv64.o no64bit_hash_64.o \
	    no64bit_hash_64a.o no64bit_test_fnv.o -o $@

no64bit_fnv164: no64bit_fnv064
	-rm -f $@
	-cp -f $? $@

no64bit_fnv1a64: no64bit_fnv064
	-rm -f $@
	-cp -f $? $@

# Regenerate vector.c (the C table of expected hash values) from the
# freshly built utilities.  NOTE: make expands $@ even inside the single
# quotes, so the emitted comments name vector.c; the tab-prefixed `#@`
# lines are shell comments acting as visual separators.
vector.c: ${PROGS} ${NO64BIT_PROGS}
	-rm -f $@
	echo '/* start of output generated by make $@ */' >> $@
	echo '' >> $@
	#@
	echo '/* FNV-0 32 bit test vectors */' >> $@
	./fnv032 -t 0 >> $@
	echo '' >> $@
	#@
	echo '/* FNV-1 32 bit test vectors */' >> $@
	./fnv132 -t 0 >> $@
	echo '' >> $@
	#@
	echo '/* FNV-1a 32 bit test vectors */' >> $@
	./fnv1a32 -t 0 >> $@
	echo '' >> $@
	#@
	echo '/* FNV-0 64 bit test vectors */' >> $@
	echo '#if defined(HAVE_64BIT_LONG_LONG)' >> $@
	./fnv064 -t 0 >> $@
	echo '#else /* HAVE_64BIT_LONG_LONG */' >> $@
	./no64bit_fnv064 -t 0 >> $@
	echo '#endif /* HAVE_64BIT_LONG_LONG */' >> $@
	echo '' >> $@
	#@
	echo '/* FNV-1 64 bit test vectors */' >> $@
	echo '#if defined(HAVE_64BIT_LONG_LONG)' >> $@
	./fnv164 -t 0 >> $@
	echo '#else /* HAVE_64BIT_LONG_LONG */' >> $@
	./no64bit_fnv164 -t 0 >> $@
	echo '#endif /* HAVE_64BIT_LONG_LONG */' >> $@
	echo '' >> $@
	#@
	echo '/* FNV-1a 64 bit test vectors */' >> $@
	echo '#if defined(HAVE_64BIT_LONG_LONG)' >> $@
	./fnv1a64 -t 0 >> $@
	echo '#else /* HAVE_64BIT_LONG_LONG */' >> $@
	./no64bit_fnv1a64 -t 0 >> $@
	echo '#endif /* HAVE_64BIT_LONG_LONG */' >> $@
	echo '' >> $@
	#@
	echo '/* end of output generated by make $@ */' >> $@

+ 158
- 0
lib/fnv/README View File

@ -0,0 +1,158 @@
#=====================#
# Fowler/Noll/Vo hash #
#=====================#
The basis of this hash algorithm was taken from an idea sent
as reviewer comments to the IEEE POSIX P1003.2 committee by:
Phong Vo (http://www.research.att.com/info/kpv)
Glenn Fowler (http://www.research.att.com/~gsf/)
In a subsequent ballot round:
Landon Curt Noll (http://www.isthe.com/chongo)
improved on their algorithm. Some people tried this hash
and found that it worked rather well. In an EMail message
to Landon, they named it the ``Fowler/Noll/Vo'' or FNV hash.
FNV hashes are designed to be fast while maintaining a low
collision rate. The FNV speed allows one to quickly hash lots
of data while maintaining a reasonable collision rate. See:
http://www.isthe.com/chongo/tech/comp/fnv/index.html
for more details as well as other forms of the FNV hash.
Comments, questions, bug fixes and suggestions welcome at
the address given in the above URL.
#==================#
# FNV hash utility #
#==================#
Two hash utilities (32 bit and 64 bit) are provided:
fnv032 [-b bcnt] [-m] [-s arg] [-t code] [-v] [arg ...]
fnv132 [-b bcnt] [-m] [-s arg] [-t code] [-v] [arg ...]
fnv1a32 [-b bcnt] [-m] [-s arg] [-t code] [-v] [arg ...]
fnv064 [-b bcnt] [-m] [-s arg] [-t code] [-v] [arg ...]
fnv164 [-b bcnt] [-m] [-s arg] [-t code] [-v] [arg ...]
fnv1a64 [-b bcnt] [-m] [-s arg] [-t code] [-v] [arg ...]
-b bcnt mask off all but the lower bcnt bits (default: 32)
-m multiple hashes, one per line for each arg
-s hash arg as a string (ignoring terminating NUL bytes)
-t code 0 ==> generate test vectors, 1 ==> test FNV hash
-v verbose mode, print arg after hash (implies -m)
arg string (if -s was given) or filename (default stdin)
The fnv032, fnv064 implement the historic FNV-0 hash.
The fnv132, fnv164 implement the recommended FNV-1 hash.
The fnv1a32, fnv1a64 implement the recommended FNV-1a hash.
This is the original historic FNV algorithm with a 0 offset basis.
It is recommended that FNV-1, with a non-0 offset basis be used instead.
To test FNV hashes, try:
fnv032 -t 1 -v
fnv132 -t 1 -v
fnv1a32 -t 1 -v
fnv064 -t 1 -v
fnv164 -t 1 -v
fnv1a64 -t 1 -v
If you are compiling, try:
make check
#==================#
# FNV hash library #
#==================#
The libfnv.a library implements both a 32 bit and a 64 bit FNV hash
on collections of bytes, on NUL terminated strings, or on an open file
descriptor.
Here is the 32 bit FNV 1 hash:
Fnv32_t fnv_32_buf(void *buf, int len, Fnv32_t hval); /* byte buf */
Fnv32_t fnv_32_str(char *string, Fnv32_t hval); /* string */
Here is the 32 bit FNV 1a hash:
Fnv32_t fnv_32a_buf(void *buf, int len, Fnv32_t hval); /* byte buf */
Fnv32_t fnv_32a_str(char *string, Fnv32_t hval); /* string */
Here is the 64 bit FNV 1 hash:
Fnv64_t fnv_64_buf(void *buf, int len, Fnv64_t hval); /* byte buf */
Fnv64_t fnv_64_str(char *string, Fnv64_t hval); /* string */
Here is the 64 bit FNV 1a hash:
Fnv64_t fnv_64a_buf(void *buf, int len, Fnv64_t hval); /* byte buf */
Fnv64_t fnv_64a_str(char *string, Fnv64_t hval); /* string */
On the first call to a hash function, one must supply the initial basis
that is appropriate for the hash in question:
FNV-0: (not recommended)
FNV0_32_INIT /* 32 bit FNV-0 initial basis */
FNV0_64_INIT /* 64 bit FNV-0 initial basis */
FNV-1:
FNV1_32_INIT /* 32 bit FNV-1 initial basis */
FNV1_64_INIT /* 64 bit FNV-1 initial basis */
FNV-1a:
FNV1A_32_INIT /* 32 bit FNV-1a initial basis */
FNV1A_64_INIT /* 64 bit FNV-1a initial basis */
For example to perform a 64 bit FNV-1 hash:
#include "fnv.h"
Fnv64_t hash_val;
hash_val = fnv_64_str("a string", FNV1_64_INIT);
hash_val = fnv_64_str("more string", hash_val);
produces the same final hash value as:
hash_val = fnv_64_str("a stringmore string", FNV1_64_INIT);
NOTE: If one used 'FNV0_64_INIT' instead of 'FNV1_64_INIT' one would get the
historic FNV-0 hash instead of the recommended FNV-1 hash.
To perform a 32 bit FNV-1 hash:
#include "fnv.h"
Fnv32_t hash_val;
hash_val = fnv_32_buf(buf, length_of_buf, FNV1_32_INIT);
hash_val = fnv_32_str("more data", hash_val);
To perform a 64 bit FNV-1a hash:
#include "fnv.h"
Fnv64_t hash_val;
hash_val = fnv_64a_buf(buf, length_of_buf, FNV1A_64_INIT);
hash_val = fnv_64a_str("more data", hash_val);
=-=
chongo <Landon Curt Noll> /\oo/\
http://www.isthe.com/chongo
Share and Enjoy!

+ 249
- 0
lib/fnv/fnv.h View File

@ -0,0 +1,249 @@
/*
* fnv - Fowler/Noll/Vo- hash code
*
* @(#) $Revision: 5.4 $
* @(#) $Id: fnv.h,v 5.4 2009/07/30 22:49:13 chongo Exp $
* @(#) $Source: /usr/local/src/cmd/fnv/RCS/fnv.h,v $
*
***
*
* Fowler/Noll/Vo- hash
*
* The basis of this hash algorithm was taken from an idea sent
* as reviewer comments to the IEEE POSIX P1003.2 committee by:
*
* Phong Vo (http://www.research.att.com/info/kpv/)
* Glenn Fowler (http://www.research.att.com/~gsf/)
*
* In a subsequent ballot round:
*
* Landon Curt Noll (http://www.isthe.com/chongo/)
*
* improved on their algorithm. Some people tried this hash
* and found that it worked rather well. In an EMail message
* to Landon, they named it the ``Fowler/Noll/Vo'' or FNV hash.
*
* FNV hashes are designed to be fast while maintaining a low
* collision rate. The FNV speed allows one to quickly hash lots
* of data while maintaining a reasonable collision rate. See:
*
* http://www.isthe.com/chongo/tech/comp/fnv/index.html
*
* for more details as well as other forms of the FNV hash.
*
***
*
* NOTE: The FNV-0 historic hash is not recommended. One should use
* the FNV-1 hash instead.
*
* To use the 32 bit FNV-0 historic hash, pass FNV0_32_INIT as the
* Fnv32_t hashval argument to fnv_32_buf() or fnv_32_str().
*
* To use the 64 bit FNV-0 historic hash, pass FNV0_64_INIT as the
* Fnv64_t hashval argument to fnv_64_buf() or fnv_64_str().
*
* To use the recommended 32 bit FNV-1 hash, pass FNV1_32_INIT as the
* Fnv32_t hashval argument to fnv_32_buf() or fnv_32_str().
*
* To use the recommended 64 bit FNV-1 hash, pass FNV1_64_INIT as the
* Fnv64_t hashval argument to fnv_64_buf() or fnv_64_str().
*
* To use the recommended 32 bit FNV-1a hash, pass FNV1_32A_INIT as the
* Fnv32_t hashval argument to fnv_32a_buf() or fnv_32a_str().
*
* To use the recommended 64 bit FNV-1a hash, pass FNV1A_64_INIT as the
* Fnv64_t hashval argument to fnv_64a_buf() or fnv_64a_str().
*
***
*
* Please do not copyright this code. This code is in the public domain.
*
* LANDON CURT NOLL DISCLAIMS ALL WARRANTIES WITH REGARD TO THIS SOFTWARE,
* INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO
* EVENT SHALL LANDON CURT NOLL BE LIABLE FOR ANY SPECIAL, INDIRECT OR
* CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF
* USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR
* OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
* PERFORMANCE OF THIS SOFTWARE.
*
* By:
* chongo <Landon Curt Noll> /\oo/\
* http://www.isthe.com/chongo/
*
* Share and Enjoy! :-)
*/
#if !defined(__FNV_H__)
#define __FNV_H__
#include <sys/types.h>
#define FNV_VERSION "5.0.2" /* @(#) FNV Version */
/*
* 32 bit FNV-0 hash type
*/
typedef u_int32_t Fnv32_t;
/*
* 32 bit FNV-0 zero initial basis
*
* This historic hash is not recommended. One should use
* the FNV-1 hash and initial basis instead.
*/
#define FNV0_32_INIT ((Fnv32_t)0)
/*
* 32 bit FNV-1 and FNV-1a non-zero initial basis
*
* The FNV-1 initial basis is the FNV-0 hash of the following 32 octets:
*
* chongo <Landon Curt Noll> /\../\
*
* NOTE: The \'s above are not back-slashing escape characters.
* They are literal ASCII backslash 0x5c characters.
*
* NOTE: The FNV-1a initial basis is the same value as FNV-1 by definition.
*/
#define FNV1_32_INIT ((Fnv32_t)0x811c9dc5)
#define FNV1_32A_INIT FNV1_32_INIT
/*
* determine how 64 bit unsigned values are represented
*/
#include "longlong.h"
/*
* 64 bit FNV-0 hash
*/
#if defined(HAVE_64BIT_LONG_LONG)
typedef u_int64_t Fnv64_t;
#else /* HAVE_64BIT_LONG_LONG */
typedef struct {
u_int32_t w32[2]; /* w32[0] is low order, w32[1] is high order word */
} Fnv64_t;
#endif /* HAVE_64BIT_LONG_LONG */
/*
* 64 bit FNV-0 zero initial basis
*
* This historic hash is not recommended. One should use
* the FNV-1 hash and initial basis instead.
*/
#if defined(HAVE_64BIT_LONG_LONG)
#define FNV0_64_INIT ((Fnv64_t)0)
#else /* HAVE_64BIT_LONG_LONG */
extern const Fnv64_t fnv0_64_init;
#define FNV0_64_INIT (fnv0_64_init)
#endif /* HAVE_64BIT_LONG_LONG */
/*
* 64 bit FNV-1 non-zero initial basis
*
* The FNV-1 initial basis is the FNV-0 hash of the following 32 octets:
*
* chongo <Landon Curt Noll> /\../\
*
* NOTE: The \'s above are not back-slashing escape characters.
* They are literal ASCII backslash 0x5c characters.
*
* NOTE: The FNV-1a initial basis is the same value as FNV-1 by definition.
*/
#if defined(HAVE_64BIT_LONG_LONG)
#define FNV1_64_INIT ((Fnv64_t)0xcbf29ce484222325ULL)
#define FNV1A_64_INIT FNV1_64_INIT
#else /* HAVE_64BIT_LONG_LONG */
/* BUGFIX: fnv1_64_init was declared without its Fnv64_t type — an
 * implicit-int declaration, invalid in C99 and wrong for the two-word
 * struct Fnv64_t in any case.  Both basis values are defined
 * out-of-line when no native 64 bit type exists. */
extern const Fnv64_t fnv1_64_init;
extern const Fnv64_t fnv1a_64_init;
#define FNV1_64_INIT (fnv1_64_init)
#define FNV1A_64_INIT (fnv1a_64_init)
#endif /* HAVE_64BIT_LONG_LONG */
/*
* hash types
*/
enum fnv_type {
FNV_NONE = 0, /* invalid FNV hash type */
FNV0_32 = 1, /* FNV-0 32 bit hash */
FNV1_32 = 2, /* FNV-1 32 bit hash */
FNV1a_32 = 3, /* FNV-1a 32 bit hash */
FNV0_64 = 4, /* FNV-0 64 bit hash */
FNV1_64 = 5, /* FNV-1 64 bit hash */
FNV1a_64 = 6, /* FNV-1a 64 bit hash */
};
/*
 * these test vectors are used as part of the FNV test suite
*/
struct test_vector {
void *buf; /* start of test vector buffer */
int len; /* length of test vector */
};
struct fnv0_32_test_vector {
struct test_vector *test; /* test vector buffer to hash */
Fnv32_t fnv0_32; /* expected FNV-0 32 bit hash value */
};
struct fnv1_32_test_vector {
struct test_vector *test; /* test vector buffer to hash */
Fnv32_t fnv1_32; /* expected FNV-1 32 bit hash value */
};
struct fnv1a_32_test_vector {
struct test_vector *test; /* test vector buffer to hash */
Fnv32_t fnv1a_32; /* expected FNV-1a 32 bit hash value */
};
struct fnv0_64_test_vector {
struct test_vector *test; /* test vector buffer to hash */
Fnv64_t fnv0_64; /* expected FNV-0 64 bit hash value */
};
struct fnv1_64_test_vector {
struct test_vector *test; /* test vector buffer to hash */
Fnv64_t fnv1_64; /* expected FNV-1 64 bit hash value */
};
struct fnv1a_64_test_vector {
struct test_vector *test; /* test vector buffer to hash */
Fnv64_t fnv1a_64; /* expected FNV-1a 64 bit hash value */
};
/*
* external functions
*/
/* hash_32.c */
extern Fnv32_t fnv_32_buf(void *buf, size_t len, Fnv32_t hashval);
extern Fnv32_t fnv_32_str(char *buf, Fnv32_t hashval);
/* hash_32a.c */
extern Fnv32_t fnv_32a_buf(void *buf, size_t len, Fnv32_t hashval);
extern Fnv32_t fnv_32a_str(char *buf, Fnv32_t hashval);
/* hash_64.c */
extern Fnv64_t fnv_64_buf(void *buf, size_t len, Fnv64_t hashval);
extern Fnv64_t fnv_64_str(char *buf, Fnv64_t hashval);
/* hash_64a.c */
extern Fnv64_t fnv_64a_buf(void *buf, size_t len, Fnv64_t hashval);
extern Fnv64_t fnv_64a_str(char *buf, Fnv64_t hashval);
/* test_fnv.c */
extern struct test_vector fnv_test_str[];
extern struct fnv0_32_test_vector fnv0_32_vector[];
extern struct fnv1_32_test_vector fnv1_32_vector[];
extern struct fnv1a_32_test_vector fnv1a_32_vector[];
extern struct fnv0_64_test_vector fnv0_64_vector[];
extern struct fnv1_64_test_vector fnv1_64_vector[];
extern struct fnv1a_64_test_vector fnv1a_64_vector[];
extern void unknown_hash_type(char *prog, enum fnv_type type, int code);
extern void print_fnv32(Fnv32_t hval, Fnv32_t mask, int verbose, char *arg);
extern void print_fnv64(Fnv64_t hval, Fnv64_t mask, int verbose, char *arg);
#endif /* __FNV_H__ */

+ 467
- 0
lib/fnv/fnv32.c View File

@ -0,0 +1,467 @@
/*
* fnv32 - 32 bit Fowler/Noll/Vo hash of a buffer or string
*
* @(#) $Revision: 5.5 $
* @(#) $Id: fnv32.c,v 5.5 2012/03/21 01:38:12 chongo Exp $
* @(#) $Source: /usr/local/src/cmd/fnv/RCS/fnv32.c,v $
*
***
*
* Fowler/Noll/Vo hash
*
* The basis of this hash algorithm was taken from an idea sent
* as reviewer comments to the IEEE POSIX P1003.2 committee by:
*
* Phong Vo (http://www.research.att.com/info/kpv/)
* Glenn Fowler (http://www.research.att.com/~gsf/)
*
* In a subsequent ballot round:
*
* Landon Curt Noll (http://www.isthe.com/chongo/)
*
* improved on their algorithm. Some people tried this hash
* and found that it worked rather well. In an EMail message
* to Landon, they named it the ``Fowler/Noll/Vo'' or FNV hash.
*
* FNV hashes are designed to be fast while maintaining a low
* collision rate. The FNV speed allows one to quickly hash lots
* of data while maintaining a reasonable collision rate. See:
*
* http://www.isthe.com/chongo/tech/comp/fnv/index.html
*
* for more details as well as other forms of the FNV hash.
*
***
*
* Please do not copyright this code. This code is in the public domain.
*
* LANDON CURT NOLL DISCLAIMS ALL WARRANTIES WITH REGARD TO THIS SOFTWARE,
* INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO
* EVENT SHALL LANDON CURT NOLL BE LIABLE FOR ANY SPECIAL, INDIRECT OR
* CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF
* USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR
* OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
* PERFORMANCE OF THIS SOFTWARE.
*
* By:
* chongo <Landon Curt Noll> /\oo/\
* http://www.isthe.com/chongo/
*
* Share and Enjoy! :-)
*/
#include <stdio.h>
#include <unistd.h>
#include <stdlib.h>
#include <sys/types.h>
#include <sys/stat.h>
#include <fcntl.h>
#include <string.h>
#include "longlong.h"
#include "fnv.h"
#define WIDTH 32 /* bit width of hash */
#define BUF_SIZE (32*1024) /* number of bytes to hash at a time */
static char *usage =
"usage: %s [-b bcnt] [-m] [-s arg] [-t code] [-v] [arg ...]\n"
"\n"
"\t-b bcnt\tmask off all but the lower bcnt bits (default 32)\n"
"\t-m\tmultiple hashes, one per line for each arg\n"
"\t-s\thash arg as a string (ignoring terminating NUL bytes)\n"
"\t-t code\t test hash code: (0 ==> generate test vectors\n"
"\t\t\t\t 1 ==> validate against FNV test vectors)\n"
"\t-v\tverbose mode, print arg after hash (implies -m)\n"
"\targ\tstring (if -s was given) or filename (default stdin)\n"
"\n"
"\tNOTE: Programs that begin with fnv0 implement the FNV-0 hash.\n"
"\t The FNV-0 hash is historic FNV algorithm that is now deprecated.\n"
"\n"
"\tSee http://www.isthe.com/chongo/tech/comp/fnv/index.html for more info.\n"
"\n"
"\t@(#) FNV Version: %s\n";
static char *program; /* our name */
/*
 * test_fnv32 - test the FNV32 hash
 *
 * given:
 *	hash_type	type of FNV hash to test
 *	init_hval	initial hash value
 *	mask		lower bit mask to apply to hash values
 *	v_flag		1 => print test failure info on stderr
 *	code		0 ==> generate FNV test vectors
 *			1 ==> validate against FNV test vectors
 *
 * returns:	0 ==> OK, else number of the test vector that failed
 *		(test vector numbering starts at 1)
 */
static int
test_fnv32(enum fnv_type hash_type, Fnv32_t init_hval,
	   Fnv32_t mask, int v_flag, int code)
{
    struct test_vector *t;	/* FNV test vector */
    Fnv32_t hval;		/* current hash value */
    int tstnum;			/* test vector that failed, starting at 1 */

    /*
     * print preamble if generating test vectors
     */
    if (code == 0) {
	switch (hash_type) {
	case FNV0_32:
	    printf("struct fnv0_32_test_vector fnv0_32_vector[] = {\n");
	    break;
	case FNV1_32:
	    printf("struct fnv1_32_test_vector fnv1_32_vector[] = {\n");
	    break;
	case FNV1a_32:
	    printf("struct fnv1a_32_test_vector fnv1a_32_vector[] = {\n");
	    break;
	default:
	    unknown_hash_type(program, hash_type, 12);	/* exit(12) */
	    /*NOTREACHED*/
	}
    }

    /*
     * loop thru all test vectors
     */
    for (t = fnv_test_str, tstnum = 1; t->buf != NULL; ++t, ++tstnum) {

	/*
	 * compute the FNV hash of this test vector
	 */
	hval = init_hval;
	switch (hash_type) {
	case FNV0_32:
	case FNV1_32:
	    hval = fnv_32_buf(t->buf, t->len, hval);
	    break;
	case FNV1a_32:
	    hval = fnv_32a_buf(t->buf, t->len, hval);
	    break;
	default:
	    unknown_hash_type(program, hash_type, 13);	/* exit(13) */
	    /*NOTREACHED*/
	}

	/*
	 * print (code 0) or validate (code 1) the vector
	 */
	switch (code) {
	case 0:		/* generate the test vector */
	    /* cast keeps the %lx conversion valid when Fnv32_t is
	     * narrower than unsigned long */
	    printf(" { &fnv_test_str[%d], (Fnv32_t) 0x%08lxUL },\n",
		   tstnum-1, (unsigned long)(hval & mask));
	    break;

	case 1:		/* validate against the test vector table */
	    /* BUGFIX: the table entry is the EXPECTED value and hval is
	     * the GENERATED one; the failure messages previously printed
	     * them under swapped labels. */
	    switch (hash_type) {
	    case FNV0_32:
		if ((hval&mask) != (fnv0_32_vector[tstnum-1].fnv0_32 & mask)) {
		    if (v_flag) {
			fprintf(stderr, "%s: failed fnv0_32 test # %d\n",
				program, tstnum);
			fprintf(stderr, "%s: test # 1 is 1st test\n", program);
			fprintf(stderr,
				"%s: expected 0x%08lx != generated: 0x%08lx\n",
				program,
				(unsigned long)(fnv0_32_vector[tstnum-1].fnv0_32 & mask),
				(unsigned long)(hval&mask));
		    }
		    return tstnum;
		}
		break;
	    case FNV1_32:
		if ((hval&mask) != (fnv1_32_vector[tstnum-1].fnv1_32 & mask)) {
		    if (v_flag) {
			fprintf(stderr, "%s: failed fnv1_32 test # %d\n",
				program, tstnum);
			fprintf(stderr, "%s: test # 1 is 1st test\n", program);
			fprintf(stderr,
				"%s: expected 0x%08lx != generated: 0x%08lx\n",
				program,
				(unsigned long)(fnv1_32_vector[tstnum-1].fnv1_32 & mask),
				(unsigned long)(hval&mask));
		    }
		    return tstnum;
		}
		break;
	    case FNV1a_32:
		if ((hval&mask) != (fnv1a_32_vector[tstnum-1].fnv1a_32 & mask)) {
		    if (v_flag) {
			fprintf(stderr, "%s: failed fnv1a_32 test # %d\n",
				program, tstnum);
			fprintf(stderr, "%s: test # 1 is 1st test\n", program);
			fprintf(stderr,
				"%s: expected 0x%08lx != generated: 0x%08lx\n",
				program,
				(unsigned long)(fnv1a_32_vector[tstnum-1].fnv1a_32 & mask),
				(unsigned long)(hval&mask));
		    }
		    return tstnum;
		}
		break;
	    }
	    break;

	default:
	    fprintf(stderr, "%s: -m %d not implemented yet\n", program, code);
	    exit(14);
	}
    }

    /*
     * print completion if generating test vectors
     */
    if (code == 0) {
	printf(" { NULL, 0 }\n");
	printf("};\n");
    }

    /*
     * no failures, return code 0 ==> all OK
     */
    return 0;
}
/*
* main - the main function
*
* See the above usage for details.
*/
int
main(int argc, char *argv[])
{
char buf[BUF_SIZE+1]; /* read buffer */
int readcnt; /* number of characters written */
Fnv32_t hval; /* current hash value */
int s_flag = 0; /* 1 => -s was given, hash args as strings */
int m_flag = 0; /* 1 => print multiple hashes, one per arg */
int v_flag = 0; /* 1 => verbose hash print */
int b_flag = WIDTH; /* -b flag value */
int t_flag = -1; /* FNV test vector code (0=>print, 1=>test) */
enum fnv_type hash_type = FNV_NONE; /* type of FNV hash to perform */
Fnv32_t bmask; /* mask to apply to output */
extern char *optarg; /* option argument */
extern int optind; /* argv index of the next arg */
int fd; /* open file to process */
char *p;
int i;
/*
* parse args
*/
program = argv[0];
while ((i = getopt(argc, argv, "b:mst:v")) != -1) {
switch (i) {
case 'b': /* bcnt bit mask count */
b_flag = atoi(optarg);
break;
case 'm': /* print multiple hashes, one per arg */
m_flag = 1;
break;
case 's': /* hash args as strings */
s_flag = 1;
break;
case 't': /* FNV test vector code */
t_flag = atoi(optarg);
if (t_flag < 0 || t_flag > 1) {
fprintf(stderr, "%s: -t code must be 0 or 1\n", program);
fprintf(stderr, usage, program, FNV_VERSION);
exit(1);
}
m_flag = 1;
break;
case 'v': /* verbose hash print */
m_flag = 1;
v_flag = 1;
break;
default:
fprintf(stderr, usage, program, FNV_VERSION);
exit(1);
}
}
/* -t code incompatible with -b, -m and args */
if (t_flag >= 0) {
if (b_flag != WIDTH) {
fprintf(stderr, "%s: -t code incompatible with -b\n", program);
exit(2);
}
if (s_flag != 0) {
fprintf(stderr, "%s: -t code incompatible with -s\n", program);
exit(3);
}
if (optind < argc) {
fprintf(stderr, "%s: -t code incompatible args\n", program);
exit(4);
}
}
/* -s requires at least 1 arg */
if (s_flag && optind >= argc) {
fprintf(stderr, usage, program, FNV_VERSION);
exit(5);
}
/* limit -b values */
if (b_flag < 0 || b_flag > WIDTH) {
fprintf(stderr, "%s: -b bcnt: %d must be >= 0 and < %d\n",
program, b_flag, WIDTH);
exit(6);
}
if (b_flag == WIDTH) {
bmask = (Fnv32_t)0xffffffff;
} else {
bmask = (Fnv32_t)((1 << b_flag) - 1);
}
/*
* start with the initial basis depending on the hash type
*/
p = strrchr(program, '/');
if (p == NULL) {
p = program;
} else {
++p;
}
if (strcmp(p, "fnv032") == 0) {
/* using non-recommended FNV-0 and zero initial basis */
hval = FNV0_32_INIT;
hash_type = FNV0_32;
} else if (strcmp(p, "fnv132") == 0) {
/* using FNV-1 and non-zero initial basis */
hval = FNV1_32_INIT;
hash_type = FNV1_32;
} else if (strcmp(p, "fnv1a32") == 0) {
/* start with the FNV-1a initial basis */
hval = FNV1_32A_INIT;
hash_type = FNV1a_32;
} else {
fprintf(stderr, "%s: unknown program name, unknown hash type\n",
program);
exit(7);
}
/*
* FNV test vector processing, if needed
*/
if (t_flag >= 0) {
int code; /* test vector that failed, starting at 1 */
/*
* perform all tests
*/
code = test_fnv32(hash_type, hval, bmask, v_flag, t_flag);
/*
* evaluate the tests
*/
if (code == 0) {
if (v_flag) {
printf("passed\n");
}
exit(0);
} else {
printf("failed vector (1 is 1st test): %d\n", code);
exit(8);
}
}
/*
* string hashing
*/
if (s_flag) {
/* hash any other strings */
for (i=optind; i < argc; ++i) {
switch (hash_type) {
case FNV0_32:
case FNV1_32:
hval = fnv_32_str(argv[i], hval);
break;
case FNV1a_32:
hval = fnv_32a_str(argv[i], hval);
break;
default:
unknown_hash_type(program, hash_type, 9); /* exit(9) */
/*NOTREACHED*/
}
if (m_flag) {
print_fnv32(hval, bmask, v_flag, argv[i]);
}
}
/*
* file hashing
*/
} else {
/*
* case: process only stdin
*/
if (optind >= argc) {
/* case: process only stdin */
while ((readcnt = read(0, buf, BUF_SIZE)) > 0) {
switch (hash_type) {
case FNV0_32:
case FNV1_32:
hval = fnv_32_buf(buf, readcnt, hval);
break;
case FNV1a_32:
hval = fnv_32a_buf(buf, readcnt, hval);
break;
default:
unknown_hash_type(program, hash_type, 10); /* exit(10) */
/*NOTREACHED*/
}
}
if (m_flag) {
print_fnv32(hval, bmask, v_flag, "(stdin)");
}
} else {
/*
* process any other files
*/
for (i=optind; i < argc; ++i) {
/* open the file */
fd = open(argv[i], O_RDONLY);
if (fd < 0) {
fprintf(stderr, "%s: unable to open file: %s\n",
program, argv[i]);
exit(4);
}
/* hash the file */
while ((readcnt = read(fd, buf, BUF_SIZE)) > 0) {
switch (hash_type) {
case FNV0_32:
case FNV1_32:
hval = fnv_32_buf(buf, readcnt, hval);
break;
case FNV1a_32:
hval = fnv_32a_buf(buf, readcnt, hval);
break;
default:
unknown_hash_type(program, hash_type, 11);/* exit(11) */
/*NOTREACHED*/
}
}
/* finish processing the file */
if (m_flag) {
print_fnv32(hval, bmask, v_flag, argv[i]);
}
close(fd);
}
}
}
/*
* report hash and exit
*/
if (!m_flag) {
print_fnv32(hval, bmask, v_flag, "");
}
return 0; /* exit(0); */
}

+ 591
- 0
lib/fnv/fnv64.c View File

@ -0,0 +1,591 @@
/*
* fnv_64 - 64 bit Fowler/Noll/Vo hash of a buffer or string
*
* @(#) $Revision: 5.5 $
* @(#) $Id: fnv64.c,v 5.5 2012/03/21 01:38:12 chongo Exp $
* @(#) $Source: /usr/local/src/cmd/fnv/RCS/fnv64.c,v $
*
***
*
* Fowler/Noll/Vo hash
*
* The basis of this hash algorithm was taken from an idea sent
* as reviewer comments to the IEEE POSIX P1003.2 committee by:
*
* Phong Vo (http://www.research.att.com/info/kpv/)
* Glenn Fowler (http://www.research.att.com/~gsf/)
*
* In a subsequent ballot round:
*
* Landon Curt Noll (http://www.isthe.com/chongo/)
*
* improved on their algorithm. Some people tried this hash
* and found that it worked rather well. In an EMail message
* to Landon, they named it the ``Fowler/Noll/Vo'' or FNV hash.
*
* FNV hashes are designed to be fast while maintaining a low
* collision rate. The FNV speed allows one to quickly hash lots
* of data while maintaining a reasonable collision rate. See:
*
* http://www.isthe.com/chongo/tech/comp/fnv/index.html
*
* for more details as well as other forms of the FNV hash.
*
***
*
* Please do not copyright this code. This code is in the public domain.
*
* LANDON CURT NOLL DISCLAIMS ALL WARRANTIES WITH REGARD TO THIS SOFTWARE,
* INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO
* EVENT SHALL LANDON CURT NOLL BE LIABLE FOR ANY SPECIAL, INDIRECT OR
* CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF
* USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR
* OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
* PERFORMANCE OF THIS SOFTWARE.
*
* By:
* chongo <Landon Curt Noll> /\oo/\
* http://www.isthe.com/chongo/
*
* Share and Enjoy! :-)
*/
#include <stdio.h>
#include <unistd.h>
#include <stdlib.h>
#include <sys/types.h>
#include <sys/stat.h>
#include <fcntl.h>
#include <string.h>
#include "longlong.h"
#include "fnv.h"
#define WIDTH 64 /* bit width of hash */
#define BUF_SIZE (32*1024) /* number of bytes to hash at a time */
static char *usage =
"usage: %s [-b bcnt] [-m] [-s arg] [-t code] [-v] [arg ...]\n"
"\n"
"\t-b bcnt\tmask off all but the lower bcnt bits (default 64)\n"
"\t-m\tmultiple hashes, one per line for each arg\n"
"\t-s\thash arg as a string (ignoring terminating NUL bytes)\n"
"\t-t code\t test hash code: (0 ==> generate test vectors\n"
"\t\t\t\t 1 ==> validate against FNV test vectors)\n"
"\t-v\tverbose mode, print arg after hash (implies -m)\n"
"\targ\tstring (if -s was given) or filename (default stdin)\n"
"\n"
"\tNOTE: Programs that begin with fnv0 implement the FNV-0 hash.\n"
"\t The FNV-0 hash is historic FNV algorithm that is now deprecated.\n"
"\n"
"\tSee http://www.isthe.com/chongo/tech/comp/fnv/index.html for more info.\n"
"\n"
"\t@(#) FNV Version: %s\n";
static char *program; /* our name */
/*
 * test_fnv64 - test the FNV64 hash
 *
 * given:
 *	hash_type	type of FNV hash to test
 *	init_hval	initial hash value
 *	mask		lower bit mask to apply to each hash before comparing
 *	v_flag		1 => print test failure info on stderr
 *	code		0 ==> generate FNV test vectors
 *			1 ==> validate against FNV test vectors
 *
 * returns:	0 ==> OK, else test vector failure number (1 is the 1st test)
 */
static int
test_fnv64(enum fnv_type hash_type, Fnv64_t init_hval,
	   Fnv64_t mask, int v_flag, int code)
{
    struct test_vector *t;	/* FNV test vector */
    Fnv64_t hval;		/* current hash value */
    int tstnum;			/* test vector that failed, starting at 1 */

    /*
     * print preamble if generating test vectors
     */
    if (code == 0) {
	switch (hash_type) {
	case FNV0_64:
	    printf("struct fnv0_64_test_vector fnv0_64_vector[] = {\n");
	    break;
	case FNV1_64:
	    printf("struct fnv1_64_test_vector fnv1_64_vector[] = {\n");
	    break;
	case FNV1a_64:
	    printf("struct fnv1a_64_test_vector fnv1a_64_vector[] = {\n");
	    break;
	default:
	    unknown_hash_type(program, hash_type, 12);	/* exit(12) */
	    /*NOTREACHED*/
	}
    }

    /*
     * loop thru all test vectors
     */
    for (t = fnv_test_str, tstnum = 1; t->buf != NULL; ++t, ++tstnum) {

	/*
	 * compute the FNV hash of this test vector
	 */
	hval = init_hval;
	switch (hash_type) {
	case FNV0_64:
	case FNV1_64:
	    hval = fnv_64_buf(t->buf, t->len, hval);
	    break;
	case FNV1a_64:
	    hval = fnv_64a_buf(t->buf, t->len, hval);
	    break;
	default:
	    unknown_hash_type(program, hash_type, 13);	/* exit(13) */
	    /*NOTREACHED*/
	}

	/*
	 * print (code 0) or validate (code 1) the vector
	 */
#if defined(HAVE_64BIT_LONG_LONG)

	/*
	 * HAVE_64BIT_LONG_LONG testing
	 */
	switch (code) {
	case 0:		/* generate the test vector */
	    printf(" { &fnv_test_str[%d], (Fnv64_t) 0x%016llxULL },\n",
		   tstnum-1, hval & mask);
	    break;

	case 1:		/* validate against test vector */
	    switch (hash_type) {
	    case FNV0_64:
		if ((hval&mask) != (fnv0_64_vector[tstnum-1].fnv0_64 & mask)) {
		    if (v_flag) {
			fprintf(stderr, "%s: failed fnv0_64 test # %d\n",
				program, tstnum);
			fprintf(stderr, "%s: test # 1 is 1st test\n", program);
			/* BUGFIX: the stored vector is the expected value and
			 * the computed hash is the generated value (the two
			 * arguments were previously swapped) */
			fprintf(stderr,
			    "%s: expected 0x%016llx != generated: 0x%016llx\n",
			    program,
			    (fnv0_64_vector[tstnum-1].fnv0_64 & mask),
			    (hval&mask));
		    }
		    return tstnum;
		}
		break;
	    case FNV1_64:
		if ((hval&mask) != (fnv1_64_vector[tstnum-1].fnv1_64 & mask)) {
		    if (v_flag) {
			fprintf(stderr, "%s: failed fnv1_64 test # %d\n",
				program, tstnum);
			fprintf(stderr, "%s: test # 1 is 1st test\n", program);
			fprintf(stderr,
			    "%s: expected 0x%016llx != generated: 0x%016llx\n",
			    program,
			    (fnv1_64_vector[tstnum-1].fnv1_64 & mask),
			    (hval&mask));
		    }
		    return tstnum;
		}
		break;
	    case FNV1a_64:
		if ((hval&mask) != (fnv1a_64_vector[tstnum-1].fnv1a_64 &mask)) {
		    if (v_flag) {
			fprintf(stderr, "%s: failed fnv1a_64 test # %d\n",
				program, tstnum);
			fprintf(stderr, "%s: test # 1 is 1st test\n", program);
			fprintf(stderr,
			    "%s: expected 0x%016llx != generated: 0x%016llx\n",
			    program,
			    (fnv1a_64_vector[tstnum-1].fnv1a_64 & mask),
			    (hval&mask));
		    }
		    return tstnum;
		}
		break;
	    }
	    break;

	default:
	    /* BUGFIX: code comes from the -t option, not -m */
	    fprintf(stderr, "%s: -t %d not implemented yet\n", program, code);
	    exit(14);
	}

#else /* HAVE_64BIT_LONG_LONG */

	/*
	 * non HAVE_64BIT_LONG_LONG testing - Fnv64_t is a pair of 32 bit
	 * words: w32[0] holds the low order bits, w32[1] the high order bits
	 */
	switch (code) {
	case 0:		/* generate the test vector */
	    printf(" { &fnv_test_str[%d], "
		   "(Fnv64_t) {0x%08lxUL, 0x%08lxUL} },\n",
		   tstnum-1,
		   (hval.w32[0] & mask.w32[0]),
		   (hval.w32[1] & mask.w32[1]));
	    break;

	case 1:		/* validate against test vector */
	    switch (hash_type) {
	    case FNV0_64:
		/* BUGFIX: a 64 bit hash mismatches when EITHER 32 bit half
		 * differs, so the two comparisons combine with || (was &&,
		 * which only failed a vector when both halves differed) */
		if (((hval.w32[0] & mask.w32[0]) !=
		     (fnv0_64_vector[tstnum-1].fnv0_64.w32[0] &
		      mask.w32[0])) ||
		    ((hval.w32[1] & mask.w32[1]) !=
		     (fnv0_64_vector[tstnum-1].fnv0_64.w32[1] &
		      mask.w32[1]))) {
		    if (v_flag) {
			fprintf(stderr, "%s: failed fnv0_64 test # %d\n",
				program, tstnum);
			fprintf(stderr, "%s: test # 1 is 1st test\n", program);
			/* BUGFIX: %08lx (not %08llx) - the w32[] halves are
			 * unsigned long; also print the stored vector as
			 * "expected" and the computed hash as "generated" */
			fprintf(stderr,
				"%s: expected 0x%08lx%08lx != "
				"generated: 0x%08lx%08lx\n",
				program,
				((fnv0_64_vector[tstnum-1].fnv0_64.w32[0] &
				  mask.w32[0])),
				((fnv0_64_vector[tstnum-1].fnv0_64.w32[1] &
				  mask.w32[1])),
				(hval.w32[0] & mask.w32[0]),
				(hval.w32[1] & mask.w32[1]));
		    }
		    return tstnum;
		}
		break;
	    case FNV1_64:
		if (((hval.w32[0] & mask.w32[0]) !=
		     (fnv1_64_vector[tstnum-1].fnv1_64.w32[0] &
		      mask.w32[0])) ||
		    ((hval.w32[1] & mask.w32[1]) !=
		     (fnv1_64_vector[tstnum-1].fnv1_64.w32[1] &
		      mask.w32[1]))) {
		    if (v_flag) {
			fprintf(stderr, "%s: failed fnv1_64 test # %d\n",
				program, tstnum);
			fprintf(stderr, "%s: test # 1 is 1st test\n", program);
			fprintf(stderr,
				"%s: expected 0x%08lx%08lx != "
				"generated: 0x%08lx%08lx\n",
				program,
				((fnv1_64_vector[tstnum-1].fnv1_64.w32[0] &
				  mask.w32[0])),
				((fnv1_64_vector[tstnum-1].fnv1_64.w32[1] &
				  mask.w32[1])),
				(hval.w32[0] & mask.w32[0]),
				(hval.w32[1] & mask.w32[1]));
		    }
		    return tstnum;
		}
		break;
	    case FNV1a_64:
		if (((hval.w32[0] & mask.w32[0]) !=
		     (fnv1a_64_vector[tstnum-1].fnv1a_64.w32[0] &
		      mask.w32[0])) ||
		    ((hval.w32[1] & mask.w32[1]) !=
		     (fnv1a_64_vector[tstnum-1].fnv1a_64.w32[1] &
		      mask.w32[1]))) {
		    if (v_flag) {
			fprintf(stderr, "%s: failed fnv1a_64 test # %d\n",
				program, tstnum);
			fprintf(stderr, "%s: test # 1 is 1st test\n", program);
			fprintf(stderr,
				"%s: expected 0x%08lx%08lx != "
				"generated: 0x%08lx%08lx\n",
				program,
				((fnv1a_64_vector[tstnum-1].fnv1a_64.w32[0] &
				  mask.w32[0])),
				((fnv1a_64_vector[tstnum-1].fnv1a_64.w32[1] &
				  mask.w32[1])),
				(hval.w32[0] & mask.w32[0]),
				(hval.w32[1] & mask.w32[1]));
		    }
		    return tstnum;
		}
		break;
	    }
	    break;

	default:
	    /* BUGFIX: code comes from the -t option, not -m */
	    fprintf(stderr, "%s: -t %d not implemented yet\n", program, code);
	    exit(15);
	}
#endif /* HAVE_64BIT_LONG_LONG */
    }

    /*
     * print completion if generating test vectors
     */
    if (code == 0) {
#if defined(HAVE_64BIT_LONG_LONG)
	printf(" { NULL, (Fnv64_t) 0 }\n");
#else /* HAVE_64BIT_LONG_LONG */
	printf(" { NULL, (Fnv64_t) {0,0} }\n");
#endif /* HAVE_64BIT_LONG_LONG */
	printf("};\n");
    }

    /*
     * no failures, return code 0 ==> all OK
     */
    return 0;
}
/*
 * main - the main function
 *
 * Parses the command line, selects the FNV-0/FNV-1/FNV-1a 64 bit variant
 * from the program name, then either runs/generates the test vectors (-t)
 * or hashes the args as strings (-s), the named files, or stdin.
 * See the usage string above for details.
 */
int
main(int argc, char *argv[])
{
    char buf[BUF_SIZE+1];	/* read buffer */
    int readcnt;		/* number of characters read */
    Fnv64_t hval;		/* current hash value */
    int s_flag = 0;		/* 1 => -s was given, hash args as strings */
    int m_flag = 0;		/* 1 => print multiple hashes, one per arg */
    int v_flag = 0;		/* 1 => verbose hash print */
    int b_flag = WIDTH;		/* -b flag value */
    int t_flag = -1;		/* FNV test vector code (0=>print, 1=>test) */
    enum fnv_type hash_type = FNV_NONE;	/* type of FNV hash to perform */
    Fnv64_t bmask;		/* mask to apply to output */
    extern char *optarg;	/* option argument */
    extern int optind;		/* argv index of the next arg */
    int fd;			/* open file to process */
    char *p;
    int i;

    /*
     * parse args
     */
    program = argv[0];
    while ((i = getopt(argc, argv, "b:mst:v")) != -1) {
	switch (i) {
	case 'b':	/* bcnt bit mask count */
	    b_flag = atoi(optarg);
	    break;
	case 'm':	/* print multiple hashes, one per arg */
	    m_flag = 1;
	    break;
	case 's':	/* hash args as strings */
	    s_flag = 1;
	    break;
	case 't':	/* FNV test vector code */
	    t_flag = atoi(optarg);
	    if (t_flag < 0 || t_flag > 1) {
		fprintf(stderr, "%s: -t code must be 0 or 1\n", program);
		fprintf(stderr, usage, program, FNV_VERSION);
		exit(1);
	    }
	    m_flag = 1;
	    break;
	case 'v':	/* verbose hash print */
	    m_flag = 1;
	    v_flag = 1;
	    break;
	default:
	    fprintf(stderr, usage, program, FNV_VERSION);
	    exit(1);
	}
    }

    /* -t code incompatible with -b, -m and args */
    if (t_flag >= 0) {
	if (b_flag != WIDTH) {
	    fprintf(stderr, "%s: -t code incompatible with -b\n", program);
	    exit(2);
	}
	if (s_flag != 0) {
	    fprintf(stderr, "%s: -t code incompatible with -s\n", program);
	    exit(3);
	}
	if (optind < argc) {
	    fprintf(stderr, "%s: -t code incompatible args\n", program);
	    exit(4);
	}
    }

    /* -s requires at least 1 arg */
    if (s_flag && optind >= argc) {
	fprintf(stderr, usage, program, FNV_VERSION);
	exit(5);
    }

    /* limit -b values (b_flag == WIDTH is valid: no masking) */
    if (b_flag < 0 || b_flag > WIDTH) {
	/* BUGFIX: the valid range is 0..WIDTH inclusive, so <= not < */
	fprintf(stderr, "%s: -b bcnt: %d must be >= 0 and <= %d\n",
		program, b_flag, WIDTH);
	exit(6);
    }
#if defined(HAVE_64BIT_LONG_LONG)
    if (b_flag == WIDTH) {
	bmask = (Fnv64_t)0xffffffffffffffffULL;
    } else {
	bmask = (Fnv64_t)((1ULL << b_flag) - 1ULL);
    }
#else /* HAVE_64BIT_LONG_LONG */
    /* w32[0] holds the low order 32 bits, w32[1] the high order 32 bits */
    if (b_flag == WIDTH) {
	bmask.w32[0] = 0xffffffffUL;
	bmask.w32[1] = 0xffffffffUL;
    } else if (b_flag >= WIDTH/2) {
	bmask.w32[0] = 0xffffffffUL;
	bmask.w32[1] = ((1UL << (b_flag-(WIDTH/2))) - 1UL);
    } else {
	bmask.w32[0] = ((1UL << b_flag) - 1UL);
	bmask.w32[1] = 0UL;
    }
#endif /* HAVE_64BIT_LONG_LONG */

    /*
     * start with the initial basis depending on the hash type,
     * which is selected by the basename this program was invoked as
     */
    p = strrchr(program, '/');
    if (p == NULL) {
	p = program;
    } else {
	++p;
    }
    if (strcmp(p, "fnv064") == 0 || strcmp(p, "no64bit_fnv064") == 0) {
	/* using non-recommended FNV-0 and zero initial basis */
	hval = FNV0_64_INIT;
	hash_type = FNV0_64;
    } else if (strcmp(p, "fnv164") == 0 || strcmp(p, "no64bit_fnv164") == 0) {
	/* using FNV-1 and non-zero initial basis */
	hval = FNV1_64_INIT;
	hash_type = FNV1_64;
    } else if (strcmp(p, "fnv1a64") == 0 || strcmp(p, "no64bit_fnv1a64") == 0) {
	/* start with the FNV-1a initial basis */
	hval = FNV1A_64_INIT;
	hash_type = FNV1a_64;
    } else {
	fprintf(stderr, "%s: unknown program name, unknown hash type\n",
		program);
	exit(7);
    }

    /*
     * FNV test vector processing, if needed
     */
    if (t_flag >= 0) {
	int code;	/* test vector that failed, starting at 1 */

	/*
	 * perform all tests
	 */
	code = test_fnv64(hash_type, hval, bmask, v_flag, t_flag);

	/*
	 * evaluate the tests
	 */
	if (code == 0) {
	    if (v_flag) {
		printf("passed\n");
	    }
	    exit(0);
	} else {
	    printf("failed vector (1 is 1st test): %d\n", code);
	    exit(8);
	}
    }

    /*
     * string hashing
     */
    if (s_flag) {

	/* hash any other strings */
	for (i=optind; i < argc; ++i) {
	    switch (hash_type) {
	    case FNV0_64:
	    case FNV1_64:
		hval = fnv_64_str(argv[i], hval);
		break;
	    case FNV1a_64:
		hval = fnv_64a_str(argv[i], hval);
		break;
	    default:
		unknown_hash_type(program, hash_type, 9);	/* exit(9) */
		/*NOTREACHED*/
	    }
	    if (m_flag) {
		print_fnv64(hval, bmask, v_flag, argv[i]);
	    }
	}

    /*
     * file hashing
     */
    } else {

	if (optind >= argc) {

	    /* case: process only stdin */
	    while ((readcnt = read(0, buf, BUF_SIZE)) > 0) {
		switch (hash_type) {
		case FNV0_64:
		case FNV1_64:
		    hval = fnv_64_buf(buf, readcnt, hval);
		    break;
		case FNV1a_64:
		    hval = fnv_64a_buf(buf, readcnt, hval);
		    break;
		default:
		    unknown_hash_type(program, hash_type, 10);	/* exit(10) */
		    /*NOTREACHED*/
		}
	    }
	    if (m_flag) {
		print_fnv64(hval, bmask, v_flag, "(stdin)");
	    }

	} else {

	    /*
	     * process any other files
	     */
	    for (i=optind; i < argc; ++i) {

		/* open the file */
		fd = open(argv[i], O_RDONLY);
		if (fd < 0) {
		    fprintf(stderr, "%s: unable to open file: %s\n",
			    program, argv[i]);
		    exit(4);
		}

		/* hash the file */
		while ((readcnt = read(fd, buf, BUF_SIZE)) > 0) {
		    switch (hash_type) {
		    case FNV0_64:
		    case FNV1_64:
			hval = fnv_64_buf(buf, readcnt, hval);
			break;
		    case FNV1a_64:
			hval = fnv_64a_buf(buf, readcnt, hval);
			break;
		    default:
			unknown_hash_type(program, hash_type, 11);/* exit(11) */
			/*NOTREACHED*/
		    }
		}

		/* finish processing the file */
		if (m_flag) {
		    print_fnv64(hval, bmask, v_flag, argv[i]);
		}
		close(fd);
	    }
	}
    }

    /*
     * report hash and exit
     */
    if (!m_flag) {
	print_fnv64(hval, bmask, v_flag, "");
    }
    return 0;	/* exit(0); */
}

+ 156
- 0
lib/fnv/hash_32.c View File

@ -0,0 +1,156 @@
/*
* hash_32 - 32 bit Fowler/Noll/Vo hash code
*
* @(#) $Revision: 5.1 $
* @(#) $Id: hash_32.c,v 5.1 2009/06/30 09:13:32 chongo Exp $
* @(#) $Source: /usr/local/src/cmd/fnv/RCS/hash_32.c,v $
*
***
*
* Fowler/Noll/Vo hash
*
* The basis of this hash algorithm was taken from an idea sent
* as reviewer comments to the IEEE POSIX P1003.2 committee by:
*
* Phong Vo (http://www.research.att.com/info/kpv/)
* Glenn Fowler (http://www.research.att.com/~gsf/)
*
* In a subsequent ballot round:
*
* Landon Curt Noll (http://www.isthe.com/chongo/)
*
* improved on their algorithm. Some people tried this hash
* and found that it worked rather well. In an EMail message
* to Landon, they named it the ``Fowler/Noll/Vo'' or FNV hash.
*
* FNV hashes are designed to be fast while maintaining a low
* collision rate. The FNV speed allows one to quickly hash lots
* of data while maintaining a reasonable collision rate. See:
*
* http://www.isthe.com/chongo/tech/comp/fnv/index.html
*
* for more details as well as other forms of the FNV hash.
***
*
* NOTE: The FNV-0 historic hash is not recommended. One should use
* the FNV-1 hash instead.
*
* To use the 32 bit FNV-0 historic hash, pass FNV0_32_INIT as the
* Fnv32_t hashval argument to fnv_32_buf() or fnv_32_str().
*
* To use the recommended 32 bit FNV-1 hash, pass FNV1_32_INIT as the
* Fnv32_t hashval argument to fnv_32_buf() or fnv_32_str().
*
***
*
* Please do not copyright this code. This code is in the public domain.
*
* LANDON CURT NOLL DISCLAIMS ALL WARRANTIES WITH REGARD TO THIS SOFTWARE,
* INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO
* EVENT SHALL LANDON CURT NOLL BE LIABLE FOR ANY SPECIAL, INDIRECT OR
* CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF
* USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR
* OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
* PERFORMANCE OF THIS SOFTWARE.
*
* By:
* chongo <Landon Curt Noll> /\oo/\
* http://www.isthe.com/chongo/
*
* Share and Enjoy! :-)
*/
#include <stdlib.h>
#include "fnv.h"
/*
* 32 bit magic FNV-0 and FNV-1 prime
*/
#define FNV_32_PRIME ((Fnv32_t)0x01000193)
/*
 * fnv_32_buf - perform a 32 bit Fowler/Noll/Vo hash on a buffer
 *
 * input:
 *	buf	- start of buffer to hash
 *	len	- length of buffer in octets
 *	hval	- previous hash value or 0 if first call
 *
 * returns:
 *	32 bit hash as a static hash type
 *
 * NOTE: To use the 32 bit FNV-0 historic hash, use FNV0_32_INIT as the hval
 *	 argument on the first call to either fnv_32_buf() or fnv_32_str().
 *
 * NOTE: To use the recommended 32 bit FNV-1 hash, use FNV1_32_INIT as the hval
 *	 argument on the first call to either fnv_32_buf() or fnv_32_str().
 */
Fnv32_t
fnv_32_buf(void *buf, size_t len, Fnv32_t hval)
{
    unsigned char *data = (unsigned char *)buf;	/* octets to hash */
    size_t i;					/* octet index */

    /*
     * FNV-1 hash each octet in the buffer: multiply first, then xor
     */
    for (i = 0; i < len; ++i) {

	/* multiply by the 32 bit FNV magic prime mod 2^32 */
#if defined(NO_FNV_GCC_OPTIMIZATION)
	hval *= FNV_32_PRIME;
#else
	/* shift/add equivalent of the multiply, often faster on gcc */
	hval += (hval<<1) + (hval<<4) + (hval<<7) + (hval<<8) + (hval<<24);
#endif

	/* xor the bottom with the current octet */
	hval ^= (Fnv32_t)data[i];
    }

    /* return our new hash value */
    return hval;
}
/*
 * fnv_32_str - perform a 32 bit Fowler/Noll/Vo hash on a string
 *
 * input:
 *	str	- NUL terminated string to hash
 *	hval	- previous hash value or 0 if first call
 *
 * returns:
 *	32 bit hash as a static hash type
 *
 * NOTE: To use the 32 bit FNV-0 historic hash, use FNV0_32_INIT as the hval
 *	 argument on the first call to either fnv_32_buf() or fnv_32_str().
 *
 * NOTE: To use the recommended 32 bit FNV-1 hash, use FNV1_32_INIT as the hval
 *	 argument on the first call to either fnv_32_buf() or fnv_32_str().
 */
Fnv32_t
fnv_32_str(char *str, Fnv32_t hval)
{
    unsigned char *cp = (unsigned char *)str;	/* unsigned string walker */
    unsigned char octet;			/* current octet */

    /*
     * FNV-1 hash each octet up to (but not including) the NUL:
     * multiply first, then xor
     */
    while ((octet = *cp++) != 0) {

	/* multiply by the 32 bit FNV magic prime mod 2^32 */
#if defined(NO_FNV_GCC_OPTIMIZATION)
	hval *= FNV_32_PRIME;
#else
	/* shift/add equivalent of the multiply, often faster on gcc */
	hval += (hval<<1) + (hval<<4) + (hval<<7) + (hval<<8) + (hval<<24);
#endif

	/* xor the bottom with the current octet */
	hval ^= (Fnv32_t)octet;
    }

    /* return our new hash value */
    return hval;
}

+ 144
- 0
lib/fnv/hash_32a.c View File

@ -0,0 +1,144 @@
/*
 * hash_32a - 32 bit Fowler/Noll/Vo FNV-1a hash code
*
* @(#) $Revision: 5.1 $
* @(#) $Id: hash_32a.c,v 5.1 2009/06/30 09:13:32 chongo Exp $
* @(#) $Source: /usr/local/src/cmd/fnv/RCS/hash_32a.c,v $
*
***
*
* Fowler/Noll/Vo hash
*
* The basis of this hash algorithm was taken from an idea sent
* as reviewer comments to the IEEE POSIX P1003.2 committee by:
*
* Phong Vo (http://www.research.att.com/info/kpv/)
* Glenn Fowler (http://www.research.att.com/~gsf/)
*
* In a subsequent ballot round:
*
* Landon Curt Noll (http://www.isthe.com/chongo/)
*
* improved on their algorithm. Some people tried this hash
* and found that it worked rather well. In an EMail message
* to Landon, they named it the ``Fowler/Noll/Vo'' or FNV hash.
*
* FNV hashes are designed to be fast while maintaining a low
* collision rate. The FNV speed allows one to quickly hash lots
* of data while maintaining a reasonable collision rate. See:
*
* http://www.isthe.com/chongo/tech/comp/fnv/index.html
*
* for more details as well as other forms of the FNV hash.
***
*
* To use the recommended 32 bit FNV-1a hash, pass FNV1_32A_INIT as the
* Fnv32_t hashval argument to fnv_32a_buf() or fnv_32a_str().
*
***
*
* Please do not copyright this code. This code is in the public domain.
*
* LANDON CURT NOLL DISCLAIMS ALL WARRANTIES WITH REGARD TO THIS SOFTWARE,
* INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO
* EVENT SHALL LANDON CURT NOLL BE LIABLE FOR ANY SPECIAL, INDIRECT OR
* CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF
* USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR
* OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
* PERFORMANCE OF THIS SOFTWARE.
*
* By:
* chongo <Landon Curt Noll> /\oo/\
* http://www.isthe.com/chongo/
*
* Share and Enjoy! :-)
*/
#include <stdlib.h>
#include "fnv.h"
/*
* 32 bit magic FNV-1a prime
*/
#define FNV_32_PRIME ((Fnv32_t)0x01000193)
/*
 * fnv_32a_buf - perform a 32 bit Fowler/Noll/Vo FNV-1a hash on a buffer
 *
 * input:
 *	buf	- start of buffer to hash
 *	len	- length of buffer in octets
 *	hval	- previous hash value or 0 if first call
 *
 * returns:
 *	32 bit hash as a static hash type
 *
 * NOTE: To use the recommended 32 bit FNV-1a hash, use FNV1_32A_INIT as the
 *	 hval arg on the first call to either fnv_32a_buf() or fnv_32a_str().
 */
Fnv32_t
fnv_32a_buf(void *buf, size_t len, Fnv32_t hval)
{
    unsigned char *p = (unsigned char *)buf;	/* first octet of buffer */
    unsigned char *end = p + len;		/* one past the last octet */

    /*
     * FNV-1a hash each octet in the buffer: xor first, then multiply
     */
    for (; p != end; ++p) {

	/* xor the bottom with the current octet */
	hval ^= (Fnv32_t)*p;

	/* multiply by the 32 bit FNV magic prime mod 2^32 */
#if defined(NO_FNV_GCC_OPTIMIZATION)
	hval *= FNV_32_PRIME;
#else
	/* shift/add equivalent of the multiply, often faster on gcc */
	hval += (hval<<1) + (hval<<4) + (hval<<7) + (hval<<8) + (hval<<24);
#endif
    }

    /* return our new hash value */
    return hval;
}
/*
 * fnv_32a_str - perform a 32 bit Fowler/Noll/Vo FNV-1a hash on a string
 *
 * input:
 *	str	- NUL terminated string to hash
 *	hval	- previous hash value or 0 if first call
 *
 * returns:
 *	32 bit hash as a static hash type
 *
 * NOTE: To use the recommended 32 bit FNV-1a hash, use FNV1_32A_INIT as the
 *	 hval arg on the first call to either fnv_32a_buf() or fnv_32a_str().
 */
Fnv32_t
fnv_32a_str(char *str, Fnv32_t hval)
{
    unsigned char *cp = (unsigned char *)str;	/* unsigned string walker */
    unsigned char octet;			/* current octet */

    /*
     * FNV-1a hash each octet up to (but not including) the NUL:
     * xor first, then multiply
     */
    while ((octet = *cp++) != 0) {

	/* xor the bottom with the current octet */
	hval ^= (Fnv32_t)octet;

	/* multiply by the 32 bit FNV magic prime mod 2^32 */
#if defined(NO_FNV_GCC_OPTIMIZATION)
	hval *= FNV_32_PRIME;
#else
	/* shift/add equivalent of the multiply, often faster on gcc */
	hval += (hval<<1) + (hval<<4) + (hval<<7) + (hval<<8) + (hval<<24);
#endif
    }

    /* return our new hash value */
    return hval;
}

+ 312
- 0
lib/fnv/hash_64.c View File

@ -0,0 +1,312 @@
/*
* hash_64 - 64 bit Fowler/Noll/Vo-0 hash code
*
* @(#) $Revision: 5.1 $
* @(#) $Id: hash_64.c,v 5.1 2009/06/30 09:01:38 chongo Exp $
* @(#) $Source: /usr/local/src/cmd/fnv/RCS/hash_64.c,v $
*
***
*
* Fowler/Noll/Vo hash
*
* The basis of this hash algorithm was taken from an idea sent
* as reviewer comments to the IEEE POSIX P1003.2 committee by:
*
* Phong Vo (http://www.research.att.com/info/kpv/)
* Glenn Fowler (http://www.research.att.com/~gsf/)
*
* In a subsequent ballot round:
*
* Landon Curt Noll (http://www.isthe.com/chongo/)
*
* improved on their algorithm. Some people tried this hash
* and found that it worked rather well. In an EMail message
* to Landon, they named it the ``Fowler/Noll/Vo'' or FNV hash.
*
* FNV hashes are designed to be fast while maintaining a low
* collision rate. The FNV speed allows one to quickly hash lots
* of data while maintaining a reasonable collision rate. See:
*
* http://www.isthe.com/chongo/tech/comp/fnv/index.html
*
* for more details as well as other forms of the FNV hash.
*
***
*
* NOTE: The FNV-0 historic hash is not recommended. One should use
* the FNV-1 hash instead.
*
* To use the 64 bit FNV-0 historic hash, pass FNV0_64_INIT as the
* Fnv64_t hashval argument to fnv_64_buf() or fnv_64_str().
*
* To use the recommended 64 bit FNV-1 hash, pass FNV1_64_INIT as the
* Fnv64_t hashval argument to fnv_64_buf() or fnv_64_str().
*
***
*
* Please do not copyright this code. This code is in the public domain.
*
* LANDON CURT NOLL DISCLAIMS ALL WARRANTIES WITH REGARD TO THIS SOFTWARE,
* INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO
* EVENT SHALL LANDON CURT NOLL BE LIABLE FOR ANY SPECIAL, INDIRECT OR
* CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF
* USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR
* OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
* PERFORMANCE OF THIS SOFTWARE.
*
* By:
* chongo <Landon Curt Noll> /\oo/\
* http://www.isthe.com/chongo/
*
* Share and Enjoy! :-)
*/
#include <stdlib.h>
#include "fnv.h"
/*
* FNV-0 defines the initial basis to be zero
*/
#if !defined(HAVE_64BIT_LONG_LONG)
const Fnv64_t fnv0_64_init = { 0UL, 0UL };
#endif /* ! HAVE_64BIT_LONG_LONG */
/*
* FNV-1 defines the initial basis to be non-zero
*/
#if !defined(HAVE_64BIT_LONG_LONG)
const Fnv64_t fnv1_64_init = { 0x84222325UL, 0xcbf29ce4UL };
#endif /* ! HAVE_64BIT_LONG_LONG */
/*
* 64 bit magic FNV-0 and FNV-1 prime
*/
#if defined(HAVE_64BIT_LONG_LONG)
#define FNV_64_PRIME ((Fnv64_t)0x100000001b3ULL)
#else /* HAVE_64BIT_LONG_LONG */
#define FNV_64_PRIME_LOW ((unsigned long)0x1b3) /* lower bits of FNV prime */
#define FNV_64_PRIME_SHIFT (8) /* top FNV prime shift above 2^32 */
#endif /* HAVE_64BIT_LONG_LONG */
/*
 * fnv_64_buf - perform a 64 bit Fowler/Noll/Vo hash on a buffer
 *
 * input:
 *	buf	- start of buffer to hash
 *	len	- length of buffer in octets
 *	hval	- previous hash value or 0 if first call
 *
 * returns:
 *	64 bit hash as a static hash type
 *
 * NOTE: To use the 64 bit FNV-0 historic hash, use FNV0_64_INIT as the hval
 *	 argument on the first call to either fnv_64_buf() or fnv_64_str().
 *
 * NOTE: To use the recommended 64 bit FNV-1 hash, use FNV1_64_INIT as the hval
 *	 argument on the first call to either fnv_64_buf() or fnv_64_str().
 */
Fnv64_t
fnv_64_buf(void *buf, size_t len, Fnv64_t hval)
{
    unsigned char *bp = (unsigned char *)buf;	/* start of buffer */
    unsigned char *be = bp + len;		/* beyond end of buffer */

#if defined(HAVE_64BIT_LONG_LONG)
    /*
     * FNV-1 hash each octet of the buffer (multiply first, then xor)
     */
    while (bp < be) {

	/* multiply by the 64 bit FNV magic prime mod 2^64 */
#if defined(NO_FNV_GCC_OPTIMIZATION)
	hval *= FNV_64_PRIME;
#else /* NO_FNV_GCC_OPTIMIZATION */
	/* shift/add equivalent of the multiply, often faster on gcc */
	hval += (hval << 1) + (hval << 4) + (hval << 5) +
		(hval << 7) + (hval << 8) + (hval << 40);
#endif /* NO_FNV_GCC_OPTIMIZATION */

	/* xor the bottom with the current octet */
	hval ^= (Fnv64_t)*bp++;
    }

#else /* HAVE_64BIT_LONG_LONG */

    /*
     * no native 64 bit type: do the 64 bit arithmetic in base 2^16
     * (4 digits of 16 bits each, least significant digit first)
     */
    unsigned long val[4];	/* hash value in base 2^16 */
    unsigned long tmp[4];	/* tmp 64 bit value */

    /*
     * Convert Fnv64_t hval into a base 2^16 array
     * (w32[0] holds the low order 32 bits, w32[1] the high order 32 bits)
     */
    val[0] = hval.w32[0];
    val[1] = (val[0] >> 16);
    val[0] &= 0xffff;
    val[2] = hval.w32[1];
    val[3] = (val[2] >> 16);
    val[2] &= 0xffff;

    /*
     * FNV-1 hash each octet of the buffer
     */
    while (bp < be) {

	/*
	 * multiply by the 64 bit FNV magic prime mod 2^64
	 *
	 * Using 0x100000001b3 we have the following digits base 2^16:
	 *
	 *	0x0	0x100	0x0	0x1b3
	 *
	 * which is the same as:
	 *
	 *	0x0	1<<FNV_64_PRIME_SHIFT	0x0	FNV_64_PRIME_LOW
	 */
	/* multiply by the lowest order digit base 2^16 */
	tmp[0] = val[0] * FNV_64_PRIME_LOW;
	tmp[1] = val[1] * FNV_64_PRIME_LOW;
	tmp[2] = val[2] * FNV_64_PRIME_LOW;
	tmp[3] = val[3] * FNV_64_PRIME_LOW;
	/* multiply by the other non-zero digit */
	tmp[2] += val[0] << FNV_64_PRIME_SHIFT;	/* tmp[2] += val[0] * 0x100 */
	tmp[3] += val[1] << FNV_64_PRIME_SHIFT;	/* tmp[3] += val[1] * 0x100 */
	/* propagate carries */
	tmp[1] += (tmp[0] >> 16);
	val[0] = tmp[0] & 0xffff;
	tmp[2] += (tmp[1] >> 16);
	val[1] = tmp[1] & 0xffff;
	val[3] = tmp[3] + (tmp[2] >> 16);
	val[2] = tmp[2] & 0xffff;
	/*
	 * Doing a val[3] &= 0xffff; is not really needed since it simply
	 * removes multiples of 2^64.  We can discard these excess bits
	 * outside of the loop when we convert to Fnv64_t.
	 */

	/* xor the bottom with the current octet */
	val[0] ^= (unsigned long)*bp++;
    }

    /*
     * Convert base 2^16 array back into an Fnv64_t
     * (excess bits above 2^64 in val[3] are discarded by the 32 bit store)
     */
    hval.w32[1] = ((val[3]<<16) | val[2]);
    hval.w32[0] = ((val[1]<<16) | val[0]);

#endif /* HAVE_64BIT_LONG_LONG */

    /* return our new hash value */
    return hval;
}
/*
 * fnv_64_str - perform a 64 bit Fowler/Noll/Vo hash on a string
 *
 * input:
 *	str	- NUL terminated string to hash
 *	hval	- previous hash value or 0 if first call
 *
 * returns:
 *	64 bit hash as a static hash type
 *
 * NOTE: To use the 64 bit FNV-0 historic hash, use FNV0_64_INIT as the hval
 *	 argument on the first call to either fnv_64_buf() or fnv_64_str().
 *
 * NOTE: To use the recommended 64 bit FNV-1 hash, use FNV1_64_INIT as the hval
 *	 argument on the first call to either fnv_64_buf() or fnv_64_str().
 */
Fnv64_t
fnv_64_str(char *str, Fnv64_t hval)
{
    unsigned char *s = (unsigned char *)str;	/* unsigned string */

#if defined(HAVE_64BIT_LONG_LONG)

    /*
     * FNV-1 hash each octet of the string (multiply first, then xor)
     */
    while (*s) {

	/* multiply by the 64 bit FNV magic prime mod 2^64 */
#if defined(NO_FNV_GCC_OPTIMIZATION)
	hval *= FNV_64_PRIME;
#else /* NO_FNV_GCC_OPTIMIZATION */
	/* shift/add equivalent of the multiply, often faster on gcc */
	hval += (hval << 1) + (hval << 4) + (hval << 5) +
		(hval << 7) + (hval << 8) + (hval << 40);
#endif /* NO_FNV_GCC_OPTIMIZATION */

	/* xor the bottom with the current octet */
	hval ^= (Fnv64_t)*s++;
    }

#else /* !HAVE_64BIT_LONG_LONG */

    /*
     * no native 64 bit type: do the 64 bit arithmetic in base 2^16
     * (4 digits of 16 bits each, least significant digit first)
     */
    unsigned long val[4];	/* hash value in base 2^16 */
    unsigned long tmp[4];	/* tmp 64 bit value */

    /*
     * Convert Fnv64_t hval into a base 2^16 array
     * (w32[0] holds the low order 32 bits, w32[1] the high order 32 bits)
     */
    val[0] = hval.w32[0];
    val[1] = (val[0] >> 16);
    val[0] &= 0xffff;
    val[2] = hval.w32[1];
    val[3] = (val[2] >> 16);
    val[2] &= 0xffff;

    /*
     * FNV-1 hash each octet of the string
     */
    while (*s) {

	/*
	 * multiply by the 64 bit FNV magic prime mod 2^64
	 *
	 * Using 1099511628211, we have the following digits base 2^16:
	 *
	 *	0x0	0x100	0x0	0x1b3
	 *
	 * which is the same as:
	 *
	 *	0x0	1<<FNV_64_PRIME_SHIFT	0x0	FNV_64_PRIME_LOW
	 */
	/* multiply by the lowest order digit base 2^16 */
	tmp[0] = val[0] * FNV_64_PRIME_LOW;
	tmp[1] = val[1] * FNV_64_PRIME_LOW;
	tmp[2] = val[2] * FNV_64_PRIME_LOW;
	tmp[3] = val[3] * FNV_64_PRIME_LOW;
	/* multiply by the other non-zero digit */
	tmp[2] += val[0] << FNV_64_PRIME_SHIFT;	/* tmp[2] += val[0] * 0x100 */
	tmp[3] += val[1] << FNV_64_PRIME_SHIFT;	/* tmp[3] += val[1] * 0x100 */
	/* propagate carries */
	tmp[1] += (tmp[0] >> 16);
	val[0] = tmp[0] & 0xffff;
	tmp[2] += (tmp[1] >> 16);
	val[1] = tmp[1] & 0xffff;
	val[3] = tmp[3] + (tmp[2] >> 16);
	val[2] = tmp[2] & 0xffff;
	/*
	 * Doing a val[3] &= 0xffff; is not really needed since it simply
	 * removes multiples of 2^64.  We can discard these excess bits
	 * outside of the loop when we convert to Fnv64_t.
	 */

	/* xor the bottom with the current octet */
	val[0] ^= (unsigned long)(*s++);
    }

    /*
     * Convert base 2^16 array back into an Fnv64_t
     * (excess bits above 2^64 in val[3] are discarded by the 32 bit store)
     */
    hval.w32[1] = ((val[3]<<16) | val[2]);
    hval.w32[0] = ((val[1]<<16) | val[0]);

#endif /* !HAVE_64BIT_LONG_LONG */

    /* return our new hash value */
    return hval;
}

+ 291
- 0
lib/fnv/hash_64a.c View File

@ -0,0 +1,291 @@
/*
* hash_64 - 64 bit Fowler/Noll/Vo-0 FNV-1a hash code
*
* @(#) $Revision: 5.1 $
* @(#) $Id: hash_64a.c,v 5.1 2009/06/30 09:01:38 chongo Exp $
* @(#) $Source: /usr/local/src/cmd/fnv/RCS/hash_64a.c,v $
*
***
*
* Fowler/Noll/Vo hash
*
* The basis of this hash algorithm was taken from an idea sent
* as reviewer comments to the IEEE POSIX P1003.2 committee by:
*
* Phong Vo (http://www.research.att.com/info/kpv/)
* Glenn Fowler (http://www.research.att.com/~gsf/)
*
* In a subsequent ballot round:
*
* Landon Curt Noll (http://www.isthe.com/chongo/)
*
* improved on their algorithm. Some people tried this hash
* and found that it worked rather well. In an EMail message
* to Landon, they named it the ``Fowler/Noll/Vo'' or FNV hash.
*
* FNV hashes are designed to be fast while maintaining a low
* collision rate. The FNV speed allows one to quickly hash lots
* of data while maintaining a reasonable collision rate. See:
*
* http://www.isthe.com/chongo/tech/comp/fnv/index.html
*
* for more details as well as other forms of the FNV hash.
*
***
*
* To use the recommended 64 bit FNV-1a hash, pass FNV1A_64_INIT as the
* Fnv64_t hashval argument to fnv_64a_buf() or fnv_64a_str().
*
***
*
* Please do not copyright this code. This code is in the public domain.
*
* LANDON CURT NOLL DISCLAIMS ALL WARRANTIES WITH REGARD TO THIS SOFTWARE,
* INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO
* EVENT SHALL LANDON CURT NOLL BE LIABLE FOR ANY SPECIAL, INDIRECT OR
* CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF
* USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR
* OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
* PERFORMANCE OF THIS SOFTWARE.
*
* By:
* chongo <Landon Curt Noll> /\oo/\
* http://www.isthe.com/chongo/
*
* Share and Enjoy! :-)
*/
#include <stdlib.h>
#include "fnv.h"
/*
* FNV-1a defines the initial basis to be non-zero
*/
#if !defined(HAVE_64BIT_LONG_LONG)
/* struct form of the 64-bit basis 0xcbf29ce484222325: w32[0] is the low word,
* w32[1] the high word (matches the w32[] packing used by the fallback code below) */
const Fnv64_t fnv1a_64_init = { 0x84222325, 0xcbf29ce4 };
#endif /* ! HAVE_64BIT_LONG_LONG */
/*
* 64 bit magic FNV-1a prime
*/
#if defined(HAVE_64BIT_LONG_LONG)
#define FNV_64_PRIME ((Fnv64_t)0x100000001b3ULL)
#else /* HAVE_64BIT_LONG_LONG */
/* prime split for the 4x16-bit limb multiply: 0x100000001b3 == (0x100 << 32) + 0x1b3 */
#define FNV_64_PRIME_LOW ((unsigned long)0x1b3) /* lower bits of FNV prime */
#define FNV_64_PRIME_SHIFT (8) /* top FNV prime shift above 2^32 */
#endif /* HAVE_64BIT_LONG_LONG */
/*
* fnv_64a_buf - perform a 64 bit Fowler/Noll/Vo FNV-1a hash on a buffer
*
* input:
* buf - start of buffer to hash
* len - length of buffer in octets
* hval - previous hash value or 0 if first call
*
* returns:
* 64 bit hash as a static hash type
*
* NOTE: To use the recommended 64 bit FNV-1a hash, use FNV1A_64_INIT as the
* hval arg on the first call to either fnv_64a_buf() or fnv_64a_str().
*/
/*
* FNV-1a over a length-delimited buffer: for each octet, xor it into the low
* bits of hval FIRST, then multiply by the 64-bit FNV prime (mod 2^64).
*/
Fnv64_t
fnv_64a_buf(void *buf, size_t len, Fnv64_t hval)
{
unsigned char *bp = (unsigned char *)buf; /* start of buffer */
unsigned char *be = bp + len; /* beyond end of buffer */
#if defined(HAVE_64BIT_LONG_LONG)
/*
* FNV-1a hash each octet of the buffer
*/
while (bp < be) {
/* xor the bottom with the current octet */
hval ^= (Fnv64_t)*bp++;
/* multiply by the 64 bit FNV magic prime mod 2^64 */
#if defined(NO_FNV_GCC_OPTIMIZATION)
hval *= FNV_64_PRIME;
#else /* NO_FNV_GCC_OPTIMIZATION */
/* shift/add expansion of hval * 0x100000001b3: set bits 0,1,4,5,7,8,40 */
hval += (hval << 1) + (hval << 4) + (hval << 5) +
(hval << 7) + (hval << 8) + (hval << 40);
#endif /* NO_FNV_GCC_OPTIMIZATION */
}
#else /* HAVE_64BIT_LONG_LONG */
/* fallback: simulate the 64-bit multiply with four 16-bit limbs held in unsigned longs */
unsigned long val[4]; /* hash value in base 2^16 */
unsigned long tmp[4]; /* tmp 64 bit value */
/*
* Convert Fnv64_t hval into a base 2^16 array
*/
val[0] = hval.w32[0];
val[1] = (val[0] >> 16);
val[0] &= 0xffff;
val[2] = hval.w32[1];
val[3] = (val[2] >> 16);
val[2] &= 0xffff;
/*
* FNV-1a hash each octet of the buffer
*/
while (bp < be) {
/* xor the bottom with the current octet */
val[0] ^= (unsigned long)*bp++;
/*
* multiply by the 64 bit FNV magic prime mod 2^64
*
* Using 0x100000001b3 we have the following digits base 2^16:
*
* 0x0 0x100 0x0 0x1b3
*
* which is the same as:
*
* 0x0 1<<FNV_64_PRIME_SHIFT 0x0 FNV_64_PRIME_LOW
*/
/* multiply by the lowest order digit base 2^16 */
tmp[0] = val[0] * FNV_64_PRIME_LOW;
tmp[1] = val[1] * FNV_64_PRIME_LOW;
tmp[2] = val[2] * FNV_64_PRIME_LOW;
tmp[3] = val[3] * FNV_64_PRIME_LOW;
/* multiply by the other non-zero digit */
tmp[2] += val[0] << FNV_64_PRIME_SHIFT; /* tmp[2] += val[0] * 0x100 */
tmp[3] += val[1] << FNV_64_PRIME_SHIFT; /* tmp[3] += val[1] * 0x100 */
/* propagate carries */
tmp[1] += (tmp[0] >> 16);
val[0] = tmp[0] & 0xffff;
tmp[2] += (tmp[1] >> 16);
val[1] = tmp[1] & 0xffff;
val[3] = tmp[3] + (tmp[2] >> 16);
val[2] = tmp[2] & 0xffff;
/*
* Doing a val[3] &= 0xffff; is not really needed since it simply
* removes multiples of 2^64. We can discard these excess bits
* outside of the loop when we convert to Fnv64_t.
*/
}
/*
* Convert base 2^16 array back into an Fnv64_t
*/
hval.w32[1] = ((val[3]<<16) | val[2]);
hval.w32[0] = ((val[1]<<16) | val[0]);
#endif /* HAVE_64BIT_LONG_LONG */
/* return our new hash value */
return hval;
}
/*
* fnv_64a_str - perform a 64 bit Fowler/Noll/Vo FNV-1a hash on a buffer
*
* input:
* buf - start of buffer to hash
* hval - previous hash value or 0 if first call
*
* returns:
* 64 bit hash as a static hash type
*
* NOTE: To use the recommended 64 bit FNV-1a hash, use FNV1A_64_INIT as the
* hval arg on the first call to either fnv_64a_buf() or fnv_64a_str().
*/
/*
* FNV-1a over a NUL-terminated string: for each octet, xor it into the low
* bits of hval FIRST, then multiply by the 64-bit FNV prime (mod 2^64).
*/
Fnv64_t
fnv_64a_str(char *str, Fnv64_t hval)
{
unsigned char *s = (unsigned char *)str; /* unsigned string */
#if defined(HAVE_64BIT_LONG_LONG)
/*
* FNV-1a hash each octet of the string
*/
while (*s) {
/* xor the bottom with the current octet */
hval ^= (Fnv64_t)*s++;
/* multiply by the 64 bit FNV magic prime mod 2^64 */
#if defined(NO_FNV_GCC_OPTIMIZATION)
hval *= FNV_64_PRIME;
#else /* NO_FNV_GCC_OPTIMIZATION */
hval += (hval << 1) + (hval << 4) + (hval << 5) +
(hval << 7) + (hval << 8) + (hval << 40);
#endif /* NO_FNV_GCC_OPTIMIZATION */
}
#else /* !HAVE_64BIT_LONG_LONG */
unsigned long val[4]; /* hash value in base 2^16 */
unsigned long tmp[4]; /* tmp 64 bit value */
/*
* Convert Fnv64_t hval into a base 2^16 array
*/
val[0] = hval.w32[0];
val[1] = (val[0] >> 16);
val[0] &= 0xffff;
val[2] = hval.w32[1];
val[3] = (val[2] >> 16);
val[2] &= 0xffff;
/*
* FNV-1a hash each octet of the string
*/
while (*s) {
/*
* BUGFIX: the xor must happen BEFORE the multiply for FNV-1a (it was
* previously performed at the bottom of the loop, i.e. FNV-1 order,
* which disagreed with the HAVE_64BIT_LONG_LONG path above and with
* fnv_64a_buf()'s fallback path).
*/
/* xor the bottom with the current octet */
val[0] ^= (unsigned long)(*s++);
/*
* multiply by the 64 bit FNV magic prime mod 2^64
*
* Using 1099511628211, we have the following digits base 2^16:
*
* 0x0 0x100 0x0 0x1b3
*
* which is the same as:
*
* 0x0 1<<FNV_64_PRIME_SHIFT 0x0 FNV_64_PRIME_LOW
*/
/* multiply by the lowest order digit base 2^16 */
tmp[0] = val[0] * FNV_64_PRIME_LOW;
tmp[1] = val[1] * FNV_64_PRIME_LOW;
tmp[2] = val[2] * FNV_64_PRIME_LOW;
tmp[3] = val[3] * FNV_64_PRIME_LOW;
/* multiply by the other non-zero digit */
tmp[2] += val[0] << FNV_64_PRIME_SHIFT; /* tmp[2] += val[0] * 0x100 */
tmp[3] += val[1] << FNV_64_PRIME_SHIFT; /* tmp[3] += val[1] * 0x100 */
/* propagate carries */
tmp[1] += (tmp[0] >> 16);
val[0] = tmp[0] & 0xffff;
tmp[2] += (tmp[1] >> 16);
val[1] = tmp[1] & 0xffff;
val[3] = tmp[3] + (tmp[2] >> 16);
val[2] = tmp[2] & 0xffff;
/*
* Doing a val[3] &= 0xffff; is not really needed since it simply
* removes multiples of 2^64. We can discard these excess bits
* outside of the loop when we convert to Fnv64_t.
*/
}
/*
* Convert base 2^16 array back into an Fnv64_t
*/
hval.w32[1] = ((val[3]<<16) | val[2]);
hval.w32[0] = ((val[1]<<16) | val[0]);
#endif /* !HAVE_64BIT_LONG_LONG */
/* return our new hash value */
return hval;
}

+ 58
- 0
lib/fnv/have_ulong64.c View File

@ -0,0 +1,58 @@
/*
* have_ulong64 - Determine if we have a 64 bit unsigned long long
*
* usage:
* have_ulong64 > longlong.h
*
* Not all systems have a 'long long type' so this may not compile on
* your system.
*
* This prog outputs the define:
*
* HAVE_64BIT_LONG_LONG
* defined ==> we have a 64 bit unsigned long long
* undefined ==> we must simulate a 64 bit unsigned long long
*/
/*
*
* Please do not copyright this code. This code is in the public domain.
*
* LANDON CURT NOLL DISCLAIMS ALL WARRANTIES WITH REGARD TO THIS SOFTWARE,
* INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO
* EVENT SHALL LANDON CURT NOLL BE LIABLE FOR ANY SPECIAL, INDIRECT OR
* CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF
* USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR
* OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
* PERFORMANCE OF THIS SOFTWARE.
*
* By:
* chongo <Landon Curt Noll> /\oo/\
* http://www.isthe.com/chongo/
*
* Share and Enjoy! :-)
*/
/*
* have the compiler try its hand with unsigned and signed long longs
*/
#if ! defined(NO64BIT_LONG_LONG)
unsigned long long val = 1099511628211ULL;
#endif /* NO64BIT_LONG_LONG */
/*
* Emit the HAVE_64BIT_LONG_LONG determination on stdout, intended to be
* captured into longlong.h (see usage comment at the top of this file).
*
* NOTE(review): printf is used but no #include <stdio.h> is visible in this
* view of the file -- TODO confirm the full file provides the declaration.
*/
int
main(void)
{
/*
* ensure that the length of long long val is what we expect
*/
#if defined(NO64BIT_LONG_LONG)
printf("#undef HAVE_64BIT_LONG_LONG\t/* no */\n");
#else /* NO64BIT_LONG_LONG */
/* both the value round-trip and sizeof must confirm a true 64-bit type;
* if either fails, nothing is printed and the macro stays undefined */
if (val == 1099511628211ULL && sizeof(val) == 8) {
printf("#define HAVE_64BIT_LONG_LONG\t/* yes */\n");
}
#endif /* NO64BIT_LONG_LONG */
/* exit(0); */
return 0;
}

+ 18
- 0
lib/fnv/longlong.h View File

@ -0,0 +1,18 @@
/*
* DO NOT EDIT -- generated by the Makefile
*/
#if !defined(__LONGLONG_H__)
#define __LONGLONG_H__
/* NOTE(review): this is a checked-in copy of a generated file (see the
* "DO NOT EDIT" banner above); regenerate with have_ulong64 rather than
* editing by hand. */
/* do we have/want to use a long long type? */
#define HAVE_64BIT_LONG_LONG /* yes */
/*
* NO64BIT_LONG_LONG undef HAVE_64BIT_LONG_LONG
*/
#if defined(NO64BIT_LONG_LONG)
#undef HAVE_64BIT_LONG_LONG
#endif /* NO64BIT_LONG_LONG */
#endif /* !__LONGLONG_H__ */

+ 14
- 0
lib/fnv/qmk_fnv_type_validation.c View File

@ -0,0 +1,14 @@
// Copyright 2022 Nick Brassel (@tzarc)
// SPDX-License-Identifier: GPL-2.0-or-later
#include "fnv.h"
// This library was originally sourced from:
// http://www.isthe.com/chongo/tech/comp/fnv/index.html
//
// Version at the time of retrieval on 2022-06-26: v5.0.3
// Compile-time guards: the wear-leveling checksum plumbing stores an FNV1a_64
// result in exactly 8 bytes, so the FNV integer types must be exact-width.
_Static_assert(sizeof(long long) == 8, "long long should be 64 bits");
_Static_assert(sizeof(unsigned long long) == 8, "unsigned long long should be 64 bits");
_Static_assert(sizeof(Fnv32_t) == 4, "Fnv32_t should be 32 bits");
_Static_assert(sizeof(Fnv64_t) == 8, "Fnv64_t should be 64 bits");

+ 2237
- 0
lib/fnv/test_fnv.c
File diff suppressed because it is too large
View File


+ 154
- 0
quantum/wear_leveling/tests/backing_mocks.cpp View File

@ -0,0 +1,154 @@
// Copyright 2022 Nick Brassel (@tzarc)
// SPDX-License-Identifier: GPL-2.0-or-later
#include "gtest/gtest.h"
#include "gmock/gmock.h"
#include "backing_mocks.hpp"
////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
// Backing Store Mock implementation
////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
void MockBackingStore::reset_instance() {
    // Wipe the emulated flash contents and per-cell statistics.
    for (auto& element : backing_storage) {
        element.reset();
    }
    // Return to the power-on state: locked, with every counter cleared.
    locked                      = true;
    backing_erasure_count       = 0;
    backing_max_write_count     = 0;
    backing_total_write_count   = 0;
    backing_init_invoke_count   = 0;
    backing_unlock_invoke_count = 0;
    backing_erase_invoke_count  = 0;
    backing_write_invoke_count  = 0;
    backing_lock_invoke_count   = 0;
    // By default every backing-store operation reports success.
    auto always_ok          = [](std::uint64_t) -> bool { return true; };
    init_success_callback   = always_ok;
    erase_success_callback  = always_ok;
    unlock_success_callback = always_ok;
    lock_success_callback   = always_ok;
    write_success_callback  = [](std::uint64_t, std::uint32_t) -> bool { return true; };
    // Start each test with an empty operation log.
    write_log.clear();
}
// Count the init invocation, then defer to the configured success callback.
bool MockBackingStore::init(void) {
    ++backing_init_invoke_count;
    if (!init_success_callback) {
        return true;
    }
    return init_success_callback(backing_init_invoke_count);
}
// Transition locked -> unlocked; a double-unlock is a sequencing bug in the
// code under test and fails the expectation.
bool MockBackingStore::unlock(void) {
    ++backing_unlock_invoke_count;
    EXPECT_TRUE(is_locked()) << "Attempted to unlock but was not locked";
    locked = false;
    if (!unlock_success_callback) {
        return true;
    }
    return unlock_success_callback(backing_unlock_invoke_count);
}
// Erase the whole emulated flash, slot by slot. The failure callback is
// consulted once per slot so tests can simulate a mid-erase failure.
bool MockBackingStore::erase(void) {
++backing_erase_invoke_count;
// Erase each slot
for (std::size_t i = 0; i < backing_storage.size(); ++i) {
// Drop out of erase early with failure if we need to
if (erase_success_callback && !erase_success_callback(backing_erase_invoke_count)) {
// An erase marker is logged even on failure, so tests can observe
// that an erase was attempted.
append_log(true);
return false;
}
backing_storage[i].erase();
}
// Keep track of the erase in the write log so that we can verify during tests
append_log(true);
// Only fully-completed erases count towards the erasure total.
++backing_erasure_count;
return true;
}
bool MockBackingStore::write(uint32_t address, backing_store_int_t value) {
++backing_write_invoke_count;
// precondition: value's buffer size already matches BACKING_STORE_WRITE_SIZE
EXPECT_TRUE(address % BACKING_STORE_WRITE_SIZE == 0) << "Supplied address was not aligned with the backing store integral size";
EXPECT_TRUE(address + BACKING_STORE_WRITE_SIZE <= WEAR_LEVELING_BACKING_SIZE) << "Address would result of out-of-bounds access";
EXPECT_FALSE(is_locked()) << "Write was attempted without being unlocked first";
// Drop out of write early with failure if we need to
if (write_success_callback && !write_success_callback(backing_write_invoke_count, address)) {
return false;
}
// Write the complement as we're simulating flash memory -- 0xFF means 0x00
std::size_t index = address / BACKING_STORE_WRITE_SIZE;
backing_storage[index].set(~value);
// Keep track of the write log so that we can verify during tests
append_log(address, value);
// Keep track of the total number of writes into the backing store
++backing_total_write_count;
return true;
}
// Transition unlocked -> locked; a double-lock is a sequencing bug in the
// code under test and fails the expectation.
bool MockBackingStore::lock(void) {
    ++backing_lock_invoke_count;
    EXPECT_FALSE(is_locked()) << "Attempted to lock but was not unlocked";
    locked = true;
    if (!lock_success_callback) {
        return true;
    }
    return lock_success_callback(backing_lock_invoke_count);
}
// Fetch one backing-store word, undoing the complement applied by write().
bool MockBackingStore::read(uint32_t address, backing_store_int_t& value) const {
    // precondition: value's buffer size already matches BACKING_STORE_WRITE_SIZE
    EXPECT_TRUE(address % BACKING_STORE_WRITE_SIZE == 0) << "Supplied address was not aligned with the backing store integral size";
    EXPECT_TRUE(address + BACKING_STORE_WRITE_SIZE <= WEAR_LEVELING_BACKING_SIZE) << "Address would result of out-of-bounds access";
    // Read and take the complement as we're simulating flash memory -- 0xFF means 0x00
    const std::size_t slot = address / BACKING_STORE_WRITE_SIZE;
    value                  = ~backing_storage[slot].get();
    return true;
}
////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
// Backing Implementation
////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
// C-linkage shims: route the wear-leveling driver's backing_store_* API onto
// the singleton mock so the C code under test exercises the mock storage.
extern "C" bool backing_store_init(void) {
return MockBackingStore::Instance().init();
}
extern "C" bool backing_store_unlock(void) {
return MockBackingStore::Instance().unlock();
}
extern "C" bool backing_store_erase(void) {
return MockBackingStore::Instance().erase();
}
extern "C" bool backing_store_write(uint32_t address, backing_store_int_t value) {
return MockBackingStore::Instance().write(address, value);
}
extern "C" bool backing_store_lock(void) {
return MockBackingStore::Instance().lock();
}
// Note: the C API passes an out-pointer; the mock's read() takes a reference.
extern "C" bool backing_store_read(uint32_t address, backing_store_int_t* value) {
return MockBackingStore::Instance().read(address, *value);
}

+ 210
- 0
quantum/wear_leveling/tests/backing_mocks.hpp View File

@ -0,0 +1,210 @@
// Copyright 2022 Nick Brassel (@tzarc)
// SPDX-License-Identifier: GPL-2.0-or-later
#pragma once
#include <algorithm>
#include <array>
#include <cstdint>
#include <cstdlib>
#include <functional>
#include <type_traits>
#include <vector>
extern "C" {
#include "fnv.h"
#include "wear_leveling.h"
#include "wear_leveling_internal.h"
};
// Maximum number of mock write log entries to keep (bounds memory used by long-running tests)
using MOCK_WRITE_LOG_MAX_ENTRIES = std::integral_constant<std::size_t, 1024>;
// Complement to the backing store integral, for emulating flash erases of all bytes=0xFF
using BACKING_STORE_INTEGRAL_COMPLEMENT = std::integral_constant<backing_store_int_t, ((backing_store_int_t)(~(backing_store_int_t)0))>;
// Total number of elements stored in the backing arrays (one per BACKING_STORE_WRITE_SIZE-sized word)
using BACKING_STORE_ELEMENT_COUNT = std::integral_constant<std::size_t, (WEAR_LEVELING_BACKING_SIZE / sizeof(backing_store_int_t))>;
// One emulated flash cell: holds a single backing-store word and counts how
// many times it has been programmed and erased.
class MockBackingStoreElement {
   private:
    backing_store_int_t value  = BACKING_STORE_INTEGRAL_COMPLEMENT::value; // current contents; erased state == all bits set
    std::size_t         writes = 0;                                        // successful programs of this cell
    std::size_t         erases = 0;                                        // data -> erased transitions of this cell

   public:
    MockBackingStoreElement() = default;

    // True when the cell holds the flash-erased pattern (all ones).
    bool is_erased() const {
        return value == BACKING_STORE_INTEGRAL_COMPLEMENT::value;
    }

    // Return the cell to the erased pattern; only counts as an erase if it held data.
    void erase() {
        if (!is_erased()) {
            ++erases;
        }
        value = BACKING_STORE_INTEGRAL_COMPLEMENT::value;
    }

    // Erase and clear the statistics, ready for the next test run.
    void reset() {
        erase();
        writes = 0;
        erases = 0;
    }

    backing_store_int_t get() const {
        return value;
    }

    // Emulated flash program: only an erased cell may be written.
    void set(const backing_store_int_t& v) {
        EXPECT_TRUE(is_erased()) << "Attempted write at index which isn't empty.";
        value = v;
        ++writes;
    }

    std::size_t num_writes() const {
        return writes;
    }

    std::size_t num_erases() const {
        return erases;
    }
};
// One record in the mock's operation log: either a word write (address+value)
// or a whole-store erase marker (erased == true, address/value meaningless).
struct MockBackingStoreLogEntry {
MockBackingStoreLogEntry(uint32_t address, backing_store_int_t value) : address(address), value(value), erased(false) {}
MockBackingStoreLogEntry(bool erased) : address(0), value(0), erased(erased) {}
uint32_t address = 0; // The address of the operation
backing_store_int_t value = 0; // The value of the operation
bool erased = false; // Whether the entire backing store was erased
};
// Singleton mock of the flash backing store: emulated storage with flash
// semantics, per-API invocation counters, failure injection via callbacks,
// and an operation log that tests inspect to verify driver behaviour.
class MockBackingStore {
private:
// Private ctor: instances only via Instance() (singleton).
MockBackingStore() {
reset_instance();
}
// Type containing each of the entries and the write counts
using storage_t = std::array<MockBackingStoreElement, BACKING_STORE_ELEMENT_COUNT::value>;
// Whether the backing store is locked
bool locked;
// The actual data stored in the emulated flash
storage_t backing_storage;
// The number of erase cycles that have occurred
std::uint64_t backing_erasure_count;
// The max number of writes to an element of the backing store
std::uint64_t backing_max_write_count;
// The total number of writes to all elements of the backing store
std::uint64_t backing_total_write_count;
// The write log for the backing store
std::vector<MockBackingStoreLogEntry> write_log;
// The number of times each API was invoked
std::uint64_t backing_init_invoke_count;
std::uint64_t backing_unlock_invoke_count;
std::uint64_t backing_erase_invoke_count;
std::uint64_t backing_write_invoke_count;
std::uint64_t backing_lock_invoke_count;
// Whether init should succeed
std::function<bool(std::uint64_t)> init_success_callback;
// Whether erase should succeed
std::function<bool(std::uint64_t)> erase_success_callback;
// Whether unlocks should succeed
std::function<bool(std::uint64_t)> unlock_success_callback;
// Whether writes should succeed
std::function<bool(std::uint64_t, std::uint32_t)> write_success_callback;
// Whether locks should succeed
std::function<bool(std::uint64_t)> lock_success_callback;
// Append an entry to the operation log, silently dropping entries once the
// cap is reached so runaway tests don't consume unbounded memory.
template <typename... Args>
void append_log(Args&&... args) {
if (write_log.size() < MOCK_WRITE_LOG_MAX_ENTRIES::value) {
write_log.emplace_back(std::forward<Args>(args)...);
}
}
public:
// Meyers-style singleton accessor.
static MockBackingStore& Instance() {
static MockBackingStore instance;
return instance;
}
std::uint64_t erasure_count() const {
return backing_erasure_count;
}
std::uint64_t max_write_count() const {
return backing_max_write_count;
}
std::uint64_t total_write_count() const {
return backing_total_write_count;
}
// The number of times each API was invoked
std::uint64_t init_invoke_count() const {
return backing_init_invoke_count;
}
std::uint64_t unlock_invoke_count() const {
return backing_unlock_invoke_count;
}
std::uint64_t erase_invoke_count() const {
return backing_erase_invoke_count;
}
std::uint64_t write_invoke_count() const {
return backing_write_invoke_count;
}
std::uint64_t lock_invoke_count() const {
return backing_lock_invoke_count;
}
// Clear out the internal data for the next run
void reset_instance();
bool is_locked() const {
return locked;
}
// APIs for the backing store
bool init();
bool unlock();
bool erase();
bool write(std::uint32_t address, backing_store_int_t value);
bool lock();
bool read(std::uint32_t address, backing_store_int_t& value) const;
// Control over when init/writes/erases should succeed
void set_init_callback(std::function<bool(std::uint64_t)> callback) {
init_success_callback = callback;
}
void set_erase_callback(std::function<bool(std::uint64_t)> callback) {
erase_success_callback = callback;
}
void set_unlock_callback(std::function<bool(std::uint64_t)> callback) {
unlock_success_callback = callback;
}
void set_write_callback(std::function<bool(std::uint64_t, std::uint32_t)> callback) {
write_success_callback = callback;
}
void set_lock_callback(std::function<bool(std::uint64_t)> callback) {
lock_success_callback = callback;
}
// Iterator access to the raw emulated storage (const and mutable).
auto storage_begin() const -> decltype(backing_storage.begin()) {
return backing_storage.begin();
}
auto storage_end() const -> decltype(backing_storage.end()) {
return backing_storage.end();
}
auto storage_begin() -> decltype(backing_storage.begin()) {
return backing_storage.begin();
}
auto storage_end() -> decltype(backing_storage.end()) {
return backing_storage.end();
}
// Iterator access to the operation log (const and mutable).
auto log_begin() -> decltype(write_log.begin()) {
return write_log.begin();
}
auto log_end() -> decltype(write_log.end()) {
return write_log.end();
}
auto log_begin() const -> decltype(write_log.begin()) {
return write_log.begin();
}
auto log_end() const -> decltype(write_log.end()) {
return write_log.end();
}
};

+ 66
- 0
quantum/wear_leveling/tests/rules.mk View File

@ -0,0 +1,66 @@
# Defines, sources, and include paths shared by every wear-leveling test target.
wear_leveling_common_DEFS := \
-DWEAR_LEVELING_TESTS
wear_leveling_common_SRC := \
$(LIB_PATH)/fnv/qmk_fnv_type_validation.c \
$(LIB_PATH)/fnv/hash_32a.c \
$(LIB_PATH)/fnv/hash_64a.c \
$(QUANTUM_PATH)/wear_leveling/wear_leveling.c \
$(QUANTUM_PATH)/wear_leveling/tests/backing_mocks.cpp
wear_leveling_common_INC := \
$(LIB_PATH)/fnv \
$(QUANTUM_PATH)/wear_leveling
# General behaviour tests: 2-byte writes, small (48-byte) backing store.
wear_leveling_general_DEFS := \
$(wear_leveling_common_DEFS) \
-DBACKING_STORE_WRITE_SIZE=2 \
-DWEAR_LEVELING_BACKING_SIZE=48 \
-DWEAR_LEVELING_LOGICAL_SIZE=16
wear_leveling_general_SRC := \
$(wear_leveling_common_SRC) \
$(QUANTUM_PATH)/wear_leveling/tests/wear_leveling_general.cpp
wear_leveling_general_INC := \
$(wear_leveling_common_INC)
# Optimised-write tests: large backing/logical sizes so the write log can
# exercise the optimised encodings.
wear_leveling_2byte_optimized_writes_DEFS := \
$(wear_leveling_common_DEFS) \
-DBACKING_STORE_WRITE_SIZE=2 \
-DWEAR_LEVELING_BACKING_SIZE=65536 \
-DWEAR_LEVELING_LOGICAL_SIZE=32768
wear_leveling_2byte_optimized_writes_SRC := \
$(wear_leveling_common_SRC) \
$(QUANTUM_PATH)/wear_leveling/tests/wear_leveling_2byte_optimized_writes.cpp
wear_leveling_2byte_optimized_writes_INC := \
$(wear_leveling_common_INC)
# Per-write-size variants: identical layout, differing BACKING_STORE_WRITE_SIZE.
wear_leveling_2byte_DEFS := \
$(wear_leveling_common_DEFS) \
-DBACKING_STORE_WRITE_SIZE=2 \
-DWEAR_LEVELING_BACKING_SIZE=48 \
-DWEAR_LEVELING_LOGICAL_SIZE=16
wear_leveling_2byte_SRC := \
$(wear_leveling_common_SRC) \
$(QUANTUM_PATH)/wear_leveling/tests/wear_leveling_2byte.cpp
wear_leveling_2byte_INC := \
$(wear_leveling_common_INC)
wear_leveling_4byte_DEFS := \
$(wear_leveling_common_DEFS) \
-DBACKING_STORE_WRITE_SIZE=4 \
-DWEAR_LEVELING_BACKING_SIZE=48 \
-DWEAR_LEVELING_LOGICAL_SIZE=16
wear_leveling_4byte_SRC := \
$(wear_leveling_common_SRC) \
$(QUANTUM_PATH)/wear_leveling/tests/wear_leveling_4byte.cpp
wear_leveling_4byte_INC := \
$(wear_leveling_common_INC)
wear_leveling_8byte_DEFS := \
$(wear_leveling_common_DEFS) \
-DBACKING_STORE_WRITE_SIZE=8 \
-DWEAR_LEVELING_BACKING_SIZE=48 \
-DWEAR_LEVELING_LOGICAL_SIZE=16
wear_leveling_8byte_SRC := \
$(wear_leveling_common_SRC) \
$(QUANTUM_PATH)/wear_leveling/tests/wear_leveling_8byte.cpp
wear_leveling_8byte_INC := \
$(wear_leveling_common_INC)

+ 6
- 0
quantum/wear_leveling/tests/testlist.mk View File

@ -0,0 +1,6 @@
# Register the wear-leveling test binaries with the top-level test harness.
TEST_LIST += \
wear_leveling_general \
wear_leveling_2byte_optimized_writes \
wear_leveling_2byte \
wear_leveling_4byte \
wear_leveling_8byte

+ 228
- 0
quantum/wear_leveling/tests/wear_leveling_2byte.cpp View File

@ -0,0 +1,228 @@
// Copyright 2022 Nick Brassel (@tzarc)
// SPDX-License-Identifier: GPL-2.0-or-later
#include <numeric>
#include "gtest/gtest.h"
#include "gmock/gmock.h"
#include "backing_mocks.hpp"
// Fixture: each test starts from freshly-reset mock storage and a
// re-initialised wear-leveling driver.
class WearLeveling2Byte : public ::testing::Test {
protected:
void SetUp() override {
MockBackingStore::Instance().reset_instance();
wear_leveling_init();
}
};
// Mirror of the expected logical contents, updated on every test_write() so
// tests can compare driver readback against it.
static std::array<std::uint8_t, WEAR_LEVELING_LOGICAL_SIZE> verify_data;
// Write helper: record the expected bytes, then delegate to the driver.
static wear_leveling_status_t test_write(const uint32_t address, const void* value, size_t length) {
memcpy(&verify_data[address], value, length);
return wear_leveling_write(address, value, length);
}
/**
* This test verifies that the first write after initialisation occurs after the FNV1a_64 hash location.
*/
TEST_F(WearLeveling2Byte, FirstWriteOccursAfterHash) {
auto& inst = MockBackingStore::Instance();
uint8_t test_value = 0x15;
test_write(0x02, &test_value, sizeof(test_value));
// Write log region begins after the logical area plus the 8-byte FNV1a_64 hash.
EXPECT_EQ(inst.log_begin()->address, WEAR_LEVELING_LOGICAL_SIZE + 8) << "Invalid first write address.";
}
/**
* This test verifies that the first write after initialisation occurs after the FNV1a_64 hash location, after an erase has occurred.
*/
TEST_F(WearLeveling2Byte, FirstWriteOccursAfterHash_AfterErase) {
auto& inst = MockBackingStore::Instance();
uint8_t test_value = 0x15;
wear_leveling_erase();
test_write(0x02, &test_value, sizeof(test_value));
// log entry [0] is the erase marker; [1] is the first actual write.
EXPECT_EQ((inst.log_begin() + 1)->address, WEAR_LEVELING_LOGICAL_SIZE + 8) << "Invalid first write address.";
}
/**
* This test forces consolidation by writing enough to the write log that it overflows, consolidating the data into the
* base logical area.
*/
TEST_F(WearLeveling2Byte, ConsolidationOverflow) {
auto& inst = MockBackingStore::Instance();
// Generate a test block of data which forces OPTIMIZED_64 writes
std::array<std::uint8_t, WEAR_LEVELING_LOGICAL_SIZE> testvalue;
// Write the data
std::iota(testvalue.begin(), testvalue.end(), 0x20);
EXPECT_EQ(test_write(0, testvalue.data(), testvalue.size()), WEAR_LEVELING_CONSOLIDATED) << "Write returned incorrect status";
uint8_t dummy = 0x40;
EXPECT_EQ(test_write(0x04, &dummy, sizeof(dummy)), WEAR_LEVELING_SUCCESS) << "Write returned incorrect status";
// All writes are at address<64, so each logical byte written will generate 1 write log entry, thus 1 backing store write.
// Expected log:
// [0..11]: optimised64, backing address 0x18, logical address 0x00
// [12]: erase
// [13..20]: consolidated data, backing address 0x00, logical address 0x00
// [21..24]: FNV1a_64 result, backing address 0x10
// [25]: optimised64, backing address 0x18, logical address 0x04
EXPECT_EQ(std::distance(inst.log_begin(), inst.log_end()), 26);
// Verify the backing store writes for the write log
std::size_t index;
write_log_entry_t e;
for (index = 0; index < 12; ++index) {
auto write_iter = inst.log_begin() + index;
EXPECT_EQ(write_iter->address, WEAR_LEVELING_LOGICAL_SIZE + 8 + (index * BACKING_STORE_WRITE_SIZE)) << "Invalid write log address";
e.raw16[0] = write_iter->value;
EXPECT_EQ(LOG_ENTRY_GET_TYPE(e), LOG_ENTRY_TYPE_OPTIMIZED_64) << "Invalid write log entry type";
}
// Verify the backing store erase
{
index = 12;
auto write_iter = inst.log_begin() + index;
// (value is meaningless for an erase marker; only the erased flag matters)
e.raw16[0] = write_iter->value;
EXPECT_TRUE(write_iter->erased) << "Backing store erase did not occur as required";
}
// Verify the backing store writes for consolidation
for (index = 13; index < 21; ++index) {
auto write_iter = inst.log_begin() + index;
EXPECT_EQ(write_iter->address, (index - 13) * BACKING_STORE_WRITE_SIZE) << "Invalid write log entry address";
}
// Verify the FNV1a_64 write
{
EXPECT_EQ((inst.log_begin() + 21)->address, WEAR_LEVELING_LOGICAL_SIZE) << "Invalid write log address";
// Reassemble the 64-bit hash from its four 16-bit backing writes.
e.raw16[0] = (inst.log_begin() + 21)->value;
e.raw16[1] = (inst.log_begin() + 22)->value;
e.raw16[2] = (inst.log_begin() + 23)->value;
e.raw16[3] = (inst.log_begin() + 24)->value;
EXPECT_EQ(e.raw64, fnv_64a_buf(testvalue.data(), testvalue.size(), FNV1A_64_INIT)) << "Invalid checksum"; // Note that checksum is based on testvalue, as we overwrote one byte and need to consult the consolidated data, not the current
}
// Verify the final write
EXPECT_EQ((inst.log_begin() + 25)->address, WEAR_LEVELING_LOGICAL_SIZE + 8) << "Invalid write log address";
// Verify the data is what we expected
std::array<std::uint8_t, WEAR_LEVELING_LOGICAL_SIZE> readback;
EXPECT_EQ(wear_leveling_read(0, readback.data(), WEAR_LEVELING_LOGICAL_SIZE), WEAR_LEVELING_SUCCESS) << "Failed to read back the saved data";
EXPECT_TRUE(memcmp(readback.data(), verify_data.data(), WEAR_LEVELING_LOGICAL_SIZE) == 0) << "Readback did not match";
// Re-init and re-read, verifying the reload capability
EXPECT_NE(wear_leveling_init(), WEAR_LEVELING_FAILED) << "Re-initialisation failed";
EXPECT_EQ(wear_leveling_read(0, readback.data(), WEAR_LEVELING_LOGICAL_SIZE), WEAR_LEVELING_SUCCESS) << "Failed to read back the saved data";
EXPECT_TRUE(memcmp(readback.data(), verify_data.data(), WEAR_LEVELING_LOGICAL_SIZE) == 0) << "Readback did not match";
}
/**
* This test verifies multibyte readback gets canceled with an out-of-bounds address.
*/
TEST_F(WearLeveling2Byte, PlaybackReadbackMultibyte_OOB) {
auto& inst = MockBackingStore::Instance();
// Start of the hash+write-log region, directly after the logical area.
auto logstart = inst.storage_begin() + (WEAR_LEVELING_LOGICAL_SIZE / sizeof(backing_store_int_t));
// Invalid FNV1a_64 hash
(logstart + 0)->set(0);
(logstart + 1)->set(0);
(logstart + 2)->set(0);
(logstart + 3)->set(0);
// Entries are stored complemented (~) because the mock emulates flash.
// Set up a 2-byte logical write of [0x11,0x12] at logical offset 0x01
auto entry0 = LOG_ENTRY_MAKE_MULTIBYTE(0x01, 2);
entry0.raw8[3] = 0x11;
entry0.raw8[4] = 0x12;
(logstart + 4)->set(~entry0.raw16[0]);
(logstart + 5)->set(~entry0.raw16[1]);
(logstart + 6)->set(~entry0.raw16[2]);
// Set up a 2-byte logical write of [0x13,0x14] at logical offset 0x1000 (out of bounds)
auto entry1 = LOG_ENTRY_MAKE_MULTIBYTE(0x1000, 2);
entry1.raw8[3] = 0x13;
entry1.raw8[4] = 0x14;
(logstart + 7)->set(~entry1.raw16[0]);
(logstart + 8)->set(~entry1.raw16[1]);
(logstart + 9)->set(~entry1.raw16[2]);
// Set up a 2-byte logical write of [0x15,0x16] at logical offset 0x01
auto entry2 = LOG_ENTRY_MAKE_MULTIBYTE(0x01, 2);
entry2.raw8[3] = 0x15;
entry2.raw8[4] = 0x16;
(logstart + 10)->set(~entry2.raw16[0]);
(logstart + 11)->set(~entry2.raw16[1]);
(logstart + 12)->set(~entry2.raw16[2]);
EXPECT_EQ(inst.erasure_count(), 0) << "Invalid initial erase count";
// Playback must stop at the OOB entry: entries after it are discarded.
EXPECT_EQ(wear_leveling_init(), WEAR_LEVELING_CONSOLIDATED) << "Readback should have failed and triggered consolidation";
EXPECT_EQ(inst.erasure_count(), 1) << "Invalid final erase count";
uint8_t buf[2];
wear_leveling_read(0x01, buf, sizeof(buf));
EXPECT_EQ(buf[0], 0x11) << "Readback should have maintained the previous pre-failure value from the write log";
EXPECT_EQ(buf[1], 0x12) << "Readback should have maintained the previous pre-failure value from the write log";
}
/**
* This test verifies optimized 64 readback gets canceled with an out-of-bounds address.
*/
TEST_F(WearLeveling2Byte, PlaybackReadbackOptimized64_OOB) {
auto& inst = MockBackingStore::Instance();
// Start of the hash+write-log region, directly after the logical area.
auto logstart = inst.storage_begin() + (WEAR_LEVELING_LOGICAL_SIZE / sizeof(backing_store_int_t));
// Invalid FNV1a_64 hash
(logstart + 0)->set(0);
(logstart + 1)->set(0);
(logstart + 2)->set(0);
(logstart + 3)->set(0);
// Entries are stored complemented (~) because the mock emulates flash.
// Set up a 1-byte logical write of 0x11 at logical offset 0x01
auto entry0 = LOG_ENTRY_MAKE_OPTIMIZED_64(0x01, 0x11);
(logstart + 4)->set(~entry0.raw16[0]);
// Set up a 1-byte logical write of 0x11 at logical offset 0x30 (out of bounds)
auto entry1 = LOG_ENTRY_MAKE_OPTIMIZED_64(0x30, 0x11);
(logstart + 5)->set(~entry1.raw16[0]);
// Set up a 1-byte logical write of 0x12 at logical offset 0x01
auto entry2 = LOG_ENTRY_MAKE_OPTIMIZED_64(0x01, 0x12);
(logstart + 6)->set(~entry2.raw16[0]);
EXPECT_EQ(inst.erasure_count(), 0) << "Invalid initial erase count";
// Playback must stop at the OOB entry: the later 0x12 write is discarded.
EXPECT_EQ(wear_leveling_init(), WEAR_LEVELING_CONSOLIDATED) << "Readback should have failed and triggered consolidation";
EXPECT_EQ(inst.erasure_count(), 1) << "Invalid final erase count";
uint8_t tmp;
wear_leveling_read(0x01, &tmp, sizeof(tmp));
EXPECT_EQ(tmp, 0x11) << "Readback should have maintained the previous pre-failure value from the write log";
}
/**
 * This test verifies word 0/1 readback gets canceled with an out-of-bounds address.
 */
TEST_F(WearLeveling2Byte, PlaybackReadbackWord01_OOB) {
    auto& inst     = MockBackingStore::Instance();
    auto  logstart = inst.storage_begin() + (WEAR_LEVELING_LOGICAL_SIZE / sizeof(backing_store_int_t));
    // Zero out all four FNV1a_64 hash words so the consolidated-area checksum cannot match
    for (int i = 0; i < 4; ++i) {
        (logstart + i)->set(0);
    }
    // Seed three word-0/1 log entries: 1 @ 0x02, then 1 @ 0x1000 (out of bounds), then 0 @ 0x02
    auto entry0 = LOG_ENTRY_MAKE_WORD_01(0x02, 1);
    auto entry1 = LOG_ENTRY_MAKE_WORD_01(0x1000, 1);
    auto entry2 = LOG_ENTRY_MAKE_WORD_01(0x02, 0);
    (logstart + 4)->set(~entry0.raw16[0]); // offset 4 skips the FNV1a_64 hash words
    (logstart + 5)->set(~entry1.raw16[0]);
    (logstart + 6)->set(~entry2.raw16[0]);
    // Playback must abort at the out-of-bounds entry and consolidate what was replayed so far
    EXPECT_EQ(inst.erasure_count(), 0) << "Invalid initial erase count";
    EXPECT_EQ(wear_leveling_init(), WEAR_LEVELING_CONSOLIDATED) << "Readback should have failed and triggered consolidation";
    EXPECT_EQ(inst.erasure_count(), 1) << "Invalid final erase count";
    uint8_t tmp;
    wear_leveling_read(0x02, &tmp, sizeof(tmp));
    EXPECT_EQ(tmp, 1) << "Readback should have maintained the previous pre-failure value from the write log";
}

+ 295
- 0
quantum/wear_leveling/tests/wear_leveling_2byte_optimized_writes.cpp View File

@ -0,0 +1,295 @@
// Copyright 2022 Nick Brassel (@tzarc)
// SPDX-License-Identifier: GPL-2.0-or-later
#include <numeric>
#include "gtest/gtest.h"
#include "gmock/gmock.h"
#include "backing_mocks.hpp"
class WearLeveling2ByteOptimizedWrites : public ::testing::Test {
   protected:
    // Reset the mock backing store and re-initialise the wear-leveling layer before each test.
    void SetUp() override {
        auto& inst = MockBackingStore::Instance();
        inst.reset_instance();
        wear_leveling_init();
    }
};
// Shadow copy of the expected logical contents; updated alongside every logical write.
static std::array<std::uint8_t, WEAR_LEVELING_LOGICAL_SIZE> verify_data;

// Performs a wear-leveling write while mirroring the payload into `verify_data`,
// so subsequent readbacks can be compared against the expected logical contents.
static wear_leveling_status_t test_write(const uint32_t address, const void* value, size_t length) {
    memcpy(verify_data.data() + address, value, length);
    return wear_leveling_write(address, value, length);
}
/**
 * This test ensures the correct number of backing store writes occurs with a multibyte write, given the input buffer size.
 */
TEST_F(WearLeveling2ByteOptimizedWrites, MultibyteBackingStoreWriteCounts) {
    auto& inst = MockBackingStore::Instance();
    for (std::size_t length = 1; length <= 5; ++length) {
        // Start each iteration from a clean slate
        std::fill(verify_data.begin(), verify_data.end(), 0);
        inst.reset_instance();
        wear_leveling_init();
        // Build an ascending test pattern of `length` bytes starting at 0x20
        std::vector<std::uint8_t> testvalue(length);
        std::iota(testvalue.begin(), testvalue.end(), 0x20);
        // Write the data
        EXPECT_EQ(test_write(2000, testvalue.data(), testvalue.size()), WEAR_LEVELING_SUCCESS) << "Write failed with incorrect status";
        // 1 byte => 2 backing writes; 2-3 bytes => 3; 4-5 bytes => 4
        const std::size_t expected = (length > 3) ? 4 : ((length > 1) ? 3 : 2);
        // Check that we got the expected number of write log entries
        EXPECT_EQ(std::distance(inst.log_begin(), inst.log_end()), expected);
    }
}
/**
 * This test runs through writing U16 values of `0` or `1` over the entire logical address range, to even addresses only.
 * - Addresses <16384 will result in a single optimised backing write
 * - Higher addresses will result in a multibyte write of 3 backing writes
 */
TEST_F(WearLeveling2ByteOptimizedWrites, WriteOneThenZeroToEvenAddresses) {
    auto& inst = MockBackingStore::Instance();
    // Only attempt writes for each address up to a limit that would NOT force a consolidated data write.
    std::size_t writes_per_loop = (MOCK_WRITE_LOG_MAX_ENTRIES::value / 6) - 1; // Worst case is 6 writes for each pair of writes of 0/1
    // Initialised to zero: previously left indeterminate, which made the final EXPECT read
    // an uninitialised value (undefined behaviour) if the loops never performed a write.
    std::size_t final_address = 0;
    for (uint32_t address = 0; address < WEAR_LEVELING_LOGICAL_SIZE; address += (writes_per_loop * 2)) {
        // Clear things out
        std::fill(verify_data.begin(), verify_data.end(), 0);
        inst.reset_instance();
        wear_leveling_init();
        // Loop through all the addresses in this range
        std::size_t expected = 0;
        for (uint32_t offset = 0; offset < (writes_per_loop * 2); offset += 2) {
            // If we're about to exceed the limit of the logical store, skip the writes
            if (address + offset + 2 > WEAR_LEVELING_LOGICAL_SIZE) {
                break;
            }
            // The default erased value of the wear-leveling cache is zero, so we write a one first, then a zero, to ensure a backing store write occurs.
            uint16_t val = 1;
            EXPECT_EQ(test_write(address + offset, &val, sizeof(val)), WEAR_LEVELING_SUCCESS) << "Write failed with incorrect status";
            val = 0;
            EXPECT_EQ(test_write(address + offset, &val, sizeof(val)), WEAR_LEVELING_SUCCESS) << "Write failed with incorrect status";
            std::size_t backing_store_writes_expected = 0;
            if (address + offset < 16384) {
                // A U16 value of 0/1 at an even address <16384 will result in 1 backing write each, so we need 2 backing writes for 2 logical writes
                backing_store_writes_expected = 2;
            } else {
                // All other addresses result in a multibyte write (3 backing store writes) to write two local bytes of data
                backing_store_writes_expected = 6;
            }
            // Keep track of the total number of expected writes to the backing store
            expected += backing_store_writes_expected;
            // Verify we're at the correct number of writes
            EXPECT_EQ(std::distance(inst.log_begin(), inst.log_end()), expected) << "Write log doesn't match required number of backing store writes for address " << (address + offset);
            // Verify that the write log entries we expect are actually present
            std::size_t       write_index = expected - backing_store_writes_expected;
            auto              write_iter  = inst.log_begin() + write_index;
            write_log_entry_t e;
            if (address + offset < 16384) {
                // A U16 value of 0/1 at an even address <16384 will result in 1 backing write each, so we need 2 backing writes for 2 logical writes
                for (std::size_t i = 0; i < 2; ++i) {
                    e.raw16[0] = write_iter->value;
                    EXPECT_EQ(LOG_ENTRY_GET_TYPE(e), LOG_ENTRY_TYPE_WORD_01) << "Invalid write log entry type at " << (address + offset);
                    ++write_iter;
                }
            } else {
                // Multibyte write
                e.raw16[0] = write_iter->value;
                EXPECT_EQ(LOG_ENTRY_GET_TYPE(e), LOG_ENTRY_TYPE_MULTIBYTE) << "Invalid write log entry type at " << (address + offset);
                EXPECT_EQ(LOG_ENTRY_MULTIBYTE_GET_LENGTH(e), 2) << "Invalid write log entry length at " << (address + offset);
                ++write_iter;
            }
            // Keep track of the final address written, so we can verify the entire logical range was handled
            final_address = address + offset;
        }
        // Verify the number of writes that occurred to the backing store
        size_t backing_write_count = std::distance(inst.log_begin(), inst.log_end());
        EXPECT_EQ(backing_write_count, expected) << "Invalid write count at address " << address;
        // Verify the data is what we expected
        std::array<std::uint8_t, WEAR_LEVELING_LOGICAL_SIZE> readback;
        EXPECT_EQ(wear_leveling_read(0, readback.data(), WEAR_LEVELING_LOGICAL_SIZE), WEAR_LEVELING_SUCCESS) << "Failed to read back the saved data";
        EXPECT_TRUE(memcmp(readback.data(), verify_data.data(), WEAR_LEVELING_LOGICAL_SIZE) == 0) << "Readback for address " << address << " did not match";
        // Re-init and re-read, testing the reload capability
        EXPECT_NE(wear_leveling_init(), WEAR_LEVELING_FAILED) << "Re-initialisation failed";
        EXPECT_EQ(wear_leveling_read(0, readback.data(), WEAR_LEVELING_LOGICAL_SIZE), WEAR_LEVELING_SUCCESS) << "Failed to read back the saved data";
        EXPECT_TRUE(memcmp(readback.data(), verify_data.data(), WEAR_LEVELING_LOGICAL_SIZE) == 0) << "Readback for address " << address << " did not match";
    }
    // Verify the full range of the logical area got written
    EXPECT_EQ(final_address, WEAR_LEVELING_LOGICAL_SIZE - 2) << "Invalid final write address";
}
/**
 * This test runs through writing U16 values of `0` or `1` over the entire logical address range, to odd addresses only.
 * - Addresses <63 will result in 2 optimised backing writes
 * - Address 63 results in a single optimised backing write for the first logical byte, and a multibyte write of 2 backing writes for the second logical byte
 * - Higher addresses will result in a multibyte write of 3 backing writes
 */
TEST_F(WearLeveling2ByteOptimizedWrites, WriteOneThenZeroToOddAddresses) {
    auto& inst = MockBackingStore::Instance();
    // Only attempt writes for each address up to a limit that would NOT force a consolidated data write.
    std::size_t writes_per_loop = (MOCK_WRITE_LOG_MAX_ENTRIES::value / 6) - 1; // Worst case is 6 writes for each pair of writes of 0/1
    // Initialised to zero: previously left indeterminate, which made the final EXPECT read
    // an uninitialised value (undefined behaviour) if the loops never performed a write.
    std::size_t final_address = 0;
    for (uint32_t address = 1; address < WEAR_LEVELING_LOGICAL_SIZE; address += (writes_per_loop * 2)) {
        // Clear things out
        std::fill(verify_data.begin(), verify_data.end(), 0);
        inst.reset_instance();
        wear_leveling_init();
        // Loop through all the addresses in this range
        std::size_t expected = 0;
        for (uint32_t offset = 0; offset < (writes_per_loop * 2); offset += 2) {
            // If we're about to exceed the limit of the logical store, skip the writes
            if (address + offset + 2 > WEAR_LEVELING_LOGICAL_SIZE) {
                break;
            }
            // The default erased value of the wear-leveling cache is zero, so we write a one first, then a zero, to ensure a backing store write occurs.
            uint16_t val = 1;
            EXPECT_EQ(test_write(address + offset, &val, sizeof(val)), WEAR_LEVELING_SUCCESS) << "Write failed with incorrect status";
            val = 0;
            EXPECT_EQ(test_write(address + offset, &val, sizeof(val)), WEAR_LEVELING_SUCCESS) << "Write failed with incorrect status";
            std::size_t backing_store_writes_expected = 0;
            if (address + offset < 63) {
                // A U16 value of 0/1 at an odd address <64 will result in 2 backing writes each, so we need 4 backing writes for 2 logical writes
                backing_store_writes_expected = 4;
            } else if (address + offset == 63) {
                // If we're straddling the boundary for optimised bytes (addr==64), then the first logical byte is written using the optimised write (1 backing
                // store write), and the second logical byte uses a multibyte write (2 backing store writes)
                backing_store_writes_expected = 2  // First logical bytes written using optimised log entries
                                                + 4; // Second logical bytes written using multibyte log entries
            } else {
                // All other addresses result in a multibyte write (3 backing store writes) to write two local bytes of data
                backing_store_writes_expected = 6;
            }
            // Keep track of the total number of expected writes to the backing store
            expected += backing_store_writes_expected;
            // Verify we're at the correct number of writes
            EXPECT_EQ(std::distance(inst.log_begin(), inst.log_end()), expected) << "Write log doesn't match required number of backing store writes for address " << (address + offset);
            // Verify that the write log entries we expect are actually present
            std::size_t       write_index = expected - backing_store_writes_expected;
            auto              write_iter  = inst.log_begin() + write_index;
            write_log_entry_t e;
            if (address + offset < 63) {
                // A U16 value of 0/1 at an odd address <64 will result in 2 backing writes each, so we need 4 backing writes for 2 logical writes
                for (std::size_t i = 0; i < 4; ++i) {
                    e.raw16[0] = write_iter->value;
                    EXPECT_EQ(LOG_ENTRY_GET_TYPE(e), LOG_ENTRY_TYPE_OPTIMIZED_64) << "Invalid write log entry type";
                    ++write_iter;
                }
            } else if (address + offset == 63) {
                // First log entry is the 64-addr optimised one
                e.raw16[0] = write_iter->value;
                EXPECT_EQ(LOG_ENTRY_GET_TYPE(e), LOG_ENTRY_TYPE_OPTIMIZED_64) << "Invalid write log entry type";
                ++write_iter;
                // Second log entry is the multibyte entry for the second logical byte
                e.raw16[0] = write_iter->value;
                EXPECT_EQ(LOG_ENTRY_GET_TYPE(e), LOG_ENTRY_TYPE_MULTIBYTE) << "Invalid write log entry type";
                EXPECT_EQ(LOG_ENTRY_MULTIBYTE_GET_LENGTH(e), 1) << "Invalid write log entry length";
                ++write_iter;
            } else {
                // Multibyte write
                e.raw16[0] = write_iter->value;
                EXPECT_EQ(LOG_ENTRY_GET_TYPE(e), LOG_ENTRY_TYPE_MULTIBYTE) << "Invalid write log entry type";
                EXPECT_EQ(LOG_ENTRY_MULTIBYTE_GET_LENGTH(e), 2) << "Invalid write log entry length";
                ++write_iter;
            }
            // Keep track of the final address written, so we can verify the entire logical range was handled
            final_address = address + offset;
        }
        // Verify the number of writes that occurred to the backing store
        size_t backing_write_count = std::distance(inst.log_begin(), inst.log_end());
        EXPECT_EQ(backing_write_count, expected) << "Invalid write count at address " << address;
        // Verify the data is what we expected
        std::array<std::uint8_t, WEAR_LEVELING_LOGICAL_SIZE> readback;
        EXPECT_EQ(wear_leveling_read(0, readback.data(), WEAR_LEVELING_LOGICAL_SIZE), WEAR_LEVELING_SUCCESS) << "Failed to read back the saved data";
        EXPECT_TRUE(memcmp(readback.data(), verify_data.data(), WEAR_LEVELING_LOGICAL_SIZE) == 0) << "Readback for address " << address << " did not match";
        // Re-init and re-read, testing the reload capability
        EXPECT_NE(wear_leveling_init(), WEAR_LEVELING_FAILED) << "Re-initialisation failed";
        EXPECT_EQ(wear_leveling_read(0, readback.data(), WEAR_LEVELING_LOGICAL_SIZE), WEAR_LEVELING_SUCCESS) << "Failed to read back the saved data";
        EXPECT_TRUE(memcmp(readback.data(), verify_data.data(), WEAR_LEVELING_LOGICAL_SIZE) == 0) << "Readback for address " << address << " did not match";
    }
    // Verify the full range of the logical area got written
    EXPECT_EQ(final_address, WEAR_LEVELING_LOGICAL_SIZE - 3) << "Invalid final write address";
}
/**
 * This test verifies readback after playback of the write log, simulating power loss and reboot.
 */
TEST_F(WearLeveling2ByteOptimizedWrites, PlaybackReadbackOptimized64_Success) {
    auto& inst     = MockBackingStore::Instance();
    auto  logstart = inst.storage_begin() + (WEAR_LEVELING_LOGICAL_SIZE / sizeof(backing_store_int_t));
    // Zero out all four FNV1a_64 hash words so the consolidated-area checksum cannot match
    for (int i = 0; i < 4; ++i) {
        (logstart + i)->set(0);
    }
    // Seed a single log entry: 1-byte logical write of 0x11 at logical offset 0x01
    auto entry0 = LOG_ENTRY_MAKE_OPTIMIZED_64(0x01, 0x11);
    (logstart + 4)->set(~entry0.raw16[0]); // start at offset 4 to skip FNV1a_64 result
    // Simulate reboot: init replays the write log into the cache
    wear_leveling_init();
    uint8_t tmp;
    wear_leveling_read(0x01, &tmp, sizeof(tmp));
    EXPECT_EQ(tmp, 0x11) << "Failed to read back the seeded data";
}
/**
 * This test verifies readback after playback of the write log, simulating power loss and reboot.
 */
TEST_F(WearLeveling2ByteOptimizedWrites, PlaybackReadbackWord01_Success) {
    auto& inst     = MockBackingStore::Instance();
    auto  logstart = inst.storage_begin() + (WEAR_LEVELING_LOGICAL_SIZE / sizeof(backing_store_int_t));
    // Zero out all four FNV1a_64 hash words so the consolidated-area checksum cannot match
    for (int i = 0; i < 4; ++i) {
        (logstart + i)->set(0);
    }
    // Seed a single log entry: 1-byte logical write of 1 at logical offset 0x02
    auto entry0 = LOG_ENTRY_MAKE_WORD_01(0x02, 1);
    (logstart + 4)->set(~entry0.raw16[0]); // start at offset 4 to skip FNV1a_64 result
    // Simulate reboot: init replays the write log into the cache
    wear_leveling_init();
    uint8_t tmp;
    wear_leveling_read(0x02, &tmp, sizeof(tmp));
    EXPECT_EQ(tmp, 1) << "Failed to read back the seeded data";
}

+ 193
- 0
quantum/wear_leveling/tests/wear_leveling_4byte.cpp View File

@ -0,0 +1,193 @@
// Copyright 2022 Nick Brassel (@tzarc)
// SPDX-License-Identifier: GPL-2.0-or-later
#include <numeric>
#include "gtest/gtest.h"
#include "gmock/gmock.h"
#include "backing_mocks.hpp"
class WearLeveling4Byte : public ::testing::Test {
   protected:
    // Reset the mock backing store and re-initialise the wear-leveling layer before each test.
    void SetUp() override {
        auto& inst = MockBackingStore::Instance();
        inst.reset_instance();
        wear_leveling_init();
    }
};
// Shadow copy of the expected logical contents; updated alongside every logical write.
static std::array<std::uint8_t, WEAR_LEVELING_LOGICAL_SIZE> verify_data;

// Performs a wear-leveling write while mirroring the payload into `verify_data`,
// so subsequent readbacks can be compared against the expected logical contents.
static wear_leveling_status_t test_write(const uint32_t address, const void* value, size_t length) {
    memcpy(verify_data.data() + address, value, length);
    return wear_leveling_write(address, value, length);
}
/**
 * This test verifies that the first write after initialisation occurs after the FNV1a_64 hash location.
 */
TEST_F(WearLeveling4Byte, FirstWriteOccursAfterHash) {
    auto&         inst       = MockBackingStore::Instance();
    const uint8_t test_value = 0x15;
    test_write(0x02, &test_value, sizeof(test_value));
    // The write log begins after the logical area plus the 8-byte FNV1a_64 hash slot
    EXPECT_EQ(inst.log_begin()->address, WEAR_LEVELING_LOGICAL_SIZE + 8) << "Invalid first write address.";
}
/**
 * This test verifies that the first write after initialisation occurs after the FNV1a_64 hash location, after an erase has occurred.
 */
TEST_F(WearLeveling4Byte, FirstWriteOccursAfterHash_AfterErase) {
    auto&         inst       = MockBackingStore::Instance();
    const uint8_t test_value = 0x15;
    wear_leveling_erase();
    test_write(0x02, &test_value, sizeof(test_value));
    // Log record [0] is expected to be the erase itself, so the first data write is record [1],
    // landing after the logical area plus the 8-byte FNV1a_64 hash slot
    EXPECT_EQ((inst.log_begin() + 1)->address, WEAR_LEVELING_LOGICAL_SIZE + 8) << "Invalid first write address.";
}
/**
 * This test ensures the correct number of backing store writes occurs with a multibyte write, given the input buffer size.
 */
TEST_F(WearLeveling4Byte, MultibyteBackingStoreWriteCounts) {
    auto& inst = MockBackingStore::Instance();
    for (std::size_t length = 1; length <= 5; ++length) {
        // Start each iteration from a clean slate
        std::fill(verify_data.begin(), verify_data.end(), 0);
        inst.reset_instance();
        wear_leveling_init();
        // Build an ascending test pattern of `length` bytes starting at 0x20
        std::vector<std::uint8_t> testvalue(length);
        std::iota(testvalue.begin(), testvalue.end(), 0x20);
        // Write the data
        EXPECT_EQ(test_write(0, testvalue.data(), testvalue.size()), WEAR_LEVELING_SUCCESS) << "Write failed with incorrect status";
        // 1 byte => 1 backing write; 2-5 bytes => 2 backing writes
        const std::size_t expected = (length > 1) ? 2 : 1;
        // Check that we got the expected number of write log entries
        EXPECT_EQ(std::distance(inst.log_begin(), inst.log_end()), expected);
    }
}
/**
 * This test forces consolidation by writing enough to the write log that it overflows, consolidating the data into the
 * base logical area.
 */
TEST_F(WearLeveling4Byte, ConsolidationOverflow) {
    auto& inst = MockBackingStore::Instance();
    // Generate a test block of data
    std::array<std::uint8_t, WEAR_LEVELING_LOGICAL_SIZE> testvalue;
    // Write the data
    std::iota(testvalue.begin(), testvalue.end(), 0x20);
    EXPECT_EQ(test_write(0, testvalue.data(), testvalue.size()), WEAR_LEVELING_CONSOLIDATED) << "Write returned incorrect status";
    uint8_t dummy = 0x40;
    EXPECT_EQ(test_write(0x04, &dummy, sizeof(dummy)), WEAR_LEVELING_SUCCESS) << "Write returned incorrect status";
    // Expected log:
    // [0,1]: multibyte, 5 bytes, backing address 0x18, logical address 0x00
    // [2,3]: multibyte, 5 bytes, backing address 0x20, logical address 0x05
    // [4,5]: multibyte, 5 bytes, backing address 0x28, logical address 0x0A, triggers consolidation
    // [6]: erase
    // [7,8]: consolidated data, backing address 0x00, logical address 0x00
    // [9,10]: consolidated data, backing address 0x08, logical address 0x08
    // [11,12]: FNV1a_64 result, backing address 0x10
    // [13]: multibyte, 1 byte, backing address 0x18, logical address 0x04
    EXPECT_EQ(std::distance(inst.log_begin(), inst.log_end()), 14);
    // Verify the backing store writes for the write log
    std::size_t       index;
    write_log_entry_t e;
    for (index = 0; index < 6; ++index) {
        auto write_iter = inst.log_begin() + index;
        EXPECT_EQ(write_iter->address, WEAR_LEVELING_LOGICAL_SIZE + 8 + (index * BACKING_STORE_WRITE_SIZE)) << "Invalid write log address";
        // If this is the backing store write that contains the metadata, verify it
        // (re-uses the outer `e`; the inner re-declaration previously shadowed it)
        if (index % 2 == 0) {
            e.raw64 = write_iter->value;
            EXPECT_EQ(LOG_ENTRY_GET_TYPE(e), LOG_ENTRY_TYPE_MULTIBYTE) << "Invalid write log entry type";
        }
    }
    // Verify the backing store erase
    {
        index           = 6;
        auto write_iter = inst.log_begin() + index;
        // (dead store of `e.raw64` removed — only the erased flag matters here)
        EXPECT_TRUE(write_iter->erased) << "Backing store erase did not occur as required";
    }
    // Verify the backing store writes for consolidation
    for (index = 7; index < 11; ++index) {
        auto write_iter = inst.log_begin() + index;
        EXPECT_EQ(write_iter->address, (index - 7) * BACKING_STORE_WRITE_SIZE) << "Invalid write log entry address";
    }
    // Verify the FNV1a_64 write
    {
        EXPECT_EQ((inst.log_begin() + 11)->address, WEAR_LEVELING_LOGICAL_SIZE) << "Invalid write log address";
        e.raw32[0] = (inst.log_begin() + 11)->value;
        e.raw32[1] = (inst.log_begin() + 12)->value;
        EXPECT_EQ(e.raw64, fnv_64a_buf(testvalue.data(), testvalue.size(), FNV1A_64_INIT)) << "Invalid checksum"; // Note that checksum is based on testvalue, as we overwrote one byte and need to consult the consolidated data, not the current
    }
    // Verify the final write
    EXPECT_EQ((inst.log_begin() + 13)->address, WEAR_LEVELING_LOGICAL_SIZE + 8) << "Invalid write log address";
    // Verify the data is what we expected
    std::array<std::uint8_t, WEAR_LEVELING_LOGICAL_SIZE> readback;
    EXPECT_EQ(wear_leveling_read(0, readback.data(), WEAR_LEVELING_LOGICAL_SIZE), WEAR_LEVELING_SUCCESS) << "Failed to read back the saved data";
    EXPECT_TRUE(memcmp(readback.data(), verify_data.data(), WEAR_LEVELING_LOGICAL_SIZE) == 0) << "Readback did not match";
    // Re-init and re-read, verifying the reload capability
    EXPECT_NE(wear_leveling_init(), WEAR_LEVELING_FAILED) << "Re-initialisation failed";
    EXPECT_EQ(wear_leveling_read(0, readback.data(), WEAR_LEVELING_LOGICAL_SIZE), WEAR_LEVELING_SUCCESS) << "Failed to read back the saved data";
    EXPECT_TRUE(memcmp(readback.data(), verify_data.data(), WEAR_LEVELING_LOGICAL_SIZE) == 0) << "Readback did not match";
}
/**
 * This test verifies multibyte readback gets canceled with an out-of-bounds address.
 *
 * The log is seeded with a valid entry, an out-of-bounds entry, then another valid entry;
 * the final assertions show playback stops at the bad entry, so only the first entry's data survives.
 */
TEST_F(WearLeveling4Byte, PlaybackReadbackMultibyte_OOB) {
    auto& inst = MockBackingStore::Instance();
    // The write log starts immediately after the consolidated logical area
    auto logstart = inst.storage_begin() + (WEAR_LEVELING_LOGICAL_SIZE / sizeof(backing_store_int_t));
    // Invalid FNV1a_64 hash
    (logstart + 0)->set(0);
    (logstart + 1)->set(0);
    // Set up a 2-byte logical write of [0x11,0x12] at logical offset 0x01
    auto entry0 = LOG_ENTRY_MAKE_MULTIBYTE(0x01, 2);
    entry0.raw8[3] = 0x11;
    entry0.raw8[4] = 0x12;
    (logstart + 2)->set(~entry0.raw32[0]);
    (logstart + 3)->set(~entry0.raw32[1]);
    // Set up a 2-byte logical write of [0x13,0x14] at logical offset 0x1000 (out of bounds)
    auto entry1 = LOG_ENTRY_MAKE_MULTIBYTE(0x1000, 2);
    entry1.raw8[3] = 0x13;
    entry1.raw8[4] = 0x14;
    (logstart + 4)->set(~entry1.raw32[0]);
    (logstart + 5)->set(~entry1.raw32[1]);
    // Set up a 2-byte logical write of [0x15,0x16] at logical offset 0x01 (comment previously
    // said 0x10, but the entry targets 0x01; it is never applied since playback aborts above)
    auto entry2 = LOG_ENTRY_MAKE_MULTIBYTE(0x01, 2);
    entry2.raw8[3] = 0x15;
    entry2.raw8[4] = 0x16;
    (logstart + 6)->set(~entry2.raw32[0]);
    (logstart + 7)->set(~entry2.raw32[1]);
    EXPECT_EQ(inst.erasure_count(), 0) << "Invalid initial erase count";
    EXPECT_EQ(wear_leveling_init(), WEAR_LEVELING_CONSOLIDATED) << "Readback should have failed and triggered consolidation";
    EXPECT_EQ(inst.erasure_count(), 1) << "Invalid final erase count";
    uint8_t buf[2];
    wear_leveling_read(0x01, buf, sizeof(buf));
    EXPECT_EQ(buf[0], 0x11) << "Readback should have maintained the previous pre-failure value from the write log";
    EXPECT_EQ(buf[1], 0x12) << "Readback should have maintained the previous pre-failure value from the write log";
}

+ 178
- 0
quantum/wear_leveling/tests/wear_leveling_8byte.cpp View File

@ -0,0 +1,178 @@
// Copyright 2022 Nick Brassel (@tzarc)
// SPDX-License-Identifier: GPL-2.0-or-later
#include <numeric>
#include "gtest/gtest.h"
#include "gmock/gmock.h"
#include "backing_mocks.hpp"
class WearLeveling8Byte : public ::testing::Test {
   protected:
    // Reset the mock backing store and re-initialise the wear-leveling layer before each test.
    void SetUp() override {
        auto& inst = MockBackingStore::Instance();
        inst.reset_instance();
        wear_leveling_init();
    }
};
// Shadow copy of the expected logical contents; updated alongside every logical write.
static std::array<std::uint8_t, WEAR_LEVELING_LOGICAL_SIZE> verify_data;

// Performs a wear-leveling write while mirroring the payload into `verify_data`,
// so subsequent readbacks can be compared against the expected logical contents.
static wear_leveling_status_t test_write(const uint32_t address, const void* value, size_t length) {
    memcpy(verify_data.data() + address, value, length);
    return wear_leveling_write(address, value, length);
}
/**
 * This test verifies that the first write after initialisation occurs after the FNV1a_64 hash location.
 */
TEST_F(WearLeveling8Byte, FirstWriteOccursAfterHash) {
    auto&         inst       = MockBackingStore::Instance();
    const uint8_t test_value = 0x15;
    test_write(0x02, &test_value, sizeof(test_value));
    // The write log begins after the logical area plus the 8-byte FNV1a_64 hash slot
    EXPECT_EQ(inst.log_begin()->address, WEAR_LEVELING_LOGICAL_SIZE + 8) << "Invalid first write address.";
}
/**
 * This test verifies that the first write after initialisation occurs after the FNV1a_64 hash location, after an erase has occurred.
 */
TEST_F(WearLeveling8Byte, FirstWriteOccursAfterHash_AfterErase) {
    auto&         inst       = MockBackingStore::Instance();
    const uint8_t test_value = 0x15;
    wear_leveling_erase();
    test_write(0x02, &test_value, sizeof(test_value));
    // Log record [0] is expected to be the erase itself, so the first data write is record [1],
    // landing after the logical area plus the 8-byte FNV1a_64 hash slot
    EXPECT_EQ((inst.log_begin() + 1)->address, WEAR_LEVELING_LOGICAL_SIZE + 8) << "Invalid first write address.";
}
/**
 * This test ensures the correct number of backing store writes occurs with a multibyte write, given the input buffer size.
 */
TEST_F(WearLeveling8Byte, MultibyteBackingStoreWriteCounts) {
    auto& inst = MockBackingStore::Instance();
    for (std::size_t length = 1; length <= 5; ++length) {
        // Start each iteration from a clean slate
        std::fill(verify_data.begin(), verify_data.end(), 0);
        inst.reset_instance();
        wear_leveling_init();
        // Build an ascending test pattern of `length` bytes starting at 0x20
        std::vector<std::uint8_t> testvalue(length);
        std::iota(testvalue.begin(), testvalue.end(), 0x20);
        // Write the data
        EXPECT_EQ(test_write(0, testvalue.data(), testvalue.size()), WEAR_LEVELING_SUCCESS) << "Write failed with incorrect status";
        // With an 8-byte backing width, buffers up to 5 bytes always fit in a single log entry
        EXPECT_EQ(std::distance(inst.log_begin(), inst.log_end()), 1);
    }
}
/**
 * This test forces consolidation by writing enough to the write log that it overflows, consolidating the data into the
 * base logical area.
 */
TEST_F(WearLeveling8Byte, ConsolidationOverflow) {
    auto& inst = MockBackingStore::Instance();
    // Generate a test block of data
    std::array<std::uint8_t, WEAR_LEVELING_LOGICAL_SIZE> testvalue;
    // Write the data
    std::iota(testvalue.begin(), testvalue.end(), 0x20);
    EXPECT_EQ(test_write(0, testvalue.data(), testvalue.size()), WEAR_LEVELING_CONSOLIDATED) << "Write returned incorrect status";
    uint8_t dummy = 0x40;
    EXPECT_EQ(test_write(0x04, &dummy, sizeof(dummy)), WEAR_LEVELING_SUCCESS) << "Write returned incorrect status";
    // Expected log:
    // [0]: multibyte, 5 bytes, backing address 0x18, logical address 0x00
    // [1]: multibyte, 5 bytes, backing address 0x20, logical address 0x05
    // [2]: multibyte, 5 bytes, backing address 0x28, logical address 0x0A, triggers consolidation
    // [3]: erase
    // [4]: consolidated data, backing address 0x00, logical address 0x00
    // [5]: consolidated data, backing address 0x08, logical address 0x08
    // [6]: FNV1a_64 result, backing address 0x10
    // [7]: multibyte, 1 byte, backing address 0x18, logical address 0x04
    EXPECT_EQ(std::distance(inst.log_begin(), inst.log_end()), 8);
    // Verify the backing store writes for the write log
    std::size_t       index;
    write_log_entry_t e;
    for (index = 0; index < 3; ++index) {
        auto write_iter = inst.log_begin() + index;
        EXPECT_EQ(write_iter->address, WEAR_LEVELING_LOGICAL_SIZE + 8 + (index * BACKING_STORE_WRITE_SIZE)) << "Invalid write log address";
        // Re-uses the outer `e`; the inner re-declaration previously shadowed it
        e.raw64 = write_iter->value;
        EXPECT_EQ(LOG_ENTRY_GET_TYPE(e), LOG_ENTRY_TYPE_MULTIBYTE) << "Invalid write log entry type";
    }
    // Verify the backing store erase
    {
        index           = 3;
        auto write_iter = inst.log_begin() + index;
        // (dead store of `e.raw64` removed — only the erased flag matters here)
        EXPECT_TRUE(write_iter->erased) << "Backing store erase did not occur as required";
    }
    // Verify the backing store writes for consolidation
    for (index = 4; index < 6; ++index) {
        auto write_iter = inst.log_begin() + index;
        EXPECT_EQ(write_iter->address, (index - 4) * BACKING_STORE_WRITE_SIZE) << "Invalid write log entry address";
    }
    // Verify the FNV1a_64 write
    {
        EXPECT_EQ((inst.log_begin() + 6)->address, WEAR_LEVELING_LOGICAL_SIZE) << "Invalid write log address";
        e.raw64 = (inst.log_begin() + 6)->value;
        EXPECT_EQ(e.raw64, fnv_64a_buf(testvalue.data(), testvalue.size(), FNV1A_64_INIT)) << "Invalid checksum"; // Note that checksum is based on testvalue, as we overwrote one byte and need to consult the consolidated data, not the current
    }
    // Verify the final write
    EXPECT_EQ((inst.log_begin() + 7)->address, WEAR_LEVELING_LOGICAL_SIZE + 8) << "Invalid write log address";
    // Verify the data is what we expected
    std::array<std::uint8_t, WEAR_LEVELING_LOGICAL_SIZE> readback;
    EXPECT_EQ(wear_leveling_read(0, readback.data(), WEAR_LEVELING_LOGICAL_SIZE), WEAR_LEVELING_SUCCESS) << "Failed to read back the saved data";
    EXPECT_TRUE(memcmp(readback.data(), verify_data.data(), WEAR_LEVELING_LOGICAL_SIZE) == 0) << "Readback did not match";
    // Re-init and re-read, verifying the reload capability
    EXPECT_NE(wear_leveling_init(), WEAR_LEVELING_FAILED) << "Re-initialisation failed";
    EXPECT_EQ(wear_leveling_read(0, readback.data(), WEAR_LEVELING_LOGICAL_SIZE), WEAR_LEVELING_SUCCESS) << "Failed to read back the saved data";
    EXPECT_TRUE(memcmp(readback.data(), verify_data.data(), WEAR_LEVELING_LOGICAL_SIZE) == 0) << "Readback did not match";
}
/**
 * This test verifies multibyte readback gets canceled with an out-of-bounds address.
 *
 * The log is seeded with a valid entry, an out-of-bounds entry, then another valid entry;
 * the final assertions show playback stops at the bad entry, so only the first entry's data survives.
 */
TEST_F(WearLeveling8Byte, PlaybackReadbackMultibyte_OOB) {
    auto& inst = MockBackingStore::Instance();
    // The write log starts immediately after the consolidated logical area
    auto logstart = inst.storage_begin() + (WEAR_LEVELING_LOGICAL_SIZE / sizeof(backing_store_int_t));
    // Invalid FNV1a_64 hash
    (logstart + 0)->set(0);
    // Set up a 2-byte logical write of [0x11,0x12] at logical offset 0x01
    auto entry0 = LOG_ENTRY_MAKE_MULTIBYTE(0x01, 2);
    entry0.raw8[3] = 0x11;
    entry0.raw8[4] = 0x12;
    (logstart + 1)->set(~entry0.raw64);
    // Set up a 2-byte logical write of [0x13,0x14] at logical offset 0x1000 (out of bounds)
    auto entry1 = LOG_ENTRY_MAKE_MULTIBYTE(0x1000, 2);
    entry1.raw8[3] = 0x13;
    entry1.raw8[4] = 0x14;
    (logstart + 2)->set(~entry1.raw64);
    // Set up a 2-byte logical write of [0x15,0x16] at logical offset 0x01 (comment previously
    // said 0x10, but the entry targets 0x01; it is never applied since playback aborts above)
    auto entry2 = LOG_ENTRY_MAKE_MULTIBYTE(0x01, 2);
    entry2.raw8[3] = 0x15;
    entry2.raw8[4] = 0x16;
    (logstart + 3)->set(~entry2.raw64);
    EXPECT_EQ(inst.erasure_count(), 0) << "Invalid initial erase count";
    EXPECT_EQ(wear_leveling_init(), WEAR_LEVELING_CONSOLIDATED) << "Readback should have failed and triggered consolidation";
    EXPECT_EQ(inst.erasure_count(), 1) << "Invalid final erase count";
    uint8_t buf[2];
    wear_leveling_read(0x01, buf, sizeof(buf));
    EXPECT_EQ(buf[0], 0x11) << "Readback should have maintained the previous pre-failure value from the write log";
    EXPECT_EQ(buf[1], 0x12) << "Readback should have maintained the previous pre-failure value from the write log";
}

+ 204
- 0
quantum/wear_leveling/tests/wear_leveling_general.cpp View File

@ -0,0 +1,204 @@
// Copyright 2022 Nick Brassel (@tzarc)
// SPDX-License-Identifier: GPL-2.0-or-later
#include <numeric>
#include "gtest/gtest.h"
#include "gmock/gmock.h"
#include "backing_mocks.hpp"
class WearLevelingGeneral : public ::testing::Test {
   protected:
    // Reset the mock backing store and re-initialise the wear-leveling layer before each test.
    void SetUp() override {
        auto& inst = MockBackingStore::Instance();
        inst.reset_instance();
        wear_leveling_init();
    }
};
/**
 * This test verifies that even if there is consolidated data present, if the checksum doesn't match then the cache is zero'd after reading the consolidated area, but before write log is played back.
 */
TEST_F(WearLevelingGeneral, InvalidChecksum_ConsolidatedDataIgnored) {
    auto& inst     = MockBackingStore::Instance();
    auto  logstart = inst.storage_begin() + (WEAR_LEVELING_LOGICAL_SIZE / sizeof(backing_store_int_t));
    // Generate a test block of data
    std::array<std::uint8_t, WEAR_LEVELING_LOGICAL_SIZE> testvalue;
    std::iota(testvalue.begin(), testvalue.end(), 0x20);
    // Write the data (filling the whole logical area forces a consolidation)
    EXPECT_EQ(wear_leveling_write(0, testvalue.data(), testvalue.size()), WEAR_LEVELING_CONSOLIDATED) << "Write returned incorrect status";
    // Invalidate the checksum
    (logstart + 0)->erase();
    (logstart + 1)->erase();
    (logstart + 2)->erase();
    (logstart + 3)->erase();
    // Set up a 1-byte logical write of [0x11] at logical offset 0x01
    auto entry0 = LOG_ENTRY_MAKE_OPTIMIZED_64(0x01, 0x11);
    (logstart + 4)->set(~entry0.raw16[0]);
    // Re-init
    EXPECT_EQ(wear_leveling_init(), WEAR_LEVELING_SUCCESS) << "Init returned incorrect status";
    EXPECT_EQ(wear_leveling_read(0, testvalue.data(), testvalue.size()), WEAR_LEVELING_SUCCESS) << "Failed to read";
    // The consolidated data must have been discarded: only the replayed log entry survives.
    // std::size_t index avoids the signed/unsigned comparison the original `int i` produced.
    for (std::size_t i = 0; i < WEAR_LEVELING_LOGICAL_SIZE; ++i) {
        EXPECT_EQ(testvalue[i], i == 0x01 ? 0x11 : 0x00) << "Invalid readback";
    }
}
/**
 * This test verifies that writing the same data multiple times does not result in subsequent writes to the backing store.
 */
TEST_F(WearLevelingGeneral, SameValue_SingleBackingWrite) {
    auto&   store = MockBackingStore::Instance();
    uint8_t value = 0x14;
    EXPECT_EQ(wear_leveling_write(0x02, &value, sizeof(value)), WEAR_LEVELING_SUCCESS) << "First overall write operation should have succeeded";
    // Snapshot every backing-store invocation counter after the initial write.
    const uint64_t baseline_unlock = store.unlock_invoke_count();
    const uint64_t baseline_erase  = store.erase_invoke_count();
    const uint64_t baseline_write  = store.write_invoke_count();
    const uint64_t baseline_lock   = store.lock_invoke_count();
    // Re-writing the identical value repeatedly must leave every counter untouched.
    for (int attempt = 0; attempt < 10; ++attempt) {
        EXPECT_EQ(wear_leveling_write(0x02, &value, sizeof(value)), WEAR_LEVELING_SUCCESS) << "Subsequent overall write operation should have succeeded";
        EXPECT_EQ(store.unlock_invoke_count(), baseline_unlock) << "Unlock count should match";
        EXPECT_EQ(store.erase_invoke_count(), baseline_erase) << "Erase count should match";
        EXPECT_EQ(store.write_invoke_count(), baseline_write) << "Write count should match";
        EXPECT_EQ(store.lock_invoke_count(), baseline_lock) << "Lock count should match";
    }
}
/**
 * This test verifies that no other invocations occur if `backing_store_init()` fails.
 */
TEST_F(WearLevelingGeneral, InitFailure) {
    auto& inst = MockBackingStore::Instance();
    inst.reset_instance(); // make sure the counters are all zero
    // Force backing_store_init() to report failure.
    inst.set_init_callback([](std::uint64_t count) { return false; });
    EXPECT_EQ(inst.erasure_count(), 0) << "Invalid initial erase count";
    EXPECT_EQ(wear_leveling_init(), WEAR_LEVELING_FAILED) << "Init should have failed";
    EXPECT_EQ(inst.erasure_count(), 0) << "Invalid final erase count";
    // Init must bail out immediately: no unlock/erase/write/lock activity.
    EXPECT_EQ(inst.init_invoke_count(), 1) << "Init should have been invoked once";
    EXPECT_EQ(inst.unlock_invoke_count(), 0) << "Unlock should not have been invoked";
    EXPECT_EQ(inst.erase_invoke_count(), 0) << "Erase should not have been invoked";
    EXPECT_EQ(inst.write_invoke_count(), 0) << "Write should not have been invoked";
    EXPECT_EQ(inst.lock_invoke_count(), 0) << "Lock should not have been invoked";
}
/**
 * This test verifies that no invocations occur if the supplied address is out of range while writing.
 */
TEST_F(WearLevelingGeneral, WriteFailure_OOB) {
    auto&   store   = MockBackingStore::Instance();
    uint8_t payload = 0x14;
    // An address far outside the logical area must be rejected up-front,
    // before any backing-store interaction takes place.
    EXPECT_EQ(wear_leveling_write(0x21349830, &payload, sizeof(payload)), WEAR_LEVELING_FAILED) << "Overall write operation should have failed";
    EXPECT_EQ(store.unlock_invoke_count(), 0) << "Unlock should not have been invoked";
    EXPECT_EQ(store.erase_invoke_count(), 0) << "Erase should not have been invoked";
    EXPECT_EQ(store.write_invoke_count(), 0) << "Write should not have been invoked";
    EXPECT_EQ(store.lock_invoke_count(), 0) << "Lock should not have been invoked";
}
/**
 * This test verifies that a single write occurs if the supplied address and data length hits the edge of the logical area.
 */
TEST_F(WearLevelingGeneral, WriteSuccess_BoundaryOK) {
    auto& inst = MockBackingStore::Instance();
    uint16_t test_val = 0x14;
    // Writing exactly up to (but not past) the end of the logical area is valid.
    EXPECT_EQ(wear_leveling_write(WEAR_LEVELING_LOGICAL_SIZE - sizeof(test_val), &test_val, sizeof(test_val)), WEAR_LEVELING_SUCCESS) << "Overall write operation should have succeeded";
    EXPECT_EQ(inst.unlock_invoke_count(), 1) << "Unlock should have been invoked once";
    EXPECT_EQ(inst.erase_invoke_count(), 0) << "Erase should not have been invoked";
    // NOTE(review): the expected count of two backing writes depends on the
    // BACKING_STORE_WRITE_SIZE configured for this test binary -- confirm in rules.mk.
    EXPECT_EQ(inst.write_invoke_count(), 2) << "Write should have been invoked twice";
    EXPECT_EQ(inst.lock_invoke_count(), 1) << "Lock should have been invoked once";
}
/**
 * This test verifies that no invocations occur if the supplied address and length would generate writes outside the logical range.
 */
TEST_F(WearLevelingGeneral, WriteFailure_BoundaryOverflow) {
    auto& inst = MockBackingStore::Instance();
    uint16_t test_val = 0x14;
    // One byte past the end of the logical area: must be rejected before any
    // backing-store interaction takes place.
    EXPECT_EQ(wear_leveling_write(WEAR_LEVELING_LOGICAL_SIZE - sizeof(test_val) + 1, &test_val, sizeof(test_val)), WEAR_LEVELING_FAILED) << "Overall write operation should have failed";
    EXPECT_EQ(inst.unlock_invoke_count(), 0) << "Unlock should not have been invoked";
    EXPECT_EQ(inst.erase_invoke_count(), 0) << "Erase should not have been invoked";
    EXPECT_EQ(inst.write_invoke_count(), 0) << "Write should not have been invoked";
    EXPECT_EQ(inst.lock_invoke_count(), 0) << "Lock should not have been invoked";
}
/**
 * This test verifies that no invocations occur if the supplied address is out of range while reading.
 */
TEST_F(WearLevelingGeneral, ReadFailure_OOB) {
    auto&   store   = MockBackingStore::Instance();
    uint8_t readbuf = 0;
    // Out-of-range reads are rejected without touching the backing store.
    EXPECT_EQ(wear_leveling_read(0x21349830, &readbuf, sizeof(readbuf)), WEAR_LEVELING_FAILED) << "Overall read operation should have failed";
    EXPECT_EQ(store.unlock_invoke_count(), 0) << "Unlock should not have been invoked";
    EXPECT_EQ(store.erase_invoke_count(), 0) << "Erase should not have been invoked";
    EXPECT_EQ(store.write_invoke_count(), 0) << "Write should not have been invoked";
    EXPECT_EQ(store.lock_invoke_count(), 0) << "Lock should not have been invoked";
}
/**
 * This test verifies that no write invocations occur if `backing_store_unlock()` fails.
 */
TEST_F(WearLevelingGeneral, UnlockFailure_NoWrite) {
    auto& inst = MockBackingStore::Instance();
    // Force backing_store_unlock() to report failure.
    inst.set_unlock_callback([](std::uint64_t count) { return false; });
    uint8_t test_val = 0x14;
    EXPECT_EQ(wear_leveling_write(0x04, &test_val, sizeof(test_val)), WEAR_LEVELING_FAILED) << "Overall write operation should have failed";
    EXPECT_EQ(inst.unlock_invoke_count(), 1) << "Unlock should have been invoked once";
    EXPECT_EQ(inst.erase_invoke_count(), 0) << "Erase should not have been invoked";
    EXPECT_EQ(inst.write_invoke_count(), 0) << "Write should not have been invoked";
    EXPECT_EQ(inst.lock_invoke_count(), 0) << "Lock should not have been invoked";
    // The cache is updated before the unlock attempt, so the value is still
    // readable even though it never reached the backing store.
    test_val = 0;
    wear_leveling_read(0x04, &test_val, sizeof(test_val));
    EXPECT_EQ(test_val, 0x14) << "Readback should come from cache regardless of unlock failure";
}
/**
 * This test verifies that no erase invocations occur if `backing_store_unlock()` fails.
 */
TEST_F(WearLevelingGeneral, UnlockFailure_NoErase) {
    auto& inst = MockBackingStore::Instance();
    // Force backing_store_unlock() to report failure.
    inst.set_unlock_callback([](std::uint64_t count) { return false; });
    // Erase requires an unlocked store; a failed unlock must abort the erase.
    EXPECT_EQ(wear_leveling_erase(), WEAR_LEVELING_FAILED) << "Overall erase operation should have failed";
    EXPECT_EQ(inst.unlock_invoke_count(), 1) << "Unlock should have been invoked once";
    EXPECT_EQ(inst.erase_invoke_count(), 0) << "Erase should not have been invoked";
    EXPECT_EQ(inst.write_invoke_count(), 0) << "Write should not have been invoked";
    EXPECT_EQ(inst.lock_invoke_count(), 0) << "Lock should not have been invoked";
}
/**
 * This test verifies that only one write invocation occurs if `backing_store_write()` fails.
 */
TEST_F(WearLevelingGeneral, WriteFailure_NoSubsequentWrites) {
    auto& inst = MockBackingStore::Instance();
    // Force every backing_store_write() to report failure.
    inst.set_write_callback([](std::uint64_t count, std::uint32_t address) { return false; });
    uint8_t test_val = 0x14;
    EXPECT_EQ(wear_leveling_write(0x04, &test_val, sizeof(test_val)), WEAR_LEVELING_FAILED) << "Overall write operation should have failed";
    EXPECT_EQ(inst.unlock_invoke_count(), 1) << "Unlock should have been invoked once";
    EXPECT_EQ(inst.erase_invoke_count(), 0) << "Erase should not have been invoked";
    // The first failed write must abort the whole operation -- no retries, no
    // further log entries.
    EXPECT_EQ(inst.write_invoke_count(), 1) << "Write should have been invoked once";
    EXPECT_EQ(inst.lock_invoke_count(), 1) << "Lock should have been invoked once";
    // The cache is updated before the backing store is written, so the value is
    // still readable even though it never reached the backing store.
    // Fix: the assertion message previously said "unlock failure" (copy-paste
    // from the UnlockFailure tests); this test exercises a *write* failure.
    test_val = 0;
    wear_leveling_read(0x04, &test_val, sizeof(test_val));
    EXPECT_EQ(test_val, 0x14) << "Readback should come from cache regardless of write failure";
}

+ 779
- 0
quantum/wear_leveling/wear_leveling.c View File

@ -0,0 +1,779 @@
// Copyright 2022 Nick Brassel (@tzarc)
// SPDX-License-Identifier: GPL-2.0-or-later
#include <stdbool.h>
#include "fnv.h"
#include "wear_leveling.h"
#include "wear_leveling_internal.h"
/*
This wear leveling algorithm is adapted from algorithms from previous
implementations in QMK, namely:
- Artur F. (http://engsta.com/stm32-flash-memory-eeprom-emulator/)
- Yiancar -- QMK's base implementation for STM32F303
- Ilya Zhuravlev -- initial wear leveling algorithm
- Don Kjer -- increased flash density algorithm
- Nick Brassel (@tzarc) -- decoupled for use on other peripherals
At this layer, it is assumed that any reads/writes from the backing store
have a "reset state" after erasure of zero.
It is up to the backing store to perform translation of values, such as
taking the complement in order to deal with flash memory's reset value.
Terminology:
- Backing store: this is the storage area used by the wear leveling
algorithm.
- Backing size: this is the amount of storage provided by the backing
store for use by the wear leveling algorithm.
- Backing write size: this is the minimum number of bytes the backing
store can write in a single operation.
- Logical data: this is the externally-visible "emulated EEPROM" that
external subsystems "see" when performing reads/writes.
- Logical size: this is the amount of storage available for use
externally. Effectively, the "size of the EEPROM".
- Write log: this is a section of the backing store used to keep track
of modifications without overwriting existing data. This log is
"played back" on startup such that any subsequent reads are capable
of returning the latest data.
- Consolidated data: this is a section of the backing store reserved for
use for the latest copy of logical data. This is only ever written
when the write log is full -- the latest values for the logical data
are written here and the write log is cleared.
Configurables:
- BACKING_STORE_WRITE_SIZE: The number of bytes required for a write
operation. This is defined by the capabilities of the backing store.
- WEAR_LEVELING_BACKING_SIZE: The number of bytes provided by the
backing store for use by the wear leveling algorithm. This is
defined by the capabilities of the backing store. This value must
also be at least twice the size of the logical size, as well as a
multiple of the logical size.
- WEAR_LEVELING_LOGICAL_SIZE: The number of bytes externally visible
to other subsystems performing reads/writes. This must be a multiple
of the write size.
General algorithm:
During initialization:
* The contents of the consolidated data section are read into cache.
* The contents of the write log are "played back" and update the
cache accordingly.
During reads:
* Logical data is served from the cache.
During writes:
* The cache is updated with the new data.
* A new write log entry is appended to the log.
* If the log's full, data is consolidated and the write log cleared.
Write log structure:
The first 8 bytes of the write log are a FNV1a_64 hash of the contents
of the consolidated data area, in an attempt to detect and guard against
any data corruption.
The write log follows the hash:
Given that the algorithm needs to cater for 2-, 4-, and 8-byte writes,
a variable-length write log entry is used such that the minimal amount
of storage is used based off the backing store write size.
Firstly, an empty log entry is expected to be all zeros. If the backing
store uses 0xFF for cleared bytes, it should return the complement, such
that this wear-leveling algorithm "receives" zeros.
For multi-byte writes, up to 8 bytes will be used for each log entry,
depending on the size of backing store writes:
Multi-byte Log Entry (2, 4-byte)
00XXXYYYYYYYYYYYYYYYYYYYAAAAAAAA
LenAdd Address AddressValue[0]
Multi-byte Log Entry (2-byte)
00XXXYYYYYYYYYYYYYYYYYYYAAAAAAAABBBBBBBBCCCCCCCC
LenAdd Address AddressValue[0]Value[1]Value[2]
Multi-byte Log Entry (2, 4, 8-byte)
00XXXYYYYYYYYYYYYYYYYYYYAAAAAAAABBBBBBBBCCCCCCCCDDDDDDDDEEEEEEEE
LenAdd Address AddressValue[0]Value[1]Value[2]Value[3]Value[4]
19 bits are used for the address, which allows for a max logical size of
512kB. Up to 5 bytes can be included in a single log entry.
For 2-byte backing store writes, the last two bytes are optional
depending on the length of data to be written. Accordingly, either 3
or 4 backing store write operations will occur.
For 4-byte backing store writes, either one or two write operations
occur, depending on the length.
For 8-byte backing store writes, one write operation occurs.
2-byte backing store optimizations:
For single byte writes, addresses between 0...63 are encoded in a single
backing store write operation. 4- and 8-byte backing stores do not have
this optimization as it does not minimize the number of bytes written.
Byte-Entry
01XXXXXXYYYYYYYY
Address Value
0 <= Address < 0x40 (64)
A second optimization takes into account uint16_t writes of 0 or 1,
specifically catering for KC_NO and KC_TRANSPARENT in the dynamic keymap
subsystem. This is valid only for the first 16kB of logical data --
addresses outside this range will use the multi-byte encoding above.
U16-Encoded 0
100XXXXXXXXXXXXX
Address >> 1
Value: 0
0 <= Address <= 0x3FFE (16382)
U16-Encoded 1
101XXXXXXXXXXXXX
Address >> 1
Value: 1
0 <= Address <= 0x3FFE (16382) */
/**
 * Storage area for the wear-leveling cache.
 */
static struct __attribute__((__aligned__(BACKING_STORE_WRITE_SIZE))) {
    // RAM mirror of the entire logical data area; all reads are served from
    // here. Aligned so it can be accessed as backing_store_int_t words.
    __attribute__((__aligned__(BACKING_STORE_WRITE_SIZE))) uint8_t cache[(WEAR_LEVELING_LOGICAL_SIZE)];
    // Backing-store address at which the next write log entry will be appended.
    uint32_t write_address;
    // Whether the backing store is currently unlocked for writing.
    bool unlocked;
} wear_leveling;
/**
 * Locking helper: status
 *
 * STATUS_UNCHANGED means the lock state was already as requested; the caller
 * should then skip the matching reverse transition.
 */
typedef enum backing_store_lock_status_t { STATUS_FAILURE = 0, STATUS_SUCCESS, STATUS_UNCHANGED } backing_store_lock_status_t;
/**
 * Locking helper: unlock
 *
 * Moves the backing store into its writable state, tracking whether it was
 * already unlocked so the caller knows whether a matching lock is required.
 */
static inline backing_store_lock_status_t wear_leveling_unlock(void) {
    backing_store_lock_status_t result = STATUS_UNCHANGED;
    if (!wear_leveling.unlocked) {
        result = backing_store_unlock() ? STATUS_SUCCESS : STATUS_FAILURE;
        if (result == STATUS_SUCCESS) {
            wear_leveling.unlocked = true;
        }
    }
    return result;
}
/**
 * Locking helper: lock
 *
 * Moves the backing store back into its read-only state, tracking whether it
 * was already locked so redundant transitions are skipped.
 */
static inline backing_store_lock_status_t wear_leveling_lock(void) {
    backing_store_lock_status_t result = STATUS_UNCHANGED;
    if (wear_leveling.unlocked) {
        result = backing_store_lock() ? STATUS_SUCCESS : STATUS_FAILURE;
        if (result == STATUS_SUCCESS) {
            wear_leveling.unlocked = false;
        }
    }
    return result;
}
/**
 * Resets the cache, ensuring the write address is correctly initialised.
 *
 * Zeroes the logical-data mirror and points the log write address at the first
 * slot after the consolidated area and its hash.
 */
static void wear_leveling_clear_cache(void) {
    memset(wear_leveling.cache, 0, sizeof(wear_leveling.cache));
    // +8 skips the FNV1a_64 hash of the consolidated buffer.
    wear_leveling.write_address = (WEAR_LEVELING_LOGICAL_SIZE) + 8;
}
/**
 * Reads the consolidated data from the backing store into the cache.
 * Does not consider the write log.
 *
 * After the copy, the FNV1a_64 hash stored directly after the consolidated
 * area is compared against the hash of the data just read. On mismatch the
 * cache is zeroed but no failure is reported -- this also covers the
 * factory-clean device case where nothing has been written yet.
 *
 * @return WEAR_LEVELING_SUCCESS normally (including on hash mismatch),
 *         WEAR_LEVELING_FAILED if the backing store could not be read.
 */
static wear_leveling_status_t wear_leveling_read_consolidated(void) {
    wl_dprintf("Reading consolidated data\n");
    wear_leveling_status_t status = WEAR_LEVELING_SUCCESS;
    // Copy the consolidated area into the cache, one backing-store word at a time.
    for (int address = 0; address < (WEAR_LEVELING_LOGICAL_SIZE); address += (BACKING_STORE_WRITE_SIZE)) {
        backing_store_int_t *const loc = (backing_store_int_t *)&wear_leveling.cache[address];
        backing_store_int_t temp;
        bool ok = backing_store_read(address, &temp);
        if (!ok) {
            wl_dprintf("Failed to read from backing store\n");
            status = WEAR_LEVELING_FAILED;
            break;
        }
        *loc = temp;
    }
    // Verify the FNV1a_64 result
    if (status != WEAR_LEVELING_FAILED) {
        uint64_t expected = fnv_64a_buf(wear_leveling.cache, (WEAR_LEVELING_LOGICAL_SIZE), FNV1A_64_INIT);
        write_log_entry_t entry;
        // The 8-byte hash sits directly after the consolidated area; read it in
        // whichever word size the backing store supports.
#if BACKING_STORE_WRITE_SIZE == 2
        backing_store_read((WEAR_LEVELING_LOGICAL_SIZE) + 0, &entry.raw16[0]);
        backing_store_read((WEAR_LEVELING_LOGICAL_SIZE) + 2, &entry.raw16[1]);
        backing_store_read((WEAR_LEVELING_LOGICAL_SIZE) + 4, &entry.raw16[2]);
        backing_store_read((WEAR_LEVELING_LOGICAL_SIZE) + 6, &entry.raw16[3]);
#elif BACKING_STORE_WRITE_SIZE == 4
        backing_store_read((WEAR_LEVELING_LOGICAL_SIZE) + 0, &entry.raw32[0]);
        backing_store_read((WEAR_LEVELING_LOGICAL_SIZE) + 4, &entry.raw32[1]);
#elif BACKING_STORE_WRITE_SIZE == 8
        backing_store_read((WEAR_LEVELING_LOGICAL_SIZE) + 0, &entry.raw64);
#endif
        // If we have a mismatch, clear the cache but do not flag a failure,
        // which will cater for the completely clean MCU case.
        if (entry.raw64 != expected) {
            wear_leveling_clear_cache();
        }
    }
    // If we failed for any reason, then clear the cache
    if (status == WEAR_LEVELING_FAILED) {
        wear_leveling_clear_cache();
    }
    return status;
}
/**
 * Writes the current cache to consolidated data at the beginning of the backing store.
 * Does not clear the write log.
 *
 * Only words that differ from what's already stored are written, minimising
 * wear. After the data, the FNV1a_64 hash of the cache is written directly
 * after the consolidated area so the next init can validate it.
 *
 * @return WEAR_LEVELING_CONSOLIDATED on success, WEAR_LEVELING_FAILED if any
 *         backing-store read or write failed.
 */
static wear_leveling_status_t wear_leveling_write_consolidated(void) {
    wl_dprintf("Writing consolidated data\n");
    wear_leveling_status_t status = WEAR_LEVELING_CONSOLIDATED;
    // Unlock only if needed; remember whether we did, so we re-lock symmetrically.
    backing_store_lock_status_t lock_status = wear_leveling_unlock();
    for (int address = 0; address < (WEAR_LEVELING_LOGICAL_SIZE); address += (BACKING_STORE_WRITE_SIZE)) {
        const backing_store_int_t value = *(backing_store_int_t *)&wear_leveling.cache[address];
        backing_store_int_t temp;
        bool ok = backing_store_read(address, &temp);
        if (!ok) {
            wl_dprintf("Failed to read from backing store\n");
            status = WEAR_LEVELING_FAILED;
            break;
        }
        // Skip the write when the stored word already matches the cache.
        if (temp != value) {
            ok = backing_store_write(address, value);
            if (!ok) {
                wl_dprintf("Failed to write to backing store\n");
                status = WEAR_LEVELING_FAILED;
                break;
            }
        }
    }
    if (status != WEAR_LEVELING_FAILED) {
        // Write out the FNV1a_64 result of the consolidated data
        write_log_entry_t entry;
        entry.raw64 = fnv_64a_buf(wear_leveling.cache, (WEAR_LEVELING_LOGICAL_SIZE), FNV1A_64_INIT);
        // do/while(0) allows 'break' to bail out of the word-sized hash writes
        // on first failure.
        do {
#if BACKING_STORE_WRITE_SIZE == 2
            if (!backing_store_write((WEAR_LEVELING_LOGICAL_SIZE) + 0, entry.raw16[0])) {
                status = WEAR_LEVELING_FAILED;
                break;
            }
            if (!backing_store_write((WEAR_LEVELING_LOGICAL_SIZE) + 2, entry.raw16[1])) {
                status = WEAR_LEVELING_FAILED;
                break;
            }
            if (!backing_store_write((WEAR_LEVELING_LOGICAL_SIZE) + 4, entry.raw16[2])) {
                status = WEAR_LEVELING_FAILED;
                break;
            }
            if (!backing_store_write((WEAR_LEVELING_LOGICAL_SIZE) + 6, entry.raw16[3])) {
                status = WEAR_LEVELING_FAILED;
                break;
            }
#elif BACKING_STORE_WRITE_SIZE == 4
            if (!backing_store_write((WEAR_LEVELING_LOGICAL_SIZE) + 0, entry.raw32[0])) {
                status = WEAR_LEVELING_FAILED;
                break;
            }
            if (!backing_store_write((WEAR_LEVELING_LOGICAL_SIZE) + 4, entry.raw32[1])) {
                status = WEAR_LEVELING_FAILED;
                break;
            }
#elif BACKING_STORE_WRITE_SIZE == 8
            if (!backing_store_write((WEAR_LEVELING_LOGICAL_SIZE) + 0, entry.raw64)) {
                status = WEAR_LEVELING_FAILED;
                break;
            }
#endif
        } while (0);
    }
    // Re-lock only if this function performed the unlock.
    if (lock_status == STATUS_SUCCESS) {
        wear_leveling_lock();
    }
    return status;
}
/**
 * Forces a write of the current cache.
 * Erases the backing store, including the write log.
 * During this operation, there is the potential for data loss if a power loss occurs.
 *
 * @return WEAR_LEVELING_CONSOLIDATED on success, WEAR_LEVELING_FAILED otherwise.
 */
static wear_leveling_status_t wear_leveling_consolidate_force(void) {
    wl_dprintf("Erasing backing store\n");
    // Erase everything first; un-written locations must subsequently read back as zero.
    if (!backing_store_erase()) {
        wl_dprintf("Failed to erase backing store\n");
        return WEAR_LEVELING_FAILED;
    }
    // Persist the cache into the consolidated area at the start of the store.
    wear_leveling_status_t status = wear_leveling_write_consolidated();
    if (status == WEAR_LEVELING_FAILED) {
        wl_dprintf("Failed to write consolidated data\n");
    }
    // The write log restarts just past the consolidated area and its FNV1a_64 hash.
    wear_leveling.write_address = (WEAR_LEVELING_LOGICAL_SIZE) + 8;
    return status;
}
/**
 * Potential write of the current cache to the backing store.
 * Skipped unless the write log position has reached the end of the backing store.
 * During this operation, there is the potential for data loss if a power loss occurs.
 *
 * @return WEAR_LEVELING_CONSOLIDATED if consolidation occurred,
 *         WEAR_LEVELING_SUCCESS if it wasn't needed, WEAR_LEVELING_FAILED on error.
 */
static wear_leveling_status_t wear_leveling_consolidate_if_needed(void) {
    const bool log_full = wear_leveling.write_address >= (WEAR_LEVELING_BACKING_SIZE);
    return log_full ? wear_leveling_consolidate_force() : WEAR_LEVELING_SUCCESS;
}
/**
 * Appends the supplied fixed-width entry to the write log, consolidating if the log becomes full.
 *
 * @return WEAR_LEVELING_SUCCESS normally, WEAR_LEVELING_CONSOLIDATED if the
 *         append filled the log and triggered consolidation, WEAR_LEVELING_FAILED on error.
 */
static wear_leveling_status_t wear_leveling_append_raw(backing_store_int_t value) {
    if (!backing_store_write(wear_leveling.write_address, value)) {
        wl_dprintf("Failed to write to backing store\n");
        return WEAR_LEVELING_FAILED;
    }
    // Advance to the next log slot; consolidate when the log area is exhausted.
    wear_leveling.write_address += (BACKING_STORE_WRITE_SIZE);
    return wear_leveling_consolidate_if_needed();
}
/**
 * Handles writing multi_byte-encoded data to the backing store.
 *
 * Builds a multi-byte log entry (header + up to LOG_ENTRY_MULTIBYTE_MAX_BYTES
 * payload bytes) and appends it word-by-word, with the number of words
 * depending on the backing store's write size and the payload length.
 *
 * @param[in] address logical address of the first byte
 * @param[in] value   pointer to the payload bytes
 * @param[in] length  number of payload bytes (caller limits this to LOG_ENTRY_MULTIBYTE_MAX_BYTES)
 * @return WEAR_LEVELING_SUCCESS normally, WEAR_LEVELING_CONSOLIDATED if an
 *         append triggered consolidation, WEAR_LEVELING_FAILED on error.
 */
static wear_leveling_status_t wear_leveling_write_raw_multibyte(uint32_t address, const void *value, size_t length) {
    const uint8_t * p = value;
    // Header occupies the first 3 bytes of the entry; payload starts at raw8[3].
    write_log_entry_t log = LOG_ENTRY_MAKE_MULTIBYTE(address, length);
    for (size_t i = 0; i < length; ++i) {
        log.raw8[3 + i] = p[i];
    }
    // Write to the backing store. See the multi-byte log format in the documentation header at the top of the file.
    wear_leveling_status_t status;
#if BACKING_STORE_WRITE_SIZE == 2
    // 2-byte stores: first two words always written; 3rd/4th only when the
    // payload spills into them.
    status = wear_leveling_append_raw(log.raw16[0]);
    if (status != WEAR_LEVELING_SUCCESS) {
        return status;
    }
    status = wear_leveling_append_raw(log.raw16[1]);
    if (status != WEAR_LEVELING_SUCCESS) {
        return status;
    }
    if (length > 1) {
        status = wear_leveling_append_raw(log.raw16[2]);
        if (status != WEAR_LEVELING_SUCCESS) {
            return status;
        }
    }
    if (length > 3) {
        status = wear_leveling_append_raw(log.raw16[3]);
        if (status != WEAR_LEVELING_SUCCESS) {
            return status;
        }
    }
#elif BACKING_STORE_WRITE_SIZE == 4
    // 4-byte stores: one word, plus a second when the payload exceeds one byte.
    status = wear_leveling_append_raw(log.raw32[0]);
    if (status != WEAR_LEVELING_SUCCESS) {
        return status;
    }
    if (length > 1) {
        status = wear_leveling_append_raw(log.raw32[1]);
        if (status != WEAR_LEVELING_SUCCESS) {
            return status;
        }
    }
#elif BACKING_STORE_WRITE_SIZE == 8
    // 8-byte stores: the whole entry fits in a single word.
    status = wear_leveling_append_raw(log.raw64);
    if (status != WEAR_LEVELING_SUCCESS) {
        return status;
    }
#endif
    return status;
}
/**
 * Handles the actual writing of logical data into the write log section of the backing store.
 *
 * Splits the incoming span into log entries, preferring the compact encodings
 * available on 2-byte backing stores (uint16 0/1 entries and single-byte
 * low-address entries) before falling back to the generic multi-byte entry.
 *
 * @param[in] address logical start address
 * @param[in] value   pointer to the source bytes
 * @param[in] length  number of bytes to record
 * @return WEAR_LEVELING_SUCCESS normally, WEAR_LEVELING_CONSOLIDATED if an
 *         append filled the log, WEAR_LEVELING_FAILED on error.
 */
static wear_leveling_status_t wear_leveling_write_raw(uint32_t address, const void *value, size_t length) {
    const uint8_t * p = value;
    size_t remaining = length;
    wear_leveling_status_t status = WEAR_LEVELING_SUCCESS;
    while (remaining > 0) {
#if BACKING_STORE_WRITE_SIZE == 2
        // Small-write optimizations - uint16_t, 0 or 1, address is even, address <16384:
        if (remaining >= 2 && address % 2 == 0 && address < 16384) {
            const uint16_t v = *(const uint16_t *)p;
            if (v == 0 || v == 1) {
                // Single-word encoding for KC_NO / KC_TRANSPARENT-style values.
                const write_log_entry_t log = LOG_ENTRY_MAKE_WORD_01(address, v);
                status = wear_leveling_append_raw(log.raw16[0]);
                if (status != WEAR_LEVELING_SUCCESS) {
                    // If consolidation occurred, then the cache has already been written to the consolidated area. No need to continue.
                    // If a failure occurred, pass it on.
                    return status;
                }
                remaining -= 2;
                address += 2;
                p += 2;
                continue;
            }
        }
        // Small-write optimizations - address<64:
        if (address < 64) {
            // Single-word encoding of one byte at a low address.
            const write_log_entry_t log = LOG_ENTRY_MAKE_OPTIMIZED_64(address, *p);
            status = wear_leveling_append_raw(log.raw16[0]);
            if (status != WEAR_LEVELING_SUCCESS) {
                // If consolidation occurred, then the cache has already been written to the consolidated area. No need to continue.
                // If a failure occurred, pass it on.
                return status;
            }
            remaining--;
            address++;
            p++;
            continue;
        }
#endif // BACKING_STORE_WRITE_SIZE == 2
        // Generic path: emit multi-byte entries of at most LOG_ENTRY_MULTIBYTE_MAX_BYTES each.
        const size_t this_length = remaining >= LOG_ENTRY_MULTIBYTE_MAX_BYTES ? LOG_ENTRY_MULTIBYTE_MAX_BYTES : remaining;
        status = wear_leveling_write_raw_multibyte(address, p, this_length);
        if (status != WEAR_LEVELING_SUCCESS) {
            // If consolidation occurred, then the cache has already been written to the consolidated area. No need to continue.
            // If a failure occurred, pass it on.
            return status;
        }
        remaining -= this_length;
        address += (uint32_t)this_length;
        p += this_length;
    }
    return status;
}
/**
 * "Replays" the write log from the backing store, updating the local cache with updated values.
 *
 * Walks the log area word-by-word, decoding each entry type and applying its
 * payload to the cache. Stops at the first zero word (empty slot). On any
 * read failure or malformed entry, the whole store is force-consolidated from
 * whatever state the cache holds at that point.
 *
 * @return WEAR_LEVELING_SUCCESS / WEAR_LEVELING_CONSOLIDATED on completion,
 *         WEAR_LEVELING_FAILED if recovery consolidation itself failed.
 */
static wear_leveling_status_t wear_leveling_playback_log(void) {
    wl_dprintf("Playback write log\n");
    wear_leveling_status_t status = WEAR_LEVELING_SUCCESS;
    bool cancel_playback = false;
    // The log starts just past the consolidated area and its FNV1a_64 hash.
    uint32_t address = (WEAR_LEVELING_LOGICAL_SIZE) + 8;
    while (!cancel_playback && address < (WEAR_LEVELING_BACKING_SIZE)) {
        backing_store_int_t value;
        bool ok = backing_store_read(address, &value);
        if (!ok) {
            wl_dprintf("Failed to load from backing store, skipping playback of write log\n");
            cancel_playback = true;
            status = WEAR_LEVELING_FAILED;
            break;
        }
        // A zero word marks the first unused log slot: playback is complete.
        if (value == 0) {
            wl_dprintf("Found empty slot, no more log entries\n");
            cancel_playback = true;
            break;
        }
        // If we got a nonzero value, then we need to increment the address to ensure next write occurs at next location
        address += (BACKING_STORE_WRITE_SIZE);
        // Read from the write log
        write_log_entry_t log;
#if BACKING_STORE_WRITE_SIZE == 2
        log.raw16[0] = value;
#elif BACKING_STORE_WRITE_SIZE == 4
        log.raw32[0] = value;
#elif BACKING_STORE_WRITE_SIZE == 8
        log.raw64 = value;
#endif
        switch (LOG_ENTRY_GET_TYPE(log)) {
            case LOG_ENTRY_TYPE_MULTIBYTE: {
                // Multi-byte entries may span several backing-store words; read
                // the rest of the header first (2-byte stores only), then any
                // additional payload words indicated by the entry's length.
#if BACKING_STORE_WRITE_SIZE == 2
                ok = backing_store_read(address, &log.raw16[1]);
                if (!ok) {
                    wl_dprintf("Failed to load from backing store, skipping playback of write log\n");
                    cancel_playback = true;
                    status = WEAR_LEVELING_FAILED;
                    break;
                }
                address += (BACKING_STORE_WRITE_SIZE);
#endif // BACKING_STORE_WRITE_SIZE == 2
                const uint32_t a = LOG_ENTRY_MULTIBYTE_GET_ADDRESS(log);
                const uint8_t  l = LOG_ENTRY_MULTIBYTE_GET_LENGTH(log);
                // Reject entries that would write past the logical area.
                if (a + l > (WEAR_LEVELING_LOGICAL_SIZE)) {
                    cancel_playback = true;
                    status = WEAR_LEVELING_FAILED;
                    break;
                }
#if BACKING_STORE_WRITE_SIZE == 2
                if (l > 1) {
                    ok = backing_store_read(address, &log.raw16[2]);
                    if (!ok) {
                        wl_dprintf("Failed to load from backing store, skipping playback of write log\n");
                        cancel_playback = true;
                        status = WEAR_LEVELING_FAILED;
                        break;
                    }
                    address += (BACKING_STORE_WRITE_SIZE);
                }
                if (l > 3) {
                    ok = backing_store_read(address, &log.raw16[3]);
                    if (!ok) {
                        wl_dprintf("Failed to load from backing store, skipping playback of write log\n");
                        cancel_playback = true;
                        status = WEAR_LEVELING_FAILED;
                        break;
                    }
                    address += (BACKING_STORE_WRITE_SIZE);
                }
#elif BACKING_STORE_WRITE_SIZE == 4
                if (l > 1) {
                    ok = backing_store_read(address, &log.raw32[1]);
                    if (!ok) {
                        wl_dprintf("Failed to load from backing store, skipping playback of write log\n");
                        cancel_playback = true;
                        status = WEAR_LEVELING_FAILED;
                        break;
                    }
                    address += (BACKING_STORE_WRITE_SIZE);
                }
#endif
                // Payload bytes start at raw8[3] in the entry layout.
                memcpy(&wear_leveling.cache[a], &log.raw8[3], l);
            } break;
#if BACKING_STORE_WRITE_SIZE == 2
            case LOG_ENTRY_TYPE_OPTIMIZED_64: {
                // Single-byte write at a low address (see format docs above).
                const uint32_t a = LOG_ENTRY_OPTIMIZED_64_GET_ADDRESS(log);
                const uint8_t  v = LOG_ENTRY_OPTIMIZED_64_GET_VALUE(log);
                if (a >= (WEAR_LEVELING_LOGICAL_SIZE)) {
                    cancel_playback = true;
                    status = WEAR_LEVELING_FAILED;
                    break;
                }
                wear_leveling.cache[a] = v;
            } break;
            case LOG_ENTRY_TYPE_WORD_01: {
                // uint16 write of value 0 or 1 (KC_NO / KC_TRANSPARENT optimization).
                const uint32_t a = LOG_ENTRY_WORD_01_GET_ADDRESS(log);
                const uint8_t  v = LOG_ENTRY_WORD_01_GET_VALUE(log);
                if (a + 1 >= (WEAR_LEVELING_LOGICAL_SIZE)) {
                    cancel_playback = true;
                    status = WEAR_LEVELING_FAILED;
                    break;
                }
                // Little-endian uint16: low byte carries the 0/1, high byte is zero.
                wear_leveling.cache[a + 0] = v;
                wear_leveling.cache[a + 1] = 0;
            } break;
#endif // BACKING_STORE_WRITE_SIZE == 2
            default: {
                // Unknown entry type: treat the log as corrupt.
                cancel_playback = true;
                status = WEAR_LEVELING_FAILED;
            } break;
        }
    }
    // We've reached the end of the log, so we're at the new write location
    wear_leveling.write_address = address;
    if (status == WEAR_LEVELING_FAILED) {
        // If we had a failure during readback, assume we're corrupted -- force a consolidation with the data we already have
        status = wear_leveling_consolidate_force();
    } else {
        // Consolidate the cache + write log if required
        status = wear_leveling_consolidate_if_needed();
    }
    return status;
}
/**
 * Wear-leveling initialization
 *
 * Clears the cache, initialises the backing store, loads the consolidated
 * data, then replays the write log so the cache holds the "live" values.
 * On any failure the cache is left zeroed.
 */
wear_leveling_status_t wear_leveling_init(void) {
    wl_dprintf("Init\n");
    // Start from a clean cache.
    wear_leveling_clear_cache();
    if (!backing_store_init()) {
        wear_leveling_clear_cache();
        return WEAR_LEVELING_FAILED;
    }
    // Load the previous consolidated values; if that worked, replay the log on top.
    wear_leveling_status_t status = wear_leveling_read_consolidated();
    if (status != WEAR_LEVELING_FAILED) {
        status = wear_leveling_playback_log();
    }
    // Any failure along the way leaves the cache zeroed.
    if (status == WEAR_LEVELING_FAILED) {
        wear_leveling_clear_cache();
    }
    return status;
}
/**
 * Wear-leveling erase.
 * Post-condition: any reads from the backing store directly after an erase operation must come back as zero.
 */
wear_leveling_status_t wear_leveling_erase(void) {
    wl_dprintf("Erase\n");
    // Unlock the backing store
    backing_store_lock_status_t lock_status = wear_leveling_unlock();
    if (lock_status == STATUS_FAILURE) {
        // Best-effort re-lock before bailing out.
        wear_leveling_lock();
        return WEAR_LEVELING_FAILED;
    }
    // Perform the erase; the cache is cleared regardless of the erase result.
    bool ret = backing_store_erase();
    wear_leveling_clear_cache();
    // Lock the backing store if we acquired the lock successfully
    if (lock_status == STATUS_SUCCESS) {
        // A failure to re-lock also fails the overall operation.
        ret &= (wear_leveling_lock() != STATUS_FAILURE);
    }
    return ret ? WEAR_LEVELING_SUCCESS : WEAR_LEVELING_FAILED;
}
/**
 * Writes logical data into the backing store. Skips writes if there are no changes to values.
 *
 * @param[in] address logical address to write to
 * @param[in] value   pointer to the source buffer
 * @param[in] length  number of bytes to write
 * @return WEAR_LEVELING_SUCCESS or WEAR_LEVELING_CONSOLIDATED on success,
 *         WEAR_LEVELING_FAILED on out-of-range addresses or backing-store errors.
 */
wear_leveling_status_t wear_leveling_write(const uint32_t address, const void *value, size_t length) {
    wl_assert(address + length <= (WEAR_LEVELING_LOGICAL_SIZE));
    if (address + length > (WEAR_LEVELING_LOGICAL_SIZE)) {
        return WEAR_LEVELING_FAILED;
    }
    wl_dprintf("Write ");
    wl_dump(address, value, length);
    // Skip write if there's no change compared to the current cached value.
    // Fix: previously returned `true` from a wear_leveling_status_t function,
    // which only worked because WEAR_LEVELING_SUCCESS happens to equal 1.
    if (memcmp(value, &wear_leveling.cache[address], length) == 0) {
        return WEAR_LEVELING_SUCCESS;
    }
    // Update the cache before writing to the backing store -- if we hit the end of the backing store during writes to the log then we'll force a consolidation in-line
    memcpy(&wear_leveling.cache[address], value, length);
    // Unlock the backing store
    backing_store_lock_status_t lock_status = wear_leveling_unlock();
    if (lock_status == STATUS_FAILURE) {
        wear_leveling_lock();
        return WEAR_LEVELING_FAILED;
    }
    // Perform the actual write
    wear_leveling_status_t status = wear_leveling_write_raw(address, value, length);
    switch (status) {
        case WEAR_LEVELING_CONSOLIDATED:
        case WEAR_LEVELING_FAILED:
            // If the write triggered consolidation, or the write failed, then nothing else needs to occur.
            break;
        case WEAR_LEVELING_SUCCESS:
            // Consolidate the cache + write log if required
            status = wear_leveling_consolidate_if_needed();
            break;
        default:
            // Unsure how we'd get here...
            status = WEAR_LEVELING_FAILED;
            break;
    }
    // Re-lock only if this function performed the unlock; a failed re-lock
    // fails the overall operation.
    if (lock_status == STATUS_SUCCESS) {
        if (wear_leveling_lock() == STATUS_FAILURE) {
            status = WEAR_LEVELING_FAILED;
        }
    }
    return status;
}
/**
 * Reads logical data from the cache.
 *
 * The cache always holds the live values, so no backing-store access is needed.
 *
 * @param[in]  address logical address to read from
 * @param[out] value   destination buffer
 * @param[in]  length  number of bytes to read
 * @return WEAR_LEVELING_SUCCESS, or WEAR_LEVELING_FAILED for out-of-range requests.
 */
wear_leveling_status_t wear_leveling_read(const uint32_t address, void *value, size_t length) {
    wl_assert(address + length <= (WEAR_LEVELING_LOGICAL_SIZE));
    if (address + length > (WEAR_LEVELING_LOGICAL_SIZE)) {
        return WEAR_LEVELING_FAILED;
    }
    // Serve the read straight from the cache.
    const uint8_t *src = &wear_leveling.cache[address];
    memcpy(value, src, length);
    wl_dprintf("Read ");
    wl_dump(address, value, length);
    return WEAR_LEVELING_SUCCESS;
}

+ 54
- 0
quantum/wear_leveling/wear_leveling.h View File

@ -0,0 +1,54 @@
// Copyright 2022 Nick Brassel (@tzarc)
// SPDX-License-Identifier: GPL-2.0-or-later
#pragma once
#include <stdint.h>
#include <stdlib.h>
/**
 * Status returned from any wear-leveling API.
 */
typedef enum wear_leveling_status_t {
    WEAR_LEVELING_FAILED,      ///< Invocation failed
    WEAR_LEVELING_SUCCESS,     ///< Invocation succeeded
    WEAR_LEVELING_CONSOLIDATED ///< Invocation succeeded, consolidation occurred
} wear_leveling_status_t;
/**
 * Wear-leveling initialization
 *
 * @return Status of the request
 */
wear_leveling_status_t wear_leveling_init(void);
/**
 * Wear-leveling erasure.
 *
 * Clears the wear-leveling area, with the definition that the "reset state" of all data is zero.
 *
 * @return Status of the request
 */
wear_leveling_status_t wear_leveling_erase(void);
/**
 * Writes logical data into the backing store.
 *
 * Skips writes if there are no changes to written values. The entire written block is considered when attempting to
 * determine if an overwrite should occur -- if there is any data mismatch the entire block will be written to the log,
 * not just the changed bytes.
 *
 * @param[in] address the logical address to write data
 * @param[in] value pointer to the source buffer
 * @param[in] length length of the data
 * @return Status of the request
 */
wear_leveling_status_t wear_leveling_write(uint32_t address, const void* value, size_t length);
/**
 * Reads logical data from the cache.
 *
 * @param[in] address the logical address to read data
 * @param[out] value pointer to the destination buffer
 * @param[in] length length of the data
 * @return Status of the request
 */
wear_leveling_status_t wear_leveling_read(uint32_t address, void* value, size_t length);

+ 145
- 0
quantum/wear_leveling/wear_leveling_internal.h View File

@@ -0,0 +1,145 @@
// Copyright 2022 Nick Brassel (@tzarc)
// SPDX-License-Identifier: GPL-2.0-or-later
#pragma once
// C++ has no _Static_assert keyword; map it onto C++11 static_assert so the
// compile-time checks in this header also work when included from the C++ tests.
#ifdef __cplusplus
# define _Static_assert static_assert
#endif
#include <stdbool.h> // `bool` is used by the backing-store API declarations below (pre-C23 C needs this header)
#include <stdint.h>
#include <string.h>
// Integer type matching the minimum write granularity of the backing store
// (e.g. flash half-word / word / double-word programming units).
#if BACKING_STORE_WRITE_SIZE == 2
typedef uint16_t backing_store_int_t;
#elif BACKING_STORE_WRITE_SIZE == 4
typedef uint32_t backing_store_int_t;
#elif BACKING_STORE_WRITE_SIZE == 8
typedef uint64_t backing_store_int_t;
#else
# error Invalid BACKING_STORE_WRITE_SIZE, needs to be 2/4/8.
#endif
// Both sizes must be supplied by the including build configuration.
#ifndef WEAR_LEVELING_BACKING_SIZE
# error WEAR_LEVELING_BACKING_SIZE was not set.
#endif
#ifndef WEAR_LEVELING_LOGICAL_SIZE
# error WEAR_LEVELING_LOGICAL_SIZE was not set.
#endif
// Debugging helpers: active only when WEAR_LEVELING_DEBUG_OUTPUT is defined;
// otherwise both compile away to empty do/while(0) statements.
#ifdef WEAR_LEVELING_DEBUG_OUTPUT
# include <stdio.h>
// Prints a debug message, prefixed with "Wear leveling: ".
# define wl_dprintf(...) printf("Wear leveling: " __VA_ARGS__)
// Hex-dumps `length` bytes starting at `value`, tagged with the logical `address`.
# define wl_dump(address, value, length) \
do { \
printf("[0x%04X]: ", (int)(address)); \
const uint8_t* p = (const uint8_t*)(value); \
for (int i = 0; i < (length); ++i) { \
printf(" %02X", (int)p[i]); \
} \
printf("\n"); \
} while (0)
#else
# define wl_dprintf(...) \
do { \
} while (0)
# define wl_dump(...) \
do { \
} while (0)
#endif // WEAR_LEVELING_DEBUG_OUTPUT
// Optional runtime assertions, forwarding to standard C assert() when
// WEAR_LEVELING_ASSERTS is defined; no-op otherwise.
#ifdef WEAR_LEVELING_ASSERTS
# include <assert.h>
# define wl_assert(...) assert(__VA_ARGS__)
#else
# define wl_assert(...) \
do { \
} while (0)
#endif // WEAR_LEVELING_ASSERTS
// Compile-time validation of configurable options
_Static_assert(WEAR_LEVELING_BACKING_SIZE >= (WEAR_LEVELING_LOGICAL_SIZE * 2), "Total backing size must be at least twice the size of the logical size");
_Static_assert(WEAR_LEVELING_LOGICAL_SIZE % BACKING_STORE_WRITE_SIZE == 0, "Logical size must be a multiple of write size");
_Static_assert(WEAR_LEVELING_BACKING_SIZE % WEAR_LEVELING_LOGICAL_SIZE == 0, "Backing size must be a multiple of logical size");
// Backing Store API, to be implemented elsewhere by flash driver etc.
// NOTE(review): the bool return presumably signals success (true) / failure
// (false) -- confirm against the driver implementations and test mocks.
// One-time initialization of the backing store driver.
bool backing_store_init(void);
// Makes the backing store writable (e.g. flash unlock); paired with backing_store_lock().
bool backing_store_unlock(void);
// Erases the wear-leveling area of the backing store.
bool backing_store_erase(void);
// Writes one backing_store_int_t unit at the given backing-store offset.
bool backing_store_write(uint32_t address, backing_store_int_t value);
// Re-locks the backing store after writes have completed.
bool backing_store_lock(void);
// Reads one backing_store_int_t unit from the given backing-store offset.
bool backing_store_read(uint32_t address, backing_store_int_t* value);
/**
 * Helper type used to contain a write log entry.
 *
 * The same 8 bytes are viewable as one 64-bit value, two 32-bit values, four
 * 16-bit values, or eight bytes, so an entry can be read/written in whichever
 * width matches BACKING_STORE_WRITE_SIZE (2/4/8 bytes).
 */
typedef union write_log_entry_t {
    uint64_t raw64;
    uint32_t raw32[2];
    uint16_t raw16[4];
    uint8_t  raw8[8];
} write_log_entry_t;
// Every log entry must occupy exactly 8 bytes in the backing store.
_Static_assert(sizeof(write_log_entry_t) == 8, "Wear leveling write log entry size was not 8");
/**
 * Log entry type discriminator.
 *
 * The discriminator is stored in the top 2 bits of the first byte of each
 * entry (see LOG_ENTRY_GET_TYPE), hence the static assert below.
 */
enum {
    // 0x00 -- Multi-byte storage type
    LOG_ENTRY_TYPE_MULTIBYTE,
    // 0x01 -- 2-byte backing store write optimization: address < 64
    LOG_ENTRY_TYPE_OPTIMIZED_64,
    // 0x02 -- 2-byte backing store write optimization: word-encoded 0/1 values
    LOG_ENTRY_TYPE_WORD_01,
    // Count of entry types; must fit in the 2-bit discriminator field.
    LOG_ENTRY_TYPES
};
_Static_assert(LOG_ENTRY_TYPES <= (1 << 2), "Too many log entry types to fit into 2 bits of storage");
// Produces a mask with the low `n` bits set, e.g. BITMASK_FOR_BITCOUNT(3) == 0x07.
#define BITMASK_FOR_BITCOUNT(n) ((1 << (n)) - 1)
// Extracts the 2-bit type discriminator from the top bits of byte 0 of an entry.
#define LOG_ENTRY_GET_TYPE(entry) (((entry).raw8[0] >> 6) & BITMASK_FOR_BITCOUNT(2))
// Multi-byte entry layout (byte 0 = [type:2][length:3][address bits 18..16:3],
// bytes 1-2 = address bits 15..0). The 3 header bytes leave 5 payload bytes
// in the 8-byte entry, hence the maximum below.
#define LOG_ENTRY_MULTIBYTE_MAX_BYTES 5
// Reassembles the 19-bit logical address from bytes 0-2 of a multi-byte entry.
#define LOG_ENTRY_MULTIBYTE_GET_ADDRESS(entry) (((((uint32_t)((entry).raw8[0])) & BITMASK_FOR_BITCOUNT(3)) << 16) | (((uint32_t)((entry).raw8[1])) << 8) | (entry).raw8[2])
// Extracts the 3-bit payload length (bits 5..3 of byte 0).
#define LOG_ENTRY_MULTIBYTE_GET_LENGTH(entry) ((uint8_t)(((entry).raw8[0] >> 3) & BITMASK_FOR_BITCOUNT(3)))
// Builds the 3-byte header of a multi-byte entry; payload bytes are filled in
// separately by the caller.
#define LOG_ENTRY_MAKE_MULTIBYTE(address, length) \
    (write_log_entry_t) { \
        .raw8 = { \
            [0] = (((((uint8_t)LOG_ENTRY_TYPE_MULTIBYTE) & BITMASK_FOR_BITCOUNT(2)) << 6) /* type */ \
                   | ((((uint8_t)(length)) & BITMASK_FOR_BITCOUNT(3)) << 3) /* length */ \
                   | ((((uint8_t)((address) >> 16))) & BITMASK_FOR_BITCOUNT(3)) /* address */ \
                   ), \
            [1] = (((uint8_t)((address) >> 8)) & BITMASK_FOR_BITCOUNT(8)), /* address */ \
            [2] = (((uint8_t)(address)) & BITMASK_FOR_BITCOUNT(8)), /* address */ \
        } \
    }
// Optimized-64 entry layout (byte 0 = [type:2][address:6], byte 1 = value):
// a single byte value at a logical address below 64, packed into 2 bytes.
#define LOG_ENTRY_OPTIMIZED_64_GET_ADDRESS(entry) ((uint32_t)((entry).raw8[0] & BITMASK_FOR_BITCOUNT(6)))
#define LOG_ENTRY_OPTIMIZED_64_GET_VALUE(entry) ((entry).raw8[1])
#define LOG_ENTRY_MAKE_OPTIMIZED_64(address, value) \
    (write_log_entry_t) { \
        .raw8 = { \
            [0] = (((((uint8_t)LOG_ENTRY_TYPE_OPTIMIZED_64) & BITMASK_FOR_BITCOUNT(2)) << 6) /* type */ \
                   | ((((uint8_t)(address))) & BITMASK_FOR_BITCOUNT(6)) /* address */ \
                   ), \
            [1] = ((uint8_t)(value)), /* value */ \
        } \
    }
// Word-01 entry layout (byte 0 = [type:2][value:1][address bits 13..9:5],
// byte 1 = address bits 8..1): a 0/1 byte value at an even (word-aligned)
// logical address -- bit 0 of the address is implicitly zero.
#define LOG_ENTRY_WORD_01_GET_ADDRESS(entry) ((((uint32_t)(((entry).raw8[0]) & BITMASK_FOR_BITCOUNT(5))) << 9) | (((uint32_t)((entry).raw8[1])) << 1))
#define LOG_ENTRY_WORD_01_GET_VALUE(entry) ((uint8_t)((entry).raw8[0] >> 5) & BITMASK_FOR_BITCOUNT(1))
#define LOG_ENTRY_MAKE_WORD_01(address, value) \
    (write_log_entry_t) { \
        .raw8 = { \
            [0] = (((((uint8_t)LOG_ENTRY_TYPE_WORD_01) & BITMASK_FOR_BITCOUNT(2)) << 6) /* type */ \
                   | (((((uint8_t)((value) ? 1 : 0))) & BITMASK_FOR_BITCOUNT(1)) << 5) /* value */ \
                   | ((((uint8_t)((address) >> 9))) & BITMASK_FOR_BITCOUNT(5)) /* address */ \
                   ), \
            [1] = (uint8_t)((address) >> 1), /* address */ \
        } \
    }

Loading…
Cancel
Save