Implementing FNV hash in Swift - algorithm

I am trying to implement a version of FNV hash in swift. Here it is in Objective-C:
// Hashes `length` bytes starting at `a`.
// NOTE(review): despite the FNV label, 1000003 is the multiplier from
// CPython's old string hash, not an FNV prime, and the state is seeded
// from the first byte shifted left by 7 — confirm which algorithm the
// callers actually expect.
+ (uint32_t)hash:(uint8_t *)a length:(uint32_t)length
{
uint8_t *p;
uint32_t x;
p = a;
// Seed from the first byte; note this dereferences *p even when length == 0.
x = *p << 7;
// `int i` vs `uint32_t length`: i is promoted for the comparison, fine here.
for (int i=0; i<length; i++) {
// Multiply-then-XOR round; unsigned arithmetic wraps, which is intended.
x = (1000003 * x) ^ *p++;
// NOTE(review): folding the length in on *every* iteration is unusual —
// the classic variant XORs the length in once, after the loop. Verify.
x ^= length;
}
// -1 converts to 0xFFFFFFFF for this unsigned compare; that value is
// remapped so it can be reserved as a sentinel by callers.
if (x == -1) {
x = -2;
}
return x;
}
Here is my attempt at porting it to swift:
// First port attempt (Swift 1 beta: `UInt8[]` was the array syntax then).
func hashFNV(data: UInt8[]) -> UInt32 {
// `x` is inferred from data[0] (a UInt8), not UInt32 as intended, and
// `data[0]` itself traps when `data` is empty.
var x = data[0] << 7
for byte in data {
// Likely crash site: unlike C, Swift's `*` traps on overflow with
// EXC_BAD_INSTRUCTION; the wrapping form is `&*`.
x *= 1000003
x ^= byte
// Mixed integer types (`data.count` is Int) — Swift has no implicit
// integer conversions.
x ^= data.count
}
if x == -1 {
x = -2
}
return x
}
It compiles but results in an error at runtime:
EXC_BAD_INSTRUCTION (code=EXC_I386_INVOP,subcode=0x0)
Same error when I try in the playground:
Playground execution failed: error: Execution was interrupted, reason: EXC_BAD_INSTRUCTION (code=EXC_I386_INVOP, subcode=0x0).
The process has been left at the point where it was interrupted, use "thread return -x" to return to the state before expression evaluation.
* thread #1: tid = 0x619fa, 0x000000010d119aad, queue = 'com.apple.main-thread', stop reason = EXC_BAD_INSTRUCTION (code=EXC_I386_INVOP, subcode=0x0)
* frame #0: 0x000000010d119aad
frame #1: 0x0000000100204880 libswift_stdlib_core.dylib`value witness table for Swift.Int + 160
I thought that maybe it was related to the overflow, but the following code also fails with the same error:
// Second attempt: x is explicitly UInt32 and the multiply now uses the
// wrapping `&*` operator, but it still failed.
func hashFNV(data: UInt8[]) -> UInt32 {
var x = UInt32(data[0]) << 7
for byte in data {
x = 1000003 &* x
// NOTE(review): `byte` is UInt8 and `data.count` is Int while `x` is
// UInt32; Swift has no implicit integer conversions, so these two lines
// are the compile error the author confirms in EDIT 2.
x ^= byte
x ^= data.count
}
// Signed literals mixed with UInt32 — same class of type error.
if x == -1 {
x = -2
}
return x
}
EDIT:
Actually, shouldn't the fact that I am trying to assign -2 to x result in a compile error? I thought swift won't implicitly cast from what looks like Int (-2) to UInt32 (x).
Same with the x ^= byte line. byte should be UInt8 and x is UInt32.
EDIT 2:
This was a compile error (see comments below).
Fixed the compile error, still fails at runtime:
// Third attempt: everything is Int, so the mixed-type errors are gone.
func hashFNV(data: UInt8[]) -> UInt32 {
var x = Int(data[0]) << 7
for byte in data {
// `&*` wraps, so on 64-bit x can legitimately become negative or
// exceed UInt32.max.
x = 1000003 &* x
x ^= Int(byte)
x ^= data.count
}
if x == -1 {
x = -2
}
// NOTE(review): likely remaining crash site — the UInt32(_:) initializer
// traps when x is negative or > UInt32.max. Use a truncating conversion
// (UInt32(truncatingIfNeeded:) in modern Swift) to take the low 32 bits.
return UInt32(x)
}

If you are still looking for an implementation, here is mine. It is built much like the regular default Hasher from the standard lib.
/// Incremental FNV-1a (64-bit) hasher, shaped like the standard library's
/// `Hasher`: feed bytes in with `combine`, then read the result with
/// `finalize()`.
struct HasherFNV1a {
    /// Accumulated state, seeded with the FNV-1a 64-bit offset basis
    /// (0xcbf29ce484222325 == 14_695_981_039_346_656_037).
    private var state: UInt = 0xcbf2_9ce4_8422_2325
    /// The FNV 64-bit prime (0x100000001b3 == 1_099_511_628_211).
    private let fnvPrime: UInt = 0x0000_0100_0000_01b3

    /// Folds every byte of `sequence` into the hash state.
    /// FNV-1a order: XOR the byte in first, then multiply by the prime.
    mutating func combine<S: Sequence>(_ sequence: S) where S.Element == UInt8 {
        var iterator = sequence.makeIterator()
        while let byte = iterator.next() {
            // `&*` wraps on overflow by design — overflow is part of FNV.
            state = (state ^ UInt(byte)) &* fnvPrime
        }
    }

    /// Returns the finished hash as an `Int` carrying the same bit pattern.
    func finalize() -> Int {
        return Int(truncatingIfNeeded: state)
    }
}
extension HasherFNV1a {
/// Hashes the UTF-8 bytes of `string`.
mutating func combine(_ string: String) {
combine(string.utf8)
}
/// Hashes a Bool as a single byte: 1 for true, 0 for false.
/// (The integer literal is inferred as UInt8 so that
/// CollectionOfOne satisfies the S.Element == UInt8 constraint.)
mutating func combine(_ bool: Bool) {
combine(CollectionOfOne(bool ? 1 : 0))
}
}
Keep in mind that this is FNV-1a; if you truly need FNV-1, you can just swap the two lines in the loop.

I found this GPL Swift implementation:
//
// FNVHash.swift
//
// A Swift implementation of the Fowler–Noll–Vo (FNV) hash function
// See http://www.isthe.com/chongo/tech/comp/fnv/
//
// Created by Mauricio Santos on 3/9/15.
import Foundation
// MARK:- Constants
// FNV offset-basis / prime pairs, selected to match the platform word size
// because the hash state is the word-sized `UInt`.
// NOTE(review): the arch list only names arm64/x86_64 — other 64-bit
// targets would silently get the 32-bit parameters; verify if ported.
private struct Constants {
// FNV parameters
#if arch(arm64) || arch(x86_64) // 64-bit
static let OffsetBasis: UInt = 14695981039346656037
static let FNVPrime: UInt = 1099511628211
#else // 32-bit
static let OffsetBasis: UInt = 2166136261
static let FNVPrime: UInt = 16777619
#endif
}
// MARK:- Public API
/// Calculates FNV-1 hash from a raw byte array.
///
/// FNV-1 folds each byte in with "multiply by the prime, then XOR",
/// using wrapping multiplication (`&*`) because overflow is part of the
/// algorithm, not an error.
public func fnv1(bytes: [UInt8]) -> UInt {
    return bytes.reduce(Constants.OffsetBasis) { partial, byte in
        (partial &* Constants.FNVPrime) ^ UInt(byte)
    }
}
/// Calculates FNV-1a hash from a raw byte array.
///
/// FNV-1a is FNV-1 with the two loop steps swapped: XOR the byte in
/// first, then do the wrapping multiply by the prime.
public func fnv1a(bytes: [UInt8]) -> UInt {
    var result = Constants.OffsetBasis
    for nextByte in bytes {
        result = (result ^ UInt(nextByte)) &* Constants.FNVPrime
    }
    return result
}
/// Calculates FNV-1 hash from a String using its UTF-8 representation.
public func fnv1(str: String) -> UInt {
return fnv1(bytesFromString(str))
}
/// Calculates FNV-1a hash from a String using its UTF-8 representation.
public func fnv1a(str: String) -> UInt {
return fnv1a(bytesFromString(str))
}
/// Calculates FNV-1 hash from an integer type.
/// NOTE(review): `IntegerType` / `FloatingPointType` are Swift 1.x protocol
/// names (later renamed, ultimately `BinaryInteger` / `FloatingPoint`), so
/// this file targets a Swift 1-era compiler.
/// The hash covers the value's raw in-memory bytes — see bytesFromNumber.
public func fnv1<T: IntegerType>(value: T) -> UInt {
return fnv1(bytesFromNumber(value))
}
/// Calculates FNV-1a hash from an integer type.
public func fnv1a<T: IntegerType>(value: T) -> UInt {
return fnv1a(bytesFromNumber(value))
}
/// Calculates FNV-1 hash from a floating point type.
public func fnv1<T: FloatingPointType>(value: T) -> UInt {
return fnv1(bytesFromNumber(value))
}
/// Calculates FNV-1a hash from a floating point type.
public func fnv1a<T: FloatingPointType>(value: T) -> UInt {
return fnv1a(bytesFromNumber(value))
}
// MARK:- Private helper functions
/// Returns the UTF-8 code units of `str` as a byte array.
private func bytesFromString(str: String) -> [UInt8] {
    // A String's UTF-8 view already yields UInt8 code units, so the
    // array initializer collects them directly — no manual append loop.
    return [UInt8](str.utf8)
}
/// Reinterprets the in-memory representation of `value` as a byte array.
/// NOTE(review): this reads the raw native-endian bytes, so number hashes
/// are not portable across platforms with different endianness or widths —
/// confirm that is acceptable for callers.
/// Uses Swift 1/2-era syntax (`var` parameter, `sizeof`, pointer-cast
/// initializer) that was removed in Swift 3.
private func bytesFromNumber<T>(var value: T) -> [UInt8] {
return withUnsafePointer(&value) {
Array(UnsafeBufferPointer(start: UnsafePointer<UInt8>($0), count: sizeof(T)))
}
}

Related

What's the idiomatic replacement for glsl output parameters in wgsl?

In glsl and hlsl, I can define a function like this:
float voronoi(vec2 x, out int2 cell) {
cell = ...
return ...
}
However, it doesn't seem like this is possible in wgsl.
What's the intended replacement for this? I guess I could define a VoronoiResult struct, but it seems overly boilerplate heavy:
// Struct-return workaround for the missing `out` parameter: bundle both
// outputs into a single returned value.
// NOTE(review): `;` between members and after the closing brace is
// early-WGSL syntax; current WGSL separates members with `,` and takes no
// trailing semicolon — verify against the target toolchain version.
struct VoronoiResult {
cell: vec2<i32>;
distance: f32;
};
fn voronoi(x: vec2<f32>) -> VoronoiResult {
// ...
// `distance` and `cell` here stand for locals computed in the elided body.
var ret: VoronoiResult;
ret.distance = distance;
ret.cell = cell;
return ret;
}
The equivalent would be to use a pointer argument:
// Pointer-parameter equivalent of a GLSL `out` argument: the callee writes
// its second result through a ptr<function, ...> instead of returning it.
fn voronoi(x: vec2<f32>, cell: ptr<function, vec2<i32>>) -> f32 {
*cell = vec2(1, 2);
return 1.f;
}
// NOTE(review): current WGSL spells these attributes @compute
// @workgroup_size(1); the `#` form looks like a transcription artifact.
#compute #workgroup_size(1)
fn main() {
var a: vec2<i32>;
// Pass the address of a `var` (addressable storage) as the out-param.
var f = voronoi(vec2(1.f, 1.f), &a);
}
This produces the HLSL:
// HLSL emitted by the WGSL compiler for the pointer version: the
// ptr<function, ...> parameter lowers to an `inout` argument.
float voronoi(float2 x, inout int2 cell) {
cell = int2(1, 2);
return 1.0f;
}
[numthreads(1, 1, 1)]
void main() {
int2 a = int2(0, 0);
// (1.0f).xx is the splat of 1.0 into a float2.
float f = voronoi((1.0f).xx, a);
return;
}
You can also make the struct version shorter by using the struct initializer:
// Shorter struct version: build the result with the value constructor in
// one expression instead of member-by-member assignment.
struct Out {
cell: vec2<i32>,
val: f32,
}
fn voronoi(x: vec2<f32>) -> Out {
return Out(vec2(1, 2), 1.f);
}
// NOTE(review): current WGSL spells these attributes @compute
// @workgroup_size(1).
#compute #workgroup_size(1)
fn main() {
var f = voronoi(vec2(1.f, 1.f));
}

Global `comptime var` in Zig

In Zig, I can do this with no problems:
// Works: a `comptime var` local to a function may be mutated at comptime.
fn foo() void {
comptime var num: comptime_int = 0;
num += 1;
}
But when I try declaring the variable outside of a function, I get a compile error:
// Fails: `comptime var` is not accepted at container (file) scope — the
// parser stops at `var`, producing the "expected block or field" error
// quoted below.
comptime var num: comptime_int = 0;
fn foo() void {
num += 1;
}
fn bar() void {
num += 2;
}
error: expected block or field, found 'var'
Zig version: 0.9.0-dev.453+7ef854682
Use the method used in zorrow. It defines the variable in a function (a block works too), then it returns a struct with functions for accessing it.
You can create a struct that defines get and set functions:
// Workaround: hide the mutable comptime state inside a labelled block and
// expose it through a struct's accessor functions; `num` itself is a const
// (the struct type), which container scope does allow.
const num = block_name: {
comptime var self: comptime_int = 0;
const result = struct {
fn get() comptime_int {
return self;
}
fn increment(amount: comptime_int) void {
self += amount;
}
};
// The labelled block yields the accessor struct as the value of `num`.
break :block_name result;
};
fn foo() void {
num.increment(1);
}
fn bar() void {
num.increment(2);
}
In the future, you will be able to use a const with a pointer to the mutable value, and the method shown above will no longer be allowed by the compiler: https://github.com/ziglang/zig/issues/7396

Sort a vector of objects by a member of map in this vector

I have a very simple question, I guess, but...
I have to sort a vector by its own member, but I cannot.
This is my function for filling the vector with objects from another vector.
I have to sort the vector SortDealers by a specific product, but I don't know how to pass the name of the Stock to my overloaded operator<
// Collects (by value) the dealers stocking product s1, sorts the copies,
// and prints them.
void CShop::sortVector(const CStock& s1)
{
vector<CDealer> SortDealers;
vector<CDealer* >::iterator it = Dealers.begin();
while (it != Dealers.end())
{
if ((*(*it)).ComapareNameProducts(s1))
{
SortDealers.push_back(*(*it));
}
it++;
}
// NOTE(review): this uses CDealer::operator<, which compares dealers by
// their total stock across *all* products — s1 cannot reach it from here.
// Pass a comparator (functor or lambda capturing the product) as the
// third argument of sort instead.
sort(SortDealers.begin(), SortDealers.end());
copy(SortDealers.begin(), SortDealers.end(), ostream_iterator<CDealer>(cout, "\n"));
}
this is overloading operator<:
I have to sort by unsigned member of the map.
// Orders dealers by the sum of the `unsigned` quantity (second.first) over
// every entry in their Stock map — i.e. by total stock, not by any single
// product, which is why sortVector cannot sort "by specific product".
// NOTE(review): should be const-qualified (bool operator<(...) const) so it
// is callable on const dealers; it compiles here only because std::sort
// works on non-const copies.
bool CDealer::operator<(const CDealer & o1)
{
unsigned res1 = 0;
unsigned res2= 0;
map<const CStock, pair<unsigned, double>>::const_iterator it = Stock.begin();
map<const CStock, pair<unsigned, double>>::const_iterator iter = o1.Stock.begin();
// Sum this dealer's quantities.
while (it != Stock.end())
{
res1 += it->second.first;
it++;
}
// Sum the other dealer's quantities.
while (iter != o1.Stock.end())
{
res2 += iter->second.first;
iter++;
}
return (res1 < res2);
}
You can use functor:
class less_than
{
const string stockname;
public:
less_than(string s) : stockname(s) {}
inline bool operator() const (const CDealer& a, const CDealer& b)
{
// use stockname here
}
};
sort(SortDealers.begin(), SortDealers.end(), less_than("name"));
Also you can use lambda expression providing stock name in its capture.
Related answer.

Simple Swift function return error

I am converting some algorithm pseudo code to Swift and have the following function:
// Intended to return the largest of the first elements of `a` — but the
// return type annotation is missing, so Swift infers () (Void) and
// `return result` produces "'Int' is not convertible to '()'".
// Declare the function as `-> Int`.
func max(a: [Int], b: Int) {
var result = a[0]
var i: Int
// NOTE(review): `i <= b` treats b as an *inclusive* index bound; if b is
// the element count this reads one past the end — confirm the intent.
for (i = 1; i <= b; i++) {
if (a[i] > result) {
result = a[i]
}
}
return result
}
I get an error when returning the result: 'Int' is not convertible to '()'
I've had a search online and can't find an answer to this question and am hoping someone can point me in the right direction.
Thanks
The return type is missing in the function declaration:
func max(inout a: [Int], b: Int) -> Int {
^^^^^^
Without a return type, Swift defaults to an empty tuple (), and that's what the error means: Int is not convertible to an empty tuple.
Also note that your return statement is misplaced: it should go right before the last closing bracket
}
return result
}
and not
return result
}
}
You must Implement the return type like this
// Fixed version: the `-> Int` return type is what the question was missing.
// NOTE(review): the added `inout` on `a` is unrelated to the fix and forces
// callers to pass `&array`; also, C-style `for` and `++` were removed in
// Swift 3, so this targets Swift 1/2.
func max(inout a: [Int], b: Int)-> Int {
var result = a[0]
var i: Int
for (i = 1; i <= b; i++) {
if (a[i] > result) {
result = a[i]
}
}
return result
}

Problems calculating CRC for NRPE protocol on .NET Micro Framework (Netduino)

I am trying to write an NRPE interpreter for a Netduino board. This is an Arduino-type board running .NET Micro Framework 4.3. I'm having trouble calculating the CRC that the protocol calls for, which looks like this (original C++ header file snippet):
typedef struct packet_struct {
int16_t packet_version;
int16_t packet_type;
uint32_t crc32_value;
int16_t result_code;
char buffer[1024];
} packet;
There are definitely byte ordering problems because I'm moving from big-endian (Network) to little endian (Netduino/.Net). I have been trying to be careful to reverse and re-reverse the Int16 and Uint32s as they come in and out of my structure. When I re-output a packet I've read in from the wire it is identical, so I believe that much is handled properly. But the CRC I calculate for it is not. The routine I'm calling is Utility.ComputeCRC from the Micro framework
Others have had similar problems in this area, so I'm fortunate enough to have some clues what the problem might be:
The NRPE Protocol Explained
Stack Overflow post about CRC'ing NRPE posts in Python
CRC implementations for Micro
For example, it seems clear the original message is 1034 bytes, padded to 1036. Where I'm not so fortunate is that I'm on the constrained Micro environment, and all the example code for CRC I can find generally involves templates, Linq, or other libraries I don't have access to.
All help appreciated. Here's some sample code where I attempt to re-compute a CRC from an existing valid packet unsuccessfully.
Code Output:
Original 1036 bytes: 0002000174D13FD5426E5F4E5250455F434845434B0000000000000000...
Original CRC: 3FD574D1
1036 bytes with zeroed out checksum: 0002000100000000426E5F4E5250455F434845434B00000000000000....
Re-computed checksum (0xFFFF seed): F5B1C55A
Actual Code:
using System;
using System.Text;
// .NET Micro Framework 4.3
using Microsoft.SPOT;
using Microsoft.SPOT.Hardware;
namespace TestApp
{
public class Program
{
/// <summary>
/// These are the bytes as-received from the wire, hex encoded for readability here.
/// </summary>
private const string OriginalNetworkBytes = "0002000174D13FD5426E5F4E5250455F434845434B00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000
000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000";
/// <summary>
/// Index the CRC starts at in the original message
/// </summary>
private const int CrcIndex = 4;
public static void Main()
{
// Decode the captured packet, show the CRC it carried, zero the CRC
// field, and try to recompute the same value from the zeroed buffer.
byte[] bufferBytes = StringToByteArrayFastest(OriginalNetworkBytes);
PrintBytesInHex("Original " + bufferBytes.Length + " bytes: ", bufferBytes);
UInt32 originalCrc = ParseSwappedUInt32(bufferBytes, CrcIndex);
Debug.Print("Original CRC: " + originalCrc.ToString("X"));
// Zero out CRC, then attempt to recompute the CRC
ZeroOutChecksum(bufferBytes);
PrintBytesInHex(bufferBytes.Length + " bytes with zeroed out checksum: ", bufferBytes);
// NOTE(review): the recomputed value does not match — per the answer
// below, Utility.ComputeCRC could not be made to reproduce the NRPE
// CRC-32 with any seed; the working fix was the ported table-driven
// CRC-32 (NrpeCrc) from nrpe/src/utils.c.
uint computedCrc = Utility.ComputeCRC(bufferBytes, 0, bufferBytes.Length, 0xFFFF);
Debug.Print("Re-computed checksum (0xFFFF seed): " + computedCrc.ToString("X"));
}
/// <summary>
/// Converts a hex string (two characters per byte) into its byte array.
/// From this fine Stack Overflow post:
/// https://stackoverflow.com/questions/321370/convert-hex-string-to-byte-array
/// Because as the author points out, "also works on .NET Micro Framework where (in SDK4.3) byte.Parse(string) only
/// permits integer formats."
/// </summary>
/// <param name="hex">Hex digits, even length; digits and uppercase A-F
/// only (see GetHexVal).</param>
/// <returns>One byte per pair of hex digits.</returns>
public static byte[] StringToByteArrayFastest(string hex)
{
if (hex.Length%2 == 1)
throw new Exception("The binary key cannot have an odd number of digits");
var arr = new byte[hex.Length >> 1];
for (int i = 0; i < hex.Length >> 1; ++i)
{
// High nibble from the even-position char, low nibble from the odd one.
arr[i] = (byte) ((GetHexVal(hex[i << 1]) << 4) + (GetHexVal(hex[(i << 1) + 1])));
}
return arr;
}
// Converts one hex digit to its numeric value. Handles '0'-'9' and
// uppercase 'A'-'F' only (for lowercase 'a'-'f' the letter offset would
// be 87 instead of 55).
public static int GetHexVal(char hex)
{
    int code = hex;
    if (code < 58)
    {
        return code - 48;   // decimal digit '0'..'9'
    }
    return code - 55;       // uppercase letter 'A'..'F'
}
// Reads a UInt32 at arrayIndex after byte-swapping adjacent 16-bit pairs.
// NOTE(review): ByteSwapper swaps within each 2-byte pair ([a b c d] ->
// [b a d c]) rather than fully reversing the 4 bytes, so this is not a
// plain big-endian read — for the sample packet it yields 3FD574D1 from
// wire bytes 74D13FD5. Verify this matches the intended wire format.
public static UInt32 ParseSwappedUInt32(byte[] byteArray, int arrayIndex)
{
byte[] swappedBytes = ByteSwapper(byteArray, arrayIndex, 4);
return BitConverter.ToUInt32(swappedBytes, 0);
}
// Copies countOfBytesToSwap bytes starting at incomingArrayIndex into a
// new array, swapping each adjacent 2-byte pair ([a b] -> [b a]).
// NOTE(review): pairing is decided by the parity of the *absolute* source
// index (arrayIndex % 2), so the result is only as described when
// incomingArrayIndex is even (it is here: CrcIndex == 4). The
// `|| arrayIndex == 0` clause is redundant since 0 % 2 == 0.
public static byte[] ByteSwapper(byte[] array, int incomingArrayIndex, int countOfBytesToSwap)
{
if (countOfBytesToSwap%2 != 0)
{
throw new Exception("Bytes to be swapped must be divisible by 2; you requested " + countOfBytesToSwap);
}
int outgoingArrayIndex = 0;
byte lastByte = 0;
var arrayToReturn = new byte[countOfBytesToSwap];
int finalArrayIndex = incomingArrayIndex + countOfBytesToSwap;
for (int arrayIndex = incomingArrayIndex; arrayIndex < finalArrayIndex; arrayIndex++)
{
bool isEvenIndex = arrayIndex%2 == 0 || arrayIndex == 0;
byte currentByte = array[arrayIndex];
if (isEvenIndex)
{
// Store current byte for next pass through
lastByte = currentByte;
}
else
{
// Swap two bytes, put into outgoing array
arrayToReturn[outgoingArrayIndex] = currentByte;
arrayToReturn[outgoingArrayIndex + 1] = lastByte;
outgoingArrayIndex += 2;
}
}
return arrayToReturn;
}
// Clears the four CRC bytes in place so the checksum can be recomputed
// over the packet exactly as the sender computed it.
private static void ZeroOutChecksum(byte[] messageBytesToClear)
{
    for (int offset = 0; offset < 4; offset++)
    {
        messageBytesToClear[CrcIndex + offset] = 0;
    }
}
/// <summary>
/// Debug function to output the message as a hex string.
/// (Debug.Print is the Micro Framework logging call.)
/// </summary>
/// <param name="messageLabel">Prefix printed before the hex dump.</param>
/// <param name="messageBytes">Bytes rendered as uppercase hex.</param>
public static void PrintBytesInHex(string messageLabel, byte[] messageBytes)
{
string hexString = BytesToHexString(messageBytes);
Debug.Print(messageLabel + hexString);
}
// Renders each byte as two uppercase hex digits and concatenates them,
// e.g. { 0x0A, 0xFF } -> "0AFF". Returns "" for an empty array.
private static string BytesToHexString(byte[] messageBytes)
{
    var pieces = new string[messageBytes.Length];
    for (int i = 0; i < messageBytes.Length; i++)
    {
        pieces[i] = messageBytes[i].ToString("X2");
    }
    return string.Concat(pieces);
}
}
}
I eventually worked out a solution.
The full thing is documented here:
http://www.skyscratch.com/2014/04/02/rats-ate-the-washing-machine-or-a-nagios-nrpe-environmental-monitor-for-netduino/
The relevant CRC code goes like this:
using System;
namespace FloodSensor
{
/// <summary>
/// Standard reflected CRC-32 (polynomial 0xEDB88320, initial value
/// 0xFFFFFFFF, final XOR 0xFFFFFFFF) as the NRPE protocol uses.
/// Ported from https://github.com/KristianLyng/nrpe/blob/master/src/utils.c
/// because Microsoft.SPOT.Hardware.Utility.ComputeCRC
/// (http://msdn.microsoft.com/query/dev11.query?appId=Dev11IDEF1&l=EN-US&k=k%28Microsoft.SPOT.Hardware.Utility.ComputeCRC%29;k%28TargetFrameworkMoniker-.NETMicroFramework)
/// could not be made to return the same result with any seed.
/// </summary>
class NrpeCrc
{
    private const int CrcTableLength = 256;
    static private readonly UInt32[] Crc32Table = new UInt32[CrcTableLength];

    // Build the shared lookup table exactly once. (Previously the instance
    // constructor regenerated the static table on every construction —
    // redundant work, and racy if instances were created concurrently.)
    static NrpeCrc()
    {
        generateCrc32Table();
    }

    public NrpeCrc()
    {
        // Table is initialized by the static constructor; nothing to do.
    }

    // Populates Crc32Table with the CRC of every possible byte value.
    static private void generateCrc32Table()
    {
        const uint poly = 0xEDB88320;   // reflected CRC-32 polynomial
        for (int i = 0; i < CrcTableLength; i++)
        {
            var crc = (UInt32)i;
            for (int j = 8; j > 0; j--)
            {
                if ((crc & (UInt32)1) > 0)
                {
                    crc = (crc >> 1) ^ poly;
                }
                else
                {
                    crc >>= 1;
                }
            }
            Crc32Table[i] = crc;
        }
    }

    /// <summary>
    /// Calculates the CRC 32 value for a buffer.
    /// </summary>
    /// <param name="buffer">Bytes to checksum.</param>
    /// <param name="bufferSize">Number of leading bytes to include.</param>
    /// <returns>The finished CRC-32 value.</returns>
    public UInt32 CalculateCrc32(byte[] buffer, int bufferSize)
    {
        int currentIndex;
        uint crc = 0xFFFFFFFF;
        for (currentIndex = 0; currentIndex < bufferSize; currentIndex++)
        {
            int thisChar = buffer[currentIndex];
            // One table-driven step per input byte.
            crc = ((crc >> 8) & 0x00FFFFFF) ^ Crc32Table[(crc ^ thisChar) & 0xFF];
        }
        return (crc ^ 0xFFFFFFFF);   // final inversion
    }
}
}
See also https://github.com/StewLG/NetduinoNrpe/blob/master/FloodSensor/NrpeServer/NrpeCrc.cs

Resources