For instance, how can I construct an array in ATS containing all of the uppercase letters from A to Z? In C, this can be done as follows:
#include <assert.h>
#include <stdlib.h>

char *Letters()
{
    int i;
    char *cs = (char *)malloc(26);
    assert(cs != 0);
    for (i = 0; i < 26; i += 1) cs[i] = 'A' + i;
    return cs;
}
You could use the tabulate function for creating linear arrays. For instance,
extern
fun
Letters(): arrayptr(char, 26)
implement
Letters() =
arrayptr_tabulate_cloref<char>
(i2sz(26), lam(i) => 'A' + sz2i(i))
If you don't want to use a higher-order function, you can try the following template-based solution:
implement
Letters() =
arrayptr_tabulate<char>(i2sz(26)) where
{
implement array_tabulate$fopr<char> (i) = 'A' + sz2i(i)
}
Well, here's one way, although it's extremely complicated because it follows your outlined approach to the letter: linear proofs for arrays (aka dataviews), memory allocation, and array initialization via a while loop.
extern
fun
Letters (): arrayptr (char, 26)
implement
Letters () = let
val (pf_arr, pf_gc | p_arr) = array_ptr_alloc<char> ((i2sz)26)
var i: int = 0
prval [larr:addr] EQADDR () = eqaddr_make_ptr (p_arr)
var p = p_arr
prvar pf0 = array_v_nil {char} ()
prvar pf1 = pf_arr
//
val () =
while* {i:nat | i <= 26} .<26-i>. (
i: int (i)
, p: ptr (larr + i*sizeof(char))
, pf0: array_v (char, larr, i)
, pf1: array_v (char?, larr+i*sizeof(char), 26-i)
) : (
pf0: array_v (char, larr, 26)
, pf1: array_v (char?, larr+i*sizeof(char), 0)
) => (
i < 26
) {
//
prval (pf_at, pf1_res) = array_v_uncons {char?} (pf1)
prval () = pf1 := pf1_res
//
val c = 'A' + (g0ofg1)i
val () = ptr_set<char> (pf_at | p, c)
val () = p := ptr1_succ<char> (p)
//
prval () = pf0 := array_v_extend {char} (pf0, pf_at)
val () = i := i + 1
//
} // end of [val]
//
prval () = pf_arr := pf0
prval () = array_v_unnil {char?} (pf1)
//
val res = arrayptr_encode (pf_arr, pf_gc | p_arr)
in
res
end // end of [Letters]
You can run the code at Glot.io.
Apple has touted Swift's fast performance compared to other languages. I never doubted this until recently, when I began to write some code in Swift.
I implemented the xorshift algorithm in Swift, only to find that the Swift version is about 80 times slower than the Delphi version.
Xorshift is described at http://en.wikipedia.org/wiki/Xorshift
SWIFT version:
// xcrun -sdk macosx10.10 swiftc main.swift
import Foundation

func xor_shift(x0: UInt32, y0: UInt32, z0: UInt32, w0: UInt32) -> () -> UInt32 {
    var x = x0
    var y = y0
    var z = z0
    var w = w0
    func num() -> UInt32 {
        let t = x ^ (x << 11)
        x = y
        y = z
        z = w
        w = w ^ (w >> 19) ^ (t ^ (t >> 8))
        return w
    }
    return num
}

let loopcount = Int32.max
let xrand = xor_shift(2014, 12, 29, 2015)

let t0 = NSDate()
for _ in 0..<loopcount {
    xrand()
}
let t1 = NSDate()

let ms = Int(t1.timeIntervalSinceDate(t0) * 1000)
println("[SWIFT] Time used: \(ms) milliseconds, Loop count: \(loopcount)")
Delphi/Pascal version:
// Command line compile:
//   dcc64 xortest.dpr

{$APPTYPE CONSOLE}

program xortest;

uses sysutils;

type
  TRandSeed = record
    x, y, z, w: UInt32;
  end;

function xrand(var seed: TRandSeed): UInt32;
var
  t: UInt32;
begin
  t := seed.x xor (seed.x shl 11);
  seed.x := seed.y; seed.y := seed.z; seed.z := seed.w;
  seed.w := seed.w xor (seed.w shr 19) xor (t xor (t shr 8));
  result := seed.w
end;

var
  r: TRandSeed;
  t0, t1: TDateTime;
  s: string;
  i, loopcount: integer;

begin
  // Set the rand seed
  r.x := 2014; r.y := 12; r.z := 29; r.w := 2015;
  loopcount := high(Int32);
  t0 := now;
  for i := 1 to loopcount do xrand(r);
  t1 := now;
  s := Format('[PASCAL] Time used: %d milliseconds, Loopcount = %d', [Trunc((t1-t0)*24*3600*1000), loopcount]);
  writeln(s);
end.
Test environments:
OS X: iMac 27" Retina, 4 GHz Intel Core i7
Windows 7 running inside VMware Fusion on the above-mentioned iMac
The Swift version outputs:
[SWIFT] Time used: 412568 milliseconds, Loop count: 2147483647
The Pascal version outputs:
[PASCAL] Time used: 5083 milliseconds, Loopcount = 2147483647
The Pascal version runs 81 times faster than the Swift version, not to mention that the former runs inside a virtual machine.
Is Apple lying about the fast performance of Swift, or is there anything wrong with my code?
edit: oops, sorry, I mistranslated part of the Delphi code, so the result is not quite as good as a constant calculation – but the closure is definitely the problem, so you should re-run your comparison against this version, as it makes a dramatic difference.
Your Swift code is not a direct translation of your Delphi code, so you are not comparing apples to apples. In the Swift version, you are calling a function that returns a closure that captures some variables, then calling that closure, whereas in the Delphi version you are just calling a function that takes a struct. Below is a more direct Swift translation of the Delphi code.
Closures can often be a barrier to compiler optimization. Removing this barrier seems to help the code a lot: if I run my Swift equivalent after compiling with -O, it calculates 2,147,483,647 runs of xrand in 5,341 milliseconds (not the 0 milliseconds originally claimed – see the edit above), compared to 238,762 milliseconds for the version with the closure, on my horrible ancient laptop.
Why? Because without that barrier to optimization, the compiler has far more latitude to rewrite the code to be as fast as possible. It might even replace the entire computation with a constant value (it could also detect that the result is never used and not run the function at all, which is why I added a store of the result and a printout of it, just to be sure).
import Foundation

struct TRandSeed {
    var x: UInt32
    var y: UInt32
    var z: UInt32
    var w: UInt32
}

func xrand(inout seed: TRandSeed) -> UInt32 {
    var t = seed.x ^ (seed.x << 11)
    seed.x = seed.y
    seed.y = seed.z
    seed.z = seed.w
    seed.w = seed.w ^ (seed.w >> 19) ^ (t ^ (t >> 8))
    return seed.w
}

var r = TRandSeed(x: 2014, y: 12, z: 29, w: 2015)
let loopcount = Int32.max - 1

let t0 = NSDate()
for _ in 0..<loopcount {
    xrand(&r)
}
let result = xrand(&r)
let t1 = NSDate()

let ms = Int(t1.timeIntervalSinceDate(t0) * 1000)
println("[SWIFT] Time used: \(ms) milliseconds to calculate \(result), Loop count: \(loopcount+1)")
I slightly modified the code by Airspeed Velocity:
import Foundation

struct TRandSeed {
    var x: UInt32
    var y: UInt32
    var z: UInt32
    var w: UInt32
}

func xrand(inout seed: TRandSeed) -> UInt32 {
    let t = seed.x ^ (seed.x << 11)
    seed.x = seed.y
    seed.y = seed.z
    seed.z = seed.w
    seed.w = seed.w ^ (seed.w >> 19) ^ (t ^ (t >> 8))
    return seed.w
}

var r = TRandSeed(x: 2014, y: 12, z: 29, w: 2015)
let loopcount = Int32.max

let t0 = NSDate()
var total: UInt64 = 0
for _ in 0..<loopcount {
    let t = xrand(&r)
    total = total &+ UInt64(t)
}
let t1 = NSDate()

let ms = Int(t1.timeIntervalSinceDate(t0) * 1000)
println("[SWIFT] Time used: \(ms) milliseconds to calculate, Loop count: \(loopcount), Total = \(total)")
I compiled it with the command line: xcrun -sdk macosx10.10 swiftc -O main.swift
The new code outputs: [SWIFT] Time used: 2838 milliseconds to calculate, Loop count: 2147483647, Total = 4611723097222874280
The performance is now up to par. (And since the Pascal version runs inside a virtual machine, it's not fair to say the Swift binary is twice as fast.)
I have written a hash calculation function:
var hash = function (string) {
    var h = 7;
    var i = 0;
    var letters = "acdegilmnoprstuw";
    while (i < string.length) {
        h = h * 37 + letters.indexOf(string[i++]);
    }
    return h;
};
Here string = "agdpeew" and the result is 664804774844. But now I don't know how to invert the hash: if my input is 664804774844, the answer should be "agdpeew".
What algorithm can I use for this?
Maybe I can start with the division 664804774844 / 37, but how can I get the letter indexes?
For short strings, you can start by expressing the number in base 37. But why are you trying to do this? Most use cases for hash functions don't require you to invert the function, and many hash functions are designed to be difficult or impossible to invert, except by evaluating input after input until you find one that produces the hash value you are looking for.
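To see why base 37 works: each step of your hash computes h = h * 37 + index, so taking the result mod 37 repeatedly peels the letter indexes back off, last character first, until you are left with the initial value 7. A minimal sketch of that (my own, in TypeScript, assuming the same "acdegilmnoprstuw" alphabet and initial value as your code):

// Peel base-37 digits off the hash; each digit is a letter index.
// (664804774844 < 2^53, so plain number arithmetic is exact here.)
const letters = "acdegilmnoprstuw";

let h = 664804774844;
let decoded = "";
while (h > 7) {                          // 7 is the hash's initial value
    decoded = letters[h % 37] + decoded; // valid hashes only yield digits 0-15
    h = Math.floor(h / 37);
}
console.log(decoded); // "agdpeew"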
Below is code written in Swift; it has both encrypt (hash) and decrypt (unhash) for the hash value.
var letters = "acdegilmnoprstuw";
Hashing / Encrypt
func hash(s: String) -> Int {
    var h = 7 as Int;
    for (var i = 0; i < s.characters.count; i++) {
        // Getting the character at index i
        let s2: Character = s[s.startIndex.advancedBy(i)];
        // Getting its index in 'acdegilmnoprstuw'
        let l: Int = letters.startIndex.distanceTo(letters.characters.indexOf(s2)!);
        h = (h * 37 + l);
    }
    return h;
}
Unhashing / Decrypt
func unhash(hashValue: Int) -> String {
    var h = hashValue
    var unhashedString: String = ""
    while (h > 37) {
        unhashedString.append(letters[letters.startIndex.advancedBy(h % 37)])
        h = h / 37
    }
    return String(unhashedString.characters.reverse())
}
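For example, round-tripping the value from the question (on a 64-bit platform, where Swift's Int is 64 bits): hash("agdpeew") returns 664804774844, and unhash(664804774844) returns "agdpeew".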
I'm currently trying to determine the CRC used in the communication between a machine and a PC (and vice versa).
The devices communicate over a serial (RS232) connection.
All I have is captured data, from which we need to create a program that works with both devices.
The data was given to me by my boss, and the original program was corrupted, so we are trying to work it out.
I hope everyone can help.
Thanks :)
The sequence to use for the CRC calculation in your protocol is the ASCII string starting from the first printing character (e.g. the 'R' from REQ) up to and including the '1E' (<RS>) byte.
It's a CRC with the following specs, according to our CRC calculator:
CRC:16,1021,0000,0000,No,No
which means:
CRC width: 16 bit (of course)
polynomial: 1021 HEX (truncated CRC polynomial)
init value: 0000
final XOR applied: 0000
reflectedInput: No
reflectedOutput: No
(If 'init value' were FFFF, it would be a "16 bit width CRC as designated by CCITT").
See also the Docklight CRC glossary and the Boost CRC library on what the CRC terms mean plus sample code.
What I did was write a small script that tries out the popular 16-bit CRCs on varying parts of the first simple "REQ=INI" command, to see if I would end up with a checksum of 4255. This failed, but instead of going full brute force and trying all sorts of polynomials, I assumed that it was maybe just an oddball / flawed implementation of the known standards, and indeed succeeded with a variation of the CRC-CCITT.
Here is some slow & easy C code (not table-based!) to calculate all sorts of CRCs:
// Generic, not table-based CRC calculation
// Based on and credits to the following:
// CRC tester v1.3 written on 4th of February 2003 by Sven Reifegerste (zorc/reflex)
#include <assert.h>
#include <stdbool.h>

unsigned long reflect(unsigned long crc, int bitnum) {
    // reflects the lower 'bitnum' bits of 'crc'
    unsigned long i, j = 1, crcout = 0;
    for (i = (unsigned long)1 << (bitnum - 1); i; i >>= 1) {
        if (crc & i) crcout |= j;
        j <<= 1;
    }
    return (crcout);
}

unsigned long calcCRC(
    const int width, const unsigned long polynomial, const unsigned long initialRemainder,
    const unsigned long finalXOR, const int reflectedInput, const int reflectedOutput,
    const unsigned char message[], const long startIndex, const long endIndex)
{
    // Ensure the width is in range: 1-32 bits
    assert(width >= 1 && width <= 32);

    // some constant parameters used
    const bool b_refInput = (reflectedInput > 0);
    const bool b_refOutput = (reflectedOutput > 0);
    const unsigned long crcmask = ((((unsigned long)1 << (width - 1)) - 1) << 1) | 1;
    const unsigned long crchighbit = (unsigned long)1 << (width - 1);

    unsigned long j, c, bit;
    unsigned long crc = initialRemainder;

    for (long msgIndex = startIndex; msgIndex <= endIndex; ++msgIndex) {
        c = (unsigned long)message[msgIndex];
        if (b_refInput) c = reflect(c, 8);
        for (j = 0x80; j; j >>= 1) {
            bit = crc & crchighbit;
            crc <<= 1;
            if (c & j) bit ^= crchighbit;
            if (bit) crc ^= polynomial;
        }
    }

    if (b_refOutput) crc = reflect(crc, width);
    crc ^= finalXOR;
    crc &= crcmask;
    return (crc);
}
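For a quick standalone cross-check of just this protocol's variant, here is a minimal bit-by-bit sketch of my own (in TypeScript, not part of the original tooling) hard-wired to the 16,1021,0000,0000,No,No spec; per the analysis above, it should print 4255 for the REQ=INI frame:

// Bit-by-bit CRC-16 hard-wired to: poly 0x1021, init 0x0000,
// final XOR 0x0000, no input/output reflection.
function crc16(bytes: number[]): number {
    let crc = 0x0000;                            // init value 0000
    for (const b of bytes) {
        crc ^= b << 8;                           // feed the next byte, MSB first
        for (let i = 0; i < 8; i++) {
            crc = (crc & 0x8000) !== 0
                ? ((crc << 1) ^ 0x1021) & 0xffff // truncated CCITT polynomial
                : (crc << 1) & 0xffff;
        }
    }
    return crc;                                  // final XOR is 0000, nothing more to do
}

// "REQ=INI\r\n" plus the trailing 0x1E (<RS>): the span from the first
// printing character up to and including the '1E'.
const frame = [0x52, 0x45, 0x51, 0x3d, 0x49, 0x4e, 0x49, 0x0d, 0x0a, 0x1e];
console.log(crc16(frame)); // should print 4255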
With the generic C code and the CRC specs listed above, I have been able to re-calculate the following three sample CRCs:
10.03.2014 22:20:57.109 [TX] - REQ=INI<CR><LF>
<RS>CRC=4255<CR><LF>
<GS>
10.03.2014 22:20:57.731 [TX] - ANS=INI<CR><LF>
STATUS=0<CR><LF>
<RS>CRC=57654<CR><LF>
<GS>
10.03.2014 22:20:59.323 [TX] - ANS=INI<CR><LF>
STATUS=0<CR><LF>
MID="CTL1"<CR><LF>
DEF="DTLREQ";1025<CR><LF>
INFO=0<CR><LF>
<RS>CRC=1683<CR><LF>
<GS>
I failed on the very complex one with the DEF= parts - probably didn't understand the character sequence correctly.
The Docklight script I used to reverse engineer this:
Sub crcReverseEngineer()
    Dim crctypes(7)
    crctypes(0) = "CRC:16,1021,FFFF,0000" ' CCITT
    crctypes(1) = "CRC:16,8005,0000,0000" ' CRC-16
    crctypes(2) = "CRC:16,8005,FFFF,0000" ' CRC-MODBUS
    ' let's try also some nonstandard variations with different init and final Xor,
    ' but stick to the known two polynomials.
    crctypes(3) = "CRC:16,1021,FFFF,FFFF"
    crctypes(4) = "CRC:16,1021,0000,FFFF"
    crctypes(5) = "CRC:16,1021,0000,0000"
    crctypes(6) = "CRC:16,8005,FFFF,FFFF"
    crctypes(7) = "CRC:16,8005,FFFF,0000"

    crcString = "06 1C 52 45 51 3D 49 4E 49 0D 0A 1E 43 52 43 3D 30 30 30 30 0D 0A 1D"

    For reflectedInOrOut = 0 To 3
        For cType = 0 To 7
            crcSpec = crctypes(cType) & "," & IIf(reflectedInOrOut Mod 2 = 1, "Yes", "No") & "," & IIf(reflectedInOrOut > 1, "Yes", "No")
            For cStart = 1 To 3
                For cEnd = 9 To (Len(crcString) + 1) / 3
                    subDataString = Mid(crcString, (cStart - 1) * 3 + 1, (cEnd - cStart + 1) * 3)
                    result = DL.CalcChecksum(crcSpec, subDataString, "H")
                    resultInt = CLng("&h" + Left(result, 2)) * 256 + CLng("&h" + Right(result, 2))
                    If resultInt = 4255 Then
                        DL.AddComment "Found it!"
                        DL.AddComment "sequence: " & subDataString
                        DL.AddComment "CRC spec: " & crcSpec
                        DL.AddComment "CRC result: " & result & " (Integer = " & resultInt & ")"
                        Exit Sub
                    End If
                Next
            Next
        Next
    Next
End Sub

Public Function IIf(blnExpression, vTrueResult, vFalseResult)
    If blnExpression Then
        IIf = vTrueResult
    Else
        IIf = vFalseResult
    End If
End Function
Hope this helps and I'm happy to provide extra information or clarify details.
When drawing graphs, using SI codes is pretty much what we want. Our y-axis values tend to be large currency values, e.g. $10,411,504,201.20.
Abbreviated, at least in a US locale, this should translate to $10.4B.
But using d3.format's 's' type for SI codes, this would display as $10.4G. That might be great for some locales, and it is right when dealing with computer-based values (e.g. processor speed, memory, ...), but not with currency or other non-computer types of values.
Is there a way to get locale-specific functionality similar to SI codes that would convert billions to B instead of G, etc.?
(I realize this is mostly an SI-codes thing and not specific to D3, but since I'm using D3 this seems the most appropriate tag.)
I prefer overriding d3.formatPrefix. Then you can just forget about replacing strings within your viz code. Simply execute the following code immediately after loading D3.js.
// Change D3's SI prefixes to more business-friendly units
//   K = thousands
//   M = millions
//   B = billions
//   T = trillions
//   P = quadrillions
//   E = quintillions
// Small decimals are handled with e-n formatting.
var d3_formatPrefixes = ["e-24", "e-21", "e-18", "e-15", "e-12", "e-9", "e-6", "e-3", "",
                         "K", "M", "B", "T", "P", "E", "Z", "Y"].map(d3_formatPrefix);

// Override d3's formatPrefix function
d3.formatPrefix = function(value, precision) {
    var i = 0;
    if (value) {
        if (value < 0) {
            value *= -1;
        }
        if (precision) {
            value = d3.round(value, d3_format_precision(value, precision));
        }
        i = 1 + Math.floor(1e-12 + Math.log(value) / Math.LN10);
        i = Math.max(-24, Math.min(24, Math.floor((i - 1) / 3) * 3));
    }
    return d3_formatPrefixes[8 + i / 3];
};

function d3_formatPrefix(d, i) {
    var k = Math.pow(10, Math.abs(8 - i) * 3);
    return {
        scale: i > 8 ? function(d) { return d / k; } : function(d) { return d * k; },
        symbol: d
    };
}

function d3_format_precision(x, p) {
    return p - (x ? Math.ceil(Math.log(x) / Math.LN10) : 1);
}
After running this code, try formatting a number with SI prefix:
d3.format(".3s")(1234567890) // 1.23B
You could augment this code pretty simply to support different locales by keeping locale-specific d3_formatPrefixes values in an object and then selecting the proper one for the locale you need, as sketched below.
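For instance, a minimal sketch of that idea (my own; the "de-DE" abbreviations are illustrative assumptions, not authoritative):

// Locale-keyed prefix tables; indices line up with d3_formatPrefixes above.
const localePrefixes: Record<string, string[]> = {
    "en-US": ["e-24", "e-21", "e-18", "e-15", "e-12", "e-9", "e-6", "e-3", "",
              "K", "M", "B", "T", "P", "E", "Z", "Y"],
    "de-DE": ["e-24", "e-21", "e-18", "e-15", "e-12", "e-9", "e-6", "e-3", "",
              " Tsd.", " Mio.", " Mrd.", " Bio.", "P", "E", "Z", "Y"],
};

// Then build the table exactly as above, picking the current locale's entries:
const myLocale = "en-US"; // however you detect the locale
var d3_formatPrefixes = localePrefixes[myLocale].map(d3_formatPrefix);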
I like the answer by @nross83.
Just going to paste a variation that I think might be more robust.
Example:
import { formatLocale, formatSpecifier } from "d3";

const baseLocale = {
    decimal: ".",
    thousands: ",",
    grouping: [3],
    currency: ["$", ""],
};

// You can define your own si prefix abbr. here
const d3SiPrefixMap = {
    y: "e-24",
    z: "e-21",
    a: "e-18",
    f: "e-15",
    p: "e-12",
    n: "e-9",
    µ: "e-6",
    m: "e-3",
    "": "",
    k: "K",
    M: "M",
    G: "B",
    T: "T",
    P: "P",
    E: "E",
    Z: "Z",
    Y: "Y",
};

const d3Format = (specifier: string) => {
    const locale = formatLocale({ ...baseLocale });
    const formattedSpecifier = formatSpecifier(specifier);
    const valueFormatter = locale.format(specifier);
    return (value: number) => {
        const result = valueFormatter(value);
        if (formattedSpecifier.type === "s") {
            // modify the return value when using si-prefix.
            const lastChar = result[result.length - 1];
            if (Object.keys(d3SiPrefixMap).includes(lastChar)) {
                return result.slice(0, -1) + d3SiPrefixMap[lastChar];
            }
        }
        // return the default result from d3 format in case the format type
        // is not set to `s` (si suffix)
        return result;
    };
};
And use it like the following:
const value = 1000000000;
const formattedValue = d3Format("~s")(value);
console.log({formattedValue}); // Outputs: {formattedValue: "1B"}
We used the formatSpecifier function from d3-format to check whether the format type is s, i.e. SI suffix, and only modify the return value in that case.
In the example above, I have not modified the actual d3 function. You can change the code accordingly if you want to do that for the viz stuff.
I hope this answer is helpful. Thank you :)