Why is this Swift code slower than a Delphi implementation?

Apple has touted Swift's fast performance compared to other languages. I never doubted this until recently, when I began to write some code in Swift.
I implemented the xorshift algorithm in Swift, only to find that the Swift version is about 80 times slower than the Delphi one.
Xorshift is described at http://en.wikipedia.org/wiki/Xorshift
SWIFT version:
// xcrun -sdk macosx10.10 swiftc main.swift
import Foundation
func xor_shift(x0: UInt32, y0: UInt32, z0: UInt32, w0: UInt32) -> () -> UInt32 {
    var x = x0
    var y = y0
    var z = z0
    var w = w0
    func num() -> UInt32 {
        let t = x ^ (x << 11)
        x = y
        y = z
        z = w
        w = w ^ (w >> 19) ^ (t ^ (t >> 8))
        return w
    }
    return num
}
let loopcount = Int32.max
let xrand = xor_shift(2014, 12, 29, 2015)
let t0 = NSDate()
for _ in 0..<loopcount {
    xrand()
}
let t1 = NSDate()
let ms = Int(t1.timeIntervalSinceDate(t0) * 1000)
println("[SWIFT] Time used: \(ms) milliseconds, Loop count: \(loopcount)")
Delphi/Pascal version:
// Command line compile:
// dcc64 xortest.dpr
{$APPTYPE CONSOLE}
program xortest;
uses sysutils;
type
  TRandSeed = record
    x, y, z, w: UInt32;
  end;

function xrand(var seed: TRandSeed): UInt32;
var
  t: UInt32;
begin
  t := seed.x xor (seed.x shl 11);
  seed.x := seed.y; seed.y := seed.z; seed.z := seed.w;
  seed.w := seed.w xor (seed.w shr 19) xor (t xor (t shr 8));
  result := seed.w
end;

var
  r: TRandSeed;
  t0, t1: TDateTime;
  s: string;
  i, loopcount: integer;
begin
  // Set the rand seed
  r.x := 2014; r.y := 12; r.z := 29; r.w := 2015;
  loopcount := high(Int32);
  t0 := now;
  for i := 1 to loopcount do xrand(r);
  t1 := now;
  s := Format('[PASCAL] Time used: %d milliseconds, Loopcount = %d', [Trunc((t1-t0)*24*3600*1000), loopcount]);
  writeln(s);
end.
Test environments:
OS X: iMac 27" retina 4 GHz Intel Core i7
Windows 7 running inside VMware Fusion on the above-mentioned iMac
The Swift version outputs:
[SWIFT] Time used: 412568 milliseconds, Loop count: 2147483647
The Pascal version outputs:
[PASCAL] Time used: 5083 milliseconds, Loopcount = 2147483647
The Pascal version runs 81 times faster than the Swift version, not to mention that the former runs inside a virtual machine.
Is Apple lying about the fast performance of Swift, or is there anything wrong with my code?

edit: oops, sorry, I mistranslated part of the Delphi code, so the result is not quite as good as a constant calculation. But the closure is definitely the problem, so you should re-run your comparison against the version below, as it makes a dramatic difference.
Your Swift code is not a direct translation of your Delphi code, so you are not comparing apples to apples. In the Swift version, you are calling a function that returns a closure that captures some variables, then calling that closure. Whereas in the Delphi version, you are just calling a function that takes a struct. Below is a more direct Swift translation of the Delphi code.
Closures can often be a barrier to compiler optimization. Removing this barrier seems to help the code a lot: if I run my Swift equivalent after compiling with -O, it calculates 2,147,483,647 runs of xrand in 5,341 milliseconds (not the 0 milliseconds I originally reported; see the edit above), compared to 238,762 milliseconds for the version with the closure, on my horrible ancient laptop.
Why? Because without that barrier to optimization, the compiler has far more latitude to rewrite the code to be as fast as possible. It might even replace the entire calculation with a constant value (it is also possible that it could detect the value is never used and not run the function at all, which is why I added a store of the result and a printout of the result, just to be sure).
import Foundation
struct TRandSeed {
    var x: UInt32
    var y: UInt32
    var z: UInt32
    var w: UInt32
}

func xrand(inout seed: TRandSeed) -> UInt32 {
    var t = seed.x ^ (seed.x << 11)
    seed.x = seed.y
    seed.y = seed.z
    seed.z = seed.w
    seed.w = seed.w ^ (seed.w >> 19) ^ (t ^ (t >> 8))
    return seed.w
}
var r = TRandSeed(x: 2014, y: 12, z: 29, w: 2015)
let loopcount = Int32.max-1
let t0 = NSDate()
for _ in 0..<loopcount {
    xrand(&r)
}
let result = xrand(&r)
let t1 = NSDate()
let ms = Int(t1.timeIntervalSinceDate(t0) * 1000)
println("[SWIFT] Time used: \(ms) millisecons to calculate \(result), Loop count: \(loopcount+1)")

I slightly modified the code by Airspeed Velocity:
import Foundation
struct TRandSeed {
    var x: UInt32
    var y: UInt32
    var z: UInt32
    var w: UInt32
}

func xrand(inout seed: TRandSeed) -> UInt32 {
    let t = seed.x ^ (seed.x << 11)
    seed.x = seed.y
    seed.y = seed.z
    seed.z = seed.w
    seed.w = seed.w ^ (seed.w >> 19) ^ (t ^ (t >> 8))
    return seed.w
}
var r = TRandSeed(x: 2014, y: 12, z: 29, w: 2015)
let loopcount = Int32.max
let t0 = NSDate()
var total: UInt64 = 0
for _ in 0..<loopcount {
    let t = xrand(&r)
    total = total &+ UInt64(t)
}
let t1 = NSDate()
let ms = Int(t1.timeIntervalSinceDate(t0) * 1000)
println("[SWIFT] Time used: \(ms) milliseconds to calculate, Loop count: \(loopcount), Total = \(total)")
I compiled it with the command line: xcrun -sdk macosx10.10 swiftc -O main.swift
The new code outputs: [SWIFT] Time used: 2838 milliseconds to calculate, Loop count: 2147483647, Total = 4611723097222874280
The performance is now up to par. (Since the Pascal version runs inside a virtual machine, it's not fair to say the Swift binary is twice as fast.)

Related

The '+' operation is not applicable to the types function(x: real): real and real. Check the operation of the program for a = 0.1; b = 1.0; h = 0.1;

Test the program for a = 0.1; b = 1.0; h = 0.1; select the value of parameter n depending on the task.
Why am I getting an error? What is the best way to solve this problem? How can I simplify it?
var
  i, n: integer;
  x, k, h, sx: real;

function Y(x: real): real;
begin
  Y := x * arctan(x) - 0.5 * ln(1.0 + x * x)
end;

function S(x: real): real;
var
  sum, xx, p, znak, e: real;
begin
  S := 0.5 * x * x;
  p := x * x;
  xx := -x * x;
  k := 2;
  e := 1e303;
  while abs(e) > 1e-14 do
  begin
    k := k + 2;
    p := p * xx;
    e := p / (k * (k - 1));
    S := S + e
  end
end;

begin
  h := 0.1;
  writeln('x': 2, 'S(x)': 14, 'Y(x)': 18, 'n': 15);
  for i := 1 to 10 do
  begin
    x := i * h;
    sx := S(x);
    n := round(k / 2);
    writeln(x: 3: 1, sx: 18: 14, Y(x): 18: 14, n: 10)
  end
end.
This produces the error: The '+' operation is not applicable to the types function(x: real): real and real
I tried to solve the problem based on the fact that x ranges from a to b with step h:
program test;
var
  y, a, b, h, x, Sx, Yx, n: real;
begin
  a := 0.1;
  b := 1.0;
  h := 0.1;
  x := a;
  n := 0;
  while x <= b do
  begin
    Yx := x * arctan(x) - ln(sqrt(1 + exp(x)));
    x := x + h;
    writeln(Yx);
    writeln('---------------------', n);
    n := n + 1;
  end;
end.
But I do not know how to get S(x)
The error message means that the first argument of + is a function. I'll bet this is the S := S + e line. While you can assign to S to set the return value of S, you can't read it back like that.
You can refer to a function inside that function; this is used with recursion. But then you'll need to actually call yourself. E.g. Fibonacci := Fibonacci(i-1) * i. Now the left side of * is not a function, but the result of a function call.
Solution: just use a temporary variable, and assign it to S at the very end of S.
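For example, here is a minimal sketch of S rewritten that way, reusing the sum variable the question's code already declares (everything else is unchanged from the original function):
function S(x: real): real;
var
  sum, xx, p, e: real;
begin
  sum := 0.5 * x * x;
  p := x * x;
  xx := -x * x;
  k := 2;
  e := 1e303;
  while abs(e) > 1e-14 do
  begin
    k := k + 2;
    p := p * xx;
    e := p / (k * (k - 1));
    sum := sum + e  { read and update the temporary instead of the function name }
  end;
  S := sum  { assign the result to S once, at the very end }
end;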

Better Random Number Ranges In Swift 3

Before I upgraded to Swift 3 (Xcode 8) my random start-position function looked like this:
func randomStartPosition(range: Range<Int> = 45...(Int(self.frame.size.height)-45)) -> Int {
    let min = range.startIndex
    let max = range.endIndex
    return Int(arc4random_uniform(UInt32(max - min))) + min
}
Which stopped working after the conversion because it was updated to this:
func randomSmallObjectsStartPosition(_ range: Range<Int> = 25...(Int(self.frame.size.height)-25)) -> Int {
    let min = range.lowerBound
    let max = range.upperBound
    return Int(arc4random_uniform(UInt32(max - min))) + min
}
Which worked out well, because I ended up learning about GameplayKit:
Start by:
import GameplayKit
Then you can easily make new random variables using GameplayKit:
You can read up on the Apple site, but basically this gives you more "hits" in the middle:
lazy var randomStartPosition = GKGaussianDistribution(randomSource: GKARC4RandomSource(), lowestValue: 0, highestValue:100)
And this will cycle through the random values until they are all consumed, then start again.
lazy var randomShuffle = GKShuffledDistribution(randomSource: GKARC4RandomSource(), lowestValue: 0, highestValue:100)
And lastly the totally random value generator:
lazy var totallyRandom = GKRandomDistribution(lowestValue: 1, highestValue: 100)
An example of how to use:
MyObject.position = CGPoint(x: self.frame.size.width + 20, y: CGFloat(totallyRandom.nextInt()) )
This will put my object off the screen on the far right and put it at a completely random position on the Y axis.
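For completeness, here is a rough sketch (my own, not from the original answer) of how the question's start-position helper could be rewritten on top of GKRandomDistribution, keeping the 45-point inset from the question; it assumes the function lives somewhere with access to self.frame, such as an SKScene subclass:
import GameplayKit

// Sketch: random Y start position between `inset` and frame height minus `inset`.
// Assumes this is a method of a class that has a `frame`, e.g. an SKScene subclass.
func randomStartPosition(inset: Int = 45) -> Int {
    let upper = Int(self.frame.size.height) - inset
    let distribution = GKRandomDistribution(lowestValue: inset, highestValue: upper)
    return distribution.nextInt()
}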

Function not working in Swift 2. (Unresolved identifier for defined variable)

I'm trying to learn Swift 2, coming from a Python background. I started making a really simple function that counts the G's and C's. I'm getting Use of unresolved identifier 'SEQ' on the line var length: Float = Float(SEQ.characters.count)
What am I doing wrong? It's definitely defined at the beginning of the function?
Tried the following posts:
Swift Use of unresolved identifier 'UIApplicationStateInactive'
Swift - Use of unresolved identifier
func GC(input_seq: String) -> Float {
    let SEQ = input_seq.uppercaseString
    var counter = 0.0
    for nt in SEQ.characters {
        if (nt == "G") {
            var counter = counter + 1
        }
        if (nt == "C") {
            var counter = counter + 1
        }
    }
}
var length: Float = Float(SEQ.characters.count)
return counter/length
}
let query_seq = "ATGGGGCTTTTGA"
GC(query_seq)
A couple of things you are doing wrong.
You are creating a Double counter (not a Float as you probably intended): var counter = 0.0
You really need it as an integer since it's a counter; you can convert it to a Float later. var counter = 0 will create an Int variable.
You are creating second and third local variables in the if blocks:
if (nt == "G") {
    var counter = counter + 1
}
I don't think you understand the basics yet; it might be beneficial for you to start reading the Swift book from the beginning.
This last one is really just an improvement: you can use the shorthand counter += 1, or even counter++, instead of counter = counter + 1.
Here is a working version of your code:
func GC(input_seq: String) -> Float {
    let SEQ = input_seq.uppercaseString
    var counter = 0
    for nt in SEQ.characters {
        if (nt == "G") {
            counter++
        }
        if (nt == "C") {
            counter++
        }
    }
    return Float(counter)/Float(SEQ.characters.count)
}
let query_seq = "ATGGGGCTTTTGA"
GC(query_seq)
Hope this helps.

golang - ceil function like php?

I want to return the least integer value greater than or equal to the result of an integer division. So I used math.Ceil, but I cannot get the value I want.
package main

import (
    "fmt"
    "math"
)

func main() {
    var pagesize int = 10
    var length int = 43
    d := float64(length / pagesize)
    page := int(math.Ceil(d))
    fmt.Println(page)
    // output 4 not 5
}
http://golang.org/pkg/math/#Ceil
http://play.golang.org/p/asHta1HkO_
What is wrong?
Thanks.
The line
d := float64(length / pagesize)
converts the result of the division to a float. Since the division itself is integer division, it results in 4, so d = 4.0 and math.Ceil(d) is 4.
Replace the line with
d := float64(length) / float64(pagesize)
and you'll have d=4.3 and int(math.Ceil(d))=5.
Avoiding floating point operations (for performance and clarity):
x, y := length, pagesize
q := (x + y - 1) / y
which holds for x >= 0 and y > 0.
Or, to avoid overflow of x+y (note this variant requires x > 0):
q := 1 + (x - 1) / y
It's the same as the C++ version: Fast ceiling of an integer division in C / C++
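For reference, here is a minimal runnable sketch of that integer-only approach, wrapped in a helper function (the name ceilDiv is mine) and applied to the question's numbers:
package main

import "fmt"

// ceilDiv returns the ceiling of x divided by y using integer arithmetic only.
// Valid for x >= 0 and y > 0.
func ceilDiv(x, y int) int {
    return (x + y - 1) / y
}

func main() {
    fmt.Println(ceilDiv(43, 10)) // prints 5
}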
Convert length and pagesize to floats before the division:
d := float64(length) / float64(pagesize)
http://play.golang.org/p/FKWeIj7of5
You can check the remainder to see if it should be raised to the next integer.
page := length / pagesize
if length % pagesize > 0 {
    page++
}

combination without repetition of N elements without use for..to..do

I want to load into a list the combinations of N numbers without repetition, giving as input the elements and the group size.
For example, with 4 elements [1,2,3,4], I have:
Group 1: [1][2][3][4];
Group 2: [1,2][1,3][1,4][2,3][2,4][3,4];
Group 3: [1,2,3][1,2,4][1,3,4][2,3,4]
Group 4: [1,2,3,4]
Now, I have solved it using nested for loops; for example, for group 2, I write:
for x1 := 1 to 3 do
  for x2 := Succ(x1) to 4 do
  begin
    // x1, x2 //
  end
or for group 3, I wrote:
for x1 := 1 to 2 do
  for x2 := Succ(x1) to 3 do
    for x3 := Succ(x2) to 4 do
    begin
      // x1, x2, x3 //
    end
and so on for the other groups.
In general, if I want to do it for group N, how can I do it without writing N procedures with nested loops?
I have thought of a double while..do loop, one to use as the counter and one for the group count, but that gets a little hard; I wanted to know if there is a simpler and faster solution, perhaps using boolean operators or something like that.
Can anyone give me some suggestions about it? Thanks very much.
It seems you are looking for a fast algorithm to calculate all k-combinations. The following Delphi code is a direct translation of the C code found here: Generating Combinations. I even fixed a bug in that code!
program kCombinations;

{$APPTYPE CONSOLE}

// Prints out a combination like {1, 2}
procedure printc(const comb: array of Integer; k: Integer);
var
  i: Integer;
begin
  Write('{');
  for i := 0 to k-1 do
  begin
    Write(comb[i]+1);
    if i < k-1 then
      Write(',');
  end;
  Writeln('}');
end;

(*
  Generates the next combination of n elements as k after comb
  comb => the previous combination ( use (0, 1, 2, ..., k) for first)
  k    => the size of the subsets to generate
  n    => the size of the original set
  Returns: True if a valid combination was found, False otherwise
*)
function next_comb(var comb: array of Integer; k, n: Integer): Boolean;
var
  i: Integer;
begin
  i := k - 1;
  inc(comb[i]);
  while (i > 0) and (comb[i] >= n-k+1+i) do
  begin
    dec(i);
    inc(comb[i]);
  end;
  if comb[0] > n-k then // Combination (n-k, n-k+1, ..., n) reached
  begin
    // No more combinations can be generated
    Result := False;
    exit;
  end;
  // comb now looks like (..., x, n, n, n, ..., n).
  // Turn it into (..., x, x + 1, x + 2, ...)
  for i := i+1 to k-1 do
    comb[i] := comb[i-1]+1;
  Result := True;
end;

procedure Main;
const
  n = 4; // The size of the set; for {1, 2, 3, 4} it's 4
  k = 2; // The size of the subsets; for {1, 2}, {1, 3}, ... it's 2
var
  i: Integer;
  comb: array of Integer;
begin
  SetLength(comb, k); // comb[i] is the index of the i-th element in the combination
  // Setup comb for the initial combination
  for i := 0 to k-1 do
    comb[i] := i;
  // Print the first combination
  printc(comb, k);
  // Generate and print all the other combinations
  while next_comb(comb, k, n) do
    printc(comb, k);
end;

begin
  Main;
  Readln;
end.
Output
{1,2}
{1,3}
{1,4}
{2,3}
{2,4}
{3,4}
Here's a rather fun solution reliant on bitsets. As it stands it's limited to sets of size not greater than 32. I don't think that's a practical limitation since there are a lot of subsets for a set of cardinality greater than 32.
The output is not in the order that you want, but that would be easy enough to remedy if it matters to you.
program VisitAllSubsetsDemo;
{$APPTYPE CONSOLE}
procedure PrintBitset(Bitset: Cardinal; Size: Integer);
var
  i: Integer;
  Mask: Cardinal;
  SepNeeded: Boolean;
begin
  SepNeeded := False;
  Write('{');
  for i := 1 to Size do begin
    Mask := 1 shl (i-1);
    if Bitset and Mask <> 0 then begin
      if SepNeeded then begin
        Write(',');
      end;
      Write(i);
      SepNeeded := True;
    end;
  end;
  Writeln('}');
end;

procedure EnumerateSubsets(Size: Integer);
var
  Bitset: Cardinal;
begin
  for Bitset := 0 to (1 shl Size)-1 do begin
    PrintBitset(Bitset, Size);
  end;
end;

begin
  EnumerateSubsets(4);
end.
Output
{}
{1}
{2}
{1,2}
{3}
{1,3}
{2,3}
{1,2,3}
{4}
{1,4}
{2,4}
{1,2,4}
{3,4}
{1,3,4}
{2,3,4}
{1,2,3,4}
And here is a variant that just lists the subsets of a specified cardinality:
function SetBitCount(Bitset: Cardinal; Size: Integer): Integer;
var
  i: Integer;
  Mask: Cardinal;
begin
  Result := 0;
  for i := 1 to Size do begin
    Mask := 1 shl (i-1);
    if Bitset and Mask <> 0 then begin
      inc(Result);
    end;
  end;
end;

procedure EnumerateSubsets(Size, NumberOfSetBits: Integer);
var
  Bitset: Cardinal;
begin
  for Bitset := 0 to (1 shl Size)-1 do begin
    if SetBitCount(Bitset, Size) = NumberOfSetBits then begin
      PrintBitset(Bitset, Size);
    end;
  end;
end;

begin
  EnumerateSubsets(4, 2);
end.
Output
{1,2}
{1,3}
{2,3}
{1,4}
{2,4}
{3,4}
This seems to be a question that comes up over and over, and a few bits of code are kicking about that address the problem. A very nice algorithm has been written in some code, but it wasn't strictly clean C and wasn't portable across UNIX or Linux or any POSIX system, therefore I cleaned it up and added warning messages, a usage message, and the ability to provide a set size and subset size on the command line. Also, comb[] has been transitioned to a more general pointer to an array of integers, and calloc is used to zero out the memory needed for whatever set size one may want.
The following is ISO IEC 9899:1999 C clean:
/*********************************************************************
* The Open Group Base Specifications Issue 6
* IEEE Std 1003.1, 2004 Edition
*
* An XSI-conforming application should ensure that the feature
* test macro _XOPEN_SOURCE is defined with the value 600 before
* inclusion of any header. This is needed to enable the
* functionality described in The _POSIX_C_SOURCE Feature Test
* Macro and in addition to enable the XSI extension.
*
* Compile with c99 or with gcc and CFLAGS to include options
* -std=iso9899:199409 -pedantic-errors in order to ensure compliance
* with ISO IEC 9899:1999 C spec.
*
* Code cleanup and transition to comb as a pointer to type ( int * )
* array by Dennis Clarke dclarke#blastwave.org 28 Dec 2012
*
*********************************************************************/
#define _XOPEN_SOURCE 600
#include <stdio.h>
#include <stdlib.h>
/* Prints out a combination like {1, 2} */
void printc( int *comb, int k) {
    int j;
    printf("{ ");
    for ( j = 0; j < k; ++j )
        printf("%d , ", *( comb + j ) + 1 );
    printf( "\b\b}\n" );
} /* printc */
/**********************************************************************
next_comb(int comb[], int k, int n)
Generates the next combination of n elements as k after comb
comb => the previous combination ( use (0, 1, 2, ..., k) for first)
k => the size of the subsets to generate
n => the size of the original set
Returns: 1 if a valid combination was found
0, otherwise
**********************************************************************/
int next_comb( int *comb, int k, int n) {
    int i = k - 1;
    ++*( comb + i );
    /* i > 0 (rather than i >= 0) guards against writing before comb[0]
       once the final combination has been reached */
    while ( ( i > 0 ) && ( *( comb + i ) >= n - k + 1 + i ) ) {
        --i;
        ++*( comb + i );
    }
    if ( *comb > n - k) /* Combination (n-k, n-k+1, ..., n) reached */
        return 0; /* No more combinations can be generated */
    /* comb now looks like (..., x, n, n, n, ..., n).
     * Turn it into (..., x, x + 1, x + 2, ...) */
    for (i = i + 1; i < k; ++i)
        *( comb + i ) = *( comb + ( i - 1 ) ) + 1;
    return 1;
} /* next_comb */
int main(int argc, char *argv[]) {
    int *comb, i, n, k;
    n = 9; /* The size of the set; for {1, 2, 3, 4} it's 4 */
    k = 6; /* The size of the subsets; for {1, 2}, {1, 3}, .. it's 2 */
    if ( argc < 3 ) {
        printf ( "\nUSAGE : %s n k\n", argv[0] );
        printf ( " : Where n is the set size and k the sub set size.\n" );
        printf ( " : Note that k <= n\n" );
        return ( EXIT_FAILURE );
    }
    n = atoi ( argv[1] );
    k = atoi ( argv[2] );
    if ( k > n ) {
        printf ( "\nWARN : k > n is not allowed.\n" );
        printf ( "USAGE : %s n k\n", argv[0] );
        printf ( " : Where n is the set size and k the sub set size.\n" );
        printf ( " : Note that k <= n\n" );
        return ( EXIT_FAILURE );
    }
    comb = ( int * ) calloc( (size_t) k, sizeof(int) );
    for ( i = 0; i < k; ++i)
        *( comb + i ) = i;
    /* Print the first combination */
    printc( comb, k );
    /* Generate and print all the other combinations */
    while ( next_comb( comb, k, n ) )
        printc( comb, k );
    free ( comb );
    return ( EXIT_SUCCESS );
}
One may compile the above on an Opteron based machine thus :
$ echo $CFLAGS
-m64 -g -malign-double -std=iso9899:199409 -pedantic-errors -mno-mmx
-mno-sse -fexceptions -fpic -fvisibility=default -mtune=opteron
-march=opteron -m128bit-long-double -mpc80 -Wl,-q
$ gcc $CFLAGS -o combinations combinations.c
A quick trivial test with a set size of 10 and a sub-set of 6 will be thus :
$ ./combinations 10 6 | wc -l
210
The math is correct :
( 10 ! ) / ( ( 10 - 6 )! * ( 6! ) ) = 210 unique combinations.
Now that the integer array comb is based on a pointer system we are only restricted by available memory and time. Therefore we have the following :
$ /usr/bin/time -p ./combinations 20 6 | wc -l
real 0.11
user 0.10
sys 0.00
38760
This looks correct :
( 20 ! ) / ( ( 20 - 6 )! * ( 6! ) ) = 38,760 unique combinations
We may now push the limits a bit thus :
$ ./combinations 30 24 | wc -l
593775
Again the math agrees with the result :
( 30 ! ) / ( ( 30 - 24 )! * ( 24! ) ) = 593 775 unique combinations
Feel free to push the limits of your system :
$ /usr/bin/time -p ./combinations 30 22 | wc -l
real 18.62
user 17.76
sys 0.83
5852925
I have yet to try anything larger, but the math looks correct, as does the output thus far. Feel free to let me know if some correction is needed.
Dennis Clarke
dclarke#blastwave.org
28 Dec 2012
Following the link that David posted and clicking around led me to an article where they coin the term "Banker's Search", which seems to fit your pattern.
The article provides an example solution in C++, utilizing recursion:
Efficiently Enumerating the Subsets of a Set
Unless you can't make function calls by some requirement, do this:
void select_n_from_list(int *selected, int n, int *list, int list_size) {
    if (n == 0) {
        // print all numbers from selected by traversing backward
        // you can set the head to a special value or make the head location
        // a static variable for lookup
        return;
    }
    for (int i = 0; i <= list_size - n; i++) {
        *selected = list[i];
        select_n_from_list(selected + 1, n - 1, list + i + 1, list_size - i - 1);
    }
}
You really need some sort of recursion because you need automatic storage for intermediate results. Let me know if there's a special requirement that makes this solution not work.
I created this script and it works very well:
$(document).ready(function(){
    $("#search").on('click', function(){
        var value = $("#fieldArray").val().split(",");
        var results = new SearchCombinations(value);
        var output = "";
        for (var $i = 0; $i < results.length; $i++) {
            results[$i] = results[$i].join(",");
            output += "<li>" + results[$i] + "</li>";
        }
        $("#list").html(output);
    });
});

/* Helper Clone */
var Clone = function (data) {
    return JSON.parse(JSON.stringify(data));
}

/* Script of Search All Combinations without repetitions. Ex: [1,2,3] */
var SearchCombinations = function (statesArray) {
    var combinations = new Array(),
        newValue = null,
        arrayBeforeLevel = new Array(),
        $level = 0,
        array = new Clone(statesArray),
        firstInteration = true,
        indexFirstInteration = 0,
        sizeValues = array.length,
        totalSizeValues = Math.pow(2, array.length) - 1;
    array.sort();
    combinations = new Clone(array);
    arrayBeforeLevel = new Clone(array);
    loopLevel: while ($level < arrayBeforeLevel.length) {
        for (var $i = 0; $i < array.length; $i++) {
            newValue = arrayBeforeLevel[$level] + "," + array[$i];
            newValue = newValue.split(",");
            newValue.sort();
            newValue = newValue.join(",");
            if (combinations.indexOf(newValue) == -1 && arrayBeforeLevel[$level].toString().indexOf(array[$i]) == -1) {
                if (firstInteration) {
                    firstInteration = false;
                    indexFirstInteration = combinations.length;
                }
                sizeValues++;
                combinations.push(newValue);
                if (sizeValues == totalSizeValues) {
                    break loopLevel;
                }
            }
        }
        $level++;
        if ($level == arrayBeforeLevel.length) {
            firstInteration = true;
            arrayBeforeLevel = new Clone(combinations);
            arrayBeforeLevel = arrayBeforeLevel.splice(indexFirstInteration);
            indexFirstInteration = 0;
            $level = 0;
        }
    }
    for (var $i = 0; $i < combinations.length; $i++) {
        combinations[$i] = combinations[$i].toString().split(",");
    }
    return combinations;
}
*{font-family: Arial;font-size:14px;}
small{font-size:11px}
<script src="https://ajax.googleapis.com/ajax/libs/jquery/1.11.1/jquery.min.js"></script>
<label for="">
<input type="text" id="fieldArray">
<button id="search">Search</button>
<br><small>Info the elements. Ex: "a,b,c"</small>
</label>
<hr>
<ul id="list"></ul>
