How to convert Flux&lt;List&lt;Integer&gt;&gt; into Flux&lt;int[][]&gt;?
I have a Flux&lt;List&lt;Integer&gt;&gt; -> {1, 2, 3, ..., 100} and I want to group its elements in batches of 30 -> [[1, 2, 3, ..., 30], [31, 32, ..., 60], [61, ..., 100]].
I have tried the approach below but was not successful; the elements are getting grouped in batches of 5: [[1,2,3,4,5], [6,7,8,9,10], ...]
Flux<int[][]> groupedData = fluxData.map(x -> {
    int outerArraySize = (int) Math.ceil(x.size() / 30) + 1;
    System.out.println(outerArraySize);
    int[][] boxedData = new int[30][outerArraySize];
    AtomicInteger innerArray = new AtomicInteger(0);
    AtomicInteger outerArray = new AtomicInteger(0);
    x.forEach(ids -> {
        boxedData[innerArray.get()][outerArray.get()] = ids;
        innerArray.getAndIncrement();
        if (innerArray.get() == 30) {
            innerArray.set(0);
            outerArray.getAndIncrement();
        }
    });
    return boxedData;
});
Flux has a useful operator called 'buffer'; we can use it to batch the List of Integers.
I have created my 'fluxData' like this, just so I can test my code:
List<Integer> list1 = IntStream.rangeClosed(1, 100).boxed().collect(Collectors.toList());
List<Integer> list2 = IntStream.rangeClosed(1, 40).boxed().collect(Collectors.toList());
List<Integer> list3 = IntStream.rangeClosed(1, 70).boxed().collect(Collectors.toList());
Flux<List<Integer>> fluxData = Flux.just(list1, list2, list3);
Now, we can do the following:
fluxData.map(integersList -> {
    List<List<Integer>> batchesList = Flux.fromStream(integersList.stream())
            .buffer(30) // This is the magic.
            .collectList()
            .block();
    // List<List<Integer>> --> int[][]
    int[][] batchesArray = new int[batchesList.size()][];
    for (int i = 0; i < batchesArray.length; i++) {
        batchesArray[i] = new int[batchesList.get(i).size()];
        for (int j = 0; j < batchesArray[i].length; j++) {
            batchesArray[i][j] = batchesList.get(i).get(j);
        }
    }
    return batchesArray;
})
.subscribe(batchesArray -> System.out.println(Arrays.deepToString(batchesArray)));
Output:
[[1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30], [31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60], [61, 62, 63, 64, 65, 66, 67, 68, 69, 70, 71, 72, 73, 74, 75, 76, 77, 78, 79, 80, 81, 82, 83, 84, 85, 86, 87, 88, 89, 90], [91, 92, 93, 94, 95, 96, 97, 98, 99, 100]]
[[1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30], [31, 32, 33, 34, 35, 36, 37, 38, 39, 40]]
[[1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30], [31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60], [61, 62, 63, 64, 65, 66, 67, 68, 69, 70]]
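As a side note, since each emitted List is already fully in memory, the same batching can also be done with plain subList slicing, which avoids creating an inner Flux and calling block() inside map (blocking inside a reactive pipeline is generally discouraged). A minimal sketch, assuming the same fluxData as above:

Flux<int[][]> groupedData = fluxData.map(list -> {
    int batchCount = (list.size() + 29) / 30; // ceiling division by 30
    int[][] batches = new int[batchCount][];
    for (int i = 0; i < batchCount; i++) {
        int from = i * 30;
        int to = Math.min(from + 30, list.size());
        // Copy one slice of up to 30 elements into a primitive array.
        batches[i] = list.subList(from, to).stream()
                .mapToInt(Integer::intValue)
                .toArray();
    }
    return batches;
});

Either version produces the same Flux<int[][]>; the subList variant simply keeps the inner batching synchronous and avoids nesting a blocking call inside the reactive chain.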
Running a simple Bayesian regression model, I am not able to replicate the results across multiple runs on GPU. How can I set up tfp.mcmc.sample_chain to generate reproducible results on GPU? Seeding sample_chain didn't work for me.
The test code snippet:
import os
import random
from pprint import pprint
import matplotlib.pyplot as plt
import numpy as np
import seaborn as sns
import pandas as pd
import tensorflow.compat.v2 as tf
tf.enable_v2_behavior()
import tensorflow_probability as tfp
sns.reset_defaults()
#sns.set_style('whitegrid')
#sns.set_context('talk')
sns.set_context(context='talk',font_scale=0.7)
%config InlineBackend.figure_format = 'retina'
%matplotlib inline
tfd = tfp.distributions
tfb = tfp.bijectors
dtype = tf.float64
dfhogg = pd.DataFrame(np.array([[1, 201, 592, 61, 9, -0.84],
[2, 244, 401, 25, 4, 0.31],
[3, 47, 583, 38, 11, 0.64],
[4, 287, 402, 15, 7, -0.27],
[5, 203, 495, 21, 5, -0.33],
[6, 58, 173, 15, 9, 0.67],
[7, 210, 479, 27, 4, -0.02],
[8, 202, 504, 14, 4, -0.05],
[9, 198, 510, 30, 11, -0.84],
[10, 158, 416, 16, 7, -0.69],
[11, 165, 393, 14, 5, 0.30],
[12, 201, 442, 25, 5, -0.46],
[13, 157, 317, 52, 5, -0.03],
[14, 131, 311, 16, 6, 0.50],
[15, 166, 400, 34, 6, 0.73],
[16, 160, 337, 31, 5, -0.52],
[17, 186, 423, 42, 9, 0.90],
[18, 125, 334, 26, 8, 0.40],
[19, 218, 533, 16, 6, -0.78],
[20, 146, 344, 22, 5, -0.56]]),
columns=['id','x','y','sigma_y','sigma_x','rho_xy'])
## for convenience zero-base the 'id' and use as index
dfhogg['id'] = dfhogg['id'] - 1
dfhogg.set_index('id', inplace=True)
## standardize (mean center and divide by 1 sd)
dfhoggs = (dfhogg[['x','y']] - dfhogg[['x','y']].mean(0)) / dfhogg[['x','y']].std(0)
dfhoggs['sigma_y'] = dfhogg['sigma_y'] / dfhogg['y'].std(0)
dfhoggs['sigma_x'] = dfhogg['sigma_x'] / dfhogg['x'].std(0)
X_np = dfhoggs['x'].values
sigma_y_np = dfhoggs['sigma_y'].values
Y_np = dfhoggs['y'].values
def sample(seed):
    mdl_ols_batch = tfd.JointDistributionSequential([
        # b0
        tfd.Normal(loc=tf.cast(0, dtype), scale=1.),
        # b1
        tfd.Normal(loc=tf.cast(0, dtype), scale=1.),
        # likelihood
        # Using Independent to ensure the log_prob is not incorrectly broadcasted
        lambda b1, b0: tfd.Independent(
            tfd.Normal(
                # Parameter transformation
                loc=b0[..., tf.newaxis] + b1[..., tf.newaxis]*X_np[tf.newaxis, ...],
                scale=sigma_y_np[tf.newaxis, ...]),
            reinterpreted_batch_ndims=1
        ),
    ])

    # @tf.function(autograph=False, experimental_compile=True)
    def run_chain(init_state,
                  step_size,
                  target_log_prob_fn,
                  unconstraining_bijectors,
                  num_steps=500,
                  burnin=50):

        def trace_fn(_, pkr):
            return (
                pkr.inner_results.inner_results.target_log_prob,
                pkr.inner_results.inner_results.leapfrogs_taken,
                pkr.inner_results.inner_results.has_divergence,
                pkr.inner_results.inner_results.energy,
                pkr.inner_results.inner_results.log_accept_ratio
            )

        kernel = tfp.mcmc.TransformedTransitionKernel(
            inner_kernel=tfp.mcmc.NoUTurnSampler(
                target_log_prob_fn,
                step_size=step_size),
            bijector=unconstraining_bijectors)

        hmc = tfp.mcmc.DualAveragingStepSizeAdaptation(
            inner_kernel=kernel,
            num_adaptation_steps=burnin,
            step_size_setter_fn=lambda pkr, new_step_size: pkr._replace(
                inner_results=pkr.inner_results._replace(step_size=new_step_size)),
            step_size_getter_fn=lambda pkr: pkr.inner_results.step_size,
            log_accept_prob_getter_fn=lambda pkr: pkr.inner_results.log_accept_ratio
        )

        # Sampling from the chain.
        chain_state, sampler_stat = tfp.mcmc.sample_chain(
            num_results=num_steps,
            num_burnin_steps=burnin,
            current_state=init_state,
            kernel=hmc,
            trace_fn=trace_fn,
            seed=seed
        )
        return chain_state, sampler_stat

    nchain = 4
    b0, b1, _ = mdl_ols_batch.sample(nchain)
    init_state = [b0, b1]
    step_size = [tf.cast(i, dtype=dtype) for i in [.1, .1]]
    target_log_prob_fn = lambda *x: mdl_ols_batch.log_prob(x + (Y_np, ))

    # bijectors to map constrained parameters to the real line
    unconstraining_bijectors = [
        tfb.Identity(),
        tfb.Identity(),
    ]

    samples, sampler_stat = run_chain(
        init_state, step_size, target_log_prob_fn, unconstraining_bijectors)
    print(tf.reduce_sum(samples))
seed = 24
os.environ['TF_DETERMINISTIC_OPS'] = 'true'
os.environ['PYTHONHASHSEED'] = f'{seed}'
np.random.seed(seed)
random.seed(seed)
tf.random.set_seed(seed)
sample(seed)
os.environ['TF_DETERMINISTIC_OPS'] = 'true'
os.environ['PYTHONHASHSEED'] = f'{seed}'
np.random.seed(seed)
random.seed(seed)
tf.random.set_seed(seed)
sample(seed)
I've tried to use sparse QR and LU to solve complex cases in my FEA program, and it seems that the QR and BiCGSTAB methods cannot get the correct result.
Meanwhile, BiCGSTAB with IncompleteLUT is okay.
Eigen version 3.3.1 with mingw-x86_64 gcc 6.2.
Smallest reproducing code:
#include <vector>
#include <complex>
#include <iostream>
#include <Eigen/Eigen>
using namespace std::complex_literals;
struct mat_cell
{
int row;
int col;
double value;
};
// matrix data.
mat_cell mat01[]={
{ 0, 0, 40432.2974517006}, { 0, 6, -20216.1487258503}, { 0, 12, -20216.1487258503},
{ 1, 1, 180.518062136147}, { 1, 7, -90.2590310680736}, { 1, 11, -9025.90310680736},
{ 1, 13, -90.2590310680736}, { 1, 17, 9025.90310680736},
{ 2, 2, 180.518062136147}, { 2, 8, -90.2590310680736}, { 2, 10, 9025.90310680736},
{ 2, 14, -90.2590310680736}, { 2, 16, -9025.90310680736},
{ 3, 3, 456735.213970955}, { 3, 9, -228367.606985477}, { 3, 15, -228367.606985477},
{ 4, 4, 2421773.15749991}, { 4, 8, -9025.90310680736}, { 4, 10, 594294.042611519},
{ 4, 14, 9025.90310680736}, { 4, 16, 594294.042611519},
{ 5, 5, 2421773.15749991}, { 5, 7, 9025.90310680736}, { 5, 11, 594294.042611519},
{ 5, 13, -9025.90310680736}, { 5, 17, 594294.042611519},
{ 6, 0, -20216.1487258503}, { 6, 6, 40432.2974517006}, { 6, 24, -20216.1487258503},
{ 7, 1, -90.2590310680736}, { 7, 5, 9025.90310680736}, { 7, 7, 180.518062136147},
{ 7, 25, -90.2590310680736}, { 7, 29, -9025.90310680736},
{ 8, 2, -90.2590310680736}, { 8, 4, -9025.90310680736}, { 8, 8, 180.518062136147},
{ 8, 26, -90.2590310680736}, { 8, 28, 9025.90310680736},
{ 9, 3, -228367.606985477}, { 9, 9, 456735.213970955}, { 9, 27, -228367.606985477},
{10, 2, 9025.90310680736}, {10, 4, 594294.042611519}, {10, 10, 2421773.15749991},
{10, 26, -9025.90310680736}, {10, 28, 594294.042611519},
{11, 1, -9025.90310680736}, {11, 5, 594294.042611519}, {11, 11, 2421773.15749991},
{11, 25, 9025.90310680736}, {11, 29, 594294.042611519},
{12, 0, -20216.1487258503}, {12, 12, 20216.1487258503},
{13, 1, -90.2590310680736}, {13, 5, -9025.90310680736}, {13, 13, 90.2590310680736},
{13, 17, -9025.90310680736},
{14, 2, -90.2590310680736}, {14, 4, 9025.90310680736}, {14, 14, 90.2590310680736},
{14, 16, 9025.90310680736},
{15, 3, -228367.606985477}, {15, 15, 228367.606985477},
{16, 2, -9025.90310680736}, {16, 4, 594294.042611519}, {16, 14, 9025.90310680736},
{16, 16, 1210886.57874995},
{17, 1, 9025.90310680736}, {17, 5, 594294.042611519}, {17, 13, -9025.90310680736},
{17, 17, 1210886.57874995},
{18, 18, 40432.2974517006}, {18, 24, -20216.1487258503},
{19, 19, 180.518062136147}, {19, 25, -90.2590310680736}, {19, 29, 9025.90310680736},
{20, 20, 180.518062136147}, {20, 26, -90.2590310680736}, {20, 28, -9025.90310680736},
{21, 21, 456735.213970955}, {21, 27, -228367.606985477},
{22, 22, 2421773.15749991}, {22, 26, 9025.90310680736}, {22, 28, 594294.042611519},
{23, 23, 2421773.15749991}, {23, 25, -9025.90310680736}, {23, 29, 594294.042611519},
{24, 6, -20216.1487258503}, {24, 18, -20216.1487258503}, {24, 24, 40432.2974517006},
{25, 7, -90.2590310680736}, {25, 11, 9025.90310680736}, {25, 19, -90.2590310680736},
{25, 23, -9025.90310680736}, {25, 25, 180.518062136147},
{26, 8, -90.2590310680736}, {26, 10, -9025.90310680736}, {26, 20, -90.2590310680736},
{26, 22, 9025.90310680736}, {26, 26, 180.518062136147},
{27, 9, -228367.606985477}, {27, 21, -228367.606985477}, {27, 27, 456735.213970955},
{28, 8, 9025.90310680736}, {28, 10, 594294.042611519}, {28, 20, -9025.90310680736},
{28, 22, 594294.042611519}, {28, 28, 2421773.15749991},
{29, 7, -9025.90310680736}, {29, 11, 594294.042611519}, {29, 19, 9025.90310680736},
{29, 23, 594294.042611519}, {29, 29, 2421773.15749991}};
int main(int argc, char *argv[])
{
    int nn{30};
    Eigen::MatrixXcd A_dens = Eigen::MatrixXcd::Zero(nn, nn);
    Eigen::VectorXcd rhs = Eigen::VectorXcd::Zero(nn);
    Eigen::SparseMatrix<std::complex<double>> A_sp(nn, nn);
    std::vector<Eigen::Triplet<std::complex<double>>> triList;
    double yita{0.02}; // small imaginary part.
    for(auto const cell: mat01){
        A_dens(cell.row, cell.col) = cell.value*(1.+yita*1.0i);
        triList.push_back({cell.row, cell.col, cell.value*(1.+yita*1.0i)});
    }
    A_sp.setFromTriplets(triList.begin(), triList.end());
    triList.clear();
    A_sp.makeCompressed();
    int ix[]={12, 13, 14};
    double scale{1.e60}; // Larger than 1e38.
    for(auto const j: ix){
        A_dens(j, j) *= scale;
        A_sp.coeffRef(j, j) *= scale;
    }
    rhs(ix[1]) = 0.618*A_sp.coeff(ix[1], ix[1]);
    // Solve by dense LU method.
    Eigen::VectorXcd x_lu = A_dens.lu().solve(rhs);
    // Define sparse solvers.
    Eigen::SparseLU<Eigen::SparseMatrix<std::complex<double>>, Eigen::COLAMDOrdering<int>> solver_lu;
    Eigen::SparseQR<Eigen::SparseMatrix<std::complex<double>>, Eigen::COLAMDOrdering<int>> solver_qr;
    Eigen::BiCGSTAB<Eigen::SparseMatrix<std::complex<double>>> solver_bi;
    Eigen::BiCGSTAB<Eigen::SparseMatrix<std::complex<double>>, Eigen::IncompleteLUT<std::complex<double>, int>> solver_bi_2;
    solver_lu.compute(A_sp);
    if(solver_lu.info()!=Eigen::ComputationInfo::Success) std::cout << "SparseLU decomposition failed!\n";
    Eigen::VectorXcd x_sp_lu = solver_lu.solve(rhs);
    if(solver_lu.info()!=Eigen::ComputationInfo::Success) std::cout << "SparseLU solve failed!\n";
    solver_qr.compute(A_sp);
    if(solver_qr.info()!=Eigen::ComputationInfo::Success) std::cout << "SparseQR decomposition failed!\n";
    Eigen::VectorXcd x_sp_qr = solver_qr.solve(rhs);
    if(solver_qr.info()!=Eigen::ComputationInfo::Success) std::cout << "SparseQR solve failed!\n";
    solver_bi.compute(A_sp);
    if(solver_bi.info()!=Eigen::ComputationInfo::Success) std::cout << "SparseBi decomposition failed!\n";
    Eigen::VectorXcd x_sp_bi = solver_bi.solve(rhs);
    if(solver_bi.info()!=Eigen::ComputationInfo::Success) std::cout << "SparseBi solve failed!\n";
    solver_bi_2.compute(A_sp);
    if(solver_bi_2.info()!=Eigen::ComputationInfo::Success) std::cout << "SparseBi2 decomposition failed!\n";
    Eigen::VectorXcd x_sp_bi_2 = solver_bi_2.solve(rhs);
    if(solver_bi_2.info()!=Eigen::ComputationInfo::Success) std::cout << "SparseBi2 solve failed!\n";
    std::cout << "No | Dense LU | SparseLU | SparseQR | BiCGSTAB |BiCGSTAB+ILUT|\n";
    std::cout << "---|---|---|---|---|---|\n";
    for(int i=0; i<nn; i++){
        std::cout << i << "|";
        std::cout << x_lu(i) << "|";
        std::cout << x_sp_lu(i) << "|";
        std::cout << x_sp_qr(i) << "|";
        std::cout << x_sp_bi(i) << "|";
        std::cout << x_sp_bi_2(i) << "|\n";
    }
}
Method        | X(1)                      | X(5)
--------------|---------------------------|---------------------------
DenseLU       | (0.435087,-1.73121e-017)  | (0.0008897,7.91857e-020)
SparseLU      | (0.435087,3.61979e-017)   | (0.0008897,-1.2936e-019)
SparseQR      | (0,0)                     | (0,0)
BiCGSTAB      | (0.187474,-8.66607e-019)  | (0.00139743,-2.34841e-021)
BiCGSTAB+ILUT | (0.435068,1.58791e-017)   | (0.000889823,-1.00545e-019)
(A more detailed comparison of the results was posted as a picture.)
Your matrix is numerically singular. In double precision, the rank of your matrix is only 3, because of the extreme scaling of 3 of its columns. There is thus an infinite space of solutions to your problem. If you look at the relative error (A*x-b).norm()/b.norm(), you get:
No | Dense LU | SparseLU | SparseQR | BiCGSTAB |BiCGSTAB+ILUT
--- | ---------- | ---------- | ---------- | ---------- |-------------
res | 3.6633e-74 | 1.4915e-74 | 3.1977e-18 | 1.9095e-59 | 2.67692e-63
meaning that all results are "correct" with respect to the precision of double-precision floating-point numbers.
I want to add an array to a two dimensional array like this:
arrays = [[8300, 6732, 4101, 3137, 3097], [1088, 647, 410, 138, 52], [623, 362, 191, 25, 0]]
new_array = [10, 100, 1000]
arrays.map.with_index{|v,i| v << new_array[i]}
# => [[8300, 6732, 4101, 3137, 3097, 10], [1088, 647, 410, 138, 52, 100], [623, 362, 191, 25, 0, 1000]]
It works well, but I want to know if there is a simpler way to accomplish this behavior.
I appreciate any suggestions.
arrays.zip(new_array).map(&:flatten)
# => [[8300, 6732, 4101, 3137, 3097, 10], [1088, 647, 410, 138, 52, 100], [623, 362, 191, 25, 0, 1000]]
You can use zip:
arrays.zip(new_array).each { |arr, item| arr << item }
arrays
# => [[8300, 6732, 4101, 3137, 3097, 10], [1088, 647, 410, 138, 52, 100], [623, 362, 191, 25, 0, 1000]]
Just a little extension to Santosh's answer. If there are nested arrays and you want the result to stay as nested as in the original arrays, like
arrays = [[8300, [6732], 4101, [3137], 3097], [1088, [647], 410, 138, 52], [623, [362], 191, 25, 0]]
new_array = [10, [100], 1000]
required_answer = [[8300, [6732], 4101, [3137], 3097, 10], [1088, [647], 410, 138, 52, 100], [623, [362], 191, 25, 0, 1000]]
then you can use
arrays.zip(new_array).map{|x| x.flatten(1)}
This will flatten each row by only one level, preserving the deeper nesting.
Hi, I'm trying to write some simple code that uses random mutation hill climbing (RMHC) for the travelling salesman problem. I have created a Tour class like this:
import java.util.ArrayList;
import java.util.Collections;
public class Tour
{
    private ArrayList<Integer> tour;

    // Specified tour
    public Tour(ArrayList<Integer> tour) { this.tour = tour; }

    // Random tour
    public Tour(int size)
    {
        // Initialize tour with size
        tour = new ArrayList<Integer>(size);
        // Add integers up to size into ArrayList
        for (int i = 0; i < size; ++i) { tour.add(i); }
        // Shuffle ArrayList
        Collections.shuffle(tour);
    }

    ArrayList<Integer> getTour() { return tour; }

    void printTour()
    {
        for (int i = 0; i < tour.size(); ++i)
        {
            System.out.print(tour.get(i) + ", ");
        }
    }

    // Get the distance between all tour stops using a set of distances
    double getFitness(double[][] distances)
    {
        double s = 0;
        for (int i = 0; i < tour.size() - 1; ++i)
        {
            int a = tour.get(i);
            int b = tour.get(i + 1);
            s += distances[a][b];
        }
        int start_city = tour.get(0);
        int end_city = tour.get(tour.size() - 1);
        s += distances[end_city][start_city];
        return s;
    }

    // Makes a small change to the tour
    void smallChange()
    {
        // Choose random index values to swap
        int indexfirst = CS2004.UI(0, tour.size() - 1);
        int indexsecond = CS2004.UI(0, tour.size() - 1);
        // Make sure the index values are not the same
        while (indexsecond == indexfirst)
        {
            indexsecond = CS2004.UI(0, tour.size() - 1);
        }
        // Store city value in temp variable
        int indexTemp = tour.get(indexfirst);
        // Swap values
        tour.set(indexfirst, tour.get(indexsecond));
        tour.set(indexsecond, indexTemp);
    }
}
My RMHC method looks like this:
public static Tour RMHC(double[][] distances, int iter)
{
    Tour sol = new Tour(distances.length);
    double oldFitness;
    double fitness = 0;
    Tour oldSol = null;
    for (int i = 0; i < iter; i++)
    {
        // Make old solution equal to solution before change
        oldSol = new Tour(sol.getTour());
        System.out.println(oldSol.getTour());
        // Calculate old fitness for comparison
        oldFitness = sol.getFitness(distances);
        // Change solution slightly
        sol.smallChange();
        // Calculate new fitness
        fitness = sol.getFitness(distances);
        /* Compare new fitness to old fitness;
         * set solution back to old solution and fitness to old fitness
         * if the new solution is not better */
        System.out.println(oldFitness + " " + fitness);
        if (fitness > oldFitness)
        {
            System.out.println(oldSol.getTour());
            System.out.println(sol.getTour());
            sol = new Tour(oldSol.getTour());
            fitness = oldFitness;
        }
        // Print iteration number and new fitness
        System.out.println("Iteration " + (i + 1) + ", fitness: " + sol.getFitness(distances));
    }
    return sol;
}
The problem I'm having is that when I call my smallChange method in the RMHC, it seems to change the Tour for both the old solution and the new solution (see the note after the output below). I ran this for a few iterations on a 48-city dataset and got the following output:
[11, 6, 13, 37, 23, 45, 34, 25, 16, 39, 5, 35, 31, 9, 27, 0, 10, 42, 30, 28, 4, 12, 33, 36, 2, 21, 17, 29, 18, 20, 32, 3, 15, 47, 26, 19, 46, 8, 22, 44, 7, 24, 43, 14, 41, 1, 38, 40]
155843.9387676824 159088.1701641078
[31, 6, 13, 37, 23, 45, 34, 25, 16, 39, 5, 35, 11, 9, 27, 0, 10, 42, 30, 28, 4, 12, 33, 36, 2, 21, 17, 29, 18, 20, 32, 3, 15, 47, 26, 19, 46, 8, 22, 44, 7, 24, 43, 14, 41, 1, 38, 40]
[31, 6, 13, 37, 23, 45, 34, 25, 16, 39, 5, 35, 11, 9, 27, 0, 10, 42, 30, 28, 4, 12, 33, 36, 2, 21, 17, 29, 18, 20, 32, 3, 15, 47, 26, 19, 46, 8, 22, 44, 7, 24, 43, 14, 41, 1, 38, 40]
Iteration 1, fitness: 159088.1701641078
[31, 6, 13, 37, 23, 45, 34, 25, 16, 39, 5, 35, 11, 9, 27, 0, 10, 42, 30, 28, 4, 12, 33, 36, 2, 21, 17, 29, 18, 20, 32, 3, 15, 47, 26, 19, 46, 8, 22, 44, 7, 24, 43, 14, 41, 1, 38, 40]
159088.1701641078 144709.1336957683
Iteration 2, fitness: 144709.1336957683
[31, 6, 13, 37, 7, 45, 34, 25, 16, 39, 5, 35, 11, 9, 27, 0, 10, 42, 30, 28, 4, 12, 33, 36, 2, 21, 17, 29, 18, 20, 32, 3, 15, 47, 26, 19, 46, 8, 22, 44, 23, 24, 43, 14, 41, 1, 38, 40]
144709.1336957683 143387.5110957744
Iteration 3, fitness: 143387.5110957744
[31, 6, 13, 37, 7, 45, 22, 25, 16, 39, 5, 35, 11, 9, 27, 0, 10, 42, 30, 28, 4, 12, 33, 36, 2, 21, 17, 29, 18, 20, 32, 3, 15, 47, 26, 19, 46, 8, 34, 44, 23, 24, 43, 14, 41, 1, 38, 40]
143387.5110957744 143565.3842060348
[31, 6, 13, 37, 7, 45, 22, 25, 16, 39, 5, 35, 14, 9, 27, 0, 10, 42, 30, 28, 4, 12, 33, 36, 2, 21, 17, 29, 18, 20, 32, 3, 15, 47, 26, 19, 46, 8, 34, 44, 23, 24, 43, 11, 41, 1, 38, 40]
[31, 6, 13, 37, 7, 45, 22, 25, 16, 39, 5, 35, 14, 9, 27, 0, 10, 42, 30, 28, 4, 12, 33, 36, 2, 21, 17, 29, 18, 20, 32, 3, 15, 47, 26, 19, 46, 8, 34, 44, 23, 24, 43, 11, 41, 1, 38, 40]
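For what it's worth, this symptom is exactly what happens when both Tour objects share the same underlying ArrayList: the one-argument constructor stores the caller's list by reference, so new Tour(sol.getTour()) makes oldSol wrap the very list that sol.smallChange() later mutates. A minimal sketch of one common fix, a defensive copy in the constructor (a hypothetical change, not necessarily the only one):

// Hypothetical variant of the one-argument constructor: copy the
// incoming list so each Tour owns its own data instead of sharing it.
public Tour(ArrayList<Integer> tour)
{
    this.tour = new ArrayList<Integer>(tour); // independent copy, not a shared reference
}

With the copy in place, oldSol keeps the pre-change tour, so the rollback branch in RMHC restores a genuinely unchanged solution.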