p5.js Recursive Bubble Sort - algorithm

I'm trying to modify this solution to work for Bubble Sort, but I'm a bit out of my depth, especially with the whole async function business. The code works up to a point, but does not follow the exact pattern I would expect for Bubble Sort, and only partially sorts the array.
Can anyone help me out please?
// Current bar heights rendered by draw(); overwritten with snapshots by the
// asynchronous sorter while it runs.
let values = [];
// One-shot flag so draw() initialises the data and starts the sort exactly once.
let startSort = true;
// Kick off an asynchronous, animated bubble sort of a copy of `a`.
// The copy is sorted in place by recursiveBubbleSort, which publishes
// snapshots to the global `values` as it goes.
function bubbleSort( a ) {
  // Copy the input so the caller's array is not mutated mid-sort.
  // NOTE: `clone` was previously assigned without a declaration, creating an
  // implicit global (and a ReferenceError in strict mode); `const` fixes that.
  const clone = a.slice();
  // Fire-and-forget: the returned promise is deliberately not awaited so the
  // p5 draw() loop keeps animating while the sort progresses.
  recursiveBubbleSort( clone, clone.length );
}
//Recursive Bubble Sort
// Recursive Bubble Sort (animated): sorts arr[0..n-1] in place, publishing a
// snapshot to the global `values` after every pass so draw() can render it.
//
// FIX: the original recursed BEFORE doing the comparison pass, which inverts
// the standard algorithm (one pass to bubble the maximum to the end, THEN
// recurse on the first n-1 elements). With the order inverted the final pass
// can leave elements out of place, so the array was only partially sorted —
// the symptom described in the question. The pass now comes first.
async function recursiveBubbleSort( arr, n ) {
  // Base case: zero or one element is already sorted. Using <= also guards
  // against infinite recursion when called with an empty array (n === 0).
  if ( n <= 1 ) {
    return arr;
  }
  // One comparison pass: bubble the largest of the first n values to index n-1.
  for ( let j = 0; j < n - 1; j++ ) {
    if ( arr[j] > arr[j + 1] ) {
      [arr[j], arr[j + 1]] = [arr[j + 1], arr[j]];
    }
  }
  // Publish the current state of the sort for the draw() loop to render.
  values = arr.slice();
  // Pause so the viewer can see this intermediate state.
  await sleep( 500 );
  // The last element of the range is now in place; sort the remaining prefix.
  return recursiveBubbleSort( arr, n - 1 );
}
// Returns a promise that resolves after `ms` milliseconds.
// The `async` keyword was redundant: the function already returns a Promise
// explicitly and never awaits, so the extra async wrapper added nothing.
function sleep( ms ) {
  return new Promise( resolve => setTimeout( resolve, ms ) );
}
// p5 initialisation hook: runs once, creates the canvas and caps the frame rate.
function setup() {
  createCanvas( 600, 190 );
  frameRate( 60 );
}
// Number of bars to display on the canvas.
let numOfRects = 15;
// Pixel width of each bar; computed in draw() from the canvas width.
let rectWidth;
// p5 render loop (~60 fps). On the first frame it generates random bar heights
// and launches the asynchronous sort; every frame it redraws the current
// snapshot held in the global `values`.
// NOTE(review): the sort runs concurrently with this loop and replaces
// `values` wholesale after each pass — the animation shows whatever snapshot
// was published last, not a synchronised step sequence.
function draw() {
  if ( startSort ) {
    startSort = false;
    // Size the bars so numOfRects of them span the canvas width.
    rectWidth = floor( width / numOfRects );
    values = new Array( floor( width / rectWidth ) );
    for ( let i = 0; i < values.length; i++ ) {
      values[i] = random( height );
    }
    // Fire-and-forget: sorting proceeds while draw() keeps being called.
    bubbleSort( values );
  }
  background( 23 );
  stroke( 0 );
  fill( 255 );
  // One rectangle per value, anchored to the bottom edge of the canvas.
  for ( let i = 0; i < values.length; i++ ) {
    rect( i * rectWidth, height - values[i], rectWidth, values[i] );
  }
}
<script src="https://cdnjs.cloudflare.com/ajax/libs/p5.js/1.1.9/p5.min.js"></script>

In this answer I will not focus on how async and await work because that doesn't seem to be your goal. I'll show you how to make a working version of the bubble sort instead.
First let's get rid of this startSort variable you have: You use it to initialize your values and a better way to do it is to use the setup() function used by p5:
// p5 setup: runs once. Builds the canvas, fills the global `values` with
// random bar heights, and initialises the global iteration counter `n`.
function setup() {
  createCanvas(600, 190);
  rectWidth = floor(width / numOfRects);
  // Generate the values (one random bar height per column).
  values = new Array(floor(width / rectWidth));
  for (let i = 0; i < values.length; i++) {
    values[i] = random(height);
  }
  // The number of iterations is equal to the number of values.
  n = values.length;
}
First we fill your array with random values and define a global variable n which will hold the remaining number of iterations to do (which is the same as the number of values).
Then let's modify your bubble sort function. Here we don't need it to be recursive because, as I'll show later on, we will simply call it several times until we have done all the iterations.
// One pass of bubble sort over the first n elements of arr (in place).
// Returns the number of iterations still remaining, or 0 once done.
function bubbleSort(arr, n) {
  // A range of one element (or none) is already sorted — nothing to do.
  if (n <= 1) return 0;

  // Bubble the largest of the first n values up to position n - 1.
  for (let i = 1; i < n; i++) {
    const prev = arr[i - 1];
    if (prev > arr[i]) {
      arr[i - 1] = arr[i];
      arr[i] = prev;
    }
  }

  // One pass complete: the last element of the range is now in place.
  return n - 1;
}
There are 3 important things here:
The function modifies the arr in place: so when you call the function with an array you will have the result in the same array.
The function returns the remaining number of iterations to do (each call to the function is one iteration)
If you call it with n, the number of remaining iterations, at 1 or 0 it will simply do nothing. Otherwise it will make one pass on the array.
Now you can change your draw() function. This function is called automatically by p5 X times per second (by default 60). So each time it runs it will call the bubble sort function, updating the array and the number of remaining iterations:
// p5 render loop: draws the current state of `values`, then performs one
// bubble-sort pass per frame until `n` reaches 0 (array fully sorted).
function draw() {
  // Define the "speed" at which we do the iterations (10 passes per second).
  frameRate(10);
  background(23);
  stroke(0);
  fill(255);
  // Show the values as bars anchored to the bottom of the canvas.
  for (let i = 0; i < values.length; i++) {
    rect(i * rectWidth, height - values[i], rectWidth, values[i]);
  }
  // Make one new iteration and remember how many remain.
  n = bubbleSort(values, n);
}
Note how we used frameRate() to call it less often to let the user see the different steps of the sort.
All you need is to declare your global variables at the beginning of your sketch, put everything together and you are good to go.
// Bar heights being sorted and displayed.
let values = [];
// Number of bars on the canvas.
let numOfRects = 15;
// Pixel width of one bar (computed in setup()).
let rectWidth;
// Remaining bubble-sort iterations; 0 means the array is sorted.
let n;
You can see the complete code here; you will notice that I did two more things in this implementation:
I extracted the code which puts new values in the array into its own function resetArray()
I call this function in the setup() function to initialize the animation but also in draw() when n===0 so that when the array is sorted we generate new values to have an infinite animation.
A note about async and await. These functions are used to create asynchronous code in javascript. This is a whole topic which is not trivial for someone new to programming, you can read the doc here. Basically async is used to say that a function will take time to be executed and await is used to say to wait for the function to finish its execution.
In the function you got inspiration from, this asynchronous code is used to put a delay between two calls to the bubbleSort function so that the user can see the different iterations. Here you don't really need it because p5 gives you the mechanism of draw() and frameRate() to handle that more easily.

Related

Do I count Math.max in a Big O Runtime?

I have trouble understanding whether or not using Math.max should be counted as a loop, and therefore whether it should be included in calculating Big O runtime.
I assume for the Math.max to find the max value it has to loop and compare through all the values it was provided. Therefore it is actually looping.
My code in JS:
// Computes how much water the terrain described by `surface` can trap.
// For each interior position we find the tallest wall on each side; water
// rests there only when both walls are strictly higher than the position.
function getWaterCapacityPerSurface(surface) {
  let total = 0;
  // The outermost positions can never hold water, so skip them.
  for (let i = 1; i < surface.length - 1; i++) {
    const ground = surface[i];
    // Tallest bar strictly to the left / right of position i.
    // (Each slice + Math.max walks part of the array, so this loop is O(n²).)
    const leftBound = Math.max(...surface.slice(0, i));
    const rightBound = Math.max(...surface.slice(i + 1));
    // The water level is capped by the lower of the two bounding walls.
    const level = Math.min(leftBound, rightBound) - ground;
    if (leftBound > ground && rightBound > ground) {
      total += level;
    }
  }
  return total;
}
console.log(getWaterCapacityPerSurface([4, 2, 1, 3, 0, 1, 2]));
// returns 6
Is Big O runtime O(N(N+N)) or O(N(N))?
I assume in this case it doesn't really matter because we drop constants and at the end it is going to be O(N(N+N)) = O(N(2N)) = O(N(N)) = O(N²)
But I just would like to know whether I should count Math.max/Math.min as a loop for future reference.
Yes, if you pass a list of arguments of length n to Math.max(), then it is an O(n) operation. Math.max() iterates through every argument. See more info in the specification.

Loops and iterations

Is there a way that a function has 2 options of return and the option of return is chosen after a certain interval of iterations?
example:
a function that swaps from "a" to "b" and "b" to "a" after 4 iterations returns:
a
a
a
a
b
b
b
b
a
a
a
a
b
b
b
b
.
.
.
Edit I did something like this to solve the problem:
// Position within the current group of four identical letters.
var counter = 0;
// true prints "a", false prints "b".
var state = true;
// Prints 12 letters total, flipping between "a" and "b" after every 4 prints.
var changeState = function () {
  for (var step = 0; step < 12; step++) {
    if (counter === 4) {
      // A group of four is complete: restart the count and switch letters.
      counter = 0;
      state = !state;
    }
    console.log(state ? "a" : "b");
    counter += 1;
  }
};
changeState();
You will need to have a stateful function, as it needs to somehow remember something from previous call(s) that were made to it. This is a so-called side effect, and means that the function is not pure.
For the example you have given, the function would need to know (i.e. have a state with) the number of previous calls (modulo 8), or the previous four returned values, or some mix of this information.
How such a function is implemented, depends on the programming language.
In object oriented programming languages you would create a class with the function as method, and a property reflecting that state. When the function is called, it also updates the state. For instance, in Java:
class MyClass {
    // Number of calls made so far; grows without bound and is used modulo 8.
    int callCount = 0;

    // Returns 'a' for the first four calls, 'b' for the next four, then
    // repeats — the lookup string encodes the whole 8-call cycle.
    char myFunction() {
        return "aaaabbbb".charAt(this.callCount++ % 8);
    }
}
You would call the function repeatedly like so:
// Demo: ten successive calls print a a a a b b b b a a.
MyClass obj = new MyClass();
for (int i = 0; i < 10; i++) {
    System.out.println(obj.myFunction());
}
In JavaScript you could do the same, or you could create a closure:
// Builds a stateful function: the call counter lives in the closure, so it
// persists across calls without any global variable.
function createFunction() {
  let calls = 0;
  // The returned function walks the string "aaaabbbb" one character per
  // call, wrapping around every 8 calls.
  return function myFunction() {
    const letter = "aaaabbbb"[calls % 8];
    calls += 1;
    return letter;
  };
}
let myFunction = createFunction();
for (let i = 0; i < 10; i++) {
  console.log(myFunction());
}
In Python you can do the same, but it allows also to use default arguments (which are only initialised at function definition time), and so you could define an argument that is an object holding a counter:
def myFunction(counter=[0]):
    """Return 'a' or 'b', switching letters every four calls.

    The mutable default argument acts as persistent state: it is created
    once at definition time, so ``counter[0]`` survives between calls
    (deliberate here, despite being a well-known Python gotcha).
    """
    # Read the position *before* incrementing so the very first call yields
    # index 0. The original incremented first, so call one read index 1 and
    # the sequence started with only three 'a's (a a a b b b b a ...),
    # unlike the Java/JavaScript versions above which produce four.
    letter = "aaaabbbb"[counter[0] % 8]
    counter[0] += 1
    return letter
# Demo: call the stateful function ten times, printing one letter per call.
for _ in range(10):
    print(myFunction())
This is of course not an exhaustive list of possibilities, but the essence is that programming languages offer their own ways to construct such stateful functions.
Iterators
Some languages offer a different approach: iterators. This means the function produces a stream of return values. The function can run to produce the first value after which the running state is saved until a new value is requested from it. The function's execution context is then restored to run until it can produce the next value, ...etc.
Here is how that design would look in JavaScript:
// Generator version: produces an infinite stream of letters, one character
// of "aaaabbbb" per request, cycling forever. Execution is suspended at each
// yield and resumed by the caller's next().
function* myIterator() {
  for (let produced = 0; ; produced++) {
    yield "aaaabbbb"[produced % 8];
  }
}
let iter = myIterator();
for (let i = 0; i < 10; i++) {
  console.log(iter.next().value);
}
When a programming language offers this possibility, it is often preferred over the other alternatives listed earlier.

Find K arrays that sum up to a given array with a certain accuracy

Let's say I have set containing thousands of arrays (let's fix it to 5000 of arrays) of a fixed size (size = 8) with non negative values. And I'm given another array of the same size with non negative values (Input Array). My task is to select some subset of arrays, with the condition that if I sum them together (summation of vectors) I would get the resultant array which is very close to a given Input Array with the desired accuracy (+-m).
For example if the desired result (input array) is (3, 2, 5) and accuracy = 2
Then of course the best set would be the one that would sum up to exactly (3,2,5) but also any solution of the following form would be ok (3 +- m, 2 +- m, 5 +- m).
The question is what could be the right algorithmic approach here? It is similar to multi dimensional sack problem, but there is no cost optimization section in my task.
At least one solution is required which meets the constraints. But several would be better, so that it would be possible to have a choice.
This is a kind of extended knapsack problem. We know that it is an NP-complete task, which means we cannot use brute force and try all possibilities. It is just not computable with current computers.
What we can do is use some heuristic. One simple and useful one is simulated annealing. The principle is quite simple - at the beginning of your algorithm, when the temperature is high, you are not afraid to take even the "at the moment worse solution" (which can actually lead to the best possible solution). So at the beginning you take almost anything. Then you start cooling, and the cooler you are, the more cautious you become, so you try to improve your solution more and more and risk less and less.
The gifs on wiki are actually nice example: https://en.wikipedia.org/wiki/Simulated_annealing
I have also implemented a solution that at the end prints what the inputArray is, what your solution is, and the "negative score" (the less the better).
You are not guaranteed to get best/valid solution, but you can basically run this in some while cycle until you find solution good enough or you hit some threshold (like if you do not find good solution after running 100x times, you say "data not valid" or take the best of these "not good" solutions)
// Simulated-annealing search for a subset of `allArr` whose element-wise sum
// approximates `inputArray` to within +-maxDif per component. All data is
// generated randomly in the constructor, so results differ from run to run.
class Simulation {
  // size: length of every array; allArrSize: number of candidate arrays;
  // inputArrayRange: max value in the random target; ordinarySize: max value
  // in the random candidates; maxDif: allowed per-component deviation;
  // overDifPenalisation: extra score weight on deviation beyond maxDif.
  constructor(size, allArrSize, inputArrayRange, ordinarySize, maxDif, overDifPenalisation) {
    this.size = size;
    this.allArrSize = allArrSize;
    this.inputArrayRange = inputArrayRange;
    this.ordinarySize = ordinarySize;
    this.maxDif = maxDif;
    this.overDifPenalisation = overDifPenalisation;
    this.allArr = [];
    // Keys are indices into allArr; the boolean value is never read.
    this.solutionMap = new Map();
    // Generate the random candidate arrays.
    for (let i = 0; i < allArrSize; i++) {
      let subarr = [];
      for (let j = 0; j < size; j++) {
        subarr.push(Math.round(Math.random() * ordinarySize));
      }
      this.allArr.push(subarr);
    }
    // Annealing temperature; findBest() cools it from 100 down to 0.
    this.temperature = 100;
    // Random target the chosen subset should sum close to.
    this.inputArray = [];
    for (let i = 0; i < size; i++) {
      this.inputArray.push(Math.round(Math.random() * inputArrayRange));
    }
  }
  // Main annealing loop: mutate the current solution (add, remove, or swap
  // one candidate), always accept improvements, and sometimes accept a worse
  // solution with a probability that shrinks as the temperature drops.
  findBest() {
    while (this.temperature > 0) {
      const oldScore = this.countScore(this.solutionMap);
      // console.log(oldScore);
      let newSolution = new Map(this.solutionMap);
      if (this.addNewOrRemove(true)) {
        // Add a randomly chosen candidate array to the solution.
        const newCandidate = Math.floor(Math.random() * this.allArrSize);
        newSolution.set(newCandidate, true);
      } else if (this.addNewOrRemove(false)) {
        // Remove a random member of the solution.
        const deleteCandidate = Math.floor(Math.random() * this.solutionMap.size);
        Simulation.deleteFromMapByIndex(newSolution, deleteCandidate);
      } else {
        // Swap: remove one member, then add one candidate.
        const deleteCandidate = Math.floor(Math.random() * this.solutionMap.size);
        Simulation.deleteFromMapByIndex(newSolution, deleteCandidate);
        const newCandidate = Math.floor(Math.random() * this.allArrSize);
        newSolution.set(newCandidate, true);
      }
      const newScore = this.countScore(newSolution);
      if (newScore < oldScore) {
        // Strict improvement: always accept.
        this.solutionMap = newSolution;
      } else if ((newScore - oldScore) / newScore < this.temperature / 300) {
        // Worse (or equal) score: accept with a temperature-dependent chance.
        this.solutionMap = newSolution;
      }
      this.temperature -= 0.001;
    }
    console.log(this.countScore(this.solutionMap), 'Negative Score');
    console.log(this.sumTheSolution(this.solutionMap).toString(), 'Solution');
    console.log(this.inputArray.toString(), 'Input array');
    console.log('Solution is built on these inputs:');
    this.solutionMap.forEach((val, key) => console.log(this.allArr[key].toString()))
  }
  // Randomly decide whether to add (addNew=true) or remove (addNew=false),
  // biased by how many components currently miss the target by more than
  // maxDif: undershooting components favour adding, overshooting favour removal.
  addNewOrRemove(addNew) {
    const sum = this.sumTheSolution(this.solutionMap);
    let dif = 0;
    sum.forEach((val, i) => {
      const curDif = this.inputArray[i] - val;
      if (curDif < -this.maxDif) {
        // Component overshoots the target beyond tolerance.
        dif -= 1;
      }
      if (curDif > this.maxDif) {
        // Component undershoots the target beyond tolerance.
        dif += 1;
      }
    });
    let chance;
    if (addNew) {
      chance = (dif + this.size - 1) / (this.size * 2);
    } else {
      chance = (-dif + this.size - 1) / (this.size * 2);
    }
    return chance > Math.random();
  }
  // Score a solution: total absolute deviation from the target, with any
  // deviation beyond maxDif additionally multiplied by overDifPenalisation.
  // Lower is better; 0 would be a perfect match.
  countScore(solution) {
    const sum = this.sumTheSolution(solution);
    let dif = 0;
    sum.forEach((val, i) => {
      let curDif = Math.abs(this.inputArray[i] - val);
      if (curDif > this.maxDif) {
        curDif += (curDif - this.maxDif) * this.overDifPenalisation;
      }
      dif += curDif;
    });
    return dif;
  }
  // Element-wise sum of all candidate arrays currently in the solution.
  sumTheSolution(solution) {
    const sum = Array(this.size).fill(0);
    solution.forEach((unused, key) => this.allArr[key].forEach((val, i) => sum[i] += val));
    return sum;
  }
  // Remove the index-th entry (in insertion order) from the map. Maps have no
  // positional access, so the whole map is walked to locate the key.
  static deleteFromMapByIndex(map, index) {
    let i = 0;
    let toDelete = null;
    map.forEach((val, key) => {
      if (index === i) {
        toDelete = key;
      }
      i++;
    });
    map.delete(toDelete);
  }
}
// Demo run: 8-element arrays, 5000 candidates, target values up to 1000,
// candidate values up to 100, tolerance 40, heavy over-tolerance penalty.
// NOTE: findBest() cools from 100 in steps of 0.001, i.e. 100000 iterations.
const simulation = new Simulation(8, 5000, 1000, 100, 40, 100);
simulation.findBest();
You can play a bit with the numbers to get what you need (the speed of cooling, how it affects probability, some values in the constructor etc.)

CouchDB null value when sort descending

I have CouchDB view that gives me a correct value in natural order and a null when sorted descending, here are the Futon screenshots:
Natural order
Descending order
Here is the view code:
"informe_precios": {
"map": "function(doc){if(doc.doc_type=='precio'){emit([doc.comprador,doc.fecha.substr(0,4),doc.fecha.substr(5,2)],{precio:doc.precio,litros:doc.litros});}}",
"reduce": "function(keys, values, rereduce){var importe= 0; var totallitros = 0;for(var i = 0; i < values.length; i++) {importe += values[i].precio*values[i].litros;totallitros += values[i].litros;}return importe/totallitros;}"
}
I need it descending because I want to get 12 last values.
TIA
Diego
You're always assuming that your reduce function is called with the output of your map function, ie. you're not handling the rereduce situation.
In the rereduce your values will be the importe/totallitros values from previous reduce calls.
Your reduce function is getting a "price per liter" average for each month, so because it's an average there's no way for your rereduce function to actually handle that data because for the multiple values coming in there's no way to know their weight in the average.
So, you'll need to change your function to return the count so that you can use that to weight the average in the rereduce function (we're also using the inbuilt sum function to make things simpler):
// CouchDB reduce function returning [weightedAveragePrice, count] so that the
// rereduce stage can recombine partial averages instead of producing null.
function(keys, values, rereduce) {
  if (rereduce) {
    // Rereduce: `values` holds [avg, count] pairs from earlier reduce calls.
    // `sum` is CouchDB's built-in helper, not standard JavaScript.
    var length = sum(values.map(function(v){return v[1]}));
    // Recombine the partial averages, weighting each by its share of docs.
    // NOTE(review): the weight is the document count, not the litre total;
    // for an exact price-per-litre average the litres should be the weight.
    var avg = sum(values.map(function(v){
      return v[0] * (v[1] / length)
    }));
    return [avg, length];
  }
  else {
    // First-level reduce: `values` are the map outputs {precio, litros}.
    var importe= 0;
    var totallitros = 0;
    for( var i = 0; i < values.length; i++) {
      importe += values[i].precio * values[i].litros;
      totallitros += values[i].litros;
    }
    // Average price per litre, plus the number of documents folded in.
    return [ importe/totallitros, values.length ];
  }
}
The final result you'll see in your view here will be an array, so you'll always need to pick out the first element of that in your client code.

Algorithm for finding the closest set of measurment to certain measurment

I have a collection of measurments, example:
measurment #1: { 200, 350, 712, 1023, 1430, 1555, 1800, 2036, 2569 }
measurment #2: { 165, 400, 974, 1124, 1600, 1893, 1919, 2032, 2654, 2932 }
...
measurment #N: { 234, 454, 879, 1432, 1877, 2000, 2543, 2876 }
The order of the elements in each measurment is important.
Each element will have higher value than the previous.
The number of elements in each measurment may vary,
but they should not vary too much.
Now i am getting as an input a new measurment
(lets say: { 212, 354, 978, 1222, 1454, 1922, 2013, 2432, 2987})
and should find the closest measurment from the collection of measurment i already possess.
My question is what algorithm should i use for this task ?
More:
1. It is also possible to extend the task in such a manner that instead of one measurement as input I will be given a small collection of measurements.
2. Each element in a measurment represent time passed in second from the begining.
The measuring is stopped when it reaches 3600 seconds (1 hour), therefore the minimal possible value will be 0 and the maximal will be 3599.
The events creating each element in the measurment to be created is affected by a human behaviour.
Thanks for your help :)
Assuming that your data is "fuzzy", one class of algorithms you may want to look into is dynamic programming. By fuzzy I mean that two sets are almost aligned, but one set may have extra elements inserted or removed compared to the other, and the matching elements "almost" match.
In these types of algorithms you typically define a distance score by defining a penalty for inserting/removing an element in the alignment and a penalty score for two elements not quite matching.
In your case you may define an insert / delete penalty of "100" seconds for inserting an extra timing event, and a two-element distance score as the absolute distance in seconds.
Given that definition you can easily find and modify a needleman-wunsch algorithm implementation or something similar. This will give you the distance between two small sets of measurements in an acceptable amount of time.
However, if your number of elements in your measurements is huge or your number of sets is huge, and you need the answer in say milliseconds, then it is a rather difficult problem unless you can find a lot of good constraints for your problem.
The above is just an example, it all boils down to the context. Is your data noisy? How "noisy", with extra elements in the middle, start or end or just slightly off in position? plus a ton of other questions.
Choosing and implementing fuzzy algorithms can range between pretty easy to near impossible all depending on the context and what you are going to use the result for. Does it need to be exact or "just good enough". Does it need to be fast, etc.
Find the squared sum of errors of your new measure with each measurement in your collection. Then return the one from your collection with the smallest error.
// Toy collection of stored measurements used by the findClosest* demos below.
var measures = [
[1, 2, 3, 4],
[10, 20, 30, 40],
[66, 77, 88, 99],
[101, 202, 303, 404]
];
// ignores measurements that aren't the same length as the data
// uses the squared sum of differences (errors)
function findClosest(data) {
  let bestError = 0x7FFFFFFF; // max 32bit signed int
  let best = null;
  for (let i = 0; i < measures.length; i++) {
    const candidate = measures[i];
    // Only measurements of identical length can be compared element-wise.
    if (candidate.length !== data.length) { continue; }
    let error = 0;
    for (let j = 0; j < data.length; j++) {
      const diff = candidate[j] - data[j];
      error += diff * diff;
    }
    if (error < bestError) {
      bestError = error;
      best = candidate;
    }
  }
  return best;
}
// allows data that is different length than measurements by trying to best fit each element of data to an element of the tested measurement
// uses the squared sum of differences (error)
// NOTE(review): minLocalError is initialised once per measurement but never
// reset inside the j loop, so each subsequent data element reuses (and can
// only lower) the minimum found for earlier elements. This looks unintended
// given the stated "best fit each element" goal; however, the expected
// outputs printed below match this exact behaviour, so it is documented
// rather than changed here.
function findClosestV2(data) {
  var minError = 0x7FFFFFFF; // max 32bit signed int
  var result = null;
  for(var i=0; i < measures.length; i++) {
    var measure = measures[i];
    var error = 0;
    var minLocalError = 0x7FFFFFFF;
    for(var j=0; j < data.length; j++) {
      // Squared distance from data[j] to its closest element of `measure`
      // (carrying over the minimum from previous j — see NOTE above).
      for(var k=0; k < measure.length; k++) {
        var localError = Math.pow(measure[k] - data[j], 2);
        if(localError < minLocalError) {
          minLocalError = localError;
        }
      }
      error += minLocalError;
    }
    // Keep the measurement with the smallest accumulated error.
    if(error < minError) {
      minError = error;
      result = measures[i];
    }
  }
  return result;
}
// allows data that is different length than measurements by trying to best fit each element of data to an element of the tested measurement
// uses the average of the absolute error % using the previous measurement as the ideal value
// NOTE(review): as in findClosestV2, minLocalError is never reset inside the
// j loop, so each data element inherits the minimum from earlier elements.
// Also note the division by measure[k]: a measurement containing 0 would
// produce Infinity here.
function findClosestV3(data) {
  var minError = 0x7FFFFFFF; // max 32bit signed int
  var result = null;
  for(var i=0; i < measures.length; i++) {
    var measure = measures[i];
    var error = 0;
    var minLocalError = 0x7FFFFFFF;
    for(var j=0; j < data.length; j++) {
      // Relative (percentage) distance from data[j] to the closest element
      // of `measure` (carrying over the minimum from previous j — see NOTE).
      for(var k=0; k < measure.length; k++) {
        var localError = Math.abs( (measure[k] - data[j]) / measure[k] );
        if(localError < minLocalError) {
          minLocalError = localError;
        }
      }
      error += minLocalError;
    }
    // average of sum of error percentages
    error /= data.length;
    if(error < minError) {
      minError = error;
      result = measures[i];
    }
  }
  return result;
}
// Demo calls; the trailing comments show the results produced by the three
// functions exactly as written above.
console.log(findClosest([2,3,4,5])); // [1,2,3,4]
console.log(findClosest([70,80,90,100])); // [66,77,88,99]
console.log(findClosest([9,19,304,405])); // [101,202,303,404]
console.log(findClosestV2([404])); // [101,202,303,404]
console.log(findClosestV2([66,67,68,69])); // [66,77,88,99]
console.log(findClosestV2([9,19,304,405])); // [10,20,30,40]
console.log(findClosestV3([404])); // [101,202,303,404]
console.log(findClosestV3([66,67,68,69])); // [66,77,88,99]
console.log(findClosestV3([9,19,304,405])); // [10,20,30,40]

Resources