I came across a situation where having a single handler simplifies the project. Does this have any impact on performance? In particular, since the mousemove event fires so often, does routing it through that many conditions have any impact on performance?
var myHandler = function(e){
    if (e.type == 'mousemove'){
    } else if (e.type == 'mouseover'){
    } else if (e.type == 'mouseout'){
    } else if (e.type == 'mousedown'){
    } else if (e.type == 'mouseup'){
    }
};
$('#Element').bind('mouseover mouseout mousedown mouseup mousemove', myHandler);
If you do need to handle all those events, and you order the if-else statements by the frequency with which the events fire (as you already have), the performance penalty is insignificant: at most 4 short string comparisons. The following code tries to benchmark the cost of 10,000,000 string comparisons of fixed size:
$(function (){
    Function.prototype.benchmark = function(name, times, args){
        var iteration;
        var start = new Date();
        for (iteration = 0; iteration < times; iteration++) {
            var result = this.apply(this, args);
        }
        var end = new Date();
        alert(name + " : " + (end - start));
    }

    function test(args){
        return args[0] == "thisistest";
    }

    function overhead(args){
    }

    test.benchmark("string comparison", 10000000, ["thisistesT"]);
    // run the same without the string comparison
    overhead.benchmark("overhead", 10000000, ["thisistesT"]);
});
Since the browser is not the only application on my PC, the results vary between executions; however, I very rarely got results of under 100ms in Chrome (remember that this is for 10,000,000 iterations).
Anyway, while your performance will not suffer from binding multiple events to a single function, I really doubt that this will simplify your project. Having many if-else statements is usually considered bad practice and a design flaw.
If you do this so that you can share state between the handlers by having them under the common scope of a function, you are better off having something like:
$(function (){
    var elementSelector = "#Element";
    var i = 0; // shared
    var events = {
        mouseover : function(){ alert("mouseOver " + elementSelector + i++); },
        mouseout : function(){ alert("mouseOut " + elementSelector + i++); }
        // define the rest of the event handlers...
    };
    var $target = $(elementSelector);
    for (var k in events){
        $target.bind(k, events[k]);
    }
});
One more note regarding performance (and code readability): a switch is significantly faster than a chain of else ifs.
var myHandler = function(e){
    switch (e.type){
        case 'mousemove':
            break;
        case 'mouseover':
            break;
        case 'mouseout':
            break;
        case 'mousedown':
            break;
        case 'mouseup':
            break;
    }
};
For more details you can take a look at Performance of if-else, switch or map based conditioning.
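For illustration, here is a minimal sketch of the map-based approach mentioned in that link (the empty handler bodies are placeholders, as in the snippets above):
var handlers = {
    mousemove: function(e){ /* ... */ },
    mouseover: function(e){ /* ... */ },
    mouseout:  function(e){ /* ... */ },
    mousedown: function(e){ /* ... */ },
    mouseup:   function(e){ /* ... */ }
};
var myHandler = function(e){
    var handler = handlers[e.type];
    if (handler) handler(e); // a single property lookup instead of a comparison chain
};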
I decided to break this onEdit(e) trigger function up into multiple functions, but when I did, the event object (e) got "lost." After messing with it for a while, I finally got it to work again, but I don't think it's the most efficient solution.
Any suggestions, or is this good enough? Basically, I just added var e = e; and that made it work again.
function onEdit(e){
    Logger.log(e);
    if (e.range.getSheet().getName() == 'Estimate'){
        var e = e;
        Logger.log("Starting subCatDV...");
        subCatDV(e);
        Logger.log("Finished subCatDV!");
        Logger.log("Starting itemDV...");
        itemDV(e);
        Logger.log("Finished itemDV!");
        Logger.log("Starting subItemDV...");
        subItemDV(e);
        Logger.log("Finished subItemDV!");
    }
    if (e.range.getSheet().getName() == 'Items'){
        subCatDV();
    }
    return;
}
Here is the function that didn't seem to be receiving the event object:
function subItemDV(e){
    // Populate sub-item data validations
    var estss = SpreadsheetApp.getActive().getSheetByName('Estimate');
    var itemss = SpreadsheetApp.getActive().getSheetByName('Items');
    var subItemDVss = SpreadsheetApp.getActive().getSheetByName('subItemDataValidations');
    var activeCell = estss.getActiveCell();
    Logger.log("I'm in subItemDV...");
    Logger.log(e);
    Logger.log(activeCell);
    Logger.log("Checking sheet name...");
    if (activeCell.getColumn() == 3 && activeCell.getRow() > 1){
        if (e.range.getSheet().getName() == 'Items') return;
        Logger.log("Not in 'Items' sheet! Moving on...");
        activeCell.offset(0, 1).clearContent().clearDataValidations();
        var subItem = subItemDVss.getRange(activeCell.getRow(), activeCell.getColumn(), itemss.getLastColumn()).getValues();
        var subItemIndex = subItem[0].indexOf(activeCell.getValue()) + 2;
        Logger.log("Checking subItemIndex...");
        if (subItemIndex != 0){
            var subItemValidationRange = subItemDVss.getRange(activeCell.getRow(), 4, 1, subItemDVss.getLastColumn());
            var subItemValidationRule = SpreadsheetApp.newDataValidation().requireValueInRange(subItemValidationRange).build();
            activeCell.offset(0, 1).setDataValidation(subItemValidationRule);
            Logger.log("Finished checking subItemIndex...");
        }
    }
}
So as not to inflate the discussion in comments: you can safely remove the var e = e assignment from the script, as it has no effect on the problems that your updated version of the script solved:
e is an event object that is constructed in response to the trigger being fired. Since in your case the trigger is an onEdit(e) trigger, the event object is undefined until an edit is made in the target Spreadsheet (please note that script-triggered edits do not count);
Even if you call the function with a parameter (like doSomething(e)), the event object won't be persisted unless you either access the parameter via the arguments object or explicitly define it in the function declaration (function doSomething(e));
Also, you might've missed the e binding in the last subCatDV() call, and the if statement can be optimized (by the way, don't use the equality comparison; use the identity comparison instead, it will save you debugging time in the future):
var name = e.range.getSheet().getName();
if (name === 'Estimate') {
    doSomething(e);
} else if (name === 'Items') { // identity comparison ensures type match
    doSomethingElse(e);
}
Useful links
event object reference;
arguments object reference;
I created a simple video calling app using WebRTC and WebSockets.
But when I run the code, the following error occurs:
DOMException [InvalidStateError: "setRemoteDescription needs to called before addIceCandidate"
code: 11
I don't know how to resolve this error.
Here is my code:
var localVideo;
var remoteVideo;
var peerConnection;
var serverConnection; // was an implicit global in the original
var uuid;
var localStream;

var peerConnectionConfig = {
    'iceServers': [
        {'urls': 'stun:stun.services.mozilla.com'},
        {'urls': 'stun:stun.l.google.com:19302'},
    ]
};

function pageReady() {
    uuid = uuid(); // note: this overwrites the uuid() function with its result
    console.log('Inside Page Ready');
    localVideo = document.getElementById('localVideo');
    remoteVideo = document.getElementById('remoteVideo');

    serverConnection = new WebSocket('wss://' + window.location.hostname + ':8443');
    serverConnection.onmessage = gotMessageFromServer;

    var constraints = {
        video: true,
        audio: true,
    };

    if (navigator.mediaDevices.getUserMedia) {
        navigator.mediaDevices.getUserMedia(constraints).then(getUserMediaSuccess).catch(errorHandler);
    } else {
        alert('Your browser does not support getUserMedia API');
    }
}

function getUserMediaSuccess(stream) {
    localStream = stream;
    localVideo.src = window.URL.createObjectURL(stream);
}

function start(isCaller) {
    console.log('Inside isCaller');
    peerConnection = new RTCPeerConnection(peerConnectionConfig);
    peerConnection.onicecandidate = gotIceCandidate;
    peerConnection.onaddstream = gotRemoteStream;
    peerConnection.addStream(localStream);

    if (isCaller) {
        console.log('Inside Caller to create offer');
        peerConnection.createOffer().then(createdDescription).catch(errorHandler);
    }
}

function gotMessageFromServer(message) {
    console.log('Message from Server');
    if (!peerConnection) {
        console.log('Inside !Peer Conn');
        start(false);
    }

    var signal = JSON.parse(message.data);

    // Ignore messages from ourself
    if (signal.uuid == uuid) return;

    if (signal.sdp) {
        console.log('Inside SDP');
        peerConnection.setRemoteDescription(new RTCSessionDescription(signal.sdp)).then(function() {
            // Only create answers in response to offers
            if (signal.sdp.type == 'offer') {
                console.log('Before Create Answer');
                peerConnection.createAnswer().then(createdDescription).catch(errorHandler);
            }
        }).catch(errorHandler);
    } else if (signal.ice) {
        console.log('Inside Signal Ice');
        peerConnection.addIceCandidate(new RTCIceCandidate(signal.ice)).catch(errorHandler);
    }
}

function gotIceCandidate(event) {
    console.log('Inside Got Ice Candi');
    if (event.candidate != null) {
        serverConnection.send(JSON.stringify({'ice': event.candidate, 'uuid': uuid}));
    }
}

function createdDescription(description) {
    console.log('got description');
    peerConnection.setLocalDescription(description).then(function() {
        console.log('Inside Setting ');
        serverConnection.send(JSON.stringify({'sdp': peerConnection.localDescription, 'uuid': uuid}));
    }).catch(errorHandler);
}

function gotRemoteStream(event) {
    console.log('got remote stream');
    remoteVideo.src = window.URL.createObjectURL(event.stream);
}

function errorHandler(error) {
    console.log(error);
}

// Taken from http://stackoverflow.com/a/105074/515584
// Strictly speaking, it's not a real UUID, but it gets the job done here
function uuid() {
    function s4() {
        return Math.floor((1 + Math.random()) * 0x10000).toString(16).substring(1);
    }
    return s4() + s4() + '-' + s4() + '-' + s4() + '-' + s4() + '-' + s4() + s4() + s4();
}
This is my code; I don't know how to order the addIceCandidate and setRemoteDescription calls.
You need to make sure that
peerConnection.addIceCandidate(new RTCIceCandidate(signal.ice))
is called after the description is set.
You have a situation where you receive an ice candidate and try to add it to the peerConnection before the peerConnection has finished setting the description.
I had a similar situation, and I created an array for storing candidates that arrived before setting the description completed, and a variable that tracks whether the description is set. If the description is set, I add candidates to the peerConnection; otherwise I add them to the array. (When you set your variable to true, you can also go through the array and add all stored candidates to the peerConnection.)
The way WebRTC works (as far as I understand) is that the two peers have to agree on how to communicate with each other, in this order: give an offer to your peer, get your peer's answer, and select an ICE candidate to communicate on; then, if you want, send your media streams for a video conversation.
For a good example of how to implement those functions, and in which order, you can visit https://github.com/alexan1/SignalRTC; the author has a good understanding of how to do this.
You might already have a solution to your problem at this time, but I'm replying in case you do not.
EDIT: As I have been told, this solution is an anti-pattern and you should NOT implement it this way. For more info on how I solved it while still keeping a reasonable flow, follow this answer and comment section: https://stackoverflow.com/a/57257449/779483
TLDR: Instead of calling addIceCandidate as soon as the signaling information arrives, add the candidates to a queue. After calling setRemoteDescription, go through candidates queue and call addIceCandidate on each one.
--
From this answer I learned that we have to call setRemoteDescription(offer) before we add the ICE candidate data.
So, expanding on @Luxior's answer, I did the following:
When a signaling message with a candidate arrives:
Check if the remote description was set (via a boolean flag, i.e. remoteIsReady)
If it was, call addIceCandidate
If it wasn't, add it to a queue
After setRemoteDescription is called (in the answer signal or the answer client action):
Call a method that goes through the candidate queue and calls addIceCandidate on each one
Set the boolean flag (remoteIsReady) to true
Empty the queue
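A minimal sketch of that flow, adapted to the code in the question (the names pendingCandidates, remoteIsReady, onIceSignal and onSdpSignal are illustrative, not part of the original code):
var pendingCandidates = [];
var remoteIsReady = false;

function onIceSignal(ice) {
    if (remoteIsReady) {
        peerConnection.addIceCandidate(new RTCIceCandidate(ice)).catch(errorHandler);
    } else {
        pendingCandidates.push(ice); // too early: queue it for later
    }
}

function onSdpSignal(sdp) {
    peerConnection.setRemoteDescription(new RTCSessionDescription(sdp)).then(function() {
        remoteIsReady = true;
        // Flush the queue now that the remote description is set.
        pendingCandidates.forEach(function(ice) {
            peerConnection.addIceCandidate(new RTCIceCandidate(ice)).catch(errorHandler);
        });
        pendingCandidates = [];
    }).catch(errorHandler);
}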
I have some code that saves data using Breeze and reports progress over multiple saves; it is working reasonably well.
However, sometimes a save will time out, and I'd like to retry it once automatically. (Currently the user is shown an error and has to retry manually.)
I am struggling to find an appropriate way to do this, and I am confused by promises, so I'd appreciate some help.
Here is my code:
//I'm using Breeze, but because the save takes so long, I
//want to break the changes down into chunks and report progress
//as each chunk is saved....
var surveys = EntityQuery
    .from('PropertySurveys')
    .using(manager)
    .executeLocally();

var promises = [];
var fails = [];
var so = new SaveOptions({ allowConcurrentSaves: false });
var count = 0;

//...so I iterate through the surveys, creating a promise for each survey...
for (var i = 0, len = surveys.length; i < len; i++) {
    var query = EntityQuery.from('AnsweredQuestions')
        .where('PropertySurveyID', '==', surveys[i].ID)
        .expand('ActualAnswers');
    var graph = manager.getEntityGraph(query);
    var changes = graph.filter(function (entity) {
        return !entity.entityAspect.entityState.isUnchanged();
    });
    if (changes.length > 0) {
        promises.push(manager
            .saveChanges(changes, so)
            .then(function () {
                //reporting progress
                count++;
                logger.info('Uploaded ' + count + ' of ' + promises.length);
            },
            function () {
                //could I retry the fail here?
                fails.push(changes);
            }
        ));
    }
}

//....then I use $q.all to execute the promises
return $q.all(promises).then(function () {
    if (fails.length > 0) {
        //could I retry the fails here?
        saveFail();
    }
    else {
        saveSuccess();
    }
});
Edit
To clarify why I have been attempting this:
I have an http interceptor that sets a timeout on all http requests. When a request times out, the timeout is adjusted upwards and the user is shown an error message telling them they can retry with a longer wait if they wish.
Sending all the changes in one http request is looking like it could take several minutes, so I decided to break the changes down into several http requests, reporting progress as each request succeeds.
Now, some requests in the batch might timeout and some might not.
Then I had the bright idea that I would set a low timeout for the http request to start with and automatically increase it. But the batch is sent asynchronously with the same timeout setting and the time is adjusted for each failure. That is no good.
To solve this I wanted to move the timeout adjustment after the batch completes, then also retry all requests.
To be honest I'm not so sure an automatic timeout adjustment and retry is such a great idea in the first place. And even if it was, it would probably be better in a situation where http requests were made one after another - which I've also been looking at: https://stackoverflow.com/a/25730751/150342
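For reference, a minimal sketch of that sequential approach, adapted to this code (changesList stands for an array of change sets built as in the loop above; that name is an assumption, not from the original code):
return changesList.reduce(function (chain, changes) {
    return chain.then(function () {
        // the next save starts only after the previous one succeeds
        // (a failure short-circuits the rest of the chain)
        return manager.saveChanges(changes, so);
    });
}, $q.when());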
Orchestrating retries downstream of $q.all() is possible but would be very messy indeed. It's far simpler to perform retries before aggregating the promises.
You could exploit closures and retry-counters, but it's cleaner to build a catch chain:
function retry(fn, n) {
    /*
     * Description: perform an arbitrary asynchronous function,
     * and, on error, retry up to n times.
     * Returns: promise
     */
    var p = fn(); // first try
    for (var i = 0; i < n; i++) {
        p = p.catch(function(error) {
            // possibly log error here to make it observable
            return fn(); // retry
        });
    }
    return p;
}
Now, amend your for loop:
use Function.prototype.bind() to define each save as a function with bound-in parameters.
pass that function to retry().
push the promise returned by retry().then(...) onto the promises array.
var query, graph, changes, saveFn;
for (var i = 0, len = surveys.length; i < len; i++) {
    query = ...; // as before
    graph = ...; // as before
    changes = ...; // as before
    if (changes.length > 0) {
        saveFn = manager.saveChanges.bind(manager, changes, so); // this is what needs to be tried/retried
        promises.push(retry(saveFn, 1).then(function() {
            // as before
        }, function () {
            // as before
        }));
    }
}
return $q.all(promises)... // as before
EDIT
It's not clear why you might want to retry downstream of $q.all(). If it's a matter of introducing some delay before retrying, the simplest way would be to do so within the pattern above.
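For example, a delay could be folded into the retry helper like this (a sketch assuming Angular's $timeout service is injected; the delay value is an arbitrary example):
function retryWithDelay(fn, n, delayMs) {
    var p = fn(); // first try
    for (var i = 0; i < n; i++) {
        p = p.catch(function (error) {
            // wait delayMs, then retry
            return $timeout(angular.noop, delayMs).then(fn);
        });
    }
    return p;
}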
However, if retrying downstream of $q.all() is a firm requirement, here's a cleanish recursive solution that allows any number of retries, with minimal need for outer vars:
var surveys = //as before
var limit = 2;

function save(changes) {
    return manager.saveChanges(changes, so).then(function () {
        return true; // true signifies success
    }, function (error) {
        logger.error('Save Failed');
        return changes; // retry (subject to limit)
    });
}

function saveChanges(changes_array, tries) {
    tries = tries || 0;
    if (tries >= limit) {
        throw new Error('After ' + tries + ' tries, ' + changes_array.length + ' changes objects were still unsaved.');
    }
    if (changes_array.length > 0) {
        logger.info('Starting try number ' + (tries + 1) + ' comprising ' + changes_array.length + ' changes objects');
        return $q.all(changes_array.map(save)).then(function (results) {
            var successes = results.filter(function (item) { return item === true; });
            var failures = results.filter(function (item) { return item !== true; });
            logger.info('Uploaded ' + successes.length + ' of ' + changes_array.length);
            return saveChanges(failures, tries + 1); // recursive call
        });
    } else {
        return $q.when(); // return a resolved promise
    }
}
//using reduce to populate an array of changes
//the second parameter passed to the reduce method is the initial value
//for memo - in this case an empty array
var changes_array = surveys.reduce(function (memo, survey) {
    //memo is the return value from the previous call to the function
    var query = EntityQuery.from('AnsweredQuestions')
        .where('PropertySurveyID', '==', survey.ID)
        .expand('ActualAnswers');
    var graph = manager.getEntityGraph(query);
    var changes = graph.filter(function (entity) {
        return !entity.entityAspect.entityState.isUnchanged();
    });
    if (changes.length > 0) {
        memo.push(changes);
    }
    return memo;
}, []);

return saveChanges(changes_array).then(saveSuccess, saveFail);
Progress reporting is slightly different here. With a little more thought it could be made more like in your own answer.
This is a very rough idea of how to solve it.
var promises = [];
var LIMIT = 3; // 3 tries per promise

data.forEach(function (chunk) {
    promises.push(tryOrFail({
        chunk: chunk,
        tries: 0
    }));
});

function tryOrFail(data) {
    if (data.tries === LIMIT) return $q.reject();
    ++data.tries;
    return processChunk(data.chunk)
        .catch(function () {
            // Some error handling here
            return tryOrFail(data);
        });
}

$q.all(promises) //...
Two useful answers here, but having worked through this I have concluded that immediate retries are not really going to work for me.
I want to wait for the first batch to complete, then, if the failures are because of timeouts, increase the timeout allowance before retrying the failures.
So I took Juan Stiza's example and modified it to do what I want, i.e. retry failures with $q.all.
My code now looks like this:
var surveys = //as before
var successes = 0;
var retries = 0;
var failedChanges = [];

//The saveChanges also keeps a track of retries, successes and fails
//it resolves first time through, and rejects second time
//it might be better written as two functions - a save and a retry
function saveChanges(data) {
    if (data.retrying) {
        retries++;
        logger.info('Retrying ' + retries + ' of ' + failedChanges.length);
    }
    return manager
        .saveChanges(data.changes, so)
        .then(function () {
            successes++;
            logger.info('Uploaded ' + successes + ' of ' + promises.length);
        },
        function (error) {
            if (!data.retrying) {
                //store the changes and resolve the promise
                //so that saveChanges can be called again after the call to $q.all
                failedChanges.push(data.changes);
                return; //resolved
            }
            logger.error('Retry Failed');
            return $q.reject();
        });
}

//using map instead of a for loop to call saveChanges
//and store the returned promises in an array
var promises = surveys.map(function (survey) {
    var changes = //as before
    return saveChanges({ changes: changes, retrying: false });
});

logger.info('Starting data upload');
return $q.all(promises).then(function () {
    if (failedChanges.length > 0) {
        //renamed from "retries" to avoid shadowing the outer counter
        var retryPromises = failedChanges.map(function (data) {
            return saveChanges({ changes: data, retrying: true });
        });
        return $q.all(retryPromises).then(saveSuccess, saveFail);
    }
    else {
        saveSuccess();
    }
});
I have a decently large data set of around 1100 records. This data set is mapped to an observable array, which is then bound to a view. Since these records are updated frequently, the observable array is updated each time using the ko.mapping.fromJS helper.
This particular call takes around 40s to process all the rows, and the user interface just locks up for that period.
Here is the code:
var transactionList = ko.mapping.fromJS([]);

//Getting the latest transactions, which are around 1100 in number:
var data = storage.transactions();

//Mapping the data to the observable array, which takes around 40s
ko.mapping.fromJS(data, transactionList);
Is there a workaround for this? Or should I just opt for web workers to improve performance?
Knockout.viewmodel is a replacement for knockout.mapping that is significantly faster at creating viewmodels for large object arrays like this, so you should notice a substantial performance increase.
http://coderenaissance.github.com/knockout.viewmodel/
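A hedged sketch of what the swap might look like (fromModel and updateFromModel are the names the library documents; check its docs before relying on them):
// create the view model once...
var transactionList = ko.viewmodel.fromModel(storage.transactions());
// ...then refresh it in place when new data arrives
ko.viewmodel.updateFromModel(transactionList, storage.transactions());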
I have also thought of the following workaround, which uses less code:
var transactionList = ko.mapping.fromJS([]);

//Getting the latest transactions, which are around 1100 in number:
var data = storage.transactions();

//Instead of - ko.mapping.fromJS(data, transactionList)
var i = 0;

//clear the list completely first
transactionList.destroyAll();

//Set an interval of 0 and keep pushing the content to the list one by one.
var interval = setInterval(function () {
    if (i == data.length - 1) {
        clearInterval(interval);
    }
    transactionList.push(ko.mapping.fromJS(data[i++]));
}, 0);
I had the same problem with the mapping plugin. The Knockout team says that the mapping plugin is not intended to work with large arrays. If you have to load such big data to the page, you likely have an improper design of the system.
The best way to fix this is to use server pagination instead of loading all the data on page load. If you don't want to change the design of your application, there are some workarounds which may help you:
Map your array manually:
var data = storage.transactions();
var mappedData = ko.utils.arrayMap(data, function(item){
    return ko.mapping.fromJS(item);
});
var transactionList = ko.observableArray(mappedData);
Map the array asynchronously. I have written a function that processes the array in portions using setTimeout (yielding to the UI between steps) and reports progress to the user:
function processArrayAsync(array, itemFunc, afterStepFunc, finishFunc) {
    var itemsPerStep = 20;
    var processor = new function () {
        var self = this;
        self.array = array;
        self.processedCount = 0;
        self.itemFunc = itemFunc;
        self.afterStepFunc = afterStepFunc;
        self.finishFunc = finishFunc;
        self.step = function () {
            var tillCount = Math.min(self.processedCount + itemsPerStep, self.array.length);
            for (; self.processedCount < tillCount; self.processedCount++) {
                self.itemFunc(self.array[self.processedCount], self.processedCount);
            }
            self.afterStepFunc(self.processedCount);
            // compare against the full length so the final partial chunk is processed
            if (self.processedCount < self.array.length)
                setTimeout(self.step, 1);
            else
                self.finishFunc();
        };
    };
    processor.step();
}
Your code:
var data = storage.transactions();
var transactionList = ko.observableArray([]);
processArrayAsync(data,
    function (item) { // Step function
        var transaction = ko.mapping.fromJS(item);
        // pushes to the underlying array without notifying subscribers;
        // bindings are applied once at the end, in the final function
        transactionList().push(transaction);
    },
    function (processedCount) { // Progress function
        var percent = Math.ceil(processedCount * 100 / data.length);
        // Show progress to the user.
        ShowMessage(percent);
    },
    function () { // Final function
        // This function will fire when all data are mapped. Do some work (i.e. apply bindings).
    });
Also, you can try an alternative mapping library: knockout.wrap. It should be faster than the mapping plugin.
I have chosen the second option.
Mapping is not magic. In most cases this simple recursive function can be sufficient:
function MyMapJS(a_what, a_path)
{
    a_path = a_path || [];
    var result;

    if (a_what != null && a_what.constructor == Object)
    {
        result = {};
        for (var key in a_what)
            result[key] = MyMapJS(a_what[key], a_path.concat(key));
        return result;
    }

    if (a_what != null && a_what.constructor == Array)
    {
        result = ko.observableArray();
        for (var index in a_what)
            result.push(MyMapJS(a_what[index], a_path.concat(index)));
        return result;
    }

    // Write your condition here:
    switch (a_path[a_path.length - 1])
    {
        case 'mapThisProperty':
        case 'andAlsoThisOne':
            result = ko.observable(a_what);
            break;
        default:
            result = a_what;
            break;
    }
    return result;
}
The code above makes observables from the mapThisProperty and andAlsoThisOne properties at any level of the object hierarchy; other properties are left constant. You can express more complex conditions using a_path.length for the level (depth) the value is at, or using more elements of a_path. For example:
if (a_path.length >= 2
    && a_path[a_path.length - 1] == 'mapThisProperty'
    && a_path[a_path.length - 2] == 'insideThisProperty')
    result = ko.observable(a_what);
You can use typeof a_what in the condition, e.g. to make all strings observable (see the sketch after this list).
You can ignore some properties, and insert new ones at certain levels.
Or, you can even omit a_path. Etc.
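For instance, a minimal sketch of the typeof variant, inserted before the switch in MyMapJS:
// make every string observable, regardless of its key or depth
if (typeof a_what === 'string')
    return ko.observable(a_what);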
The advantages are:
Customizable (more easily than knockout.mapping).
Short enough to copy-paste it and write individual mappings for different objects if needed.
Smaller code; knockout.mapping-latest.js is not included in your page.
Should be faster as it does only what is absolutely necessary.
I'm using LESS CSS (more exactly less.js), which seems to exploit localStorage under the hood. I had never seen such an error before while running my app locally, but now I get "Persistent storage maximum size reached" on every page display, just above the link to the unique .less file of my app.
This only happens with Firefox 12.0 so far.
Is there any way to solve this?
P.S.: mainly inspired by Calculating usage of localStorage space, this is what I ended up doing (this is based on Prototype and depends on a custom trivial Logger class, but it should be easily adapted to your context):
"use strict";
var LocalStorageChecker = Class.create({
testDummyKey: "__DUMMY_DATA_KEY__",
maxIterations: 100,
logger: new Logger("LocalStorageChecker"),
analyzeStorage: function() {
var result = false;
if (Modernizr.localstorage && this._isLimitReached()) {
this._clear();
}
return result;
},
_isLimitReached: function() {
var localStorage = window.localStorage;
var count = 0;
var limitIsReached = false;
do {
try {
var previousEntry = localStorage.getItem(this.testDummyKey);
var entry = (previousEntry == null ? "" : previousEntry) + "m";
localStorage.setItem(this.testDummyKey, entry);
}
catch(e) {
this.logger.debug("Limit exceeded after " + count + " iteration(s)");
limitIsReached = true;
}
}
while(!limitIsReached && count++ < this.maxIterations);
localStorage.removeItem(this.testDummyKey);
return limitIsReached;
},
_clear: function() {
try {
var localStorage = window.localStorage;
localStorage.clear();
this.logger.debug("Storage clear successfully performed");
}
catch(e) {
this.logger.error("An error occurred during storage clear: ");
this.logger.error(e);
}
}
});
document.observe("dom:loaded",function() {
var checker = new LocalStorageChecker();
checker.analyzeStorage();
});
P.P.S.: I didn't measure the performance impact on the UI yet, but a decorator could be created to perform the storage test only every X minutes (with the last timestamp of execution kept in local storage, for instance).
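For illustration, a minimal sketch of such a decorator, assuming the same Prototype setup as above (the subclass name, storage key and 10-minute interval are arbitrary examples):
var ThrottledStorageChecker = Class.create(LocalStorageChecker, {
    lastRunKey: "__STORAGE_CHECK_LAST_RUN__",
    intervalMs: 10 * 60 * 1000, // run the check at most every 10 minutes

    analyzeStorage: function($super) {
        var lastRun = parseInt(window.localStorage.getItem(this.lastRunKey), 10) || 0;
        if (new Date().getTime() - lastRun < this.intervalMs) {
            return false; // checked recently, skip this time
        }
        window.localStorage.setItem(this.lastRunKey, String(new Date().getTime()));
        return $super();
    }
});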
Here is a good resource for the error you are running into:
http://www.sitepoint.com/building-web-pages-with-local-storage/#fbid=5fFWRXrnKjZ
It gives some insight that localStorage only has so much room and you can max it out in each browser. Look into removing some data from localStorage to resolve your problem.
Less.js persistently caches content that is #imported. You can use this script to clear content that is cached. Using the script below you can call the function destroyLessCache('/path/to/css/') and it will clear your localStorage of css files that have been cached.
function destroyLessCache(pathToCss) { // e.g. '/css/' or '/stylesheets/'
    if (!window.localStorage || !less || less.env !== 'development') {
        return;
    }
    var host = window.location.host;
    var protocol = window.location.protocol;
    var keyPrefix = protocol + '//' + host + pathToCss;

    for (var key in window.localStorage) {
        if (key.indexOf(keyPrefix) === 0) {
            delete window.localStorage[key];
        }
    }
}