I'm building a chat application using OpenFire server and JSJaC client library.
The page loads from http://staging.mysite.com and XMPP runs on http://xmpp.mysite.com. As you can see, they both share the same parent domain, so I use the following code on page load:
function OnPageLoad() {
    document.domain = "mysite.com";
    DoLogin();
}
Anyway, it throws an exception saying that I'm violating the security policy. Why doesn't document.domain work? Should it work, or is it there just for "beauty"? If it can't help here, what can be done in this specific situation?
I don't have access to the XMLHttpRequest object inside the library and do not control it.
Anyway, I had to dig a little deeper into the JSJaC library and make some injections into the code. But first I tried a workaround: I added the following headers to the response:
Access-Control-Allow-Methods: GET, POST, OPTIONS
Access-Control-Allow-Credentials: true
Access-Control-Allow-Origin: *
Access-Control-Allow-Headers: Content-Type, *
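Just to illustrate what "adding the headers" looks like, here is a minimal sketch of attaching them from a Node/Express layer in front of the HTTP-bind endpoint; the Express app and the /http-bind path are purely illustrative assumptions, since in my case the headers were added on the server side directly.
// Purely illustrative: the same response headers attached from a
// Node/Express layer in front of the HTTP-bind endpoint (the app and the
// /http-bind path are assumptions; my headers were added server-side).
var express = require('express');
var app = express();

app.use('/http-bind', function (req, res, next) {
    res.setHeader('Access-Control-Allow-Methods', 'GET, POST, OPTIONS');
    res.setHeader('Access-Control-Allow-Credentials', 'true');
    res.setHeader('Access-Control-Allow-Origin', '*');
    res.setHeader('Access-Control-Allow-Headers', 'Content-Type, *');
    // Note: browsers will not send credentials when the allowed origin is "*"
    if (req.method === 'OPTIONS') return res.sendStatus(204); // answer preflight
    next();
});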
Generally this allowed cross-domain requests with a native XHR. However, it only worked in modern browsers. For instance, it didn't work in IE8, and every version of Opera simply rejected these headers.
Then I moved to a Flash-based solution: I used flXHR and modified jsjac.uncompressed.js like this.
var _xhrpf = 1; // counter for giving a unique id to each flash xhr object

XmlHttp.create = function () {
    // Original implementation, replaced by the flXHR version below:
    //
    // try {
    //     if (window.XMLHttpRequest) {
    //         var req = new XMLHttpRequest();
    //
    //         // some versions of Moz do not support the readyState property
    //         // and the onreadystate event so we patch it!
    //         if (req.readyState == null) {
    //             req.readyState = 1;
    //             req.addEventListener("load", function () {
    //                 req.readyState = 4;
    //                 if (typeof req.onreadystatechange == "function")
    //                     req.onreadystatechange();
    //             }, false);
    //         }
    //
    //         return req;
    //     }
    //     if (window.ActiveXObject) {
    //         return new ActiveXObject(XmlHttp.getPrefix() + ".XmlHttp");
    //     }
    // }
    // catch (ex) {}
    // // fell through
    // throw new Error("Your browser does not support XmlHttp objects");

    var AsyncClient = new flensed.flXHR({
        "autoUpdatePlayer": true,
        "instanceId": "myproxy" + _xhrpf.toString(),
        // This is important because the library uses the response xml
        // of the object to manipulate the data
        "xmlResponseText": true,
        "onreadystatechange": function () { }
    });
    _xhrpf++; // next instance gets a new id
    return AsyncClient;
};
Then I just added a crossdomain.xml at the root of the target domain. Now it works perfectly as long as the browser has the Flash plugin. Next I want a detection mechanism: if there is no Flash plugin, fall back to a native XHR and hope that the browser supports the cross-domain headers (see the sketch below).
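Something like this rough sketch is what I have in mind for the fallback; the hasFlash() helper is my own untested assumption about how the detection could be done.
// Rough sketch of the planned fallback (untested): use flXHR only when the
// Flash plugin is available, otherwise fall back to a native XMLHttpRequest
// and rely on the CORS response headers above.
function hasFlash() {
    try {
        if (navigator.mimeTypes &&
            navigator.mimeTypes["application/x-shockwave-flash"] &&
            navigator.mimeTypes["application/x-shockwave-flash"].enabledPlugin)
            return true;
        if (window.ActiveXObject &&
            new ActiveXObject("ShockwaveFlash.ShockwaveFlash"))
            return true;
    } catch (ex) {}
    return false;
}

XmlHttp.create = function () {
    if (hasFlash()) {
        var AsyncClient = new flensed.flXHR({
            "autoUpdatePlayer": true,
            "instanceId": "myproxy" + _xhrpf.toString(),
            "xmlResponseText": true,
            "onreadystatechange": function () { }
        });
        _xhrpf++;
        return AsyncClient;
    }
    // No Flash: hope the browser honours the CORS headers
    if (window.XMLHttpRequest) return new XMLHttpRequest();
    if (window.ActiveXObject) return new ActiveXObject(XmlHttp.getPrefix() + ".XmlHttp");
    throw new Error("Your browser does not support XmlHttp objects");
};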
So I'm using express and express-http-proxy as a proxy to an API web server that requires basic authentication. Then, in my app, I'll be issuing Ajax calls to these APIs. After some effort I got this working, but I'm sure there's a better way out there, hence this post.
Initially I set up the express proxy as follows:
var express = require('express'),
    app = express(),
    proxy = require('express-http-proxy');

app.use('/apis', proxy("https://myserver", {
    forwardPath: function(req, res) {
        return "/apis" + require('url').parse(req.url).path;
    }
}));
When calling a URL directly in the browser (not via Ajax), e.g. https://myserver/apis/myapi.ashx, I would see the authentication dialog asking for my credentials, and I could authenticate and see the result.
However, when accessing the same URL via an Ajax call in my app, I was not getting the popup. Why the difference in behavior?
So I decided I needed to add my own basic authentication middleware using request and basic-auth as follows:
var express = require('express'),
    app = express(),
    proxy = require('express-http-proxy'),
    request = require('request'),
    basicAuth = require('basic-auth');

var myAuth = function (req, res, next) {
    function unauthorized(res) {
        res.set('WWW-Authenticate', 'Basic realm="Rimes"');
        return res.sendStatus(401);
    }

    var user = basicAuth(req);
    if (!user || !user.name || !user.pass) {
        return unauthorized(res);
    }

    // Verify the credentials against the API before letting the request through
    var connectUrl = 'https://' + user.name + ':' + user.pass + '@myserver/apis/connect.ashx';
    request.get(connectUrl, function(error, response, body) {
        if (!error && response.statusCode == 200) {
            return next();
        } else {
            return unauthorized(res);
        }
    });
};

app.use('/apis', myAuth, proxy("https://myserver", {
    forwardPath: function(req, res) {
        return "/apis" + require('url').parse(req.url).path;
    }
}));
This worked fine, showing me the authentication popup during the Ajax call.
The obvious disadvantage here is credential verification on every API request, although there may be a way to cache valid credentials (a rough sketch of that idea follows below). But in its defence, this is only for the development environment.
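A minimal, untested sketch of that caching idea, wrapping the myAuth middleware above and remembering Authorization headers that have already passed the connect.ashx check; the in-memory cache object is an assumption on my part.
// Hypothetical: remember Authorization headers that already passed the
// connect.ashx check so we do not re-verify on every API request.
var verifiedAuthHeaders = {};

var myCachedAuth = function (req, res, next) {
    var authHeader = req.headers['authorization'];
    if (authHeader && verifiedAuthHeaders[authHeader]) {
        return next(); // already verified earlier, skip the round trip
    }
    myAuth(req, res, function () {
        // myAuth only calls its next() on a 200 from connect.ashx
        verifiedAuthHeaders[authHeader] = true;
        next();
    });
};

app.use('/apis', myCachedAuth, proxy("https://myserver", {
    forwardPath: function (req, res) {
        return "/apis" + require('url').parse(req.url).path;
    }
}));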
So is there a better way of doing this? Would a different middleware package do a better job?
I've got a socket.io server/client pair working well together; however, I want to start writing events for when the server is offline, either on page load or during normal operation.
I'm including the remote socket.io code in my header:
<script src="<?=NODE_HOST?>/socket.io/socket.io.js"></script>
<script>
var nodeHost = '<?=NODE_HOST?>';
</script>
And in my client controller I have
if (typeof io != 'undefined')
    this.socket = io.connect(this.settings.server);
else
    this.handleDisconnect();
This is the function I use to attempt to reconnect over and over if a) a socket disconnect occurs during normal operation, or b) the server is down on page load:
botController.prototype.handleDisconnect = function() {
    $.getScript(nodeHost + "/socket.io/socket.io.js").done(function(script, textStatus) {
        bot.control.socket = io.connect(bot.control.settings.server);
    }).fail(function(jqxhr, settings, exception) {
        // Server still down (or unreachable): try again in 5 seconds
        setTimeout(function() {
            bot.control.handleDisconnect();
        }, 5000);
    });
};
Am I going about this the correct way?
The main issue I have right now (which made me create this question) is that my code errors on page load when the server is down, because I have calls like:
socket.on(...
when socket doesn't yet exist. I could wrap those in a function and call it when I detect that the global socket object exists after a successful reconnection? Would it matter if that function containing the socket.on calls is called multiple times (if the server goes down more than once during operation)?
OK, I managed to come up with this solution, which seems to work well, using yepnope (which I already had via Modernizr; it handles the cross-domain issue for me too).
<script src="<?=NODE_HOST?>/socket.io/socket.io.js"></script>
<script>
var nodeHost = '<?=NODE_HOST?>';
</script>
// Attempt to connect to the nodejs server
botController.prototype.start = function() {
    // Is our nodejs server up yet?
    if (typeof io != 'undefined') {
        this.socket = io.connect(this.settings.server);
        this.startSocketEvents();
    } else {
        this.handleDisconnect();
    }
};

// Our connection to the server has been lost, we need to keep
// trying to get it back until we have it!
botController.prototype.handleDisconnect = function(destroySocketObject) {
    if (destroySocketObject === undefined)
        destroySocketObject = true;

    // Destroy any cached io object before requesting the script again
    if (destroySocketObject)
        io = undefined;

    yepnope.injectJs(nodeHost + "/socket.io/socket.io.js",
        function(result) {
            // Did it actually download the script OK?
            if (typeof io != 'undefined') {
                bot.control.socket = io.connect(bot.control.settings.server);
                bot.control.startSocketEvents();
            } else {
                // Still down: try again in 5 seconds without wiping io again
                setTimeout(function() {
                    bot.control.handleDisconnect(false);
                }, 5000);
            }
        }
    );
};
where the startSocketEvents() function contains all of my socket.on event handlers (a rough sketch follows below).
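For completeness, here is a rough sketch of the shape of startSocketEvents(); the event handlers are placeholders, and the removeAllListeners() guard is my assumption about the socket.io client's emitter API, added so that calling the function after every reconnect doesn't stack duplicate handlers.
// Rough sketch: the real handlers live here; these are placeholders.
botController.prototype.startSocketEvents = function() {
    var socket = this.socket;

    // Guard against registering the same handlers twice when this is called
    // again after a reconnect (assumes the socket.io client emitter exposes
    // removeAllListeners, which it does in the versions I have used).
    socket.removeAllListeners('connect');
    socket.removeAllListeners('disconnect');

    socket.on('connect', function() {
        console.log('connected to ' + bot.control.settings.server);
    });

    socket.on('disconnect', function() {
        // Drop back into the retry loop if the server goes away mid-run
        bot.control.handleDisconnect();
    });
};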
I am using the Add-on Builder and I need to receive binary data (an image). I would like to do this using the request module, but as you can see from the documentation:
https://addons.mozilla.org/en-US/developers/docs/sdk/latest/packages/addon-kit/docs/request.html
there are only text and json properties, and raw is absent.
How should I receive binary data in the add-on script?
You cannot do this using the request module; you will have to use the regular XMLHttpRequest via the chrome authority. Something like this should work:
var {Cc, Ci} = require("chrome");
var request = Cc["#mozilla.org/xmlextras/xmlhttprequest;1"]
.createInstance(Ci.nsIJSXMLHttpRequest);
request.open("GET", "...");
request.onload = function()
{
onUnload.unload();
var arrayBuffer = request.response;
if (arrayBuffer)
{
var byteArray = new Uint8Array(arrayBuffer);
...
}
};
request.onerror = function()
{
onUnload.unload();
}
request.send(null);
var onUnload = {
unload: function()
{
// Make sure to abort the request if the extension is disabled
try
{
request.abort();
}
catch (e) {}
}
};
require("unload").ensure(onUnload);
The mechanism to ensure that the request is aborted if your extension is suddenly disabled is rather awkward; that's the main reason the request module exists rather than simply giving you XMLHttpRequest. Note that it is important to call onUnload.unload() once the request finishes, otherwise the Add-on SDK will keep it in the list of methods to be called on unload (a memory leak). See the documentation of the unload module.
I'm having the classic IE-caches-everything-in-Ajax issue. I have a bit of data that refreshes every minute.
Having researched the forums, I find the solutions boil down to these options (http://stackoverflow.com/questions/5997857/grails-best-way-to-send-cache-headers-with-every-ajax-call):
add a cache-busting token to the query string (like ?time=[timestamp])
send an HTTP response header that specifically forbids IE from caching the request
use an ajax POST instead of a GET
Unfortunately the obvious query-string or "cache: false" setting will not work for me, as the updated data file is hosted on Akamai NetStorage and cannot accept query strings. I don't want to use POST either.
What I want to do is try sending an HTTP response header that specifically forbids IE from caching the request, or find another cache-busting solution if anyone knows one.
Does anyone know how this might be done? Any help would be much appreciated.
Here is my code:
(function ($) {
    var timer = 0;

    var Browser = {
        Version: function () {
            var version = 999;
            if (navigator.appVersion.indexOf("MSIE") != -1)
                version = parseFloat(navigator.appVersion.split("MSIE")[1]);
            return version;
        }
    };

    $.fn.serviceboard = function (options) {
        var settings = { "refresh": 60 };

        return this.each(function () {
            if (options) $.extend(settings, options);
            var obj = $(this);

            GetLatestData(obj, settings.refresh);
            if (settings.refresh > 9 && Browser.Version() > 6) {
                timer = setInterval(function () { GetLatestData(obj, settings.refresh); }, settings.refresh * 1000);
            }
        });
    };

    function GetLatestData(obj, refresh) {
        var _url = "/path/updated-data.htm";
        $.ajax({
            url: _url,
            dataType: "html",
            complete: function () {},
            success: function (data) {
                obj.empty().append(data);
            }
        });
    }
})(jQuery);
Add a random number to the GET request so that IE will not identify it as "the same" in its cache. This number could be a timestamp:
new Date().getTime()
EDIT: perhaps make the requested URL:
var _url = "/path/updated-data.htm?" + new Date().getTime()
This shouldn't cause any errors, I believe.
EDIT 2: Sorry, I just read your post a bit better and saw that this is not an option for you.
You say it "is hosted on Akamai and cannot accept querystrings", but why not?
I've never heard of a page that won't accept an additional "?blabla", even when it's HTML.
This was driving me crazy. I tried many cache-busting techniques and cache headers; so many of them either did not work or were wild goose chases. The only solution I found that tested correctly was setting the response header:
Pragma: no-cache
I hope it saves others from IE headaches.
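In case it helps anyone who controls the origin server (rather than Akamai NetStorage), here is a minimal sketch of sending that header together with the usual companions from an Express app; the Express stack and the route are just assumptions for illustration.
// Illustrative only: attach cache-forbidding headers to everything under
// /path (including updated-data.htm) before the static handler serves it,
// so IE revalidates the fragment on every Ajax call.
var express = require('express');
var app = express();

app.use('/path', function (req, res, next) {
    res.set({
        'Cache-Control': 'no-cache, no-store, must-revalidate',
        'Pragma': 'no-cache',
        'Expires': '0'
    });
    next();
});
app.use('/path', express.static(__dirname + '/path'));

app.listen(3000);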
I am wondering: what is the best way to stop duplicate submissions when using jQuery and Ajax?
I have come up with two possible ways, but I'm not sure if these are the only two.
On Ajax start, disable all buttons until the request is done. There are two problems I see with this, though: I use jQuery modal dialogs, so I don't know how easy it would be to disable those buttons, as I'm not sure whether they have IDs. Second, if the request hangs, the user really has no way to try again, since all the buttons are disabled.
I am looking into something called AjaxQueue; at this time I have no clue whether it is what I need or how it works, since the site where the plugin lives is apparently down for maintenance.
http://docs.jquery.com/AjaxQueue
Edit
I think this is a spin-off of what I was looking at:
http://www.protofunc.com/scripts/jquery/ajaxManager/
The only problem I see with this ajaxManager is that I think I would have to change all of my $.post, $.get and $.ajax calls to its own type.
But what happens if I need a special parameter from $.ajax? Or the fact that I like using .post and .get?
Edit 2
I think it can take all the $.ajax options. I am still looking into it. However, what I am unsure about now is whether I can use the same constructor for all requests that will use the same options.
First you have to construct/configure a new ajaxManager:
// create an ajaxmanager named someAjaxProfileName
var someManagedAjax = $.manageAjax.create('someAjaxProfileName', {
    queue: true,
    cacheResponse: true
});
Or do I have to create the above every single time?
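From my reading of the plugin, you create the manager once and then reuse it by adding requests to it, along these lines; the .add() call reflects my understanding of the ajaxManager API and the URL is made up, so treat this as an unverified sketch.
// My reading of the ajaxManager API (unverified): the manager created above
// is reused for every request; .add() takes the usual $.ajax-style options.
someManagedAjax.add({
    url: '/some/endpoint',   // made-up URL
    type: 'POST',
    data: { id: 42 },
    success: function (response) {
        console.log('done', response);
    }
});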
How about setting a flag when the user clicks the button? You clear the flag only when the AJAX request completes (in complete, which is called after both the success and error callbacks), and you send an AJAX request only if the flag is not set.
Related to AJAX queuing, there is a plugin called jQuery Message Queuing that is very good. I've used it myself.
var requestSent = false;

jQuery("#buttonID").click(function() {
    if (!requestSent) {
        requestSent = true;
        jQuery.ajax({
            url: "http://example.com",
            ....,
            timeout: timeoutValue,
            complete: function() {
                ...
                requestSent = false;
            }
        });
    }
});
You can set a timeout value for long-running requests (the value is in milliseconds) if you think your request has a chance of hanging. If a timeout occurs, the error callback is called, after which the complete callback gets called.
You could store an active request in a variable, then clear it when there's a response.
var request; // Stores the XMLHttpRequest object

$('#myButton').click(function() {
    if (!request) { // Only send the AJAX request if there's no current request
        request = $.ajax({ // Assign the XMLHttpRequest object to the variable
            url: ...,
            ...,
            complete: function() { request = null; } // Clear variable after response
        });
    }
});
EDIT:
One nice thing about this is that you could cancel long-running requests using abort():
var request; // Stores the XMLHttpRequest object
var timeout; // Stores timeout reference for long-running requests

$('#myButton').click(function() {
    if (!request) { // Only send the AJAX request if there's no current request
        request = $.ajax({ // Assign the XMLHttpRequest object to the variable
            url: ...,
            ...,
            complete: function() { timeout = request = null; } // Clear variables after response
        });
        timeout = setTimeout(function() {
            if (request) request.abort(); // abort the request
        }, 10000); // after 10 seconds
    }
});
$.xhrPool = {};
$.xhrPool['hash'] = [];

$.ajaxSetup({
    beforeSend: function(jqXHR, settings) {
        var hash = settings.url + settings.data;
        if ($.xhrPool['hash'].indexOf(hash) === -1) {
            // Remember this request so its hash can be removed in complete
            jqXHR.url = settings.url;
            jqXHR.data = settings.data;
            $.xhrPool['hash'].push(hash);
        } else {
            console.log('Duplicate request cancelled!');
            jqXHR.abort();
        }
    },
    complete: function(jqXHR, settings) {
        var hash = jqXHR.url + jqXHR.data;
        var index = $.xhrPool['hash'].indexOf(hash);
        if (index > -1) {
            $.xhrPool['hash'].splice(index, 1);
        }
    }
});
});