How do I run privileged JavaScript code from Selenium? - firefox

I'm trying to change Firefox's user agent string (and oscpu) without having to restart it or edit profile settings. Some Firefox add-ons do that, for example User Agent Overrider. Here's how it does it:
let UAManager = (function() {
    Cu.import('resource://gre/modules/UserAgentOverrides.jsm');
    // Original UA selector function, a method of UserAgentOverrides.
    // Keep it to revert to the default.
    let orignalGetOverrideForURI = UserAgentOverrides.getOverrideForURI;
    let revert = function() {
        UserAgentOverrides.getOverrideForURI = orignalGetOverrideForURI;
    };
    let change = function(uastring) {
        UserAgentOverrides.getOverrideForURI = function() { return uastring; };
    };
    let exports = {
        revert: revert,
        change: change,
    };
    return exports;
})();
where Cu=Components.utils;.
So, here's what I've tried to do:
FirefoxDriver fd = new FirefoxDriver();
fd.ExecuteScript(@"Cu = Components.utils;
Cu.import('resource://gre/modules/UserAgentOverrides.jsm');
UserAgentOverrides.getOverrideForURI = function() { return ""my user agent""; };");
On this, I'm getting a permission denied error. Is there any way that I can execute this script with add-on privileges in Selenium?

Related

Cypress switch to latest window

After clicking a button, my app opens a new window.
I need to switch to the latest window.
Currently I am using this approach (I've added a command to the Cypress commands.js file) to open a URL in a new window, but I don't have a clue how to modify it to solve my current issue.
Cypress.Commands.add('openWindow', (url, features) => {
  const w = Cypress.config('viewportWidth')
  const h = Cypress.config('viewportHeight')
  if (!features) {
    features = `width=${w}, height=${h}`
  }
  console.log('openWindow %s "%s"', url, features)
  return new Promise(resolve => {
    if (window.top.aut) {
      console.log('window exists already')
      window.top.aut.close()
    }
    // https://developer.mozilla.org/en-US/docs/Web/API/Window/open
    window.top.aut = window.top.open(url, 'aut', features)
    // give the page enough time to load and set "document.domain = localhost"
    // so we can access it
    setTimeout(() => {
      cy.state('document', window.top.aut.document);
      cy.state('window', window.top.aut);
      resolve()
    }, 500)
  })
})
Is there any workaround to switch between windows in Cypress?
I've just submitted a pull request to the cypress-open-child-window GitHub repo which adds two extra commands to help with this issue: switchWindow() and closeWindow().
See: https://github.com/bahmutov/cypress-open-child-window/pull/4/commits/fed977edb03b8b67f934b33617867572dec0e994
In case it gets deleted, here's my forked branch:
https://github.com/jakedowns/cypress-open-child-window/tree/add-switchWindow-and-closeWindow
The gist of it is covered in this Gist: https://gist.github.com/jakedowns/32b75add88736fcd99c7202a98354384
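For reference, switchWindow follows the same cy.state() trick used in the openWindow command above; a rough sketch (an approximation, not the exact code from the PR or Gist) looks like this:
Cypress.Commands.add('switchWindow', () => {
  // Re-point Cypress at the child window stored in window.top.aut by openWindow
  return new Promise(resolve => {
    setTimeout(() => {
      cy.state('document', window.top.aut.document)
      cy.state('window', window.top.aut)
      resolve()
    }, 500)
  })
})
closeWindow essentially does the reverse: it closes window.top.aut and restores cy.state() to the original document and window.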
UPDATE
Here are some additional helpers I use:
https://github.com/jakedowns/CypressHelpers

Publisher initiates twice, one proper and one shown only to self

For some reason my publisher initiates twice when I create a new session. However, the second one isn't in the div where it's supposed to be. Also, if someone else connects to the session they get the same result, so the second one only shows for yourself.
I'm trying to find out why it's appearing. Here's some snippets:
var getApiAndToken, initializeSession;

getApiAndToken = function() {
var apiKey, customer_id, sessionId, token;
if (gon) {
apiKey = gon.api_key;
}
if (gon) {
sessionId = gon.session_id;
}
if (gon) {
token = gon.token;
}
if (gon) {
customer_id = gon.customer_id;
}
initializeSession();
};

initializeSession = function() {
var publishStream, session;
session = OT.initSession(apiKey, sessionId);
session.connect(token, function(error) {
if (!error) {
session.publish(publishStream(true));
layout();
} else {
console.log('There was an error connecting to the session', error.code, error.message);
}
});
$('#audioInputDevices').change(function() {
publishStream(false);
});
$('#videoInputDevices').change(function() {
publishStream(false);
});
return publishStream = function(loadDevices) {
var publisherOptions;
publisherOptions = {
audioSource: $('#audioInputDevices').val() || 0,
videoSource: $('#videoInputDevices').val() || 0
};
OT.initPublisher('publisherContainer', publisherOptions, function(error) {
if (error) {
console.log(error);
} else {
if (loadDevices) {
OT.getDevices(function(error, devices) {
var audioInputDevices, videoInputDevices;
audioInputDevices = devices.filter(function(element) {
return element.kind === 'audioInput';
});
videoInputDevices = devices.filter(function(element) {
return element.kind === 'videoInput';
});
$.each(audioInputDevices, function() {
$('#audioInputDevices').append($('<option></option>').val(this['deviceId']).html(this['label']));
});
$.each(videoInputDevices, function() {
$('#videoInputDevices').append($('<option></option>').val(this['deviceId']).html(this['label']));
});
});
}
}
});
};
};
It also asks me for device access twice.
I see two general problems in the code you provided:
The variables apiKey, sessionId, and token inside the getApiAndToken() function are scoped only to that function, and are therefore not visible inside initializeSession() where you try to use them.
The goal of the publishStream() function is not clear and its use is not consistent. Each time you invoke it (once when the session connects and each time a dropdown value changes) it creates a new Publisher. It also does not return anything, so in the expression session.publish(publishStream(true)) you are effectively just calling session.publish(), which results in a new Publisher being appended to the end of the page because no element ID is specified. That last part is the reason why you said it's not in the <div> where it's supposed to be.
It sounds like what you want is a Publisher with dropdowns to select which devices it's using. I created an example of this for you: https://jsbin.com/sujufog/11/edit?html,js,output.
Briefly, this is how it works. It first initializes a dummy publisher so that the browser can prompt the user for permission to use the camera and microphone; this is necessary for reading the available devices. Note that if the page is served over HTTPS, browsers such as Chrome will remember the permissions allowed on that domain earlier and will not prompt the user again, so on Chrome the dummy publisher doesn't cause any prompt to be shown for a user who has already run the application. Next, the dummy publisher is thrown away, and OT.getDevices() is called to read the available devices and populate the dropdown menus. While this is happening, the session will also have connected, and on every change of the selection in either dropdown the publish() function is called. In that function, if a previous publisher exists it is first removed, and then a new publisher is created with the currently selected devices. That new publisher is then passed into session.publish().
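A condensed sketch of that publish() function (element IDs follow the code in the question; the linked jsbin may differ in detail):
// "session" is the connected OT session from OT.initSession(apiKey, sessionId)
var publisher = null;
function publish() {
  // Remove any previous publisher before creating a new one with the
  // currently selected devices.
  if (publisher) {
    session.unpublish(publisher);
    publisher.destroy();
  }
  publisher = OT.initPublisher('publisherContainer', {
    audioSource: $('#audioInputDevices').val(),
    videoSource: $('#videoInputDevices').val()
  });
  // Pass the Publisher object explicitly so it stays inside #publisherContainer.
  session.publish(publisher);
}
// Called once the session has connected and whenever a dropdown changes.
$('#audioInputDevices, #videoInputDevices').change(publish);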

Firefox file downloading process structure figure

For my project, I need to study something like a "Firefox/Gecko file downloading structure overview" (if one exists), or a "file downloading process flow chart of Firefox/Gecko". I couldn't find anything like that on the Internet so far. Is there any info about it? Thanks a lot.
PS: It must include the code paths for all file downloads through the Firefox browser, i.e. the network connection APIs and file handling APIs involved, something like the "HttpOpenRequest" or "DoFileDownload" APIs (if any).
What would the Firefox downloading process API paths be? Is there any figure or chart?
Please help me...
You are probably going to need to look at the code to get the information you desire. You will need to build the flowchart yourself.
There are a couple of different ways downloading is done in the code.
If you are talking about a Firefox add-on performing a download, then it is probably being done using Downloads.jsm (although there is an older method for doing so). The source code for that JavaScript module is at resource://gre/modules/Downloads.jsm (this URL is only valid within Firefox). There appear to be several relevant files, all located in the jsloader\resource\gre\modules directory within the ZIP-format file omni.ja in the root of the Firefox distribution. You can copy that file, rename it to omni.zip, and access it as a normal .zip file.
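As a rough illustration (not code from the Firefox source; the URL and target path are made up), privileged add-on code can use that module like this:
const Cu = Components.utils;
Cu.import("resource://gre/modules/Downloads.jsm");
Cu.import("resource://gre/modules/osfile.jsm");
// Download a file to the system temp directory and log when it finishes.
let target = OS.Path.join(OS.Constants.Path.tmpDir, "example.bin");
Downloads.fetch("http://example.com/file.bin", target).then(
  () => console.log("download finished: " + target),
  Cu.reportError
);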
If you want to know how Firefox saves a link when the user requests it: it is defined in the context menu with the oncommand value gContextMenu.saveLink();. saveLink() is defined in chrome://browser/content/nsContextMenu.js. It does some housekeeping and then calls saveHelper(), which is in the same file.
The saveHelper() code is the following:
// Helper function to wait for appropriate MIME-type headers and
// then prompt the user with a file picker
saveHelper: function(linkURL, linkText, dialogTitle, bypassCache, doc) {
// canonical def in nsURILoader.h
const NS_ERROR_SAVE_LINK_AS_TIMEOUT = 0x805d0020;
// an object to proxy the data through to
// nsIExternalHelperAppService.doContent, which will wait for the
// appropriate MIME-type headers and then prompt the user with a
// file picker
function saveAsListener() {}
saveAsListener.prototype = {
extListener: null,
onStartRequest: function saveLinkAs_onStartRequest(aRequest, aContext) {
// if the timer fired, the error status will have been caused by that,
// and we'll be restarting in onStopRequest, so no reason to notify
// the user
if (aRequest.status == NS_ERROR_SAVE_LINK_AS_TIMEOUT)
return;
timer.cancel();
// some other error occurred; notify the user...
if (!Components.isSuccessCode(aRequest.status)) {
try {
const sbs = Cc["@mozilla.org/intl/stringbundle;1"].
getService(Ci.nsIStringBundleService);
const bundle = sbs.createBundle(
"chrome://mozapps/locale/downloads/downloads.properties");
const title = bundle.GetStringFromName("downloadErrorAlertTitle");
const msg = bundle.GetStringFromName("downloadErrorGeneric");
const promptSvc = Cc["@mozilla.org/embedcomp/prompt-service;1"].
getService(Ci.nsIPromptService);
promptSvc.alert(doc.defaultView, title, msg);
} catch (ex) {}
return;
}
var extHelperAppSvc =
Cc["@mozilla.org/uriloader/external-helper-app-service;1"].
getService(Ci.nsIExternalHelperAppService);
var channel = aRequest.QueryInterface(Ci.nsIChannel);
this.extListener =
extHelperAppSvc.doContent(channel.contentType, aRequest,
doc.defaultView, true);
this.extListener.onStartRequest(aRequest, aContext);
},
onStopRequest: function saveLinkAs_onStopRequest(aRequest, aContext,
aStatusCode) {
if (aStatusCode == NS_ERROR_SAVE_LINK_AS_TIMEOUT) {
// do it the old fashioned way, which will pick the best filename
// it can without waiting.
saveURL(linkURL, linkText, dialogTitle, bypassCache, false,
doc.documentURIObject, doc);
}
if (this.extListener)
this.extListener.onStopRequest(aRequest, aContext, aStatusCode);
},
onDataAvailable: function saveLinkAs_onDataAvailable(aRequest, aContext,
aInputStream,
aOffset, aCount) {
this.extListener.onDataAvailable(aRequest, aContext, aInputStream,
aOffset, aCount);
}
}
function callbacks() {}
callbacks.prototype = {
getInterface: function sLA_callbacks_getInterface(aIID) {
if (aIID.equals(Ci.nsIAuthPrompt) || aIID.equals(Ci.nsIAuthPrompt2)) {
// If the channel demands authentication prompt, we must cancel it
// because the save-as-timer would expire and cancel the channel
// before we get credentials from user. Both authentication dialog
// and save as dialog would appear on the screen as we fall back to
// the old fashioned way after the timeout.
timer.cancel();
channel.cancel(NS_ERROR_SAVE_LINK_AS_TIMEOUT);
}
throw Cr.NS_ERROR_NO_INTERFACE;
}
}
// if we don't have the headers after a short time, the user
// won't have received any feedback from their click. that's bad. so
// we give up waiting for the filename.
function timerCallback() {}
timerCallback.prototype = {
notify: function sLA_timer_notify(aTimer) {
channel.cancel(NS_ERROR_SAVE_LINK_AS_TIMEOUT);
return;
}
}
// set up a channel to do the saving
var ioService = Cc["@mozilla.org/network/io-service;1"].
getService(Ci.nsIIOService);
var channel = ioService.newChannelFromURI(makeURI(linkURL));
if (channel instanceof Ci.nsIPrivateBrowsingChannel) {
let docIsPrivate = PrivateBrowsingUtils.isWindowPrivate(doc.defaultView);
channel.setPrivate(docIsPrivate);
}
channel.notificationCallbacks = new callbacks();
let flags = Ci.nsIChannel.LOAD_CALL_CONTENT_SNIFFERS;
if (bypassCache)
flags |= Ci.nsIRequest.LOAD_BYPASS_CACHE;
if (channel instanceof Ci.nsICachingChannel)
flags |= Ci.nsICachingChannel.LOAD_BYPASS_LOCAL_CACHE_IF_BUSY;
channel.loadFlags |= flags;
if (channel instanceof Ci.nsIHttpChannel) {
channel.referrer = doc.documentURIObject;
if (channel instanceof Ci.nsIHttpChannelInternal)
channel.forceAllowThirdPartyCookie = true;
}
// fallback to the old way if we don't see the headers quickly
var timeToWait =
gPrefService.getIntPref("browser.download.saveLinkAsFilenameTimeout");
var timer = Cc["@mozilla.org/timer;1"].createInstance(Ci.nsITimer);
timer.initWithCallback(new timerCallback(), timeToWait,
timer.TYPE_ONE_SHOT);
// kick off the channel with our proxy object as the listener
channel.asyncOpen(new saveAsListener(), null);
}

How do I mock polymer core ajax, for unit testing

I am building the scaffolding for my new Polymer project and am considering unit tests. I think I will be using the karma/jasmine combination. There is an interesting post at http://japhr.blogspot.co.uk/2014/03/polymer-page-objects-and-jasmine-20.html which I understand well enough to get started, but the key question I have to address, and haven't found any standard way of doing, is how to mock the Ajax calls.
When I was using Jasmine standalone on a jQuery Mobile project, I was able to use Jasmine's spyOn directly to mock the jQuery.ajax call. Is there something similar for Polymer?
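(For comparison, the jQuery approach referred to here looks roughly like this in Jasmine 2.x; the faked payload is made up:)
// Replace jQuery.ajax with a fake that immediately "succeeds".
spyOn($, 'ajax').and.callFake(function (options) {
  options.success({ items: [] });
});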
I came across an element, <polymer-mock-data>, but there is no real documentation for it, so I couldn't figure out whether it might help.
Instead of importing core-ajax/core-ajax.html, create your own core-ajax element.
<polymer-element name="core-ajax" attributes="response">
<script>
Polymer('core-ajax', {
attached: function() {
this.response = ['a', 'b', 'c'];
}
});
</script>
</polymer-element>
Obviously, this is just an example, the actual implementation depends on the desired mocking behavior.
This is just one way to solve it, there are many others. I'm interested to hear what you find (in)convenient.
It turns out that Jasmine 2.0 has a jasmine-ajax plugin that will mock the global XMLHttpRequest. core-ajax uses XMLHttpRequest under the hood, so I can get at the call directly.
It works well: in a beforeEach function at the top of the suite you call jasmine.Ajax.install(), in the afterEach function you call jasmine.Ajax.uninstall(), and it automatically replaces XMLHttpRequest.
Timing is also crucial, in that you need to ensure you have mocked the Ajax call before the element under test uses it. I achieve that by using a separate function to load the fixture containing the element under test, which is called after jasmine.Ajax.install() has been called. I use a special setup script, thus:
(function(){
var PolymerTests = {};
//I am not sure if we can just do this once, or for every test. I am hoping just once
var script = document.createElement("script");
script.src = "/base/components/platform/platform.js";
document.getElementsByTagName("head")[0].appendChild(script);
var POLYMER_READY = false;
var container; //Used to hold fixture
PolymerTests.loadFixture = function(fixture,done) {
window.addEventListener('polymer-ready', function(){
POLYMER_READY = true;
done();
});
container = document.createElement("div");
container.innerHTML = window.__html__[fixture];
document.body.appendChild(container);
if (POLYMER_READY) done();
};
//After every test, we remove the fixture
afterEach(function(){
document.body.removeChild(container);
});
window.PolymerTests = PolymerTests;
})();
The only point to note here is that the fixture files have been loaded by the Karma html2js preprocessor, which loads them into window.__html__, from where the code above adds them to the test context.
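For reference, the html2js wiring in karma.conf.js looks roughly like this (a sketch; the exact file patterns depend on your project layout):
// karma.conf.js (excerpt)
module.exports = function (config) {
  config.set({
    files: [
      'client/**/*-fixture.html',   // fixture files, served and preprocessed
      // ... platform.js, element sources and spec files ...
    ],
    preprocessors: {
      'client/**/*-fixture.html': ['html2js']   // exposes them on window.__html__
    }
  });
};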
My test suite is like so
describe('<smf-auth>',function(){
beforeEach(function(done){
jasmine.Ajax.install();
PolymerTests.loadFixture('client/smf-auth/smf-auth-fixture.html',done);
});
afterEach(function(){
jasmine.Ajax.uninstall();
});
describe("The element authenticates",function(){
it("Should Make an Ajax Request to the url given in the login Attribute",function(){
var req = jasmine.Ajax.requests;
expect(req.mostRecent().url).toBe('/football/auth_json.php'); //Url declared in our fixture
});
})
});
For this answer, I took an entirely different approach. Inspiration came from Web Component Tester, which includes sinon within its capabilities. sinon includes the ability to call sinon.useFakeXMLHttpRequest to replace the standard XHR object that core-ajax uses, and to return responses based on that.
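At its simplest, that sinon API works like this (a minimal sketch, independent of the fake-server setup shown later):
var requests = [];
var xhr = sinon.useFakeXMLHttpRequest();   // replaces the global XMLHttpRequest
xhr.onCreate = function (req) { requests.push(req); };
// ... code under test issues an XHR here ...
requests[0].respond(200, { 'Content-Type': 'application/json' }, '{"ok": true}');
xhr.restore();                             // put the real XMLHttpRequest back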
As far as I can see, Web Component Tester runs sinon in a node.js context, so the build of sinon supplied with it can "require" the various sinon components; I haven't quite got as far as running module tests using it. In a normal browser environment this doesn't work, and I was looking for a way to manually run the app I was developing without a PHP-capable server running.
However, downloading and installing the actual releases from the sinonjs.org web site with Bower does provide a completely built sinon that will run when served by an ordinary web server.
So I can include the following scripts in my main index.html file
<!--build:remove -->
<script type="text/javascript" src="/bower_components/sinon-1.14.1/index.js"></script>
<script type="text/javascript" src="/fake/fake.js"></script>
<!--endbuild-->
which is automatically removed by the gulp build scripts. fake.js then contains the following:
var PAS = (function (my) {
'use strict';
my.Faker = my.Faker || {};
var getLocation = function(href) {
var a = document.createElement('a');
a.href = href;
return a;
};
sinon.FakeXMLHttpRequest.useFilters = true;
sinon.FakeXMLHttpRequest.addFilter(function(method,url){
if(method === 'POST' && getLocation(url).pathname.substring(0,7) === '/serve/') {
return false;
}
return true;
});
var server = sinon.fakeServer.create();
server.autoRespond = true;
my.Faker.addRoute = function(route,params,notfound){
server.respondWith('POST','/serve/' + route + '.php',function(request){
var postParams = JSON.parse(request.requestBody);
var foundMatch = false;
var allMatch;
/*
* First off, we will work our way through the parameter list seeing if we got a parameter
* which matches the parameters received from our post. If all components of a parameter match,
* then we found one
*/
for(var i=0; i <params.length; i++) {
//check to see parameter is in request
var p = params[i][0];
allMatch = true; // start off optimistic
for(var cp in p ) {
//see if this parameter was in the request body
if(typeof postParams[cp] === 'undefined') {
allMatch = false;
break;
}
if(p[cp] !== postParams[cp]) {
allMatch = false;
break;
}
}
if (allMatch) {
request.respond(200,{'Content-Type':'application/json'},JSON.stringify(params[i][1]));
foundMatch = true;
break;
}
}
//see if we found a match. If not, then we will have to respond with the not found option
if (!foundMatch) {
request.respond(200,{'Content-Type':'application/json'},JSON.stringify(notfound));
}
});
};
return my;
})(PAS||{});
/**********************************************************************
These are all the routes we have and their responses.
**********************************************************************/
PAS.Faker.addRoute('logon',[
[{password:'password1',username:'alan'},{isLoggedOn:true,userID:1,name:'Alan',token:'',keys:['A','M']}],
[{username:'alan'},{isLoggedIn:false,userID:1,name:'Alan'}],
[{password:'password2',username:'babs'},{isLoggedOn:true,userID:2,name:'Barbara',token:'',keys:['M']}],
[{username:'babs'},{isLoggedIn:false,userID:2,name:'Barbara'}]
],{isLoggedOn:false,userID:0,name:''});
The PAS module initialises a sinon fake server and provides a way of defining routes for test cases with the addRoute function. For a given route, it checks the list of possible POST parameter combinations and, as soon as it finds a match, issues that response.
In this case it fakes /serve/logon.php for various combinations of username and password. It only checks the parameters actually present in the particular entry.
So if username is "alan" and password is "password1", the first response is made; but if username is "alan" and any other password is supplied, then, since the password isn't checked in the second entry, the second pattern matches and its response is made.
If none of the patterns match, the final "notfound" parameter is the response that is made.
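For example, with the fake routes above in place, a plain XHR from the page behaves like this (the real app issues the same POST from its login element; this snippet is only for illustration):
var xhr = new XMLHttpRequest();           // intercepted by the sinon fake server
xhr.open('POST', '/serve/logon.php');
xhr.setRequestHeader('Content-Type', 'application/json');
xhr.onload = function () {
  // Matches the second pattern above: {isLoggedIn: false, userID: 1, name: 'Alan'}
  console.log(JSON.parse(xhr.responseText));
};
xhr.send(JSON.stringify({ username: 'alan', password: 'wrong' }));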
I believe I could use this same technique in my module test fixtures if I wanted to, but there I am more likely to do more specific sinon spying and check the actual parameters.
For 0.8, the tests for PolymerElements/iron-ajax show how to do this with sinon.
Since SO doesn't like link-only answers, I've copied their code below. However, I'd highly recommend going to the source linked above, since 0.8 components are in a high state of flux currently.
var jsonResponseHeaders = {
'Content-Type': 'application/json'
};
var ajax;
var request;
var server;
setup(function () {
server = sinon.fakeServer.create();
server.respondWith(
'GET',
'/responds_to_get_with_json',
[
200,
jsonResponseHeaders,
'{"success":true}'
]
);
server.respondWith(
'POST',
'/responds_to_post_with_json',
[
200,
jsonResponseHeaders,
'{"post_success":true}'
]
);
ajax = fixture('TrivialGet');
});
teardown(function () {
server.restore();
});
suite('when making simple GET requests for JSON', function () {
test('has sane defaults that love you', function () {
request = ajax.generateRequest();
server.respond();
expect(request.response).to.be.ok;
expect(request.response).to.be.an('object');
expect(request.response.success).to.be.equal(true);
});
test('will be asynchronous by default', function () {
expect(ajax.toRequestOptions().async).to.be.eql(true);
});
});

Detecting Gmail attachment downloads

Is there a way to detect if a particular file that is being downloaded is a Gmail attachment?
I am looking for a way to write a Greasemonkey script which would help me organize downloads based on their sources; say, Gmail email attachments would be handled differently from other downloads.
So far, I've found out that attachments redirect to https://mail-attachment.googleusercontent.com/attachment/u/0/, which I guess is not sufficient.
EDIT
Since an add-on would be more powerful than a userscript, I've decided to pursue the add-on idea. However, the problem of detection remains unsolved.
This is too complicated for just one question; it has at least these major parts:
Do you want to redirect downloads when the user clicks, or automatically download select files? Clarify the question.
Your GM script must identify the appropriate download links, and on which pages, and for which views? For Gmail, this is not a trivial task, and the question needs to be clearer. It's worthy of a whole question just on this issue, given the variety of views and AJAX involved.
Once identified, the script probably needs to intercept clicks on those links. (Depends on your goal (clarify!) and what the Firefox extension can do.)
Greasemonkey needs to interact with an extension that either intercepts the user-initiated download, or allows for an automatic download. I've detailed the auto-download approach, below.
Once your script has identified the appropriate file URLs and/or links (Open a new question for more help with that, and include pictures of the types of pages and links you want.), it can interface with a Firefox add-on, like the one below, to automatically save those files.
Automatically saving files from Greasemonkey with the help of an additional Add-on:
WARNING: The following is a working proof of concept for education only. It has no security features, and if you use it as-is, for actual surfing, some webpage or script writer or extension writer will use it to completely pwn your computer.
If you use the Add-on builder or SDK to install or "Test" the DANGER. DANGER. DANGER. File download utility,
Then you can use a Greasemonkey script, like this, to automatically save files:
// ==UserScript==
// @name _Call our File download add-on to trigger a file download.
// @include https://mail.google.com/mail/*
// @include https://stackoverflow.com/questions/14440362/*
// @require http://ajax.googleapis.com/ajax/libs/jquery/1.7.2/jquery.min.js
// @grant GM_addStyle
// ==/UserScript==
/*- The @grant directive is needed to work around a design change
introduced in GM 1.0. It restores the sandbox.
*/
var fileURL = "http://userscripts.org/scripts/source/29222.user.js";
var savePath = "D:\\temp\\";
var extensionLoaded = false;
window.addEventListener ("ImAlivefromExtension", function (zEvent) {
console.log ("The test extension appears to be loaded!", zEvent.detail);
extensionLoaded = true;
} );
window.addEventListener ("ReplyToDownloadRequest", function (zEvent) {
//var xxxx = JSON.parse (zEvent.detail);
console.log ("Extension replied: ", zEvent.detail);
} );
$("body").prepend ('<button id="gmFileDownloadBtn">Click to File download request.</button>');
$("#gmFileDownloadBtn").click ( function () {
if (extensionLoaded) {
detailVal = JSON.stringify (
{targFileURL: fileURL, targSavePath: savePath}
);
var zEvent = new CustomEvent (
"SuicidalDownloadRequestToAddOn",
{"detail": detailVal }
);
window.dispatchEvent (zEvent);
}
else {
alert ("The file download extension is not loaded!");
}
} );
You can test the script on this SO question page.
Note that any other extension, userscript, web page, or plugin can listen to or send spoof events, the only security, so far, is to limit which pages the extension runs on.
For reference, the extension source files are below. The rest is supplied by Firefox's Add-on SDK.
The content script:
var zEvent = new CustomEvent ("ImAlivefromExtension",
{"detail": "GM, DANGER, DANGER, DANGER, File download utility" }
);
window.dispatchEvent (zEvent)
window.addEventListener ("SuicidalDownloadRequestToAddOn", function (zEvent) {
console.log ("Extension received download request: ", zEvent.detail);
//-- Relay request to extension main.js
self.port.emit ("SuicidalDownloadRequestRelayed", zEvent.detail);
//-- Reply back to GM, or whoever is pretending to be GM.
var zEvent = new CustomEvent ("ReplyToDownloadRequest",
{"detail": "Your funeral!" }
);
window.dispatchEvent (zEvent)
} );
The background JS:
//--- For security, MAKE THESE AS RESTRICTIVE AS POSSIBLE!
const includePattern = [
'https://mail.google.com/mail/*',
'https://stackoverflow.com/questions/14440362/*'
];
let {Cc, Cu, Ci} = require ("chrome");
Cu.import ("resource://gre/modules/Services.jsm");
Cu.import ("resource://gre/modules/XPCOMUtils.jsm");
Cu.import ("resource://gre/modules/FileUtils.jsm");
let data = require ("sdk/self").data;
let pageMod = require ('sdk/page-mod');
let dlManageWindow = Cc['@mozilla.org/download-manager-ui;1'].getService (Ci.nsIDownloadManagerUI);
let fileURL = "";
let savePath = "";
let activeWindow = Services.wm.getMostRecentWindow ("navigator:browser");
let mod = pageMod.PageMod ( {
include: includePattern,
contentScriptWhen: 'end',
contentScriptFile: [ data.url ('ContentScript.js') ],
onAttach: function (worker) {
console.log ('DANGER download utility attached to: ' + worker.tab.url);
worker.port.on ('SuicidalDownloadRequestRelayed', function (message) {
var detailVal = JSON.parse (message);
fileURL = detailVal.targFileURL;
savePath = detailVal.targSavePath;
console.log ("Received request to \ndownload: ", fileURL, "\nto:", savePath);
downloadFile (fileURL, savePath);
} );
}
} );
function downloadFile (fileURL, savePath) {
dlManageWindow.show (activeWindow, 1);
try {
let newFile;
let fileURIToDownload = Services.io.newURI (fileURL, null, null);
let persistWin = Cc['@mozilla.org/embedding/browser/nsWebBrowserPersist;1']
.createInstance (Ci.nsIWebBrowserPersist);
let fileName = fileURIToDownload.path.slice (fileURIToDownload.path.lastIndexOf ('/') + 1);
let fileObj = new FileUtils.File (savePath);
fileObj.append (fileName);
if (fileObj.exists ()) {
console.error ('*** Error! File "' + fileName + '" already exists!');
}
else {
let newFile = Services.io.newFileURI (fileObj);
let newDownload = Services.downloads.addDownload (
0, fileURIToDownload, newFile, fileName, null, null, null, persistWin, false
);
persistWin.progressListener = newDownload;
persistWin.savePrivacyAwareURI (fileURIToDownload, null, null, null, "", newFile, false);
}
} catch (exception) {
console.error ("Error saving the file! ", exception);
dump (exception);
}
}
So far, from what you are saying, the only thing you can do is make an add-on (Firefox) and an extension (for Chrome, if you want).
If you have a closer look at the download of an attachment, it happens when:
1) You click on the icon of the attachment
2) You click download
For these two cases you can find the click event of the <a> tag containing the download_url value. You can easily do that using JS/jQuery when creating the extension, as sketched below.
So you can add your own functionality when the user tries to download an attachment.
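A rough jQuery sketch of that idea (the selectors are guesses; Gmail's markup changes frequently, so inspect the attachment elements in devtools and adjust):
// Runs from a content script / userscript injected into mail.google.com.
$(document).on('click',
    '[download_url], a[href*="mail-attachment.googleusercontent.com"]',
    function () {
  var info = $(this).attr('download_url') || this.href;
  console.log('Gmail attachment download clicked:', info);
  // custom handling (e.g. message your extension) goes here
});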
You could use Gmail contextual gadgets to modify the behavior on the Google side:
Gmail Contextual Gadgets
Contextual gadgets don't have direct access to attachments, but server-side you could use IMAP to access the attachment (based on the Gmail message ID identified by the gadget):
Gmail IMAP Extensions
Using gadgets and server-side IMAP has the advantage of being browser-agnostic.
It's not entirely clear what you want to do differently with the downloaded Gmail attachment as opposed to any other download (save it to a different location? perform actions upon the attachment data?). But the contextual gadget and IMAP should give you some chance to modify the attachment data as needed before the browser download begins.
