I am trying to create a Firebug-like extension for Firefox, which is essentially a dev-tools extension. I have registered an httpRequestObserver to observe the http-on-examine-response event. My listener implements the method below:
onDataAvailable: function(request, context, inputStream, offset, count) {
    // I get the request URL using request.name
    // What I need to do is fetch response data from inputStream
}
I have read the documentation here but still can't figure it out.
I am looking for a way to read all the data from inputStream.
Let me know if more details are required.
Thanks in advance.
I'm not a stream expert, but here are some ways of doing it. I recommend Method 1 below; if you find problems with that, go to Method 2, and if there are still problems, research Method 3. Method 1 is definitely the most proper, as it follows the contract agreement.
Method 1 - nsIScriptableInputStream read
Hold a buffer on the listener: blank it in onStartRequest, and populate it in onDataAvailable.
// CC is Components.Constructor; note that contract IDs start with "@mozilla.org/..."
var ScriptableInputStream = CC("@mozilla.org/scriptableinputstream;1", "nsIScriptableInputStream", "init");
var blah = {
    data: '',
    onStartRequest: function (aRequest, aContext) {
        this.data = ''; // reset the buffer for each new request
    },
    onDataAvailable: function (request, context, inputStream, offset, count) {
        var scriptStream = new ScriptableInputStream(inputStream);
        this.data += scriptStream.read(count); // read exactly the bytes reported for this chunk
        scriptStream.close();
    }
};
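For context, here is a sketch of how a listener like blah typically gets wired up from the http-on-examine-response observer via nsITraceableChannel. This wiring is not part of the answer above, only a hedged sketch; a real tracing listener must also forward the stream events to the original listener so the page still loads:

Cu.import('resource://gre/modules/Services.jsm'); // gives us Services.obs

var observer = {
    observe: function (aSubject, aTopic, aData) {
        if (aTopic == 'http-on-examine-response') {
            var channel = aSubject.QueryInterface(Ci.nsITraceableChannel);
            // setNewListener returns the listener we replaced; blah should
            // delegate onStartRequest/onDataAvailable/onStopRequest to it.
            blah.originalListener = channel.setNewListener(blah);
        }
    }
};
Services.obs.addObserver(observer, 'http-on-examine-response', false);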
Method 2 - nsIBinaryInputStream readBytes
I don't know what advantage this readBytes method has; it might be that you can use it for images as well, which the method above may not read. I'm not sure. This method also holds a buffer: blank it in onStartRequest and populate it in onDataAvailable. Another advantage may be that you don't need to close a binary input stream, though I'm not sure about that point; I think it has no contract, so you can reuse it.
var bstream = Cc["@mozilla.org/binaryinputstream;1"].createInstance(Ci.nsIBinaryInputStream);
var blah = {
    data: '',
    onStartRequest: function (aRequest, aContext) {
        this.data = ''; // reset the buffer for each new request
    },
    onDataAvailable: function (request, context, inputStream, offset, count) {
        bstream.setInputStream(inputStream);
        var size = 0;
        while ((size = bstream.available())) {
            this.data += bstream.readBytes(size); // data is a plain string, so append directly
        }
    }
};
Method 3 - another method I used in a tracing listener
I'm just copy-pasting this method, as I don't understand it that well. I use it here though: GitHub :: Noitidart / demo-nsITraceableChannel #L120
function CCIN(cName, ifaceName) {
    return Cc[cName].createInstance(Ci[ifaceName]);
}
TracingListener.prototype = {
    onDataAvailable: function (request, context, inputStream, offset, count) {
        var binaryInputStream = CCIN("@mozilla.org/binaryinputstream;1", "nsIBinaryInputStream");
        var storageStream = CCIN("@mozilla.org/storagestream;1", "nsIStorageStream");
        var binaryOutputStream = CCIN("@mozilla.org/binaryoutputstream;1", "nsIBinaryOutputStream");
        binaryInputStream.setInputStream(inputStream);
        storageStream.init(8192, count, null);
        binaryOutputStream.setOutputStream(storageStream.getOutputStream(0));
        // Copy received data as it comes in.
        var data = binaryInputStream.readBytes(count);
        this.receivedData.push(data);
        binaryOutputStream.writeBytes(data, count);
        // Hand a fresh stream over the copied data to the original listener.
        this.originalListener.onDataAvailable(request, context, storageStream.newInputStream(0), offset, count);
    }
};
I created a script in Google Sheets which was working well, but after a while I'm getting the following error:
Exception: Service invoked too many times for one day: urlfetch
I think I called the function about 200-300 times in the day; from what I checked, that should be below the limit.
I read that we can use a cache to avoid this issue, but I'm not sure how to use it in my code.
function scrapercache(url) {
    var result = [];
    var description;
    var options = {
        'muteHttpExceptions': true,
        'followRedirects': false,
    };
    var cache = CacheService.getScriptCache();
    var properties = PropertiesService.getScriptProperties();
    try {
        let res = cache.get(url);
        if (!res) {
            // trim url to prevent (rare) errors
            url.toString().trim();
            var r = UrlFetchApp.fetch(url, options);
            var c = r.getResponseCode();
            // check for meta refresh if 200 ok
            if (c == 200) {
                var html = r.getContentText();
                cache.put(url, "cached", 21600);
                properties.setProperty(url, html);
                var $ = Cheerio.load(html); // make sure this lib is added to your project!
                // meta description
                if ($('meta[name=description]').attr("content")) {
                    description = $('meta[name=description]').attr("content").trim();
                }
            }
            result.push([description]);
        }
    }
    catch (error) {
        result.push(error.toString());
    }
    finally {
        return result;
    }
}
How can I use a cache like this to enhance my script, please?
var cache = CacheService.getScriptCache();
var result = cache.get(url);
if (!result) {
    var response = UrlFetchApp.fetch(url);
    result = response.getContentText();
    cache.put(url, result, 21600);
}
Answer:
You can implement CacheService and PropertiesService together and only retrieve the URL again after a specified amount of time.
Code Change:
Be aware that the additional calls to retrieve the cache and properties will slow your function down, especially if you are doing this a few hundred times.
As the values of the cache can be a maximum of 100 KB, we will use CacheService to keep track of which URLs are to be retrieved, but PropertiesService to store the data.
You can edit your try block like so:
var cache = CacheService.getScriptCache();
var properties = PropertiesService.getScriptProperties();
try {
    let res = cache.get(url);
    if (!res) {
        // trim url to prevent (rare) errors; assign the trimmed value back
        url = url.toString().trim();
        var r = UrlFetchApp.fetch(url, options);
        var c = r.getResponseCode();
        // check for meta refresh if 200 ok
        if (c == 200) {
            var html = r.getContentText();
            // flag the URL in the cache (cache values max out at 100 KB) and store the full HTML in properties
            cache.put(url, "cached", 21600);
            properties.setProperty(url, html);
            var $ = Cheerio.load(html); // make sure this lib is added to your project!
            // meta description
            if ($('meta[name=description]').attr("content")) {
                description = $('meta[name=description]').attr("content").trim();
            }
        }
        result.push([description]);
    }
}
catch (error) {
    result.push(error.toString());
}
finally {
    return result;
}
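On a cache hit, you will presumably want to read the stored HTML back out of PropertiesService instead of fetching again; the code above doesn't show that branch. Here is a minimal sketch of the else counterpart to the if (!res) block, reusing the names above (the Cheerio parsing is simply repeated from it):

else {
    // Cache hit: reuse the HTML stored in script properties instead of re-fetching.
    var html = properties.getProperty(url);
    if (html) {
        var $ = Cheerio.load(html);
        if ($('meta[name=description]').attr("content")) {
            description = $('meta[name=description]').attr("content").trim();
        }
    }
    result.push([description]);
}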
References:
Class CacheService | Apps Script | Google Developers
Class Cache | Apps Script | Google Developers
Class PropertiesService | Apps Script | Google Developers
Problem Description
I am a .NET Core developer and I have recently been asked to transcribe mp3 audio files that are approximately 20 minutes long into text; each file is about 30.5 MB. The issue is that speech is sparse in this file: the gaps between spoken sentences range anywhere from 2 to 4 minutes.
I've written a small service, based on the Google Speech documentation, that sends 32 KB of streaming data at a time from the file to be processed. All was progressing well until I hit the error that I share below:
I have searched via Google-fu, Google forums, and other sources, and I have not encountered documentation on this error. Suffice it to say, I think this is due to the sparsity of spoken words in my file. I am wondering if there is a programmatic workaround?
Code
I have used some code that is a slight modification of the Google .NET sample for 32 KB streaming. You can find it here.
public async void Run()
{
    var speech = SpeechClient.Create();
    var streamingCall = speech.StreamingRecognize();
    // Write the initial request with the config.
    await streamingCall.WriteAsync(
        new StreamingRecognizeRequest()
        {
            StreamingConfig = new StreamingRecognitionConfig()
            {
                Config = new RecognitionConfig()
                {
                    Encoding = RecognitionConfig.Types.AudioEncoding.Flac,
                    SampleRateHertz = 22050,
                    LanguageCode = "en",
                },
                InterimResults = true,
            }
        });
    // Helper function: print responses as they arrive.
    Task printResponses = Task.Run(async () =>
    {
        while (await streamingCall.ResponseStream.MoveNext(
            default(CancellationToken)))
        {
            foreach (var result in streamingCall.ResponseStream.Current.Results)
            {
                //foreach (var alternative in result.Alternatives)
                //{
                //    Console.WriteLine(alternative.Transcript);
                //}
                if (result.IsFinal)
                {
                    Console.WriteLine(result.Alternatives.ToString());
                }
            }
        }
    });
    string filePath = "mono_1.flac";
    using (FileStream fileStream = new FileStream(filePath, FileMode.Open))
    {
        //var buffer = new byte[32 * 1024];
        var buffer = new byte[64 * 1024]; // Trying 64kb buffer
        int bytesRead;
        while ((bytesRead = await fileStream.ReadAsync(
            buffer, 0, buffer.Length)) > 0)
        {
            await streamingCall.WriteAsync(
                new StreamingRecognizeRequest()
                {
                    AudioContent = Google.Protobuf.ByteString
                        .CopyFrom(buffer, 0, bytesRead),
                });
            await Task.Delay(500);
        }
    }
    await streamingCall.WriteCompleteAsync();
    await printResponses;
} // End of Run
Attempts
I've increased the stream to 64 KB of data to be processed, and then I received the following error, as can be seen below:
This, I believe, means the actual API timed out, which is decidedly a step in the wrong direction. Has anybody encountered a problem such as mine with the Google Speech API when dealing with an audio file with sparse speech? Is there a method by which I can programmatically filter the audio down to only the spoken words and then process that? I'm open to suggestions, but my research and attempts have only led me to further breaking my code.
There are two ways to recognize audio in the Google Speech API:
normal recognize
long running recognize
Your sample uses the normal recognize, which has a limit of 15 minutes.
Try the long running recognize method instead:
{
    var speech = SpeechClient.Create();
    var longOperation = speech.LongRunningRecognize( new RecognitionConfig()
    {
        Encoding = RecognitionConfig.Types.AudioEncoding.Linear16,
        SampleRateHertz = 16000,
        LanguageCode = "hu",
    }, RecognitionAudio.FromFile( filePath ) );
    longOperation = longOperation.PollUntilCompleted();
    var response = longOperation.Result;
    foreach ( var result in response.Results )
    {
        foreach ( var alternative in result.Alternatives )
        {
            Console.WriteLine( alternative.Transcript );
        }
    }
    return 0;
}
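One caveat for a 20-minute file: per the Speech API docs, longer audio generally has to be referenced by a Google Cloud Storage URI rather than sent inline, so passing a local 30 MB file with RecognitionAudio.FromFile may be rejected. A sketch under that assumption, with a hypothetical bucket name (my-bucket) and the asker's FLAC settings:

// Assumes the FLAC file was uploaded to Cloud Storage first; the bucket/object names are hypothetical.
var speech = SpeechClient.Create();
var longOperation = speech.LongRunningRecognize( new RecognitionConfig()
{
    Encoding = RecognitionConfig.Types.AudioEncoding.Flac,
    SampleRateHertz = 22050,
    LanguageCode = "en",
}, RecognitionAudio.FromStorageUri( "gs://my-bucket/mono_1.flac" ) );
longOperation = longOperation.PollUntilCompleted();
foreach ( var result in longOperation.Result.Results )
{
    foreach ( var alternative in result.Alternatives )
    {
        Console.WriteLine( alternative.Transcript );
    }
}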
I hope it helps.
I found that for an expensive IO-bound operation I can use TaskCompletionSource,
as shown here: http://msdn.microsoft.com/en-us/library/hh873177.aspx#workloads
But the example shown only waits for some time and then returns a DateTime.
public static Task<DateTimeOffset> Delay(int millisecondsTimeout)
{
    TaskCompletionSource<DateTimeOffset> tcs = null;
    Timer timer = null;
    timer = new Timer(delegate
    {
        timer.Dispose();
        tcs.TrySetResult(DateTimeOffset.UtcNow);
    }, null, Timeout.Infinite, Timeout.Infinite);
    tcs = new TaskCompletionSource<DateTimeOffset>(timer);
    timer.Change(millisecondsTimeout, Timeout.Infinite);
    return tcs.Task;
}
The above code just waits for a timeout. I have a database call which I want to fire the same way, but I'm a little confused about how to write it:
using (var context = new srdb_sr2_context())
{
    return context.GetData("100", "a2acfid");
}
I wrote the function as below, but I'm not sure if this is the correct way of doing it:
TaskCompletionSource<IList<InstructorsOut>> tcs = null;
Timer timer = null;
timer = new Timer(delegate
{
    timer.Dispose();
    // prepare for expensive data call
    using (var context = new srdb_sr2_context())
    {
        var output = context.GetData("100", "a2acfid");
        // set the result
        tcs.TrySetResult(output);
    }
}, null, Timeout.Infinite, Timeout.Infinite);
tcs = new TaskCompletionSource<IList<InstructorsOut>>(timer);
timer.Change(0, Timeout.Infinite);
return tcs.Task;
Any help would be appreciated.
Your code doesn't make much sense to me. Timer is useful if you want to execute the code after some time, but that's not what you need here.
If you want to execute an operation on a background thread, you can use Task.Run():
Task<IList<InstructorsOut>> GetDataBackground()
{
    return Task.Run(() =>
    {
        using (var context = new srdb_sr2_context())
        {
            return context.GetData("100", "a2acfid");
        }
    });
}
Using a background thread this way can be useful in UI apps, where you don't want to block the UI thread. But if you have something like an ASP.NET application, this won't actually give you any performance or scalability improvements. For that, the GetData() method would have to be made truly asynchronous.
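For illustration, a truly asynchronous version might look like the sketch below. GetDataAsync is hypothetical: it assumes the data-access layer exposes an awaitable call (for example, an EF-style async query under the hood), which srdb_sr2_context may not actually provide:

// Sketch only: assumes a hypothetical awaitable GetDataAsync on the context.
async Task<IList<InstructorsOut>> GetDataAsync()
{
    using (var context = new srdb_sr2_context())
    {
        // No thread is blocked while the database call is in flight.
        return await context.GetDataAsync("100", "a2acfid");
    }
}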
In my MVC project I generate an array of images and store the array in a session variable. I animate the images using a slide bar and by detecting mouse movement while the mouse button is down, calculating the distance between the first click and the x position as the mouse moves over a canvas.
In the controller I use:
public ActionResult Animate(int slice = 0, int udm = 0)
{
    FileContentResult data;
    Image objImage = null;
    Bitmap im = null;
    try
    {
        im = MySession.Current.imageArray[slice];
        ....  // elided: presumably an if (...) { matching the else below
            MySession.Current.image = im;
        }
        else
        {
            return RedirectToAction("Index", new { .... });
        }
    }
    catch { }
    return null;
}
and
public ActionResult ImageOut(int udm = 0)
{
    FileContentResult data;
    Image objImage = null;
    Bitmap im = null;
    im = MySession.Current.image;
    ...
    objImage = im.Bitmap(outputSize, PixelFormat.Format24bppRgb, m);
    MemoryStream ms1 = new MemoryStream();
    using (var memStream = new MemoryStream())
    {
        objImage.Save(memStream, ImageFormat.Png);
        data = this.File(memStream.GetBuffer(), "image/png");
    }
    objImage.Dispose();
    return data;
}
From the view I use Ajax:
$.ajax({
    url: '/Home/Animate',
    type: 'POST',
    async: false,
    data: {
        slice: ((lastX - firstX) + nSlice),
        udm: ++udm
    },
    success: function (data) {
        if (data.udm) {
            nSlice = (data.slice);
            image.src = '/Home/ImageOut?' + $.param({
                udm: data.udm
            });
        }
    },
    error: function () {
    }
});
I have two problems: first, it takes time to update the view and it skips a number of images; second, it opens many threads, and if a number of users access the same page it slows down. I thought of using async, but I am still using C# 4 and this might require lots of changes to my code. I was reading about SignalR; my question is whether this can be done (provided I just update that user's screen, not all users'), or whether there is a better solution.
The sequence of events I would like to achieve is:
Ajax sends the first action a request to generate the first image, and waits
When the image is generated, Ajax receives success, then displays the image on the screen using the second action
Then the first action generates the second image
The challenge I see is that the first action keeps generating images without waiting, so my question is how to make the first action wait, and how to send it a message to generate the following image.
I just installed VS2012 / C# 5; is there any example that can help me? I would appreciate your suggestions; thanks in advance.
Using the TPL, you could try this (taking your code above); the same can be applied to the Animate method:
public async Task<ActionResult> ImageOut(int udm = 0)
{
    FileContentResult data = null;
    // Run the image work on a thread-pool thread and await it, so that
    // data is populated before we return and the image is disposed
    // only after it has been saved.
    await Task.Run(() =>
    {
        Bitmap im = MySession.Current.dicomImage;
        Image objImage = im.Bitmap(outputSize, PixelFormat.Format24bppRgb, m);
        using (var memStream = new MemoryStream())
        {
            objImage.Save(memStream, ImageFormat.Png);
            data = this.File(memStream.GetBuffer(), "image/png");
        }
        objImage.Dispose();
    });
    return data;
}
Task.Run is just shorthand for Task.Factory.StartNew
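Concretely, the rough equivalence, with the default arguments spelled out (this is the documented default behavior, not code from the question), is:

// Task.Run(action) is roughly equivalent to:
Task.Factory.StartNew(action,
    CancellationToken.None,
    TaskCreationOptions.DenyChildAttach,
    TaskScheduler.Default);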
Rather than changing my program to use the TPL, because of the learning curve, I just added async: false to my Ajax call; this helped delay the refresh of the screen. Not the best approach, but it helped a bit.
I'm currently working on a proxy server where, in this case, we have to modify the data (using regexes) that we push through it.
In most cases it works fine, except for websites that use gzip as the content-encoding (I think). I've come across a module called compress and tried to push the chunks that I receive through a decompress/gunzip stream, but it isn't really turning out as I expected (see below for code).
I figured I'd post some code to illustrate my problem; this is the proxy that gets loaded with MVC (Express):
module.exports = {
    index: function (request, response) {
        var iframe_url = "www.nu.nl"; // site with gzip encoding
        var http = require('http');
        var httpClient = http.createClient(80, iframe_url);
        var headers = request.headers;
        headers.host = iframe_url;
        var remoteRequest = httpClient.request(request.method, request.url, headers);
        request.on('data', function (chunk) {
            remoteRequest.write(chunk);
        });
        request.on('end', function () {
            remoteRequest.end();
        });
        remoteRequest.on('response', function (remoteResponse) {
            var body_regexp = new RegExp("<head>"); // regex to find first head tag
            var href_regexp = new RegExp('\<a href="(.*)"', 'g'); // regex to find hrefs
            response.writeHead(remoteResponse.statusCode, remoteResponse.headers);
            remoteResponse.on('data', function (chunk) {
                var body = doDecompress(new compress.GunzipStream(), chunk);
                body = body.replace(body_regexp, "<head><base href=\"http://" + iframe_url + "/\">");
                body = body.replace(href_regexp, '<a href="#" onclick="javascript:return false;"');
                response.write(body, 'binary');
            });
            remoteResponse.on('end', function () {
                response.end();
            });
        });
    }
};
At the var body part I want to read the body and, for example, remove all hrefs by replacing them with a #. The problem here, of course, is that when we have a site which is gzip encoded/compressed, it's all gibberish and we can't apply the regexes.
Now, I've already tried to mess around with the node-compress module:
doDecompress(new compress.GunzipStream(), chunk);
which refers to
function doDecompress(decompressor, input) {
    var d1 = input.substr(0, 25);
    var d2 = input.substr(25);
    sys.puts('Making decompression requests...');
    var output = '';
    decompressor.setInputEncoding('binary');
    decompressor.setEncoding('utf8');
    decompressor.addListener('data', function (data) {
        output += data;
    }).addListener('error', function (err) {
        throw err;
    }).addListener('end', function () {
        sys.puts('Decompressed length: ' + output.length);
        sys.puts('Raw data: ' + output);
    });
    decompressor.write(d1);
    decompressor.write(d2);
    decompressor.close();
    sys.puts('Requests done.');
}
But it fails since the chunk input is an object, so I tried supplying it as chunk.toString(), which also fails with invalid input data.
I was wondering whether I am heading in the right direction at all?
The decompressor expects binary-encoded input. The chunk that your response receives is an instance of Buffer, whose toString() method by default gives you back a UTF-8 encoded string.
So you have to use chunk.toString('binary') to make it work; this can also be seen in the demo.
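As an aside, here is an alternative sketch using Node's built-in zlib module instead of node-compress (zlib ships with later Node versions than the one this question targets, so treat its availability as an assumption): collect the gzipped chunks into one Buffer, gunzip once at the end, then run the regexes over the whole body. Note that writeHead moves into the end handler so the Content-Encoding and Content-Length headers can be dropped, since the rewritten body is no longer gzipped:

var zlib = require('zlib');

remoteRequest.on('response', function (remoteResponse) {
    var chunks = [];
    remoteResponse.on('data', function (chunk) {
        chunks.push(chunk); // keep the raw Buffers; don't stringify compressed bytes
    });
    remoteResponse.on('end', function () {
        zlib.gunzip(Buffer.concat(chunks), function (err, decoded) {
            if (err) { response.end(); return; }
            var body = decoded.toString('utf8')
                .replace(/<head>/, '<head><base href="http://www.nu.nl/">')
                .replace(/<a href="(.*?)"/g, '<a href="#" onclick="javascript:return false;"');
            // The rewritten body is plain text now, so drop the stale headers.
            delete remoteResponse.headers['content-encoding'];
            delete remoteResponse.headers['content-length'];
            response.writeHead(remoteResponse.statusCode, remoteResponse.headers);
            response.end(body);
        });
    });
});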