Streaming audio from FFmpeg to browser via WebSocket and Web Audio API

My project has two parts:
a web interface that the user accesses,
and a standalone app installed on the computer that acts as a WebSocket server.
From the web UI, the user has to be able to hear his computer's microphone.
At the moment I have a working solution that listens to the microphone and sends the raw PCM audio chunks back to the web UI, which is able to play them. But serious lag builds up over time, even though it all runs on the same computer, so there is no network latency involved. That is why I am testing FFmpeg now.
So here's the FFmpeg command for streaming microphone data:
ffmpeg.exe -re -f dshow -i audio="Microphone (HD Pro Webcam C920)" -ar 44100 -ac 1 -f f32le pipe:1
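For what it's worth, a variant of this command that might reduce buffering (my suggestion, untested here, not part of the original setup) is to drop -re, which throttles reading to real time and is intended for file input rather than live capture devices, and to flush packets as soon as they are written:
ffmpeg.exe -fflags nobuffer -f dshow -i audio="Microphone (HD Pro Webcam C920)" -ar 44100 -ac 1 -f f32le -flush_packets 1 pipe:1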
The data gets sent successfully over the WebSocket, but playing it using the Web Audio API is not working: I simply don't hear anything.
Can anyone point out what I am doing wrong?
Here's the web JavaScript:
let ipOfAudioServer = 'localhost';
let wsClient = null;
var audioCtx = null;
var subcounter = 0;
var audiobuffer = [];
var source = null;
// must match the values in the audio-server. Though the audio-server could send 2 channels, we stick to one to save bandwidth
var sampleRate = 44100;
var channels = 1;
var microphone = 'Microphone (HD Pro Webcam C920)';
// this method reads current position from the audiobuffer and plays the audio
// the method will re-call itself, in order to play the next item in queue
this.play = function (soundName) {
    var ffs = audiobuffer[subcounter];
    if (ffs) {
        var frameCount = ffs.byteLength;
        console.log(frameCount, audiobuffer.length);
        var myAudioBuffer = audioCtx.createBuffer(channels, frameCount, sampleRate);
        myAudioBuffer.getChannelData(0).set(ffs);
        if (myAudioBuffer != null)
        {
            subcounter += 1;
            source = audioCtx.createBufferSource();
            source.buffer = myAudioBuffer;
            source.connect(audioCtx.destination);
            source.onended = () => { console.log("finished, continuing to seek buffer!"); play(soundName); };
            source.start();
        }
    }
    // just in case the counter got to be bigger than the actual amount of items in the list, set it back to the last one
    if (subcounter > audiobuffer.length)
        subcounter = audiobuffer.length;
};
// the method to initialize WS client
this.initWebsocketClient = function ()
{
    if (wsClient == null)
    {
        wsClient = new WebSocket(`ws://${ipOfAudioServer}:23233`, "protocol");
        wsClient.binaryType = "arraybuffer";
        wsClient.onmessage = function (event)
        {
            if (typeof event.data === 'object') {
                console.log(event.data, event.data.size);
                // clear memory in case buffer is already too big
                if (subcounter > 50) {
                    console.log('cleared memory');
                    audiobuffer = [];
                    subcounter = 0;
                }
                audiobuffer.push(event.data);
                if (audiobuffer.length == 1) {
                    play('sinewave');
                }
            }
            else {
                if (event.data == 'stopMicrophone=ok') {
                    wsClient.close();
                    wsClient = null;
                    audiobuffer = [];
                    subcounter = 0;
                }
                console.log(event.data);
            }
        }
    }
};
// method used in send() which will actually send the message only after connection has been established successfully.
this.waitForConnection = function (callback, interval) {
    if (wsClient.readyState === 1) {
        callback();
    } else {
        var that = this;
        // optional: implement backoff for interval here
        setTimeout(function () {
            that.waitForConnection(callback, interval);
        }, interval);
    }
};
// using this method to send WS messages to the audio-server
this.send = function (message, callback)
{
    this.initWebsocketClient();
    this.waitForConnection(function () {
        wsClient.send(message);
        if (typeof callback !== 'undefined') {
            callback();
        }
    }, 1000);
};
// called by clicking the start button
function startCapture() {
    if (audioCtx == null)
        audioCtx = new (window.AudioContext || window.webkitAudioContext)();
    audiobuffer = [];
    subcounter = 0;
    this.send(`startMicrophone?device=${microphone}`);
}
// called by clicking the stop button
function stopCapture() {
    this.send('stopMicrophone');
}
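For reference, here is a minimal sketch of an alternative playback path (an assumption about the fix on my part, not part of the original code): since FFmpeg sends raw f32le samples, each incoming ArrayBuffer has to be viewed as a Float32Array (so the frame count is byteLength / 4, not byteLength), and each chunk can be scheduled back to back via source.start(when) instead of waiting for onended. The nextStartTime variable is new here.
var nextStartTime = 0;

wsClient.onmessage = function (event) {
    if (!(event.data instanceof ArrayBuffer)) return;
    // Interpret the raw f32le bytes as 32-bit float samples.
    var samples = new Float32Array(event.data);
    var buffer = audioCtx.createBuffer(channels, samples.length, sampleRate);
    buffer.copyToChannel(samples, 0);
    var src = audioCtx.createBufferSource();
    src.buffer = buffer;
    src.connect(audioCtx.destination);
    // Schedule chunks seamlessly one after another to avoid gaps and drift.
    nextStartTime = Math.max(nextStartTime, audioCtx.currentTime);
    src.start(nextStartTime);
    nextStartTime += buffer.duration;
};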

Related

How to seek to a position in a song in Discord.js?

I am facing some difficulty with seeking to a specified timestamp in the current song. I have separate files for all my commands. I want to create a seek.js file which takes a specified time as input and then passes it to the play.js file (which plays the current song in the queue), but I can't seem to find a way to do this.
This is my play command:
const { Collector } = require("discord.js");
const ytdlDiscord = require("ytdl-core-discord");
//const play = require("../commands/play");
module.exports = {
    async play(song, message){
        const queue = message.client.queue.get(message.guild.id);
        if(!song){
            setTimeout(function(){
                if(!queue.connection.dispatcher && message.guild.me.voice.channel){
                    queue.channel.leave();
                    queue.textChannel.send(`**Cadenza** left successfully`).catch(console.error);
                }
                else return;
            }, 120000);
            message.client.queue.delete(message.guild.id);
            return queue.textChannel.send(`**Music Queue Ended**`);
        }
        let stream = await ytdlDiscord(song.url, {filter: 'audioonly', quality: 'highestaudio', highWaterMark: 1<<25});
        let streamType = song.url.includes("youtube.com") ? "opus" : "ogg/opus";
        queue.connection.on("disconnect", () => message.client.queue.delete(message.guild.id));
        const dispatcher = queue.connection
            .play(stream, {type: streamType, highWaterMark: 1})
            .on("finish", () => {
                if(queue.loop){
                    let last = queue.songs.shift();
                    queue.songs.push(last);
                    module.exports.play(queue.songs[0], message);
                }else{
                    queue.songs.shift();
                    module.exports.play(queue.songs[0], message);
                }
            })
            .on("error", (err) => {
                console.error(err);
                queue.songs.shift();
                module.exports.play(queue.songs[0], message);
            });
        dispatcher.setVolumeLogarithmic(queue.volume / 100);
        queue.textChannel.send(`Started Playing **${song.title}**`);
    }
};
seek command
const { play } = require("../include/play");

function timeConvert(str){
    const t = str.split(':');
    let s = 0, m = 1;
    while(t.length > 0){
        s += m * parseInt(t.pop(), 10);
        m = m * 60;
    }
    return s;
}

module.exports = {
    name: 'seek',
    description: 'Seeks to a certain point in the current track.',
    execute(message, args){
        const queue = message.client.queue.get(message.guild.id);
        if(!queue) return message.channel.send("There is no song playing.").catch(console.error);
        queue.playing = true;
        let time = timeConvert(args[0]);
        if(time > queue.songs[0].duration)
            return message.channel.send(`**Input a valid time**`);
        else{
            let time = timeConvert(args[0]) * 1000;
            // main code here
        }
    }
}
How can I pass the time variable to play() so that the current song seeks to that amount?
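One possible direction (a sketch on my part, not a tested answer): accept the seek time as an extra parameter in play() and forward it to connection.play(), assuming your discord.js version supports the seek stream option (a start offset in seconds). As far as I know, seek only takes effect when discord.js transcodes the stream itself, so it may be ignored for streams played with type: 'opus'. The seekSeconds parameter below is hypothetical.
// play.js: accept an optional start offset (in seconds) and hand it to connection.play()
async play(song, message, seekSeconds = 0){
    // ...same queue checks and stream setup as in the original play()...
    const dispatcher = queue.connection
        .play(stream, { type: streamType, highWaterMark: 1, seek: seekSeconds })
        // ...same "finish" and "error" handlers as in the original play()...
}

// seek.js: inside execute(), where "// main code here" is
const seconds = timeConvert(args[0]);   // e.g. timeConvert("1:30") === 90
play(queue.songs[0], message, seconds);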

How to play and seek fragmented MP4 audio using MSE SourceBuffer?

Note:
If you end up here, you might want to take a look at shaka-player and the accompanying shaka-streamer. Use it. Don't implement this yourself unless you really have to.
I have been trying for quite some time now to play an audio track on Chrome, Firefox, Safari, etc., but I keep hitting brick walls. My current problem is that I am simply not able to seek within a fragmented MP4 (or MP3).
At the moment I am converting audio files such as MP3 to fragmented MP4 (fMP4) and sending them chunk-wise to the client. I define a CHUNK_DURATION_SEC (chunk duration in seconds) and compute a chunk size like this:
chunksTotal = Math.ceil(this.track.duration / CHUNK_DURATION_SEC);
chunkSize = Math.ceil(this.track.fileSize / this.chunksTotal);
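For example (my own illustrative numbers, just to show the formulas): a 180-second, 5.7 MB track would give chunksTotal = Math.ceil(180 / 20) = 9 and chunkSize = Math.ceil(5700000 / 9) = 633334 bytes.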
With this I partition the audio file and can fetch it entirely jumping chunkSize-many bytes for each chunk:
-----------------------------------------
| chunk 1 | chunk 2 | ... | chunk n |
-----------------------------------------
How audio files are converted to fMP4
ffmpeg -i input.mp3 -acodec aac -b:a 256k -f mp4 \
-movflags faststart+frag_every_frame+empty_moov+default_base_moof \
output.mp4
This seems to work with Chrome and Firefox (so far).
How chunks are appended
After following this example, and realizing that it's simply not working as it is explained here, I threw it away and started over from scratch. Unfortunately without success. It's still not working.
The following code is supposed to play a track from the very beginning to the very end. However, I also need to be able to seek. So far, this is simply not working. Seeking will just stop the audio after the seeking event got triggered.
The code
/* Desired chunk duration in seconds. */
const CHUNK_DURATION_SEC = 20;
const AUDIO_EVENTS = [
'ended',
'error',
'play',
'playing',
'seeking',
'seeked',
'pause',
'timeupdate',
'canplay',
'loadedmetadata',
'loadstart',
'updateend',
];
class ChunksLoader {
/** The total number of chunks for the track. */
public readonly chunksTotal: number;
/** The length of one chunk in bytes */
public readonly chunkSize: number;
/** Keeps track of requested chunks. */
private readonly requested: boolean[];
/** URL of endpoint for fetching audio chunks. */
private readonly url: string;
constructor(
private track: Track,
private sourceBuffer: SourceBuffer,
private logger: NGXLogger,
) {
this.chunksTotal = Math.ceil(this.track.duration / CHUNK_DURATION_SEC);
this.chunkSize = Math.ceil(this.track.fileSize / this.chunksTotal);
this.requested = [];
for (let i = 0; i < this.chunksTotal; i++) {
this.requested[i] = false;
}
this.url = `${environment.apiBaseUrl}/api/tracks/${this.track.id}/play`;
}
/**
* Fetch the first chunk.
*/
public begin() {
this.maybeFetchChunk(0);
}
/**
* Handler for the "timeupdate" event. Checks if the next chunk should be fetched.
*
* @param currentTime
* The current time of the track which is currently played.
*/
public handleOnTimeUpdate(currentTime: number) {
const nextChunkIndex = Math.floor(currentTime / CHUNK_DURATION_SEC) + 1;
const hasAllChunks = this.requested.every(val => !!val);
if (nextChunkIndex === (this.chunksTotal - 1) && hasAllChunks) {
this.logger.debug('Last chunk. Calling mediaSource.endOfStream();');
return;
}
if (this.requested[nextChunkIndex] === true) {
return;
}
if (currentTime < CHUNK_DURATION_SEC * (nextChunkIndex - 1 + 0.25)) {
return;
}
this.maybeFetchChunk(nextChunkIndex);
}
/**
* Fetches the chunk if it hasn't been requested yet. After the request finished, the returned
* chunk gets appended to the SourceBuffer-instance.
*
* @param chunkIndex
* The chunk to fetch.
*/
private maybeFetchChunk(chunkIndex: number) {
const start = chunkIndex * this.chunkSize;
const end = start + this.chunkSize - 1;
if (this.requested[chunkIndex] == true) {
return;
}
this.requested[chunkIndex] = true;
if ((end - start) == 0) {
this.logger.warn('Nothing to fetch.');
return;
}
const totalKb = ((end - start) / 1000).toFixed(2);
this.logger.debug(`Starting to fetch bytes ${start} to ${end} (total ${totalKb} kB). Chunk ${chunkIndex + 1} of ${this.chunksTotal}`);
const xhr = new XMLHttpRequest();
xhr.open('get', this.url);
xhr.setRequestHeader('Authorization', `Bearer ${AuthenticationService.getJwtToken()}`);
xhr.setRequestHeader('Range', 'bytes=' + start + '-' + end);
xhr.responseType = 'arraybuffer';
xhr.onload = () => {
this.logger.debug(`Range ${start} to ${end} fetched`);
this.logger.debug(`Requested size: ${end - start + 1}`);
this.logger.debug(`Fetched size: ${xhr.response.byteLength}`);
this.logger.debug('Appending chunk to SourceBuffer.');
this.sourceBuffer.appendBuffer(xhr.response);
};
xhr.send();
};
}
export enum StreamStatus {
NOT_INITIALIZED,
INITIALIZING,
PLAYING,
SEEKING,
PAUSED,
STOPPED,
ERROR
}
export class PlayerState {
status: StreamStatus = StreamStatus.NOT_INITIALIZED;
}
/**
*
*/
@Injectable({
providedIn: 'root'
})
export class MediaSourcePlayerService {
public track: Track;
private mediaSource: MediaSource;
private sourceBuffer: SourceBuffer;
private audioObj: HTMLAudioElement;
private chunksLoader: ChunksLoader;
private state: PlayerState = new PlayerState();
private state$ = new BehaviorSubject<PlayerState>(this.state);
public stateChange = this.state$.asObservable();
private currentTime$ = new BehaviorSubject<number>(null);
public currentTimeChange = this.currentTime$.asObservable();
constructor(
private httpClient: HttpClient,
private logger: NGXLogger
) {
}
get canPlay() {
const state = this.state$.getValue();
const status = state.status;
return status == StreamStatus.PAUSED;
}
get canPause() {
const state = this.state$.getValue();
const status = state.status;
return status == StreamStatus.PLAYING || status == StreamStatus.SEEKING;
}
public playTrack(track: Track) {
this.logger.debug('playTrack');
this.track = track;
this.startPlayingFrom(0);
}
public play() {
this.logger.debug('play()');
this.audioObj.play().then();
}
public pause() {
this.logger.debug('pause()');
this.audioObj.pause();
}
public stop() {
this.logger.debug('stop()');
this.audioObj.pause();
}
public seek(seconds: number) {
this.logger.debug('seek()');
this.audioObj.currentTime = seconds;
}
private startPlayingFrom(seconds: number) {
this.logger.info(`Start playing from ${seconds.toFixed(2)} seconds`);
this.mediaSource = new MediaSource();
this.mediaSource.addEventListener('sourceopen', this.onSourceOpen);
this.audioObj = document.createElement('audio');
this.addEvents(this.audioObj, AUDIO_EVENTS, this.handleEvent);
this.audioObj.src = URL.createObjectURL(this.mediaSource);
this.audioObj.play().then();
}
private onSourceOpen = () => {
this.logger.debug('onSourceOpen');
this.mediaSource.removeEventListener('sourceopen', this.onSourceOpen);
this.mediaSource.duration = this.track.duration;
this.sourceBuffer = this.mediaSource.addSourceBuffer('audio/mp4; codecs="mp4a.40.2"');
// this.sourceBuffer = this.mediaSource.addSourceBuffer('audio/mpeg');
this.chunksLoader = new ChunksLoader(
this.track,
this.sourceBuffer,
this.logger
);
this.chunksLoader.begin();
};
private handleEvent = (e) => {
const currentTime = this.audioObj.currentTime.toFixed(2);
const totalDuration = this.track.duration.toFixed(2);
this.logger.warn(`MediaSource event: ${e.type} (${currentTime} of ${totalDuration} sec)`);
this.currentTime$.next(this.audioObj.currentTime);
const currentStatus = this.state$.getValue();
switch (e.type) {
case 'playing':
currentStatus.status = StreamStatus.PLAYING;
this.state$.next(currentStatus);
break;
case 'pause':
currentStatus.status = StreamStatus.PAUSED;
this.state$.next(currentStatus);
break;
case 'timeupdate':
this.chunksLoader.handleOnTimeUpdate(this.audioObj.currentTime);
break;
case 'seeking':
currentStatus.status = StreamStatus.SEEKING;
this.state$.next(currentStatus);
if (this.mediaSource.readyState == 'open') {
this.sourceBuffer.abort();
}
this.chunksLoader.handleOnTimeUpdate(this.audioObj.currentTime);
break;
}
};
private addEvents(obj, events, handler) {
events.forEach(event => obj.addEventListener(event, handler));
}
}
Running it will give me the following output:
Apologies for the screenshot but it's not possible to just copy the output without all the stack traces in Chrome.
What I also tried was following this example and calling sourceBuffer.abort(), but that didn't work. It looks more like a hack that used to work years ago but is still referenced in the docs (see "Example" -> "You can see something similar in action in Nick Desaulnier's bufferWhenNeeded demo ..").
case 'seeking':
    currentStatus.status = StreamStatus.SEEKING;
    this.state$.next(currentStatus);
    if (this.mediaSource.readyState === 'open') {
        this.sourceBuffer.abort();
    }
    break;
Trying with MP3
I have tested the above code under Chrome by converting tracks to MP3:
ffmpeg -i input.mp3 -acodec aac -b:a 256k -f mp3 output.mp3
and creating a SourceBuffer using audio/mpeg as type:
this.mediaSource.addSourceBuffer('audio/mpeg')
I have the same problem when seeking.
The issue without seeking
The above code has another issue:
After two minutes of playing, the audio playback starts to stutter and comes to a halt prematurely. So the audio plays up to a point and then stops for no obvious reason.
For whatever reason there is another canplay and playing event, and a few seconds later the audio simply stops.
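One thing that stands out to me here (my own observation plus a sketch, not something from the original post): SourceBuffer.appendBuffer() throws if it is called while the buffer is still updating, so chunks arriving from overlapping requests need to be queued and appended one at a time on 'updateend'. A plain-JavaScript sketch of that pattern, with appendOrQueue and onUpdateEnd as hypothetical helper names:
// Queue appends so only one runs at a time; flush the queue on 'updateend'.
var pendingChunks = [];

function appendOrQueue(sourceBuffer, chunk) {
    if (sourceBuffer.updating || pendingChunks.length > 0) {
        pendingChunks.push(chunk);
    } else {
        sourceBuffer.appendBuffer(chunk);
    }
}

function onUpdateEnd(sourceBuffer) {
    var next = pendingChunks.shift();
    if (next) {
        sourceBuffer.appendBuffer(next);
    }
}

// Wiring, e.g. right after addSourceBuffer():
// sourceBuffer.addEventListener('updateend', function () { onUpdateEnd(sourceBuffer); });
// and in xhr.onload: appendOrQueue(sourceBuffer, xhr.response);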

Epson js SDK unable to use multiple printers

Intro
We're developing a JavaScript-based web application that is supposed to print receipts using the Epson JavaScript SDK.
Right now we've got this PoC where multiple printers can be added to the app and where receipts can be printed per individual printer.
The problem is that the receipt will ONLY be printed from the last added printer.
Further investigation tells us that the SDK just uses the last added (connected) printer. This can be seen in the following images.
In the first image there are 2 printers set up. Notice the different IP addresses.
In the second image we log which EpsonPrinter instance is being used while printing. Notice the IP address is clearly the first printer's.
In the third image we trace the network. Notice the IP address that is actually used (ignore the error).
We created our own EpsonPrinter class that can be found here or here below.
EpsonPrinter
export default class EpsonPrinter {
name = null
ipAddress = null
port = null
deviceId = null
crypto = false
buffer = false
eposdev = null
printer = null
intervalID = null
restry = 0
constructor (props) {
const {
name = 'Epson printer',
ipAddress,
port = 8008,
deviceId = 'local_printer',
crypto = false,
buffer = false
} = props
this.name = name
this.ipAddress = ipAddress
this.port = port
this.deviceId = deviceId
this.crypto = crypto
this.buffer = buffer
this.eposdev = new window.epson.ePOSDevice()
this.eposdev.onreconnecting = this.onReconnecting
this.eposdev.onreconnect = this.onReconnect
this.eposdev.ondisconnect = this.onDisconnect
this.connect()
}
onReconnecting = () => {
this.consoleLog('reconnecting')
}
onReconnect = () => {
this.consoleLog('reconnect')
}
onDisconnect = () => {
this.consoleLog('disconnect')
if (this.intervalID === null ){
this.intervalID = setInterval(() => this.reconnect(), 5000)
}
}
connect = () => {
this.consoleLog('connect')
this.eposdev.ondisconnect = null
this.eposdev.disconnect()
this.eposdev.connect(this.ipAddress, this.port, this.connectCallback)
}
reconnect = () => {
this.consoleLog('(Re)connect')
this.eposdev.connect(this.ipAddress, this.port, this.connectCallback)
}
connectCallback = (data) => {
clearInterval(this.intervalID)
this.intervalID = null
this.eposdev.ondisconnect = this.onDisconnect
if (data === 'OK' || data === 'SSL_CONNECT_OK') {
this.createDevice()
} else {
setTimeout(() => this.reconnect(), 5000)
}
}
createDevice = () => {
console.log('create device, try: ' + this.restry)
const options = {
crypto: this.crypto,
buffer: this.buffer
}
this.eposdev.createDevice(this.deviceId, this.eposdev.DEVICE_TYPE_PRINTER, options, this.createDeviceCallback)
}
createDeviceCallback = (deviceObj, code) => {
this.restry++
if (code === 'OK') {
this.printer = deviceObj
this.printer.onreceive = this.onReceive
} else if (code === 'DEVICE_IN_USE') {
if (this.restry < 5) {
setTimeout(() => this.createDevice(), 3000)
}
}
}
onReceive = (response) => {
this.consoleLog('on receive: ', response)
let message = `Print ${this.name} ${response.success ? 'success' : 'failure'}\n`
message += `Code: ${response.code}\n`
message += `Status: \n`
if (response.status === this.printer.ASB_NO_RESPONSE) { message += ' No printer response\n' }
if (response.status === this.printer.ASB_PRINT_SUCCESS) { message += ' Print complete\n' }
if (response.status === this.printer.ASB_DRAWER_KICK) { message += ' Status of the drawer kick number 3 connector pin = "H"\n' }
if (response.status === this.printer.ASB_OFF_LINE) { message += ' Offline status\n' }
if (response.status === this.printer.ASB_COVER_OPEN) { message += ' Cover is open\n' }
if (response.status === this.printer.ASB_PAPER_FEED) { message += ' Paper feed switch is feeding paper\n' }
if (response.status === this.printer.ASB_WAIT_ON_LINE) { message += ' Waiting for online recovery\n' }
if (response.status === this.printer.ASB_PANEL_SWITCH) { message += ' Panel switch is ON\n' }
if (response.status === this.printer.ASB_MECHANICAL_ERR) { message += ' Mechanical error generated\n' }
if (response.status === this.printer.ASB_AUTOCUTTER_ERR) { message += ' Auto cutter error generated\n' }
if (response.status === this.printer.ASB_UNRECOVER_ERR) { message += ' Unrecoverable error generated\n' }
if (response.status === this.printer.ASB_AUTORECOVER_ERR) { message += ' Auto recovery error generated\n' }
if (response.status === this.printer.ASB_RECEIPT_NEAR_END) { message += ' No paper in the roll paper near end detector\n' }
if (response.status === this.printer.ASB_RECEIPT_END) { message += ' No paper in the roll paper end detector\n' }
if (response.status === this.printer.ASB_SPOOLER_IS_STOPPED) { message += ' Stop the spooler\n' }
if (!response.success) {
alert(message)
// TODO: error message?
} else {
// TODO: success -> remove from queue
}
}
printReceipt = () => {
this.consoleLog(`Print receipt, `, this)
try {
if (!this.printer) {
throw `No printer created for ${this.name}`
}
this.printer.addPulse(this.printer.DRAWER_1, this.printer.PULSE_100)
this.printer.addText(`Printed from: ${this.name}\n`)
this.printer.send()
} catch (err) {
let message = `Print ${this.name} failure\n`
message += `Error: ${err}`
alert(message)
}
}
consoleLog = (...rest) => {
console.log(`${this.name}: `, ...rest)
}
}
PoC
The full working PoC can be found here.
Epson JavaScript SDK
2.9.0
Does anyone have any experience with the Epson SDK? Is it supposed to be able to support multiple connections at the same time? Please let us know.
For those looking for a way to handle multiple printers using this SDK, we came up with the following workaround:
We created a separate 'printer app' that is responsible for handling ONE printer connection and hosted it online. We then 'load' this printer app into the app that needs multiple connections using iframes. Communication between the app and the printer app is done via the window.postMessage API to, for example, initialise the printer with the correct connection and provide the data that has to be printed.
It takes some effort, but it was the most stable solution we could come up with for handling multiple connections.
If anyone else comes up with a better approach, please let me know!
You can check out our printer app here for inspiration (inspect the app, because it doesn't show much when you just visit it).
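To make the workaround more concrete, here is a rough sketch of the kind of parent/iframe messaging described above. It is only illustrative: the printer-app URL and the 'init' / 'print' message shapes are my own assumptions, not the actual PoC code.
// Parent app: one hidden iframe per printer (illustrative origin and message shape).
const frame = document.createElement('iframe');
frame.src = 'https://printer-app.example.com/';   // hypothetical printer-app URL
frame.style.display = 'none';
document.body.appendChild(frame);

frame.addEventListener('load', () => {
    // Tell the printer app which printer it owns.
    frame.contentWindow.postMessage(
        { type: 'init', ipAddress: '192.168.0.51', port: 8008, deviceId: 'local_printer' },
        'https://printer-app.example.com'
    );
});

function printReceipt(lines) {
    frame.contentWindow.postMessage(
        { type: 'print', lines: lines },
        'https://printer-app.example.com'
    );
}

// Inside the printer app (iframe): one EpsonPrinter instance, driven by messages.
window.addEventListener('message', (event) => {
    // In production, check event.origin against the parent app's origin.
    if (event.data.type === 'init') {
        window.printer = new EpsonPrinter(event.data);   // the class from the question
    } else if (event.data.type === 'print' && window.printer) {
        window.printer.printReceipt();
    }
});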
To use your EpsonPrinter class, I also added a myPrinters class after your class:
class myPrinters {
    printers = null;
    cantidad = 0;

    constructor() {
        console.log("Creo la coleccion de printers");
        this.printers = [];
    }

    inicializarConeccionImpresora(idImpresora, ip, puerto, _deviceId) {
        let ipAddress = ip;
        let port = puerto;
        let deviceId = _deviceId;
        console.log("Agrego una impresora");
        // EpsonPrinter expects a props object, so pass the connection settings that way
        let myPrinter = new EpsonPrinter({ ipAddress: ipAddress, port: port, deviceId: deviceId });
        myPrinter.id = idImpresora;
        console.log('Id impresora antes de connect es: ' + idImpresora);
        myPrinter.connect();
        this.printers[this.cantidad] = myPrinter;
        this.cantidad++;
    }

    imprimirPruebaJS(idImpresora) {
        let printer = null;
        let printerTemp = null;
        for (var i = 0; i < this.printers.length; i++) {
            printerTemp = this.printers[i];
            if (printerTemp.id == idImpresora) {
                printer = printerTemp.printer;
            }
        }
        if (printer == null) {
            console.log("La impresora no esta iniciada en clase myPrinters");
            return;
        }
        printer.addText('Hola mundo texto normal\n');
        printer.addFeed();
        printer.addCut(printer.CUT_FEED);
    }
}
Call the myPrinters class in this way:
myEpsonPrinters = new myPrinters();
myEpsonPrinters.inicializarConeccionImpresora(1, '192.168.0.51', 8008, 'local_printer');
myEpsonPrinters.inicializarConeccionImpresora(2, '192.168.0.52', 8008, 'local_printer');
myEpsonPrinters.imprimirPruebaJS(1)
or
myEpsonPrinters.imprimirPruebaJS(2)
Test it and tell me.
Juan
Just create multiple objects for printing, as simple as this:
this.eposdev = [];
let printersCnt = 3;
let self = this;
for (let i = 1; i <= printersCnt; i++) {
    this.eposdev[i] = new window.epson.ePOSDevice()
    this.eposdev[i].onreconnecting = function () {
        self.consoleLog('reConnecting')
    }
    this.eposdev[i].onreconnect = function () {
        self.consoleLog('onReconnect')
    }
    this.eposdev[i].ondisconnect = function () {
        self.consoleLog('onDisconnect')
    }
}
function connect(printerKey) {
    self.consoleLog('connect')
    self.eposdev[printerKey].ondisconnect = null
    self.eposdev[printerKey].disconnect()
    self.eposdev[printerKey].connect(self.ipAddress[printerKey], self.port[printerKey], function (data) {
        clearInterval(self.intervalID)
        self.intervalID = null
        self.eposdev[printerKey].ondisconnect = self.ondisconnect
        if (data === 'OK' || data === 'SSL_CONNECT_OK') {
            console.log('create device, try: ' + self.restry)
            const options = {
                crypto: self.crypto,
                buffer: self.buffer
            }
            self.eposdev[printerKey].createDevice(self.deviceId, self.eposdev[printerKey].DEVICE_TYPE_PRINTER, options, function (deviceObj, code) {
                self.restry++
                if (code === 'OK') {
                    self.printer[printerKey] = deviceObj
                    self.printer[printerKey].onreceive = function () {
                        console.log('onreceive')
                    }
                } else if (code === 'DEVICE_IN_USE') {
                    if (self.restry < 5) {
                        setTimeout(() => self.createDevice(printerKey), 3000)
                    }
                }
            })
        } else {
            setTimeout(() => self.reconnect(printerKey), 5000)
        }
    })
}
Epson says that with version 2.12.0 you can add more than one printer.

Can't advertise on bluetooth

I want to create a Gatt Server in my Xamarin.Forms app so that other devices can scan for it via bluetooth. I am using this plugin:
https://github.com/aritchie/bluetoothle
This is my code to create a Gatt Server and advertise data:
server = CrossBleAdapter.Current.CreateGattServer();
var service = server.AddService(serviceGuid, true);
var characteristic = service.AddCharacteristic(
characteristicGuid,
CharacteristicProperties.Read |
CharacteristicProperties.Write | CharacteristicProperties.WriteNoResponse,
GattPermissions.Read | GattPermissions.Write
);
var notifyCharacteristic = service.AddCharacteristic
(
notifyCharacteristicGuid,
CharacteristicProperties.Indicate | CharacteristicProperties.Notify,
GattPermissions.Read | GattPermissions.Write
);
IDisposable notifyBroadcast = null;
notifyCharacteristic.WhenDeviceSubscriptionChanged().Subscribe(e =>
{
var @event = e.IsSubscribed ? "Subscribed" : "Unsubscribed";
if (notifyBroadcast == null)
{
notifyBroadcast = Observable
.Interval(TimeSpan.FromSeconds(1))
.Where(x => notifyCharacteristic.SubscribedDevices.Count > 0)
.Subscribe(_ =>
{
Debug.WriteLine("Sending Broadcast");
var dt = DateTime.Now.ToString("g");
var bytes = Encoding.UTF8.GetBytes("SendingBroadcast");
notifyCharacteristic.Broadcast(bytes);
});
}
});
characteristic.WhenReadReceived().Subscribe(x =>
{
var write = "HELLO";
// you must set a reply value
x.Value = Encoding.UTF8.GetBytes(write);
x.Status = GattStatus.Success; // you can optionally set a status, but it defaults to Success
});
characteristic.WhenWriteReceived().Subscribe(x =>
{
var write = Encoding.UTF8.GetString(x.Value, 0, x.Value.Length);
Debug.WriteLine("in WhenWriteReceived() value: " + write);
// do something value
});
await server.Start(new AdvertisementData
{
LocalName = "DariusServer",
ServiceUuids = new List<Guid>() { serverServiceGuid }
});
I am using this app to scan for my advertisement data:
https://play.google.com/store/apps/details?id=no.nordicsemi.android.mcp
I can't discover my app with it. I don't know what I'm doing wrong. I am testing with a real device, an SM-T350 tablet.
I spent countless hours trying to get this plugin to work, with no luck. But this native code works, for anyone else who has the same problem:
private async Task AndroidBluetooth()
{
try
{
await Task.Delay(5000); // just to make sure bluetooth is ready to go, this probably isn't needed, but good for peace of mind during testing
BluetoothLeAdvertiser advertiser = BluetoothAdapter.DefaultAdapter.BluetoothLeAdvertiser;
var advertiseBuilder = new AdvertiseSettings.Builder();
var parameters = advertiseBuilder.SetConnectable(true)
.SetAdvertiseMode(AdvertiseMode.Balanced)
//.SetTimeout(10000)
.SetTxPowerLevel(AdvertiseTx.PowerHigh)
.Build();
AdvertiseData data = (new AdvertiseData.Builder()).AddServiceUuid(new ParcelUuid(Java.Util.UUID.FromString("your UUID here"))).Build();
MyAdvertiseCallback callback = new MyAdvertiseCallback();
advertiser.StartAdvertising(parameters, data, callback);
}
catch(Exception e)
{
}
}
public class MyAdvertiseCallback : AdvertiseCallback
{
public override void OnStartFailure([GeneratedEnum] AdvertiseFailure errorCode)
{
// put a break point here, in case something goes wrong, you can see why
base.OnStartFailure(errorCode);
}
public override void OnStartSuccess(AdvertiseSettings settingsInEffect)
{
base.OnStartSuccess(settingsInEffect);
}
}
}
Just to note, it wouldn't work if I included the device name, because the Bluetooth advertisement would be too large in that case with a service UUID included (max 31 bytes, I believe).

How to modify http response in Firefox extension

Hey, I have been able to write an nsIStreamListener that listens to responses and gets the response text, following the tutorials at nsitraceablechannel-intercept-http-traffic. But I am unable to modify the response sent to the browser. If I return the response and send it back down the chain, it shows up in Firebug but not in the browser.
What I am guessing is that we have to replace the default listener rather than just listening in the chain. I can't find any docs anywhere that explain how to do this.
Could anyone give me some insight into this? This is mainly for educational purposes.
Thanks in advance.
Edit: As of now I have arrived at a partial solution; I am able to do this:
var old;
function TracingListener() {}
TracingListener.prototype = {
originalListener: null,
receivedData: null, //will be an array for incoming data.
//For the listener this is step 1.
onStartRequest: function (request, context) {
this.receivedData = []; //initialize the array
//Pass on the onStartRequest call to the next listener in the chain -- VERY IMPORTANT
//old.onStartRequest(request, context);
},
//This is step 2. This gets called every time additional data is available
onDataAvailable: function (request, context, inputStream, offset, count) {
var binaryInputStream = CCIN("@mozilla.org/binaryinputstream;1",
"nsIBinaryInputStream");
binaryInputStream.setInputStream(inputStream);
var storageStream = CCIN("@mozilla.org/storagestream;1",
"nsIStorageStream");
//8192 is the segment size in bytes, count is the maximum size of the stream in bytes
storageStream.init(8192, count, null);
var binaryOutputStream = CCIN("@mozilla.org/binaryoutputstream;1",
"nsIBinaryOutputStream");
binaryOutputStream.setOutputStream(storageStream.getOutputStream(0));
// Copy received data as they come.
var data = binaryInputStream.readBytes(count);
this.receivedData.push(data);
binaryOutputStream.writeBytes(data, count);
//Pass it on down the chain
//old.onDataAvailable(request, context,storageStream.newInputStream(0), offset, count);
},
onStopRequest: function (request, context, statusCode) {
try {
//QueryInterface into HttpChannel to access originalURI and requestMethod properties
request.QueryInterface(Ci.nsIHttpChannel);
//Combine the response into a single string
var responseSource = this.receivedData.join('');
//edit data as needed
responseSource = "test";
console.log(responseSource);
} catch (e) {
//standard function to dump a formatted version of the error to console
dumpError(e);
}
var stream = Cc["@mozilla.org/io/string-input-stream;1"]
.createInstance(Ci.nsIStringInputStream);
stream.setData(responseSource, -1);
//Pass it to the original listener
//old.originalListener=null;
old.onStartRequest(channel, context);
old.onDataAvailable(channel, context, stream, 0, stream.available());
old.onStopRequest(channel, context, statusCode);
},
QueryInterface: function (aIID) {
if (aIID.equals(Ci.nsIStreamListener) ||
aIID.equals(Ci.nsISupports)) {
return this;
}
throw components.results.NS_NOINTERFACE;
},
readPostTextFromRequest: function (request, context) {
try {
var is = request.QueryInterface(Ci.nsIUploadChannel).uploadStream;
if (is) {
var ss = is.QueryInterface(Ci.nsISeekableStream);
var prevOffset;
if (ss) {
prevOffset = ss.tell();
ss.seek(Ci.nsISeekableStream.NS_SEEK_SET, 0);
}
// Read data from the stream..
var charset = "UTF-8";
var text = this.readFromStream(is, charset, true);
if (ss && prevOffset == 0)
ss.seek(Ci.nsISeekableStream.NS_SEEK_SET, 0);
return text;
} else {
dump("Failed to Query Interface for upload stream.\n");
}
} catch (exc) {
dumpError(exc);
}
return null;
},
readFromStream: function (stream, charset, noClose) {
var sis = CCSV("@mozilla.org/binaryinputstream;1",
"nsIBinaryInputStream");
sis.setInputStream(stream);
var segments = [];
for (var count = stream.available(); count; count = stream.available())
segments.push(sis.readBytes(count));
if (!noClose)
sis.close();
var text = segments.join("");
return text;
}
}
httpRequestObserver = {
observe: function (request, aTopic, aData) {
if (typeof Cc == "undefined") {
var Cc = components.classes;
}
if (typeof Ci == "undefined") {
var Ci = components.interfaces;
}
if (aTopic == "http-on-examine-response") {
request.QueryInterface(Ci.nsIHttpChannel);
console.log(request.statusCode);
var newListener = new TracingListener();
request.QueryInterface(Ci.nsITraceableChannel);
channel = request;
//newListener.originalListener
//add new listener as default and save old one
old = request.setNewListener(newListener);
old.originalListener = null;
var threadManager = Cc["@mozilla.org/thread-manager;1"]
.getService(Ci.nsIThreadManager);
threadManager.currentThread.dispatch(newListener, Ci.nsIEventTarget.DISPATCH_NORMAL);
}
},
QueryInterface: function (aIID) {
if (typeof Cc == "undefined") {
var Cc = components.classes;
}
if (typeof Ci == "undefined") {
var Ci = components.interfaces;
}
if (aIID.equals(Ci.nsIObserver) ||
aIID.equals(Ci.nsISupports)) {
return this;
}
throw components.results.NS_NOINTERFACE;
},
};
var observerService = Cc["@mozilla.org/observer-service;1"]
.getService(Ci.nsIObserverService);
observerService.addObserver(httpRequestObserver,
"http-on-examine-response", false);
This example works for me on Firefox 34 (current nightly): https://github.com/Noitidart/demo-nsITraceableChannel
I downloaded the XPI and edited bootstrap.js to modify the stream:
// Copy received data as they come.
var data = binaryInputStream.readBytes(count);
data = data.replace(/GitHub/g, "TEST");
this.receivedData.push(data);
I installed the XPI, then reloaded the GitHub page. It read "TEST" in the footer.
The version of code you posted doesn't actually pass the results back to the old listener, so that's the first thing that ought to be changed.
It also may have interacted with Firebug or another extension badly. It's a good idea to try reproducing the problem in a clean profile (with only your extension installed).
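Building on that last point, here is a minimal sketch (my own, under the assumption that the listener returned by request.setNewListener() is stored on the tracing listener as this.originalListener, and reusing the CCIN/Cc/Ci helpers from the question) of one way to hand the collected data back to the original listener exactly once, from onStopRequest, after it has been modified. The replace(/foo/g, 'bar') is just an illustrative placeholder for the actual edit.
onStartRequest: function (request, context) {
    // just collect; the original listener is only driven from onStopRequest
    this.receivedData = [];
},
onDataAvailable: function (request, context, inputStream, offset, count) {
    var binaryInputStream = CCIN("@mozilla.org/binaryinputstream;1", "nsIBinaryInputStream");
    binaryInputStream.setInputStream(inputStream);
    this.receivedData.push(binaryInputStream.readBytes(count));
},
onStopRequest: function (request, context, statusCode) {
    // modify the collected body, then replay start/data/stop on the original listener
    var responseSource = this.receivedData.join('').replace(/foo/g, 'bar');
    var stream = Cc["@mozilla.org/io/string-input-stream;1"]
        .createInstance(Ci.nsIStringInputStream);
    stream.setData(responseSource, responseSource.length);
    this.originalListener.onStartRequest(request, context);
    this.originalListener.onDataAvailable(request, context, stream, 0, stream.available());
    this.originalListener.onStopRequest(request, context, statusCode);
}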
