Alexa Custom skill in AWS Lambda not recognizing Alexa.getSupportedInterfaces [Error handled: Alexa.getSupportedInterfaces is not a function]

Trying to use Alexa Presentation Language (APL) features in an AWS-hosted custom Lambda function. The intent handlers are firing, but when I add the Alexa.getSupportedInterfaces call, it fails.
The message is "Error handled: Alexa.getSupportedInterfaces is not a function"
// 1. Intent Handlers =============================================
const LaunchRequest_Handler = {
    canHandle(handlerInput) {
        const request = handlerInput.requestEnvelope.request;
        return request.type === 'LaunchRequest';
    },
    handle(handlerInput) {
        let responseBuilder = handlerInput.responseBuilder;
        let speakOutput = 'Welcome to test Bot. ';
        // let skillTitle = capitalize(invocationName);

        // Add APL directive to response
        if (Alexa1.getSupportedInterfaces(handlerInput.requestEnvelope)['Alexa.Presentation.APL']) {
            // Add the RenderDocument directive to the responseBuilder
            responseBuilder.addDirective({
                type: 'Alexa.Presentation.APL.RenderDocument',
                token: Echo_Token,
                document: Customer
            });
            // Tailor the speech for a device with a screen.
            speakOutput += " You should now also see my greeting on the screen."
        } else {
            // User's device does not support APL, so tailor the speech to this situation
            speakOutput += " This example would be more interesting on a device with a screen, such as an Echo Show or Fire TV.";
        }
        return responseBuilder
            .speak(speakOutput)
            .withShouldEndSession(false)
            .reprompt('try again, ' + speakOutput)
            .withSimpleCard("CustomerSupport!", "CustomerSupport)")
            // .reprompt('add a reprompt if you want to keep the session open for the user to respond')
            //.withStandardCard('Welcome!',
            //    'Hello!\nThis is a card for your skill, ' + skillTitle,
            //    welcomeCardImg.smallImageUrl, welcomeCardImg.largeImageUrl)
            .getResponse();
    },
};

Instead of using the condition below:
Alexa1.getSupportedInterfaces(handlerInput.requestEnvelope)['Alexa.Presentation.APL']
you can use the following condition to check whether the device supports APL:
if (supportsAPL(handlerInput))
Make sure you include the function definitions below in your index file:
function supportsAPL(handlerInput) {
    const supportedInterfaces = handlerInput.requestEnvelope.context.System.device.supportedInterfaces;
    const aplInterface = supportedInterfaces['Alexa.Presentation.APL'];
    return aplInterface != null && aplInterface != undefined;
}

function supportsAPLT(handlerInput) {
    const supportedInterfaces = handlerInput.requestEnvelope.context.System.device.supportedInterfaces;
    const aplInterface = supportedInterfaces['Alexa.Presentation.APLT'];
    return aplInterface != null && aplInterface != undefined;
}
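With these helpers in place, the check in the LaunchRequest handler no longer depends on the SDK utility at all. A minimal sketch of the rewritten condition (Echo_Token and Customer are the token and APL document from the original handler; note the original code calls the utility on Alexa1, so it is also worth double-checking that the ask-sdk-core import is actually bound to that name):
if (supportsAPL(handlerInput)) {
    responseBuilder.addDirective({
        type: 'Alexa.Presentation.APL.RenderDocument',
        token: Echo_Token,
        document: Customer
    });
    speakOutput += " You should now also see my greeting on the screen.";
} else {
    speakOutput += " This example would be more interesting on a device with a screen, such as an Echo Show or Fire TV.";
}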
Hope that helps as it worked for me.

Related

Custom Google Cast receiver stuck in "Load is in progress"

My custom v3 CAF receiver app successfully plays the first few live & VOD assets. After that, it gets into a state where media commands are being queued because "Load is in progress". It is still (successfully) fetching manifests, but MEDIA_STATUS remains "buffering". The log then shows:
[ 4.537s] [cast.receiver.MediaManager] Load is in progress, media command is being queued.
[ 5.893s] [cast.receiver.MediaManager] Buffering state changed, isPlayerBuffering: true old time: 0 current time: 0
[ 5.897s] [cast.receiver.MediaManager] Sending broadcast status message
CastContext Core event: {"type":"MEDIA_STATUS","mediaStatus":{"mediaSessionId":1,"playbackRate":1,"playerState":"BUFFERING","currentTime":0,"supportedMediaCommands":12303,"volume":{"level":1,"muted":false},"currentItemId":1,"repeatMode":"REPEAT_OFF","liveSeekableRange":{"start":0,"end":20.000999927520752,"isMovingWindow":true,"isLiveDone":false}}}
CastContext MEDIA_STATUS event: {"type":"MEDIA_STATUS","mediaStatus":{"mediaSessionId":1,"playbackRate":1,"playerState":"BUFFERING","currentTime":0,"supportedMediaCommands":12303,"volume":{"level":1,"muted":false},"currentItemId":1,"repeatMode":"REPEAT_OFF","liveSeekableRange":{"start":0,"end":20.000999927520752,"isMovingWindow":true,"isLiveDone":false}}}
Fetch finished loading: GET "(manifest url)".
No errors are shown.
Even after closing and restarting the cast session, the issue remains. The cast device itself has to be rebooted to resolve it. It looks like data is kept between sessions.
It could be important to note that the cast receiver app is not published yet. It is hosted on a local network.
My questions are:
What could be the cause of this stuck behavior?
Is there any session data kept between sessions?
How can the cast receiver app be fully reset, without having to restart the cast device?
The receiver app itself is very basic. Other than license wrapping, it resembles the vanilla example app:
// observable / runInAction below come from MobX (assumed import; the original snippet omitted it).
import { observable, runInAction } from "mobx";

const { cast } = window;
const TAG = "CastContext";

class CastStore {
    static instance = null;

    error = observable.box();
    framerate = observable.box();

    static getInstance() {
        if (!CastStore.instance) {
            CastStore.instance = new CastStore();
        }
        return CastStore.instance;
    }

    get debugLog() {
        return this.framerate.get();
    }

    get errorLog() {
        return this.error.get();
    }

    init() {
        const context = cast.framework.CastReceiverContext.getInstance();
        const playerManager = context.getPlayerManager();

        playerManager.addEventListener(
            cast.framework.events.category.CORE,
            event => {
                console.log(TAG, "Core event: " + JSON.stringify(event));
            }
        );
        playerManager.addEventListener(
            cast.framework.events.EventType.MEDIA_STATUS,
            event => {
                console.log(TAG, "MEDIA_STATUS event: " + JSON.stringify(event));
            }
        );
        playerManager.addEventListener(
            cast.framework.events.EventType.BITRATE_CHANGED,
            event => {
                console.log(TAG, "BITRATE_CHANGED event: " + JSON.stringify(event));
                runInAction(() => {
                    this.framerate.set(`bitrate: ${event.totalBitrate}`);
                });
            }
        );
        playerManager.addEventListener(
            cast.framework.events.EventType.ERROR,
            event => {
                console.log(TAG, "ERROR event: " + JSON.stringify(event));
                runInAction(() => {
                    this.error.set(`Error detailedErrorCode: ${event.detailedErrorCode}`);
                });
            }
        );

        // intercept the LOAD request to be able to read in a contentId and get data.
        this.loadHandler = new LoadHandler();
        playerManager.setMessageInterceptor(
            cast.framework.messages.MessageType.LOAD,
            loadRequestData => {
                this.framerate.set(null);
                this.error.set(null);
                console.log(TAG, "LOAD message: " + JSON.stringify(loadRequestData));
                if (!loadRequestData.media) {
                    const error = new cast.framework.messages.ErrorData(
                        cast.framework.messages.ErrorType.LOAD_CANCELLED
                    );
                    error.reason = cast.framework.messages.ErrorReason.INVALID_PARAM;
                    return error;
                }
                if (!loadRequestData.media.entity) {
                    // Copy the value from contentId for legacy reasons if needed
                    loadRequestData.media.entity = loadRequestData.media.contentId;
                }
                // notify loadMedia
                this.loadHandler.onLoadMedia(loadRequestData, playerManager);
                return loadRequestData;
            }
        );

        const playbackConfig = new cast.framework.PlaybackConfig();
        // intercept license requests & responses
        playbackConfig.licenseRequestHandler = requestInfo => {
            const challenge = requestInfo.content;
            const { castToken } = this.loadHandler;
            const wrappedRequest = DrmLicenseHelper.wrapLicenseRequest(
                challenge,
                castToken
            );
            requestInfo.content = wrappedRequest;
            return requestInfo;
        };
        playbackConfig.licenseHandler = license => {
            const unwrappedLicense = DrmLicenseHelper.unwrapLicenseResponse(license);
            return unwrappedLicense;
        };

        // Duration of buffered media in seconds to start/resume playback after auto-paused due to buffering; default is 10.
        playbackConfig.autoResumeDuration = 4;
        // Initial bandwidth estimate in bits per second, used before playback statistics are available.
        playbackConfig.initialBandwidth = 1200000;

        context.start({
            touchScreenOptimizedApp: true,
            playbackConfig: playbackConfig,
            supportedCommands: cast.framework.messages.Command.ALL_BASIC_MEDIA
        });
    }
}
The LoadHandler optionally adds a proxy (I'm using a cors-anywhere proxy to remove the origin header) and stores the castToken for license requests:
class LoadHandler {
    CORS_USE_PROXY = true;
    CORS_PROXY = "http://192.168.0.127:8003";
    castToken = null;

    onLoadMedia(loadRequestData, playerManager) {
        if (!loadRequestData) {
            return;
        }
        const { media } = loadRequestData;
        // disable cors for local testing
        if (this.CORS_USE_PROXY) {
            media.contentId = `${this.CORS_PROXY}/${media.contentId}`;
        }
        const { customData } = media;
        if (customData) {
            const { licenseUrl, castToken } = customData;
            // install cast token
            this.castToken = castToken;
            // handle license URL
            if (licenseUrl) {
                const playbackConfig = playerManager.getPlaybackConfig();
                playbackConfig.licenseUrl = licenseUrl;
                const { contentType } = loadRequestData.media;
                // Dash: "application/dash+xml"
                playbackConfig.protectionSystem = cast.framework.ContentProtection.WIDEVINE;
                // disable cors for local testing
                if (this.CORS_USE_PROXY) {
                    playbackConfig.licenseUrl = `${this.CORS_PROXY}/${licenseUrl}`;
                }
            }
        }
    }
}
The DrmHelper wraps the license request to add the castToken and base64-encodes the result. The license response is base64-decoded and unwrapped later on:
// fromByteArray / toByteArray are assumed imports from the base64-js package (omitted in the original snippet).
import { fromByteArray, toByteArray } from "base64-js";

export default class DrmLicenseHelper {
    static wrapLicenseRequest(challenge, castToken) {
        const wrapped = {};
        wrapped.AuthToken = castToken;
        wrapped.Payload = fromByteArray(new Uint8Array(challenge));
        const wrappedJson = JSON.stringify(wrapped);
        const wrappedLicenseRequest = fromByteArray(
            new TextEncoder().encode(wrappedJson)
        );
        return wrappedLicenseRequest;
    }

    static unwrapLicenseResponse(license) {
        try {
            const responseString = String.fromCharCode.apply(String, license);
            const responseJson = JSON.parse(responseString);
            const rawLicenseBase64 = responseJson.license;
            const decodedLicense = toByteArray(rawLicenseBase64);
            return decodedLicense;
        } catch (e) {
            return license;
        }
    }
}
The handler for cast.framework.messages.MessageType.LOAD should always return one of:
the (possibly modified) loadRequestData,
a promise for the (possibly modified) loadRequestData, or
null to discard the load request (I'm not 100% sure this works for load requests).
If you do not do this, the load request stays in the queue and any new request is queued after the initial one.
In your handler, you return an error if !loadRequestData.media, which will get you into that state. Another possibility is an exception in the load request handler, which will also get you into that state.
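For illustration, a minimal sketch of a LOAD interceptor that always resolves the request one way or another (same registration call as in the receiver code above; the null return for the missing-media case is the part I'm least sure about, per the caveat above):
playerManager.setMessageInterceptor(
    cast.framework.messages.MessageType.LOAD,
    loadRequestData => {
        try {
            if (!loadRequestData.media) {
                // Discard the request instead of leaving it (or an ErrorData) pending in the queue.
                return null;
            }
            // ...any per-load work (proxying, tokens, etc.) would go here...
            return loadRequestData;
        } catch (e) {
            // An uncaught exception here would otherwise leave the load stuck in the queue.
            console.error("LOAD interceptor failed:", e);
            return null;
        }
    }
);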
I guess we have a different approach: we send everything possible through sendMessage, and when we load something we create a new cast.framework.messages.LoadRequestData(), which we load with playerManager.load(loadRequest), as sketched below.
But I guess that you might be testing this on an integrated Chromecast; we see these problems as well!?
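For reference, a minimal sketch of that load-via-LoadRequestData approach (the URL and content type here are made-up placeholders, not values from the original post):
const loadRequest = new cast.framework.messages.LoadRequestData();
loadRequest.media = new cast.framework.messages.MediaInformation();
loadRequest.media.contentId = 'https://example.com/stream.mpd'; // placeholder URL
loadRequest.media.contentType = 'application/dash+xml';         // placeholder type
playerManager.load(loadRequest);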
I suggest that you do one or more of the following:
Enable gzip compression on all responses!
Stop playback with playerManager.stop() (maybe in the interceptor?)
Change how the licenseUrl is set
Here is how we set licenseUrl:
playerManager.setMediaPlaybackInfoHandler((loadRequestData, playbackConfig) => {
    playbackConfig.licenseUrl = loadRequestData.customData.licenseUrl;
    return playbackConfig;
});

BotBuilder type error: Cannot read property 'listen' of undefined

I'm following the official quick start of Microsoft Bot Builder (SDK v3) for Node.js: Create a bot with the Bot Builder SDK for Node.js
1- I made a new project with
npm init
2- then
npm install --save botbuilder@3.13.1
3- then I created a new file "app.js"
var builder = require('botbuilder');
var connector = new builder.ConsoleConnector().listen();
var bot = new builder.UniversalBot(connector, function (session) {
    session.send("You said: %s", session.message.text);
});
But when I run node app.js the following error is thrown:
var connector = builder.ConsoleConnector().listen(); TypeError:
Cannot read property 'listen' of undefined
You haven't assigned a storage option to your bot. The simplest option (for development only) is to use in-memory storage. Your code should look like this:
var builder = require('botbuilder');

// Bot Storage: Here we register the state storage for your bot.
// Default store: volatile in-memory store - Only for prototyping!
var inMemoryStorage = new builder.MemoryBotStorage();

var connector = new builder.ConsoleConnector().listen();
var bot = new builder.UniversalBot(connector, function(session) {
    session.send("You said: %s", session.message.text);
}).set('storage', inMemoryStorage); // Register in memory storage
That being said, please be aware that the v3 SDK is going to be DEPRECATED in the near future. It is advised that you start your development using the v4 Node SDK, instead. To get started, you can reference the docs here and review sample code here.
In short, in v4, you will utilize three files: index.js, bot.js, and consoleAdapter.js.
The index.js file essentially builds the server, APIs, etc.
const path = require('path');
const { ConsoleAdapter } = require('./consoleAdapter');

// load environment variables from .env file.
const ENV_FILE = path.join(__dirname, '.env');
require('dotenv').config({ path: ENV_FILE });

// Create the bot adapter, which is responsible for sending and receiving messages.
// We are using the ConsoleAdapter, which enables a bot you can chat with from within your terminal window.
const adapter = new ConsoleAdapter();

// Import our bot class.
const { EchoBot } = require('./bot');
const bot = new EchoBot();

// A call to adapter.listen tells the adapter to start listening for incoming messages and events, known as "activities."
// Activities are received as TurnContext objects by the handler function.
adapter.listen(async (context) => {
    await bot.onTurn(context);
});

// Emit a startup message with some instructions.
console.log('> Console EchoBot is online. I will repeat any message you send me!');
console.log('> Say "quit" to end.');
console.log(''); // Leave a blank line after instructions.
The bot.js file, generally, handles your bot's on[ActivityType] actions (e.g. onMessage()). In more complex bots, dialogs are split out into their own files.
class EchoBot {
    async onTurn(context) {
        // Check to see if this activity is an incoming message.
        // (It could theoretically be another type of activity.)
        if (context.activity.type === 'message' && context.activity.text) {
            // Check to see if the user sent a simple "quit" message.
            if (context.activity.text.toLowerCase() === 'quit') {
                // Send a reply before exiting.
                await context.sendActivity(`Bye!`);
                process.exit();
            } else {
                // Echo the message text back to the user.
                return context.sendActivity(`I heard you say "${ context.activity.text }"`);
            }
        }
    }
}

module.exports.EchoBot = EchoBot;
Lastly, the consoleAdapter.js file is tasked with capturing the console activity and translating that to the bot.
'use strict';
var __importStar = (this && this.__importStar) || function(mod) {
    if (mod && mod.__esModule) return mod;
    var result = {};
    if (mod != null)
        for (var k in mod)
            if (Object.hasOwnProperty.call(mod, k)) result[k] = mod[k];
    result['default'] = mod;
    return result;
};
Object.defineProperty(exports, '__esModule', { value: true });

const botbuilderCore = require('botbuilder-core');
const readline = __importStar(require('readline'));
const console = require('console');

/**
 * Lets a user communicate with a bot from a console window.
 */
class ConsoleAdapter extends botbuilderCore.BotAdapter {
    /**
     * Creates a new ConsoleAdapter instance.
     * @param reference (Optional) reference used to customize the address information of activities sent from the adapter.
     */
    constructor(reference) {
        super();
        this.nextId = 0;
        this.reference = Object.assign({
            channelId: 'console',
            user: { id: 'user', name: 'User1' },
            bot: { id: 'bot', name: 'Bot' },
            conversation: { id: 'convo1', name: '', isGroup: false },
            serviceUrl: ''
        }, reference);
    }

    /**
     * Begins listening to console input. A function will be returned that can be used to stop the
     * bot listening and therefore end the process.
     * @param logic Function which will be called each time a message is input by the user.
     */
    listen(logic) {
        const rl = this.createInterface({
            input: process.stdin,
            output: process.stdout,
            terminal: false
        });
        rl.on('line', (line) => {
            // Initialize activity
            const activity = botbuilderCore.TurnContext.applyConversationReference({
                type: botbuilderCore.ActivityTypes.Message,
                id: (this.nextId++).toString(),
                timestamp: new Date(),
                text: line
            }, this.reference, true);
            // Create context and run middleware pipe
            const context = new botbuilderCore.TurnContext(this, activity);
            this.runMiddleware(context, logic)
                .catch((err) => {
                    this.printError(err.toString());
                });
        });
        return () => {
            rl.close();
        };
    }

    /**
     * Lets a bot proactively message the user.
     * @param reference A `ConversationReference` saved during a previous message from a user. This can be calculated for any incoming activity using `TurnContext.getConversationReference(context.activity)`.
     * @param logic A function handler that will be called to perform the bot's logic after the adapter's middleware has been run.
     */
    continueConversation(reference, logic) {
        // Create context and run middleware pipe
        const activity = botbuilderCore.TurnContext.applyConversationReference({}, reference, true);
        const context = new botbuilderCore.TurnContext(this, activity);
        return this.runMiddleware(context, logic)
            .catch((err) => {
                this.printError(err.toString());
            });
    }

    /**
     * Logs a set of activities to the console.
     * @param context Context for the current turn of conversation with the user.
     * @param activities List of activities to send.
     */
    sendActivities(context, activities) {
        const that = this;
        // tslint:disable-next-line:promise-must-complete
        return new Promise((resolve, reject) => {
            const responses = [];
            function next(i) {
                if (i < activities.length) {
                    responses.push({});
                    const a = activities[i];
                    switch (a.type) {
                        case 'delay':
                            setTimeout(() => next(i + 1), a.value);
                            break;
                        case botbuilderCore.ActivityTypes.Message:
                            if (a.attachments && a.attachments.length > 0) {
                                const append = a.attachments.length === 1 ?
                                    `(1 attachment)` : `(${ a.attachments.length } attachments)`;
                                that.print(`${ a.text } ${ append }`);
                            } else {
                                that.print(a.text || '');
                            }
                            next(i + 1);
                            break;
                        default:
                            that.print(`[${ a.type }]`);
                            next(i + 1);
                            break;
                    }
                } else {
                    resolve(responses);
                }
            }
            next(0);
        });
    }

    /**
     * Not supported for the ConsoleAdapter. Calling this method or `TurnContext.updateActivity()`
     * will result in an error being returned.
     */
    updateActivity(context, activity) {
        return Promise.reject(new Error(`ConsoleAdapter.updateActivity(): not supported.`));
    }

    /**
     * Not supported for the ConsoleAdapter. Calling this method or `TurnContext.deleteActivity()`
     * will result in an error being returned.
     */
    deleteActivity(context, reference) {
        return Promise.reject(new Error(`ConsoleAdapter.deleteActivity(): not supported.`));
    }

    /**
     * Allows for mocking of the console interface in unit tests.
     * @param options Console interface options.
     */
    createInterface(options) {
        return readline.createInterface(options);
    }

    /**
     * Logs text to the console.
     * @param line Text to print.
     */
    print(line) {
        console.log(line);
    }

    /**
     * Logs an error to the console.
     * @param line Error text to print.
     */
    printError(line) {
        console.error(line);
    }
}
exports.ConsoleAdapter = ConsoleAdapter;
The above code is taken from the 01.console-echo sample of the Botbuilder-Samples repo. I removed some inline commentary. Please refer to the project for the complete code/files and associated remarks.
Hope this helps!

Create local notification in Xamarin iOS with HTTP request

I have a Xamarin.Forms app that supports notifications. I have done it on Android with a broadcast receiver; now I have to do notifications on iOS. My service depends on a REST API, so I want the iOS app to run an HTTP request every 60 seconds, get the data, and then show it as a notification. I have searched for many days but can't find a way to achieve this.
If this is impossible, can I use a NuGet package or something like that in the iOS project only, within the Xamarin.Forms solution?
content = new UNMutableNotificationContent();
content.Title = "Notification Title";
content.Subtitle = "Notification Subtitle";
content.Body = "This is the message body of the notification.";
content.Badge = 1;
content.CategoryIdentifier = "message";

var trigger = UNTimeIntervalNotificationTrigger.CreateTrigger(60, true);
var requestID = "sampleRequest";
var request = UNNotificationRequest.FromIdentifier(requestID, content, trigger);

UNUserNotificationCenter.Current.AddNotificationRequest(request, (err) =>
{
    if (err != null)
    {
        // Do something with error...
    }
});
Here is my code for generating a local notification on iOS:
var alertsAllowed = false;
UNUserNotificationCenter.Current.GetNotificationSettings((settings) =>
{
    alertsAllowed = (settings.AlertSetting == UNNotificationSetting.Enabled);
});

if (alertsAllowed)
{
    var content = new UNMutableNotificationContent();
    content.Title = "Incident Recorder";
    content.Subtitle = "Not Synchronised";
    content.Body = "There are one or more new incidents that have not been synchronised to the server.";

    var trigger = UNTimeIntervalNotificationTrigger.CreateTrigger(5, false);
    var requestID = "sampleRequest";
    var request = UNNotificationRequest.FromIdentifier(requestID, content, trigger);

    UNUserNotificationCenter.Current.AddNotificationRequest(request, (err) =>
    {
        if (err != null)
        {
            Console.WriteLine(err.LocalizedFailureReason);
        }
    });
}
The first parameter in CreateTrigger is how long to wait before the notification is generated. I notice you have 60 in yours. Also bear in mind that a notification will not appear if your app is in the foreground.

InvalidStateError: "setRemoteDescription needs to called before addIceCandidate" code: 11

I created a simple video calling app using WebRTC and WebSockets.
But when I run the code, the following error occurs:
DOMException [InvalidStateError: "setRemoteDescription needs to called before addIceCandidate"
code: 11
I don't know how to resolve this error.
Here is my code below:
var localVideo;
var remoteVideo;
var serverConnection;
var peerConnection;
var uuid;
var localStream;

var peerConnectionConfig = {
    'iceServers': [
        {'urls': 'stun:stun.services.mozilla.com'},
        {'urls': 'stun:stun.l.google.com:19302'},
    ]
};

function pageReady() {
    uuid = uuid();
    console.log('Inside Page Ready');
    localVideo = document.getElementById('localVideo');
    remoteVideo = document.getElementById('remoteVideo');
    serverConnection = new WebSocket('wss://' + window.location.hostname + ':8443');
    serverConnection.onmessage = gotMessageFromServer;
    var constraints = {
        video: true,
        audio: true,
    };
    if (navigator.mediaDevices.getUserMedia) {
        navigator.mediaDevices.getUserMedia(constraints).then(getUserMediaSuccess).catch(errorHandler);
    } else {
        alert('Your browser does not support getUserMedia API');
    }
}

function getUserMediaSuccess(stream) {
    localStream = stream;
    localVideo.src = window.URL.createObjectURL(stream);
}

function start(isCaller) {
    console.log('Inside isCaller');
    peerConnection = new RTCPeerConnection(peerConnectionConfig);
    peerConnection.onicecandidate = gotIceCandidate;
    peerConnection.onaddstream = gotRemoteStream;
    peerConnection.addStream(localStream);
    if (isCaller) {
        console.log('Inside Caller to create offer');
        peerConnection.createOffer().then(createdDescription).catch(errorHandler);
    }
}

function gotMessageFromServer(message) {
    console.log('Message from Server');
    if (!peerConnection) {
        console.log('Inside !Peer Conn');
        start(false);
    }
    var signal = JSON.parse(message.data);
    // Ignore messages from ourself
    if (signal.uuid == uuid) return;
    if (signal.sdp) {
        console.log('Inside SDP');
        peerConnection.setRemoteDescription(new RTCSessionDescription(signal.sdp)).then(function() {
            // Only create answers in response to offers
            if (signal.sdp.type == 'offer') {
                console.log('Before Create Answer');
                peerConnection.createAnswer().then(createdDescription).catch(errorHandler);
            }
        }).catch(errorHandler);
    } else if (signal.ice) {
        console.log('Inside Signal Ice');
        peerConnection.addIceCandidate(new RTCIceCandidate(signal.ice)).catch(errorHandler);
    }
}

function gotIceCandidate(event) {
    console.log('Inside Got Ice Candi');
    if (event.candidate != null) {
        serverConnection.send(JSON.stringify({'ice': event.candidate, 'uuid': uuid}));
    }
}

function createdDescription(description) {
    console.log('got description');
    peerConnection.setLocalDescription(description).then(function() {
        console.log('Inside Setting ');
        serverConnection.send(JSON.stringify({'sdp': peerConnection.localDescription, 'uuid': uuid}));
    }).catch(errorHandler);
}

function gotRemoteStream(event) {
    console.log('got remote stream');
    remoteVideo.src = window.URL.createObjectURL(event.stream);
}

function errorHandler(error) {
    console.log(error);
}

// Taken from http://stackoverflow.com/a/105074/515584
// Strictly speaking, it's not a real UUID, but it gets the job done here
function uuid() {
    function s4() {
        return Math.floor((1 + Math.random()) * 0x10000).toString(16).substring(1);
    }
    return s4() + s4() + '-' + s4() + '-' + s4() + '-' + s4() + '-' + s4() + s4() + s4();
}
This is my code; I don't know how to arrange the addIceCandidate and setRemoteDescription calls.
You need to make sure that
peerConnection.addIceCandidate(new RTCIceCandidate(signal.ice))
is called after the description is set.
You have a situation where you receive an ICE candidate and try to add it to the peerConnection before the peerConnection has finished setting the description.
I had a similar situation, and I created an array for storing candidates that arrived before setting the description completed, and a variable that tracks whether the description is set. If the description is set, I add candidates to the peerConnection; otherwise I add them to the array. (When you set your variable to true, you can also go through the array and add all stored candidates to the peerConnection.)
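A minimal sketch of that buffering approach, reusing the signal format from the question (remoteIsSet and pendingCandidates are names made up for illustration):
var pendingCandidates = [];
var remoteIsSet = false;

function handleSignal(signal) {
    if (signal.sdp) {
        peerConnection.setRemoteDescription(new RTCSessionDescription(signal.sdp)).then(function() {
            remoteIsSet = true;
            // Flush any candidates that arrived before the description was set.
            pendingCandidates.forEach(function(candidate) {
                peerConnection.addIceCandidate(new RTCIceCandidate(candidate)).catch(errorHandler);
            });
            pendingCandidates = [];
        }).catch(errorHandler);
    } else if (signal.ice) {
        if (remoteIsSet) {
            peerConnection.addIceCandidate(new RTCIceCandidate(signal.ice)).catch(errorHandler);
        } else {
            // Too early: buffer the candidate until setRemoteDescription resolves.
            pendingCandidates.push(signal.ice);
        }
    }
}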
The way WebRTC works (as far as I understand it) is that the two peers have to agree on how to communicate with each other, in this order: give an offer to your peer, get your peer's answer, and select an ICE candidate to communicate on; then, if you want, you can send your media streams for a video conversation.
For a good example of how to implement those functions, and in which order, you can visit https://github.com/alexan1/SignalRTC; the author has a good understanding of how to do this.
You might already have a solution to your problem by now, but I'm replying in case you do not.
EDIT: As I have been told, this solution is an anti-pattern and you should NOT implement it this way. For more info on how I solved it while still keeping a reasonable flow, follow this answer and its comment section: https://stackoverflow.com/a/57257449/779483
TLDR: Instead of calling addIceCandidate as soon as the signaling information arrives, add the candidates to a queue. After calling setRemoteDescription, go through the candidate queue and call addIceCandidate on each one.
--
From this answer I learned that we have to call setRemoteDescription(offer) before we add the ICE candidate data.
So, expanding on @Luxior's answer, I did the following:
When a signaling message with a candidate arrives:
Check if the remote description was set (via a boolean flag, i.e. remoteIsReady)
If it was, call addIceCandidate
If it wasn't, add it to a queue
After setRemoteDescription is called (in the answer signal or answer client action):
Call a method to go through the candidate queue and call addIceCandidate on each one
Set the boolean flag (remoteIsReady) to true
Empty the queue

Can websites view the iOS device name? [duplicate]

Is there a way to get the name of a mobile device (e.g. "John's iPhone") using JavaScript?
Maybe I wasn't very clear... what I meant is not whether it's an iPhone, iPad, etc., but the "device name" - for example, it can be "John's iPhone".
You can't do this through JavaScript for a web app running in a native browser - JavaScript generally doesn't have access to this kind of personally identifying data.
One possible way is to use a framework like PhoneGap, which may have an API to access the device name. But then you can only deploy your web site via an app store, so this could be very limiting depending on your use case.
Your best bet is to use the user agent:
e.g.
const ua = navigator.userAgent
const device = {
    iPad: /iPad/.test(ua),
    iPhone: /iPhone/.test(ua),
    Android4: /Android 4/.test(ua)
}
The object will allow you to write nice conditional logic such as if(device.iPad) { /* do stuff */ }
I'm working with mobile devices with embedded scanners. To be able to use the JavaScript libraries of different devices, and to avoid conflicts between the libraries of different manufacturers (Zebra, Honeywell, Datalogic, iOS, etc.), I needed to come up with a way to identify each device so I can load the proper library, and this is what I came up with. Enjoy.
getDeviceName: function () {
    var deviceName = '';
    var isMobile = {
        Android: function() {
            return navigator.userAgent.match(/Android/i);
        },
        Datalogic: function() {
            return navigator.userAgent.match(/DL-AXIS/i);
        },
        Bluebird: function() {
            return navigator.userAgent.match(/EF500/i);
        },
        Honeywell: function() {
            return navigator.userAgent.match(/CT50/i);
        },
        Zebra: function() {
            return navigator.userAgent.match(/TC70|TC55/i);
        },
        BlackBerry: function() {
            return navigator.userAgent.match(/BlackBerry/i);
        },
        iOS: function() {
            return navigator.userAgent.match(/iPhone|iPad|iPod/i);
        },
        Windows: function() {
            return navigator.userAgent.match(/IEMobile/i);
        },
        any: function() {
            return (isMobile.Datalogic() || isMobile.Bluebird() || isMobile.Honeywell() || isMobile.Zebra() || isMobile.BlackBerry() || isMobile.Android() || isMobile.iOS() || isMobile.Windows());
        }
    };

    if (isMobile.Datalogic())
        deviceName = 'Datalogic';
    else if (isMobile.Bluebird())
        deviceName = 'Bluebird';
    else if (isMobile.Honeywell())
        deviceName = 'Honeywell';
    else if (isMobile.Zebra())
        deviceName = 'Zebra';
    else if (isMobile.BlackBerry())
        deviceName = 'BlackBerry';
    else if (isMobile.iOS())
        deviceName = 'iOS';
    else if ((deviceName == '') && (isMobile.Android()))
        deviceName = 'Android';
    else if ((deviceName == '') && (isMobile.Windows()))
        deviceName = 'Windows';

    if (deviceName != '') {
        consoleLog('Devices information deviceName = ' + deviceName);
        consoleLog('Devices information any = ' + isMobile.any());
        consoleLog('navigator.userAgent = ' + navigator.userAgent);
    }
    return deviceName;
},
and this is an example of how it can be used:
initializeHandheldScanners: function () {
    if (DeviceCtrl.getDeviceName() == 'Zebra')
        DeviceCtrl.initializeSymbolScanner();
    if (DeviceCtrl.getDeviceName() == 'Honeywell')
        DeviceCtrl.initializeHoneywellScanner();
    if (DeviceCtrl.getDeviceName() == 'Datalogic')
        DeviceCtrl.initializeDatalogicScanner();
},
You can say thanks to Cory LaViska; I did this based on his work. Here is the link if you want to know more:
https://www.abeautifulsite.net/detecting-mobile-devices-with-javascript
You can use this snippet:
const getUA = () => {
    let device = "Unknown";
    const ua = {
        "Generic Linux": /Linux/i,
        "Android": /Android/i,
        "BlackBerry": /BlackBerry/i,
        "Bluebird": /EF500/i,
        "Chrome OS": /CrOS/i,
        "Datalogic": /DL-AXIS/i,
        "Honeywell": /CT50/i,
        "iPad": /iPad/i,
        "iPhone": /iPhone/i,
        "iPod": /iPod/i,
        "macOS": /Macintosh/i,
        "Windows": /IEMobile|Windows/i,
        "Zebra": /TC70|TC55/i,
    }
    Object.keys(ua).map(v => navigator.userAgent.match(ua[v]) && (device = v));
    return device;
}
console.log(getUA());
