BotBuilder type error: Cannot read property 'listen' of undefined - botframework

I'm following the official quick start of Microsoft Bot Builder (SDK v3) for Node.js: Create a bot with the Bot Builder SDK for Node.js
1- I made a new project with
npm init
2- then
npm install --save botbuilder@3.13.1
3- then I created a new file "app.js"
var builder = require('botbuilder');
var connector = new builder.ConsoleConnector().listen();
var bot = new builder.UniversalBot(connector, function (session) {
session.send("You said: %s", session.message.text);
});
But when I run node app.js the following error is thrown:
var connector=builder.ConsoleConnector().listen(); TypeError:
Cannot read property 'listen' of undefined

You haven't assigned a storage option to your bot. The simplest option (for development only) is to use in-memory storage. Your code should look like this:
var builder = require('botbuilder');
// Bot Storage: Here we register the state storage for your bot.
// Default store: volatile in-memory store - Only for prototyping!
var inMemoryStorage = new builder.MemoryBotStorage();
var connector = new builder.ConsoleConnector().listen();
var bot = new builder.UniversalBot(connector, function(session) {
session.send("You said: %s", session.message.text);
}).set('storage', inMemoryStorage); // Register in memory storage
That being said, please be aware that the v3 SDK is going to be DEPRECATED in the near future. It is advised that you start your development using the v4 Node SDK, instead. To get started, you can reference the docs here and review sample code here.
In short, in v4, you will utilize three files: index.js, bot.js, and consoleAdapter.js.
The index.js file essentially builds the server, APIs, etc.
const path = require('path');
const { ConsoleAdapter } = require('./consoleAdapter');
// load environment variables from .env file.
const ENV_FILE = path.join(__dirname, '.env');
require('dotenv').config({ path: ENV_FILE });
// Create the bot adapter, which is responsible for sending and receiving messages.
// We are using the ConsoleAdapter, which enables a bot you can chat with from within your terminal window.
const adapter = new ConsoleAdapter();
// Import our bot class.
const { EchoBot } = require('./bot');
const bot = new EchoBot();
// A call to adapter.listen tells the adapter to start listening for incoming messages and events, known as "activities."
// Activities are received as TurnContext objects by the handler function.
adapter.listen(async (context) => {
    await bot.onTurn(context);
});
// Emit a startup message with some instructions.
console.log('> Console EchoBot is online. I will repeat any message you send me!');
console.log('> Say "quit" to end.');
console.log(''); // Leave a blank line after instructions.
The bot.js file generally handles your bot's on[ActivityType] actions (e.g. onMessage()). In more complex bots, dialogs are split out into their own files.
class EchoBot {
    async onTurn(context) {
        // Check to see if this activity is an incoming message.
        // (It could theoretically be another type of activity.)
        if (context.activity.type === 'message' && context.activity.text) {
            // Check to see if the user sent a simple "quit" message.
            if (context.activity.text.toLowerCase() === 'quit') {
                // Send a reply, then shut down.
                await context.sendActivity(`Bye!`);
                process.exit();
            } else {
                // Echo the message text back to the user.
                return context.sendActivity(`I heard you say "${ context.activity.text }"`);
            }
        }
    }
}
module.exports.EchoBot = EchoBot;
Lastly, the consoleAdapter.js file is tasked with capturing the console activity and translating that to the bot.
'use strict';
var __importStar = (this && this.__importStar) || function(mod) {
if(mod && mod.__esModule) return mod;
var result = {};
if(mod != null)
for(var k in mod)
if(Object.hasOwnProperty.call(mod, k)) result[k] = mod[k];
result['default'] = mod;
return result;
};
Object.defineProperty(exports, '__esModule', {
value: true
});
const botbuilderCore = require('botbuilder-core');
const readline = __importStar(require('readline'));
const console = require('console');
/**
* Lets a user communicate with a bot from a console window.
*
*/
class ConsoleAdapter extends botbuilderCore.BotAdapter {
/**
* Creates a new ConsoleAdapter instance.
* @param reference (Optional) reference used to customize the address information of activities sent from the adapter.
*/
constructor(reference) {
super();
this.nextId = 0;
this.reference = Object.assign({
channelId: 'console',
user: {
id: 'user',
name: 'User1'
},
bot: {
id: 'bot',
name: 'Bot'
},
conversation: {
id: 'convo1',
name: '',
isGroup: false
},
serviceUrl: ''
}, reference);
}
/**
* Begins listening to console input. A function will be returned that can be used to stop the
* bot listening and therefore end the process.
*
* @param logic Function which will be called each time a message is input by the user.
*/
listen(logic) {
const rl = this.createInterface({
input: process.stdin,
output: process.stdout,
terminal: false
});
rl.on('line', (line) => {
// Initialize activity
const activity = botbuilderCore.TurnContext.applyConversationReference({
type: botbuilderCore.ActivityTypes.Message,
id: (this.nextId++).toString(),
timestamp: new Date(),
text: line
}, this.reference, true);
// Create context and run middleware pipe
const context = new botbuilderCore.TurnContext(this, activity);
this.runMiddleware(context, logic)
.catch((err) => {
this.printError(err.toString());
});
});
return () => {
rl.close();
};
}
/**
* Lets a bot proactively message the user.
*
* @param reference A `ConversationReference` saved during a previous message from a user. This can be calculated for any incoming activity using `TurnContext.getConversationReference(context.activity)`.
* @param logic A function handler that will be called to perform the bot's logic after the adapter's middleware has been run.
*/
continueConversation(reference, logic) {
// Create context and run middleware pipe
const activity = botbuilderCore.TurnContext.applyConversationReference({}, reference, true);
const context = new botbuilderCore.TurnContext(this, activity);
return this.runMiddleware(context, logic)
.catch((err) => {
this.printError(err.toString());
});
}
/**
* Logs a set of activities to the console.
*
* @param context Context for the current turn of conversation with the user.
* @param activities List of activities to send.
*/
sendActivities(context, activities) {
const that = this;
// tslint:disable-next-line:promise-must-complete
return new Promise((resolve, reject) => {
const responses = [];
function next(i) {
if(i < activities.length) {
responses.push({});
const a = activities[i];
switch(a.type) {
case 'delay':
setTimeout(() => next(i + 1), a.value);
break;
case botbuilderCore.ActivityTypes.Message:
if(a.attachments && a.attachments.length > 0) {
const append = a.attachments.length === 1 ?
`(1 attachment)` : `(${ a.attachments.length } attachments)`;
that.print(`${ a.text } ${ append }`);
} else {
that.print(a.text || '');
}
next(i + 1);
break;
default:
that.print(`[${ a.type }]`);
next(i + 1);
break;
}
} else {
resolve(responses);
}
}
next(0);
});
}
/**
* Not supported for the ConsoleAdapter. Calling this method or `TurnContext.updateActivity()`
* will result in an error being returned.
*/
updateActivity(context, activity) {
return Promise.reject(new Error(`ConsoleAdapter.updateActivity(): not supported.`));
}
/**
* Not supported for the ConsoleAdapter. Calling this method or `TurnContext.deleteActivity()`
* will result in an error being returned.
*/
deleteActivity(context, reference) {
return Promise.reject(new Error(`ConsoleAdapter.deleteActivity(): not supported.`));
}
/**
* Allows for mocking of the console interface in unit tests.
* @param options Console interface options.
*/
createInterface(options) {
return readline.createInterface(options);
}
/**
* Logs text to the console.
* @param line Text to print.
*/
print(line) {
console.log(line);
}
/**
* Logs an error to the console.
* @param line Error text to print.
*/
printError(line) {
console.error(line);
}
}
exports.ConsoleAdapter = ConsoleAdapter;
The above code is taken from the 01.console-echo sample of the Botbuilder-Samples repo. I removed some inline commentary. Please refer to the project for the complete code/files and associated remarks.
Hope this helps!

Related

How does request.context in Parse Cloud Code work?

Back in Parse Server 3.0 update, there was an addition of request.context to pass data between BeforeSave and AfterSave as documented here:
https://docs.parseplatform.org/cloudcode/guide/#using-request-context
However, I'm having a bit of trouble understanding how and when Parse runs this code in the example.
const beforeSave = function beforeSave(request) {
const { object: role } = request;
// Get users that will be added to the users relation.
const usersOp = role.op('users');
if (usersOp && usersOp.relationsToAdd.length > 0) {
// add the users being added to the request context
request.context = { buyers: usersOp.relationsToAdd };
}
};
const afterSave = function afterSave(request) {
const { object: role, context } = request;
if (context && context.buyers) {
const purchasedItem = getItemFromRole(role);
const promises = context.buyers.map(emailBuyer.bind(null, purchasedItem));
item.increment('orderCount', context.buyers.length);
promises.push(item.save(null, { useMasterKey: true }));
Promise.all(promises).catch(request.log.error.bind(request.log));
}
};
In other examples, cloud code functions are registered via Parse.Cloud.beforeSave or Parse.Cloud.afterSave. In the example above, the function beforeSave is instead assigned to a const beforeSave.
Why was this done, and is this supposed to be placed at the top level of main.js or inside another function?
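For reference, a minimal sketch of how such handler constants are normally wired up (based on the public Parse.Cloud API; the Role class is assumed here because the example operates on a role object):
// Hypothetical registration at the top level of main.js. The const
// function expressions above do nothing by themselves until they are
// handed to Parse.Cloud, which invokes them around each save of the class.
Parse.Cloud.beforeSave(Parse.Role, beforeSave);
Parse.Cloud.afterSave(Parse.Role, afterSave);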

Alexa Custom skill in AWS lambda not recognizing Alexa.getSupportedInterfaces [Error handled: Alexa.getSupportedInterfaces is not a function]

Trying to use Alexa Presentation Language (APL) features in an AWS-hosted custom lambda function. The intent handlers are firing, but when I add Alexa.getSupportedInterfaces it fails.
The message is "Error handled: Alexa.getSupportedInterfaces is not a function"
// 1. Intent Handlers =============================================
const LaunchRequest_Handler = {
canHandle(handlerInput) {
const request = handlerInput.requestEnvelope.request;
return request.type === 'LaunchRequest';
},
handle(handlerInput) {
let responseBuilder = handlerInput.responseBuilder;
let speakOutput = 'Welcome to test Bot. ';
// let skillTitle = capitalize(invocationName);
// Add APL directive to response
if (Alexa1.getSupportedInterfaces(handlerInput.requestEnvelope)['Alexa.Presentation.APL']) {
// Add the RenderDocument directive to the responseBuilder
responseBuilder.addDirective({
type: 'Alexa.Presentation.APL.RenderDocument',
token: Echo_Token,
document: Customer
});
// Tailor the speech for a device with a screen.
speakOutput += " You should now also see my greeting on the screen."
} else {
// User's device does not support APL, so tailor the speech to this situation
speakOutput += " This example would be more interesting on a device with a screen, such as an Echo Show or Fire TV.";
}
return responseBuilder
.speak(speakOutput)
.withShouldEndSession(false)
.reprompt('try again, ' + speakOutput)
.withSimpleCard("CustomerSupport!", "CustomerSupport)")
// .reprompt('add a reprompt if you want to keep the session open for the user to respond')
//.withStandardCard('Welcome!',
// 'Hello!\nThis is a card for your skill, ' + skillTitle,
// welcomeCardImg.smallImageUrl, welcomeCardImg.largeImageUrl)
.getResponse();
},
};
Instead of using the below condition:
Alexa1.getSupportedInterfaces(handlerInput.requestEnvelope)['Alexa.Presentation.APL']
you can use the below condition to check if the device supports APL:
if (supportsAPL(handlerInput))
Make sure you include the function definitions below in your index file:
function supportsAPL(handlerInput) {
const supportedInterfaces = handlerInput.requestEnvelope.context.System.device.supportedInterfaces;
const aplInterface = supportedInterfaces['Alexa.Presentation.APL'];
return aplInterface != null && aplInterface != undefined;
}
function supportsAPLT(handlerInput) {
const supportedInterfaces = handlerInput.requestEnvelope.context.System.device.supportedInterfaces;
const aplInterface = supportedInterfaces['Alexa.Presentation.APLT'];
return aplInterface != null && aplInterface != undefined;
}
Hope that helps as it worked for me.

Custom google cast receiver stuck in "Load is in progress"

My custom v3 CAF receiver app successfully plays the first few live & VOD assets. After that, it gets into a state where media commands are being queued because "Load is in progress". It is still (successfully) fetching manifests, but MEDIA_STATUS remains "buffering". The log then shows:
[ 4.537s] [cast.receiver.MediaManager] Load is in progress, media command is being queued.
[ 5.893s] [cast.receiver.MediaManager] Buffering state changed, isPlayerBuffering: true old time: 0 current time: 0
[ 5.897s] [cast.receiver.MediaManager] Sending broadcast status message
CastContext Core event: {"type":"MEDIA_STATUS","mediaStatus":{"mediaSessionId":1,"playbackRate":1,"playerState":"BUFFERING","currentTime":0,"supportedMediaCommands":12303,"volume":{"level":1,"muted":false},"currentItemId":1,"repeatMode":"REPEAT_OFF","liveSeekableRange":{"start":0,"end":20.000999927520752,"isMovingWindow":true,"isLiveDone":false}}}
CastContext MEDIA_STATUS event: {"type":"MEDIA_STATUS","mediaStatus":{"mediaSessionId":1,"playbackRate":1,"playerState":"BUFFERING","currentTime":0,"supportedMediaCommands":12303,"volume":{"level":1,"muted":false},"currentItemId":1,"repeatMode":"REPEAT_OFF","liveSeekableRange":{"start":0,"end":20.000999927520752,"isMovingWindow":true,"isLiveDone":false}}}
Fetch finished loading: GET "(manifest url)".
No errors are shown.
Even after closing and restarting the cast session, the issue remains. The cast device itself has to be rebooted to resolve it. It looks like data is kept between sessions.
It could be important to note that the cast receiver app is not published yet. It is hosted on a local network.
My questions are:
What could be the cause of this stuck behavior?
Is there any session data kept between sessions?
How can I fully reset the cast receiver app without having to restart the cast device?
The receiver app itself is very basic. Other than license wrapping it resembles the vanilla example app:
import { observable, runInAction } from "mobx"; // assumed import: observable.box() and runInAction below are MobX helpers

const { cast } = window;
const TAG = "CastContext";
class CastStore {
static instance = null;
error = observable.box();
framerate = observable.box();
static getInstance() {
if (!CastStore.instance) {
CastStore.instance = new CastStore();
}
return CastStore.instance;
}
get debugLog() {
return this.framerate.get();
}
get errorLog() {
return this.error.get();
}
init() {
const context = cast.framework.CastReceiverContext.getInstance();
const playerManager = context.getPlayerManager();
playerManager.addEventListener(
cast.framework.events.category.CORE,
event => {
console.log(TAG, "Core event: " + JSON.stringify(event));
}
);
playerManager.addEventListener(
cast.framework.events.EventType.MEDIA_STATUS,
event => {
console.log(TAG, "MEDIA_STATUS event: " + JSON.stringify(event));
}
);
playerManager.addEventListener(
cast.framework.events.EventType.BITRATE_CHANGED,
event => {
console.log(TAG, "BITRATE_CHANGED event: " + JSON.stringify(event));
runInAction(() => {
this.framerate.set(`bitrate: ${event.totalBitrate}`);
});
}
);
playerManager.addEventListener(
cast.framework.events.EventType.ERROR,
event => {
console.log(TAG, "ERROR event: " + JSON.stringify(event));
runInAction(() => {
this.error.set(`Error detailedErrorCode: ${event.detailedErrorCode}`);
});
}
);
// intercept the LOAD request to be able to read in a contentId and get data.
this.loadHandler = new LoadHandler();
playerManager.setMessageInterceptor(
cast.framework.messages.MessageType.LOAD,
loadRequestData => {
this.framerate.set(null);
this.error.set(null);
console.log(TAG, "LOAD message: " + JSON.stringify(loadRequestData));
if (!loadRequestData.media) {
const error = new cast.framework.messages.ErrorData(
cast.framework.messages.ErrorType.LOAD_CANCELLED
);
error.reason = cast.framework.messages.ErrorReason.INVALID_PARAM;
return error;
}
if (!loadRequestData.media.entity) {
// Copy the value from contentId for legacy reasons if needed
loadRequestData.media.entity = loadRequestData.media.contentId;
}
// notify loadMedia
this.loadHandler.onLoadMedia(loadRequestData, playerManager);
return loadRequestData;
}
);
const playbackConfig = new cast.framework.PlaybackConfig();
// intercept license requests & responses
playbackConfig.licenseRequestHandler = requestInfo => {
const challenge = requestInfo.content;
const { castToken } = this.loadHandler;
const wrappedRequest = DrmLicenseHelper.wrapLicenseRequest(
challenge,
castToken
);
requestInfo.content = wrappedRequest;
return requestInfo;
};
playbackConfig.licenseHandler = license => {
const unwrappedLicense = DrmLicenseHelper.unwrapLicenseResponse(license);
return unwrappedLicense;
};
// Duration of buffered media in seconds to start/resume playback after auto-paused due to buffering; default is 10.
playbackConfig.autoResumeDuration = 4;
// Initial bandwidth in bits per second, used for the first bitrate selection.
playbackConfig.initialBandwidth = 1200000;
context.start({
touchScreenOptimizedApp: true,
playbackConfig: playbackConfig,
supportedCommands: cast.framework.messages.Command.ALL_BASIC_MEDIA
});
}
}
The LoadHandler optionally adds a proxy (I'm using a cors-anywhere proxy to remove the origin header) and stores the castToken for license requests:
class LoadHandler {
CORS_USE_PROXY = true;
CORS_PROXY = "http://192.168.0.127:8003";
castToken = null;
onLoadMedia(loadRequestData, playerManager) {
if (!loadRequestData) {
return;
}
const { media } = loadRequestData;
// disable cors for local testing
if (this.CORS_USE_PROXY) {
media.contentId = `${this.CORS_PROXY}/${media.contentId}`;
}
const { customData } = media;
if (customData) {
const { licenseUrl, castToken } = customData;
// install cast token
this.castToken = castToken;
// handle license URL
if (licenseUrl) {
const playbackConfig = playerManager.getPlaybackConfig();
playbackConfig.licenseUrl = licenseUrl;
const { contentType } = loadRequestData.media;
// Dash: "application/dash+xml"
playbackConfig.protectionSystem = cast.framework.ContentProtection.WIDEVINE;
// disable cors for local testing
if (this.CORS_USE_PROXY) {
playbackConfig.licenseUrl = `${this.CORS_PROXY}/${licenseUrl}`;
}
}
}
}
}
The DrmLicenseHelper wraps the license request to add the castToken and base64-encodes the whole thing. The license response is base64-decoded and unwrapped later on:
export default class DrmLicenseHelper {
static wrapLicenseRequest(challenge, castToken) {
const wrapped = {};
wrapped.AuthToken = castToken;
wrapped.Payload = fromByteArray(new Uint8Array(challenge));
const wrappedJson = JSON.stringify(wrapped);
const wrappedLicenseRequest = fromByteArray(
new TextEncoder().encode(wrappedJson)
);
return wrappedLicenseRequest;
}
static unwrapLicenseResponse(license) {
try {
const responseString = String.fromCharCode.apply(String, license);
const responseJson = JSON.parse(responseString);
const rawLicenseBase64 = responseJson.license;
const decodedLicense = toByteArray(rawLicenseBase64);
return decodedLicense;
} catch (e) {
return license;
}
}
}
The handler for cast.framework.messages.MessageType.LOAD should always return one of:
the (possibly modified) loadRequestData, or
a promise for the (possibly modified) loadRequestData, or
null to discard the load request (I'm not 100% sure this works for load requests).
If you do not do this, the load request stays in the queue and any new request is queued after the initial one.
In your handler, you return an error if !loadRequestData.media, which will get you into that state. Another possibility is an exception in the load request handler, which will also get you into that state.
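Put together, a defensive version of the interceptor might look like this (a sketch reusing the names from your init(); it resolves every code path with loadRequestData or null instead of returning an ErrorData object or letting an exception escape):
playerManager.setMessageInterceptor(
    cast.framework.messages.MessageType.LOAD,
    loadRequestData => {
        try {
            if (!loadRequestData.media) {
                // Discard the request instead of returning an ErrorData object.
                return null;
            }
            this.loadHandler.onLoadMedia(loadRequestData, playerManager);
            // Always hand the (possibly modified) request back to the player.
            return loadRequestData;
        } catch (e) {
            // An exception escaping the interceptor also leaves the load queued.
            console.error(TAG, "LOAD interceptor failed: " + e);
            return null;
        }
    }
);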
We take a different approach and send everything possible through sendMessage; when we load content, we create a new cast.framework.messages.LoadRequestData() and pass it to playerManager.load(loadRequest).
But I guess you might be testing this on an integrated Chromecast; we see these problems as well.
I suggest you try one or more of the following:
Enable gzip compression on all responses!
Stop playback with playerManager.stop() (maybe in the interceptor? see the sketch after the licenseUrl example below).
Change how the licenseUrl is set.
How we set licenseUrl:
playerManager.setMediaPlaybackInfoHandler((loadRequestData, playbackConfig) => {
playbackConfig.licenseUrl = loadRequestData.customData.licenseUrl;
return playbackConfig;
}
);
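For the playerManager.stop() suggestion, a sketch of what that could look like in the LOAD interceptor (untested; stop() is a standard PlayerManager call):
playerManager.setMessageInterceptor(
    cast.framework.messages.MessageType.LOAD,
    loadRequestData => {
        // Tear down whatever is currently playing before starting the new load.
        playerManager.stop();
        return loadRequestData;
    }
);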

AWS Lambda logging through Serilog UDP sink and logstash silently fails

We have a .NET Core 2.1 AWS Lambda that I'm trying to hook into our existing logging system.
I'm trying to log through Serilog using a UDP sink to our logstash instance for ingestion into our ElasticSearch logging database that is hosted on a private VPC. Running locally through a console logs fine, both to the console itself and through UDP into Elastic. However, when it runs as a lambda, it only logs to the console (i.e. CloudWatch), and doesn't output anything indicating that anything is wrong. Possibly because UDP is stateless?
NuGet packages and versions:
Serilog 2.7.1
Serilog.Sinks.Udp 5.0.1
Here is the logging code we're using:
public static void Configure(string udpHost, int udpPort, string environment)
{
var udpFormatter = new JsonFormatter(renderMessage: true);
var loggerConfig = new LoggerConfiguration()
.Enrich.FromLogContext()
.MinimumLevel.Information()
.Enrich.WithProperty("applicationName", Assembly.GetExecutingAssembly().GetName().Name)
.Enrich.WithProperty("applicationVersion", Assembly.GetExecutingAssembly().GetName().Version.ToString())
.Enrich.WithProperty("tags", environment);
loggerConfig
.WriteTo.Console(outputTemplate: "[{Level:u}]: {Message}{NewLine}{Exception}")
.WriteTo.Udp(udpHost, udpPort, udpFormatter);
var logger = loggerConfig.CreateLogger();
Serilog.Log.Logger = logger;
Serilog.Debugging.SelfLog.Enable(Console.Error);
}
// this is output in the console from the lambda, but doesn't appear in the Database from the lambda
// when run locally, appears in both
Serilog.Log.Logger.Information("Hello from Serilog!");
...
// at end of lambda
Serilog.Log.CloseAndFlush();
And here is our UDP input on logstash:
udp {
port => 5000
tags => [ 'systest', 'serilog-nested' ]
codec => json
}
Does anyone know how I might go about resolving this? Or even just seeing what specifically is wrong so that I can start to find a solution.
Things tried so far include:
Pinging logstash from the lambda - impossible, lambda doesn't have ICMP
Various things to try and get the UDP sink to output errors, as seen above, various attempts at that. Even putting in a completely fake address yields no error though
Adding the lambda to a VPC where I know logging is possible from
Sleeping at the end of the lambda, so that the logs have time to go through before the lambda exits
Checking the logstash logs to see if anything looks odd. It doesn't really. And the fact that local runs get through fine makes me think it's not that.
Using UDP directly (see the dgram sketch after this list). It doesn't seem to reach the server. I'm not sure if that's a connectivity issue or just UDP itself from a lambda.
Lots of cursing and swearing
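For the "using UDP directly" test, here is a minimal sketch using Node's built-in dgram module (the host and port are placeholders for your logstash endpoint). Because UDP is fire-and-forget, a clean callback only proves the datagram left the process, not that logstash received it:
const dgram = require('dgram');

const client = dgram.createSocket('udp4');
const message = Buffer.from('{"message":"udp test from lambda"}');
// 'logstash.example.internal' and 5000 are placeholders for your endpoint.
client.send(message, 5000, 'logstash.example.internal', (err) => {
    if (err) {
        console.error('UDP send failed locally:', err);
    } else {
        console.log('UDP send callback fired without a local error');
    }
    client.close();
});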
In line with my comment above, you can create a log subscription and stream to ES like so. I'm aware that this is NodeJS, so it's not quite the right answer, but you might be able to figure it out from here:
/* eslint-disable */
// Eslint disabled as this is adapted AWS code.
const zlib = require('zlib')
const { Client } = require('@elastic/elasticsearch')
const elasticsearch = new Client({ ES_CLUSTER_DETAILS })
/**
* This is an example function to stream CloudWatch logs to ElasticSearch.
* @param event
* @param context
* @param callback
*/
export default (event, context, callback) => {
context.callbackWaitsForEmptyEventLoop = true
const payload = Buffer.from(event.awslogs.data, 'base64') // new Buffer() is deprecated
zlib.gunzip(payload, (err, result) => {
if (err) {
return callback(err)
}
const logObject = JSON.parse(result.toString('utf8'))
const elasticsearchBulkData = transform(logObject)
const params = { body: [] }
params.body.push(elasticsearchBulkData)
elasticsearch.bulk(params, (err, resp) => { // use the Client created above ('esClient' was undefined)
if (err) {
return callback(err)
}
return callback(null, 'success')
})
})
}
function transform(payload) {
if (payload.messageType === 'CONTROL_MESSAGE') {
return null
}
let bulkRequestBody = ''
payload.logEvents.forEach((logEvent) => {
const timestamp = new Date(1 * logEvent.timestamp)
// index name format: cwl-YYYY.MM.DD
const indexName = [
`cwl-${process.env.NODE_ENV}-${timestamp.getUTCFullYear()}`, // year
(`0${timestamp.getUTCMonth() + 1}`).slice(-2), // month
(`0${timestamp.getUTCDate()}`).slice(-2), // day
].join('.')
const source = buildSource(logEvent.message, logEvent.extractedFields)
source['@id'] = logEvent.id
source['@timestamp'] = new Date(1 * logEvent.timestamp).toISOString()
source['@message'] = logEvent.message
source['@owner'] = payload.owner
source['@log_group'] = payload.logGroup
source['@log_stream'] = payload.logStream
const action = { index: {} }
action.index._index = indexName
action.index._type = 'lambdaLogs'
action.index._id = logEvent.id
bulkRequestBody += `${[
JSON.stringify(action),
JSON.stringify(source),
].join('\n')}\n`
})
return bulkRequestBody
}
function buildSource(message, extractedFields) {
if (extractedFields) {
const source = {}
for (const key in extractedFields) {
if (extractedFields.hasOwnProperty(key) && extractedFields[key]) {
const value = extractedFields[key]
if (isNumeric(value)) {
source[key] = 1 * value
continue
}
const jsonSubString = extractJson(value)
if (jsonSubString !== null) {
source[`$${key}`] = JSON.parse(jsonSubString)
}
source[key] = value
}
}
return source
}
const jsonSubString = extractJson(message)
if (jsonSubString !== null) {
return JSON.parse(jsonSubString)
}
return {}
}
function extractJson(message) {
const jsonStart = message.indexOf('{')
if (jsonStart < 0) return null
const jsonSubString = message.substring(jsonStart)
return isValidJson(jsonSubString) ? jsonSubString : null
}
function isValidJson(message) {
try {
JSON.parse(message)
} catch (e) { return false }
return true
}
function isNumeric(n) {
return !isNaN(parseFloat(n)) && isFinite(n)
}
One of my colleagues helped me get most of the way there, and then I managed to figure out the last bit.
I updated Serilog.Sinks.Udp to 6.0.0
I updated the UDP setup code to use the AddressFamily.InterNetwork specifier, which I don't believe was available in 5.0.1.
I removed enriching our log messages with "tags", since I believe its presence on the UDP endpoint somehow caused some kind of clash; I've seen it stop logging without a trace before.
And voila!
Here's the new logging setup code:
loggerConfig
.WriteTo.Udp(udpHost, udpPort, AddressFamily.InterNetwork, udpFormatter)
.WriteTo.Console(outputTemplate: "[{Level:u}]: {Message}{NewLine}{Exception}");

Monaca Debugger not handling persistent storage correctly

All,
I might have found a bug in Monaca Debugger.
I am using the following code to get a persistent or temporary storage:
window.requestFileSystem(__storageType, _size,
function(_fileSystem) { // Success
logger.info("initFilesystem: _fileSystem.root: " + _fileSystem.root.toURL());
_deferred.resolve(_fileSystem);
}, function(_error) { // Failure
_deferred.reject(_error);
});
Where the files are correctly managed on both Android and iOS.
However, the path is not correct on Android with persistent storage, where I assume it is a bug in how the Monaca Debugger passes paths to the Cordova plugin (note that I have not yet tried to deploy to Android and verify without the debugger).
See below for the paths I get on Android and iOS; on Android I only get the root directory for persistent storage (note that in all examples the files are managed correctly, it is only the path for Android persistent storage that is not correct).
Is this a bug, or a feature I need to manage in code?
Persistent file paths:
iOS: _fileSystem.root: file:///var/mobile/Containers/Data/Application/43F2E2CE-5F06-43F0-AE1C-4DD35EFF723A/Documents/
Android: _fileSystem.root: file:///storage/emulated/0/
Temporary file paths:
iOS: _fileSystem.root: file:///var/mobile/Containers/Data/Application/43F2E2CE-5F06-43F0-AE1C-4DD35EFF723A/tmp/
Android: _fileSystem.root: file:///storage/emulated/0/Android/data/mobi.monaca.debugger/cache/
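For comparison, the directories that cordova-plugin-file itself resolves can be logged next to the requestFileSystem root (a sketch, assuming the plugin is installed). Notably, file:///storage/emulated/0/ is what cordova.file.externalRootDirectory points to on Android, so the persistent filesystem may be mapped to external storage rather than the app's private directory:
document.addEventListener('deviceready', function() {
    // Directories resolved by cordova-plugin-file.
    console.log('dataDirectory: ' + cordova.file.dataDirectory);         // app-private persistent storage
    console.log('cacheDirectory: ' + cordova.file.cacheDirectory);       // temporary storage
    console.log('externalRootDirectory: ' + cordova.file.externalRootDirectory); // Android only; null elsewhere
}, false);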
Versions:
iOS version: 8.1.2
iOS Monaca Debugger version: 3.1.1
Android version: 4.2.2
Android Monaca Debugger version: 3.1.1
Update 2015-02-04:
The file object used (simplified code):
function FileIO() {
if (false === (this instanceof FileIO)) { return new FileIO(); } // Enforce that every time a constructor function is called, this function is invoked properly using the new operator.
/** @public @method initFilesystem.
* @desc Initialize filesystem
* @param {boolean} Persistent = true, Temporary = false. Must be boolean. Defaults to true.
* @param {size} Size in bytes requested. Must be number. Defaults to 0 bytes.
* @return Promise (see examples above). */
FileIO.prototype.initFilesystem = function(_persistent, _size) {
/** @private */ var _deferred = new jQuery.Deferred(); // Used for object Promise.
/** @private */ var _err = { code: null };
if (__status != STATUS_INITIALIZED) { // Only perform if not already initialized.
if (typeof _persistent !== 'boolean') { _persistent = true; } // Value must be boolean. Default to true if not set correctly.
if (typeof _size !== 'number') { _size = 0; } // Value must be number. Default to 0 bytes if not set correctly.
(_persistent == true) ? __storageType = window.PERSISTENT : __storageType = window.TEMPORARY;
__status = STATUS_INITIALIZING;
try {
window.requestFileSystem = window.requestFileSystem || window.webkitRequestFileSystem;
window.storageInfo = window.storageInfo || window.webkitStorageInfo;
window.requestFileSystem(__storageType, _size,
function(_fileSystem) { // Success
__fileSystem = _fileSystem;
__status = STATUS_INITIALIZED;
_deferred.resolve(_fileSystem);
}, function(_error) { // Failure
__status = STATUS_NO_FILESYSTEM;
_deferred.reject(_error);
});
} catch (_error) {
__status = STATUS_NO_FILESYSTEM;
_deferred.reject(_error);
}
} else {
_err.code = FILESYSTEM_ALREADY_INITIALIZED;
_deferred.reject(_err);
}
return _deferred.promise();
};
/** @public @method createFile.
* @desc Create file.
* @param {string} (path/)filename. Must be string. Defaults to "file.txt".
* @return Promise (see examples above). */
FileIO.prototype.createFile = function(_fileName) {
/** @private */ var _err = { code: null };
/** @private */ var _deferred = new jQuery.Deferred(); // Used for object Promise.
if (typeof _fileName !== 'string') { _fileName = "file.txt"; } // Default to "file.txt" if not correct.
if (__status != STATUS_NO_FILESYSTEM) { // Object must be initialized.
try {
__fileSystem.root.getFile(_fileName, { create: true, exclusive: true },
function(_fileEntry) { // Success
_deferred.resolve(_fileEntry);
}, function(_error) { // Failure
_deferred.reject(_error);
});
} catch (_error) {
_deferred.reject(_error);
}
} else {
_err.code = FILESYSTEM_NOT_INITIALIZED;
_deferred.reject(_err);
}
return _deferred.promise();
};
/** @public @constructor init.
* @desc Constructor for the class.
* @param N/A.
* @return N/A. */
FileIO.prototype.init = function() {
};
// Construct
this.init();
}
Initialization:
var appFileSystem = new FileIO();
appFileSystem.initFilesystem(true); // Persistent.
appFileSystem.createFile("testfile.txt").done(function(fileEntry) { console.log("createFile: done. fileEntry: "+JSON.stringify(fileEntry)); }).fail(function(error) { console.log("createFile: fail. error.code: "+error.code); });
With the above, the FileIO object is created, the filesystem is initialized, and the file is created. The persistent storage is placed in the root directory (tested and verified for Cordova 4 according to the latest Monaca update), while non-persistent storage is placed in the right directory.
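As an aside, since initFilesystem returns a promise, the createFile call can be chained on it to rule out any race with the asynchronous initialization (a sketch using the object's own API):
var appFileSystem = new FileIO();
appFileSystem.initFilesystem(true) // Persistent.
    .done(function(fileSystem) {
        appFileSystem.createFile("testfile.txt")
            .done(function(fileEntry) { console.log("createFile: done. fileEntry: " + JSON.stringify(fileEntry)); })
            .fail(function(error) { console.log("createFile: fail. error.code: " + error.code); });
    })
    .fail(function(error) { console.log("initFilesystem: fail. error: " + JSON.stringify(error)); });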
