Trying to open an app in the Play Store (Android) - NativeScript

I'm trying to direct the user to the play store.
import app = require("application");
var intent = new android.content.Intent("android.intent.action.VIEW" );
intent.setData( "market://details?id=MY.APP.LINK" );
app.android.currentContext.startActivity(intent);
The above isn't working!
I think the problem has to do with my translation from Java code to something {N} can use.

@Brad Martin gave the answer in the comments, but I'm adding it here for readability, as I needed this snippet. I haven't tried the iOS part yet.
Here is the code I used, with TypeScript.
First, add these lines to your imports:
import * as application from "application";
import * as Utility from "utils/utils";
declare var android: any;
Then you can use a function like this one for Android:
gotoPlayStore() {
    let androidPackageName = application.android.packageName;
    let uri = android.net.Uri.parse("market://details?id=" + androidPackageName);
    let myAppLinkToMarket = new android.content.Intent(android.content.Intent.ACTION_VIEW, uri);
    // Launch the Play Store
    application.android.foregroundActivity.startActivity(myAppLinkToMarket);
}
You can use this code for iOS devices
gotoAppStore() {
    // myAppId is your app's numeric App Store ID
    let appStore = "itms-apps://itunes.apple.com/en/app/id" + myAppId;
    Utility.openUrl(appStore);
}
This code is based on the nativescript-ratings plugin from Nic Raboy, and in particular this file: https://github.com/nraboy/nativescript-ratings/blob/master/ratings.ts
You may get this error on Android when the Play Store app is not installed (on an emulator, typically): ERROR Error: android.content.ActivityNotFoundException: No Activity found to handle Intent { act=android.intent.action.VIEW dat=market://details?id=your.package.id }. It works on a real device, since the Play Store application is present there.
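If you want to handle that case gracefully, one option is to catch the failure and fall back to the Play Store web URL. This is only a sketch: it reuses androidPackageName and myAppLinkToMarket from the gotoPlayStore() function above, plus the Utility import from the iOS snippet.
try {
    // Launch the Play Store app if it is installed
    application.android.foregroundActivity.startActivity(myAppLinkToMarket);
} catch (e) {
    // No activity can handle market:// (e.g. an emulator without the Play Store):
    // fall back to the web version of the store page
    Utility.openUrl("https://play.google.com/store/apps/details?id=" + androidPackageName);
}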

Here's what worked for me:
try
{
    let uri = android.net.Uri.parse( "market://details?id=com.company" );
    var intent = new android.content.Intent( android.content.Intent.ACTION_VIEW, uri );
    application.android.foregroundActivity.startActivity( intent );
}
catch ( e2 )
{
    console.error( "Error =" + e2.message + "=" );
}

Related

How to add custom Java script code to ElectronHostHook in electronnet asp.net mvc

I am trying to add this code snippet to the web APIs demo project, but I have tried and failed, and there isn't much documentation on how to do it.
var os = require("os");
var bytesAvailable = os.totalmem(); // returns number in bytes
// 1 mb = 1048576 bytes
console.log("Total memory available MB :" + (bytesAvailable/1048576) );
It needs to have a TypeScript file and a JavaScript file, according to the implementation of the create-excel.js demo, but I'm not sure how to go about that process.
FYI for everyone looking at this: the developer made a decent tutorial for this, but let's just say I'm the type of developer who is kinda dumb but competent.
So basically you're going to want to:
1. Create a TypeScript file, using the index.ts file as a template.
2. Once you have the TypeScript file, place your custom JS in the onHostReady() part of the script.
3. Build it. This creates the JS file and makes it look similar to the other example files.
4. Create a controller for your custom JS, like hosthook.cs; this is what's called the main function in the API demo.
5. Add front-facing logic to your software (a renderer-side sketch follows the files below). ...So, still testing; I don't know if I got it right just yet.
This did not work in Visual Studio Code; I used Visual Studio 2022.
Don't install the TypeScript NuGet package Visual Studio recommends; it's not in the documentation and it will break the build.
Sometimes the people capable of helping are too busy, so dive deep into the code and get good (talking to myself here).
ipController.cs
using ElectronNET.API;
using ElectronNET.API.Entities;
using Microsoft.AspNetCore.Mvc;
using System.Linq;

namespace ElectronNET_API_Demos.Controllers
{
    public class IPController : Controller
    {
        public IActionResult Index()
        {
            if (HybridSupport.IsElectronActive)
            {
                Electron.IpcMain.On("start-hoosthook", async (args) =>
                {
                    var mainWindow = Electron.WindowManager.BrowserWindows.First();
                    var options = new OpenDialogOptions
                    {
                        Properties = new OpenDialogProperty[]
                        {
                            OpenDialogProperty.openDirectory
                        }
                    };
                    var folderPath = await Electron.Dialog.ShowOpenDialogAsync(mainWindow, options);
                    var resultFromTypeScript = await Electron.HostHook.CallAsync<string>("get-ip-address", folderPath);
                    Electron.IpcMain.Send(mainWindow, "ip-address-found", resultFromTypeScript);
                });
            }
            return View();
        }
    }
}
ipAddress.ts
// @ts-ignore
import * as Electron from "electron";
import { Connector } from "./connector";

export class HookService extends Connector {
    constructor(socket: SocketIO.Socket, public app: Electron.App) {
        super(socket, app);
    }

    onHostReady(): void {
        // execute your own JavaScript Host logic here
        const os = require("os");
        console.log(os.networkInterfaces());
    }
}
ipAddress.js
"use strict";
Object.defineProperty(exports, "__esModule", { value: true });
exports.HookService = void 0;
const connector_1 = require("./connector");
class HookService extends connector_1.Connector {
    constructor(socket, app) {
        super(socket, app);
        this.app = app;
    }
    onHostReady() {
        // execute your own JavaScript Host logic here
        const os = require("os");
        console.log(os.networkInterfaces());
    }
}
exports.HookService = HookService;
//# sourceMappingURL=ipAddress.js.map
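For step 5 above (the front-facing logic), here is a minimal renderer-side sketch of how it might be wired up. This part is an assumption based on how the other Electron.NET demo pages call their controllers: the element IDs are hypothetical, and the channel strings must match the ones used in IPController above (including the "start-hoosthook" spelling).
// Renderer-side script (e.g. in the Razor view for this page)
const { ipcRenderer } = require("electron");

// Ask the controller to run the host hook; the channel name must match IPController
document.getElementById("get-ip-button").addEventListener("click", () => {
    ipcRenderer.send("start-hoosthook");
});

// Receive the result sent back via Electron.IpcMain.Send(...)
ipcRenderer.on("ip-address-found", (event, ipAddress) => {
    document.getElementById("ip-result").innerText = ipAddress;
});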

AWS Chime SDK js does not recognize video and audio elements

I am attempting to get the basic tutorial for the AWS Chime SDK to work in our application and the meetingSession.audioVideo.listVideoInputDevices() always returns nothing/null.
I am running this on the latest Chrome; my operating system is a Windows 10 WorkSpaces instance. I have headphones plugged in, but that shouldn't make a difference.
My expected result is to return at least one device for the video. Here is the output from the Logger.
2020-08-26T15:29:19.127Z [INFO] MyLogger - attempting to trigger media device labels since they are hidden
chime-bundle.js:1 2020-08-26T15:29:19.133Z [INFO] MyLogger - unable to get media device labels
chime-bundle.js:1 2020-08-26T15:29:19.134Z [INFO] MyLogger - API/DefaultDeviceController/listVideoInputDevices null -> []
chime-bundle.js:1 Uncaught (in promise) TypeError: Cannot read property 'deviceId' of undefined
*Note. The video and audio elements are not hidden.
I have tried the code snippets from various demos, which are all just copies of AWS's walkthrough, so there's pretty much zero information there. I have researched how audio devices work in HTML5, and after looking through the files provided in the SDK, I am even more confused. Can someone point me in the right direction?
Here is the basic code; you can get it, along with a description, from the link above.
var fetchResult = await window.fetch(
    window.encodeURI("<our endpoint for backend (running c# instead of node)>"),
    {
        method: 'POST'
    }
);
let result = await fetchResult.json();
console.log("Result from Chime API:", result);
const logger = new ConsoleLogger('MyLogger', LogLevel.INFO);
const deviceController = new DefaultDeviceController(logger);
const meetingResponse = result.JoinInfo.Meeting;
const attendeeResponse = result.JoinInfo.Attendee;
const configuration = new MeetingSessionConfiguration(meetingResponse, attendeeResponse);
// In the usage examples below, you will use this meetingSession object.
const meetingSession = new DefaultMeetingSession(
configuration,
logger,
deviceController
);
console.log("MEETING SESSION", meetingSession);
//SETUP AUDIO
const audioElement = document.getElementById('notary-audio');
meetingSession.audioVideo.bindAudioElement(audioElement);
const videoElement = document.getElementById('notary-video');
// Make sure you have chosen your camera. In this use case, you will choose the first device.
const videoInputDevices = await meetingSession.audioVideo.listVideoInputDevices();
// The camera LED light will turn on indicating that it is now capturing.
// See the "Device" section for details.
await meetingSession.audioVideo.chooseVideoInputDevice(videoInputDevices[0].deviceId);
let localTileId = null;
const observer = {
    audioVideoDidStart: () => {
        console.log('Started');
    },
    audioVideoDidStop: sessionStatus => {
        // See the "Stopping a session" section for details.
        console.log('Stopped with a session status code: ', sessionStatus.statusCode());
    },
    audioVideoDidStartConnecting: reconnecting => {
        if (reconnecting) {
            // e.g. the WiFi connection is dropped.
            console.log('Attempting to reconnect');
        }
    },
    // videoTileDidUpdate is called whenever a new tile is created or tileState changes.
    videoTileDidUpdate: tileState => {
        // Ignore a tile without an attendee ID and other attendees' tiles.
        if (!tileState.boundAttendeeId || !tileState.localTile) {
            return;
        }
        // videoTileDidUpdate is also invoked when you call startLocalVideoTile or tileState changes.
        console.log(`If you called stopLocalVideoTile, ${tileState.active} is false.`);
        meetingSession.audioVideo.bindVideoElement(tileState.tileId, videoElement);
        localTileId = tileState.tileId;
    },
    videoTileWasRemoved: tileId => {
        if (localTileId === tileId) {
            console.log(`You called removeLocalVideoTile. videoElement can be bound to another tile.`);
            localTileId = null;
        }
    }
};
meetingSession.audioVideo.addObserver(observer);
meetingSession.audioVideo.start();
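A general note on the log output above: listVideoInputDevices() can legitimately return an empty array when the machine has no camera at all (a virtual desktop such as a Windows 10 WorkSpaces instance typically exposes none), and browsers also hide device labels until the page has been granted camera/microphone permission. A hedged sketch that requests permission first and guards against an empty device list before choosing a camera (reusing meetingSession from the snippet above):
// Ask for permission up front so device labels become visible (standard browser
// behavior, not specific to Chime); ignore a refusal and carry on without video.
try {
    const stream = await navigator.mediaDevices.getUserMedia({ video: true, audio: true });
    stream.getTracks().forEach(track => track.stop()); // release the devices again
} catch (permissionError) {
    console.warn('Could not get camera/microphone permission:', permissionError);
}

const videoInputs = await meetingSession.audioVideo.listVideoInputDevices();
if (videoInputs.length === 0) {
    // e.g. a machine with no camera attached
    console.warn('No video input devices found; skipping camera setup.');
} else {
    await meetingSession.audioVideo.chooseVideoInputDevice(videoInputs[0].deviceId);
}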

Issue with FCM Push Notification Android

I was able to receive push notifications some months ago. A day ago I started working on the app again, and now the issue is that it's not able to receive push notifications. It does provide an FCM token, but onMessageReceived never gets called. If I try with Postman, it gives a MismatchSenderId error, but the scenario here is a bit confusing: if I change the package name (after creating a new project on the console and adding a new google-services.json file), it doesn't let me register for an FCM token. I've been stuck in this situation since yesterday. Can anybody please help? What am I doing wrong?
Here is the implementation of the FCM token service:
[Service]
[IntentFilter(new[] { "com.google.firebase.INSTANCE_ID_EVENT" })]
public class MyFirebaseIIDService : FirebaseInstanceIdService
{
    const string TAG = "MyFirebaseIIDService";

    public override void OnTokenRefresh()
    {
        var refreshedToken = FirebaseInstanceId.Instance.Token;
        Log.Debug(TAG, "Refreshed token: " + refreshedToken);
        SendRegistrationToServer(refreshedToken);
    }

    void SendRegistrationToServer(string token)
    {
        // Add custom implementation, as needed.
    }
}
Here is the error it gives me if I change my package name to anything else:
Error: Java.Lang.IllegalStateException: Default FirebaseApp is not initialized in this process
try
{
    var refreshedToken = FirebaseInstanceId.Instance.Token;
    // PushNotificationManager.Initialize(this, false);
}
catch (Exception ee)
{
}
I've solved my issue by customizing the Firebase initialization after creating a new project on Firebase; here is my code. One bad thing, though, is that when a new token gets generated, FirebaseInstanceIdReceiver never gets called.
var options = new FirebaseOptions.Builder()
.SetApplicationId("<AppID>")
.SetApiKey("<ApiKey>")
.SetDatabaseUrl("<DBURl>")
.SetStorageBucket("<StorageBucket>")
.SetGcmSenderId("<SenderID>").Build();
var fapp = FirebaseApp.InitializeApp(this, options);

Get network operator name in Appcelerator Titanium

I am looking to get the name of the operator for the user's Android device.
E.g. "Verizon" or "Vodafone", I think I have found the Android equivalent documented here called getSimOperatorName() from http://developer.android.com/reference/android/telephony/TelephonyManager.html#getNetworkOperatorName()
I am scanning over the documentation for Appcelerator Titanium, but can't seem to find a way of doing this in the docs (http://docs.appcelerator.com/platform/latest/#!/api/Titanium.Network).
Is this possible in Appcelerator Titanium?
You can use the tinetworkinfo module. Example:
var netInfo = require('com.clever_apps.tinetworkinfo');
var win = Ti.UI.createWindow({exitOnClose: true});
var testLabel = Ti.UI.createLabel({
height:"80%",
width:"90%",
top:0
});
var refreshButton = Ti.UI.createButton({
title:"Refresh Data",
height:"15%",
bottom:"5%"
});
refreshButton.addEventListener("click", getTelephonyData);
win.add(testLabel);
win.add(refreshButton);
getTelephonyData();
win.open();
function getTelephonyData(){
var imei = netInfo.getIMEI();
var cellid = netInfo.getCellID();
var lac = netInfo.getLac();
var mnc = netInfo.getMNC();
var mmc = netInfo.getMMC();
var outString = "IMEI: "+imei+"\nCell ID: "+cellid+"\nLAC: "+lac+"\nMNC: "+mnc+"\nMMC: "+mmc;
testLabel.text = outString;
}
Currently there is no built-in API that will return that information. For that, you need to create your own Android module.
I could not get the tinetworkinfo module working. However, a module named TelephonyManager worked fine.
I ran this in the terminal for the project:
gittio install com.goyya.telephonymanager
Then this code to get the network operator name:
var telephonymanager = require("com.goyya.telephonymanager");
Ti.API.log('networkOperatorName: ' + telephonymanager.networkOperatorName);

Swift: Share text through Twitter

So basically I am making an event app. Everything has been going smoothly, but there's just the matter of sharing the event to Twitter.
I have searched the internet, but all I am getting is solutions using the native Twitter app, which I don't want. I want to use the browser to tweet.
I have implemented this method for FB sharing.
Any idea would help me a lot.
let content = FBSDKShareLinkContent()
content.contentURL=NSURL(string: "http://facebook.com")
content.imageURL = NSURL(string: "http://facebook.com")
content.contentTitle = "Shou 3emlin test app "
content.contentDescription = "testing testing testing"
let shareDialog = FBSDKShareDialog()
shareDialog.fromViewController = self
shareDialog.mode=FBSDKShareDialogMode.Browser
shareDialog.shareContent = content
if !shareDialog.canShow() {
    shareDialog.mode=FBSDKShareDialogMode.Native
    shareDialog.shareContent = content
}
if shareDialog.canShow() {
    shareDialog.show()
}
Put this in a button's action method, or in the method where you want to use the browser to tweet your text (Swift 3.0):
let tweetText = "your text"
let tweetUrl = "http://stackoverflow.com/"
let shareString = "https://twitter.com/intent/tweet?text=\(tweetText)&url=\(tweetUrl)"
// encode a space to %20 for example
let escapedShareString = shareString.addingPercentEncoding(withAllowedCharacters: CharacterSet.urlQueryAllowed)!
// cast to an url
let url = URL(string: escapedShareString)
// open in safari
UIApplication.shared.openURL(url!)
Take a look at Fabric.io. This SDK allows you to compose tweets directly from your app.
let composer = TWTRComposer()
composer.setText("just setting up my Fabric")
composer.setImage(UIImage(named: "fabric"))
// Called from a UIViewController
composer.showFromViewController(self) { result in
    if (result == TWTRComposerResult.Cancelled) {
        print("Tweet composition cancelled")
    }
    else {
        print("Sending tweet!")
    }
}
let tweetText = "hy"
let tweetUrl = "http://rimmi/"
let shareString = "https://twitter.com/intent/tweet?text=\(tweetText)&url=\(tweetUrl)"
// encode a space to %20 for example
let escapedShareString = shareString.addingPercentEncoding(withAllowedCharacters: CharacterSet.urlQueryAllowed)!
// cast to an url
let url = URL(string: escapedShareString)
// open in safari
UIApplication.shared.openURL(url!)
@ronatory's solution worked like a charm. It also opens the Twitter application if it's already installed on the user's device.
For Swift 5+, use UIApplication.shared.open(url!) instead of UIApplication.shared.openURL(url!), as the latter is deprecated.
