I am working on an Outlook add-in and have to call another function after collecting all the to, cc, and bcc recipients.
I want to process all of that information with to1, cc1, and bcc1, but after toRecipients.getAsync() is called, control returns to the process function and to1 is empty.
How do I handle this?
Also, within getAllRecipients(), after getAsync() is called, the variables 'to1' / 'cc1' / 'bcc1' are empty outside the getAsync callback's scope.
Is there a better way to handle both of the above cases?
Please see the function and callers below.
function getAllRecipients(event) {
var to1 = [];
var cc1 = [];
var bcc1 = [];
// Local objects to point to recipients of either
// the appointment or message that is being composed.
// bccRecipients applies to only messages, not appointments.
var toRecipients, ccRecipients, bccRecipients;
item = Office.context.mailbox.item;
// Verify if the composed item is an appointment or message.
if (item.itemType == Office.MailboxEnums.ItemType.Appointment) {
toRecipients = item.requiredAttendees;
ccRecipients = item.optionalAttendees;
} else {
toRecipients = item.to;
ccRecipients = item.cc;
bccRecipients = item.bcc;
}
// Use asynchronous method getAsync to get each type of recipients
// of the composed item. Each time, this example passes an anonymous
// callback function that doesn't take any parameters.
if (toRecipients) {
toRecipients.getAsync(function (asyncResult) {
if (asyncResult.status == Office.AsyncResultStatus.Failed){
console.log(asyncResult.error.message);
}
else {
// Async call to get to-recipients of the item completed.
// Display the email addresses of the to-recipients.
console.log('To-recipients of the item:');
for (var i = 0; i < asyncResult.value.length; i++) {
var mailID = asyncResult.value[i].emailAddress;
to1.push(mailID);
}
}
}); // End getAsync for to-recipients.
}
// Get any cc-recipients.
if (ccRecipients) {
ccRecipients.getAsync(function(asyncResult) {
if (asyncResult.status == Office.AsyncResultStatus.Failed) {
console.log(asyncResult.error.message);
} else {
// Async call to get cc-recipients of the item completed.
// Display the email addresses of the cc-recipients.
console.log("Cc-recipients of the item:");
for (var i = 0; i < asyncResult.value.length; i++) {
var mailID = asyncResult.value[i].emailAddress;
cc1.push(mailID);
}
}
}); // End getAsync for cc-recipients.
}
// If the item has the bcc field, i.e., item is message,
// get any bcc-recipients.
if (bccRecipients) {
bccRecipients.getAsync(function(asyncResult) {
if (asyncResult.status == Office.AsyncResultStatus.Failed) {
console.log(asyncResult.error.message);
} else {
// Async call to get bcc-recipients of the item completed.
// Display the email addresses of the bcc-recipients.
console.log("Bcc-recipients of the item:");
for (var i = 0; i < asyncResult.value.length; i++) {
var mailID = asyncResult.value[i].emailAddress;
bcc1.push(mailID);
}
}
}); // End getAsync for bcc-recipients.
}
return to1;
}
getAllRecipients(event);
process(to1);
The recipient APIs for compose are asynchronous, so you will need to await the resolved value from the API.
There is some documentation about asynchronous programming with Office add-ins and some common patterns with asynchronous functions here.
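For example, a minimal sketch of that approach (assuming a Promise-capable runtime): wrap each getAsync call in a Promise and call process only once all three lists have resolved. process, to1, cc1 and bcc1 are the names from the question.
function getRecipientsAsync(recipients) {
  return new Promise(function (resolve, reject) {
    // bccRecipients is undefined for appointments, so resolve to an empty list.
    if (!recipients) {
      resolve([]);
      return;
    }
    recipients.getAsync(function (asyncResult) {
      if (asyncResult.status === Office.AsyncResultStatus.Failed) {
        reject(asyncResult.error);
      } else {
        // Map the recipient details to plain email addresses.
        resolve(asyncResult.value.map(function (r) { return r.emailAddress; }));
      }
    });
  });
}
function getAllRecipients() {
  var item = Office.context.mailbox.item;
  var isAppointment = item.itemType === Office.MailboxEnums.ItemType.Appointment;
  return Promise.all([
    getRecipientsAsync(isAppointment ? item.requiredAttendees : item.to),
    getRecipientsAsync(isAppointment ? item.optionalAttendees : item.cc),
    getRecipientsAsync(isAppointment ? undefined : item.bcc)
  ]);
}
// process only runs after all three getAsync calls have completed.
getAllRecipients().then(function (results) {
  var to1 = results[0], cc1 = results[1], bcc1 = results[2];
  process(to1, cc1, bcc1);
});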
I created a script that tracks attendance for distance learning. After a while it times out, so I think the issue is too many calls to the Google Classroom API, but I don't see a way to move those calls out of a loop.
The script takes all the Google Classroom classes that my Apps Script account is a co-teacher on and, using timed triggers, creates a daily attendance assignment with one question that says 'here'. Students are supposed to answer the question, and another trigger at night runs the function that 'grades' each assignment and populates my spreadsheet so school secretaries can view it in the morning and record the previous day's attendance.
The bottleneck seems to be my getStudentResponses() function. I tried to reduce the run time by filtering out students who didn't submit the assignment, but it wasn't enough. Does anyone see a way to make this faster? I was reading up on using the Cache Service, but I couldn't figure out how to get it to work. Any help would be appreciated.
var ss = SpreadsheetApp.getActive();
var date = new Date();
/*
creates a button to programmatically create all necessary timed triggers for easy deployment
*/
function onOpen() {
var ui = SpreadsheetApp.getUi();
ui.createMenu('Attendance')
.addItem('Create Triggers', 'createTriggers')
.addToUi();
}
/*
auto accepts any co-teacher invites
*/
function acceptInvite() {
try{
var optionalArgs = {
userId: "me"
};
var invites = Classroom.Invitations.list(optionalArgs);
for(var i = 0; i < invites.invitations.length; i++) {
Classroom.Invitations.accept(invites.invitations[i].id);
}
}
catch(e){}
}
/*
populates a spreadsheet with all the classes that the script Google account is a co-teacher of
the sheet has two columns: one with the course name and one with the course id
*/
function listCourses() {
var optionalArgs = {courseStates: "ACTIVE"};
var response = Classroom.Courses.list(optionalArgs);
var courses = response.courses;
var classSheet;
try{
classSheet = ss.insertSheet("Classes", 0);
ss.insertSheet("Assignments", 1);
}
catch(e) {
classSheet = ss.getSheetByName("Classes");
}
classSheet.clear();
if (courses && courses.length > 0) {
for (i = 0; i < courses.length; i++) {
var course = courses[i];
classSheet.appendRow([course.name, course.id]);
}
}
}
/*
reads the sheet to get all the classes and creates a new array with all the class IDs
*/
function getCourses() {
var classSheet = ss.getSheetByName("Classes");
var classList = new Array();
var range = classSheet.getDataRange();
var values = range.getValues();
for(var i in values) {
var row = values[i];
var courseId = row[1]+"";
classList.push(Classroom.Courses.get(courseId));
}
createTopics(classList);
}
/*
called immediately after getCourses; creates topics in each class that will contain the daily attendance assignment
*/
function createTopics(classList) {
for(i = 0; i < classList.length; i++) {
var topic;
var resource = {name: "Daily Online Attendance"};
try {
topic = Classroom.Courses.Topics.create(resource, classList[i].id);
createAssignment(topic,classList[i]);
}
catch(e) {
if(e == "GoogleJsonResponseException: API call to classroom.courses.topics.create failed with error: Requested entity already exists") {
var topics = Classroom.Courses.Topics.list(classList[i].id);
for(j = 0; j < topics.topic.length; j++) {
if(topics.topic[j].name == "Daily Online Attendance") {
createAssignment(topics.topic[j], classList[i]);
}
}
}
}
}
}
/*
creates an assignment in each class, under each topic
each assignment only has one choice that says "here" and is going to be 'graded' each night to track attendance
*/
function createAssignment(topic,course) {
var resource = {
title: "Attendance for "+(date.getMonth()+1)+"/"+date.getDate()+"/2020",
description: "Please fill this assignment out each day for attendance",
topicId: topic.topicId,
state: "PUBLISHED",
workType: "MULTIPLE_CHOICE_QUESTION",
multipleChoiceQuestion: {
"choices": [
"Here"
]
}
};
try {
var assignment = Classroom.Courses.CourseWork.create(resource, course.id);
var sheet = ss.getSheetByName("Assignments");
sheet.appendRow([course.id,assignment.id]);
}
catch(e){}
}
/*
creates a new sheet for each day and logs each assignment
*/
function getStudentResponses() {
var assignmentSheet = ss.getSheetByName("Assignments");
var sheet2;
var response;
assignmentSheet.sort(1, true);
try{
sheet2 = ss.insertSheet("Attendance for "+(date.getMonth()+1)+"/"+date.getDate()+"/2020",(ss.getSheets().length-(ss.getSheets().length-2)));
sheet2.appendRow(["Student Last Name","Student First Name","Grade","Class Name","Assignment Answer"]);
}
catch(e) {
sheet2 = ss.getSheetByName("Attendance for "+(date.getMonth()+1)+"/"+date.getDate()+"/2020");
}
sheet2.setFrozenRows(1);
var range = assignmentSheet.getDataRange();
var values = range.getValues();
for(var i in values) {
var row = values[i];
var courseId = row[0]+"";
var courseWorkId = row[1]+"";
try {
response = Classroom.Courses.CourseWork.StudentSubmissions.list(courseId, courseWorkId);
for(var j in response.studentSubmissions) {
if(response.studentSubmissions[j].state == "TURNED_IN") {
try {
var grade;
var email = Classroom.UserProfiles.get(response.studentSubmissions[j].userId).emailAddress;
sheet2.appendRow([Classroom.UserProfiles.get(response.studentSubmissions[j].userId).name.familyName,Classroom.UserProfiles.get(response.studentSubmissions[j].userId).name.givenName,grade,Classroom.Courses.get(courseId).name,response.studentSubmissions[j].multipleChoiceSubmission.answer]);
}
catch (e) {}
}
}
}
catch(e) {}
}
}
/*
deletes all assignments that were created
*/
function deleteAssignments() {
var assignmentSheet = ss.getSheetByName("Assignments");
assignmentSheet.sort(1, true);
var range = assignmentSheet.getDataRange();
var values = range.getValues();
for(var i in values) {
var row = values[i];
var courseId = row[0]+"";
var courseWorkId = row[1]+"";
try {
Classroom.Courses.CourseWork.remove(courseId, courseWorkId);
}
catch(e) {}
assignmentSheet.clear();
}
}
function createTriggers() {
ScriptApp.newTrigger('getCourses')
.timeBased()
.everyDays(1)
.atHour(6)
.create();
ScriptApp.newTrigger('getStudentResponses')
.timeBased()
.everyDays(1)
.atHour(22)
.create();
ScriptApp.newTrigger('deleteAssignments')
.timeBased()
.everyDays(1)
.atHour(23)
.create();
ScriptApp.newTrigger('listCourses')
.timeBased()
.everyDays(1)
.atHour(21)
.create();
ScriptApp.newTrigger('acceptInvite')
.timeBased()
.everyDays(1)
.atHour(20)
.create();
}
appendRow is slow; you should avoid using it inside a for loop. Instead, build an array, then pass the values using a single setValues call.
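A rough sketch of how getStudentResponses could apply that pattern (not a drop-in replacement; it assumes the same 'Assignments' layout and globals as in the question, and it also reuses each course name and user profile instead of re-fetching them per row):
function getStudentResponsesBatched() {
  var assignmentSheet = ss.getSheetByName("Assignments");
  var values = assignmentSheet.getDataRange().getValues();
  var rows = [];
  for (var i = 0; i < values.length; i++) {
    var courseId = values[i][0] + "";
    var courseWorkId = values[i][1] + "";
    // One Courses.get call per course instead of one per submission.
    var courseName = Classroom.Courses.get(courseId).name;
    var response = Classroom.Courses.CourseWork.StudentSubmissions.list(courseId, courseWorkId);
    (response.studentSubmissions || []).forEach(function (submission) {
      if (submission.state !== "TURNED_IN") return;
      // One UserProfiles.get call per student instead of three.
      var profile = Classroom.UserProfiles.get(submission.userId);
      rows.push([
        profile.name.familyName,
        profile.name.givenName,
        "", // grade placeholder, as in the original
        courseName,
        submission.multipleChoiceSubmission.answer
      ]);
    });
  }
  var sheet2 = ss.getSheetByName("Attendance for " + (date.getMonth() + 1) + "/" + date.getDate() + "/2020");
  // Write all collected rows in a single call instead of appendRow per student.
  if (rows.length > 0) {
    sheet2.getRange(sheet2.getLastRow() + 1, 1, rows.length, rows[0].length).setValues(rows);
  }
}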
Resources
Best Practices | Apps Script
Related
Google Script Performance Slow Down
Increase my script performance Google Sheets Script
Very slow execution of for...in loop
I've created a method that adds members to a Google group in a batch request using .NET Core and Google's .NET client library. The code looks like this:
private void InitializeGSuiteDirectoryService()
{
_directoryServiceCredential = GoogleCredential
.FromJson(GlobalSettings.Instance.GSuiteSettings.Credentials)
.CreateScoped(_scopes)
.CreateWithUser(GlobalSettings.Instance.GSuiteSettings.User);
_directoryService = new DirectoryService(new BaseClientService.Initializer()
{
HttpClientInitializer = _directoryServiceCredential,
ApplicationName = _applicationName
});
}
public OperationResult<int> AddGroupMembers(Group group, IEnumerable<Member> members)
{
var result = new OperationResult<int>();
var memberList = members.ToList();
var batchRequestCount = 0;
if (memberList.Any())
{
var request = new BatchRequest(_directoryService);
foreach (var member in memberList)
{
batchRequestCount++;
request.Queue<Members>(_directoryService.Members.Insert(member, group.Id), (content, error, i, message) =>
{
if (message.IsSuccessStatusCode)
{
//log OK
}
else
{
// Implement Exponential backoff only on the request that failed.
}
});
if (batchRequestCount == 30|| member.Equals(memberList.Last()))
{
request.ExecuteAsync().Wait();
request = new BatchRequest(_directoryService); //Clear queue
}
}
}
return result;
}
The logic works fine if the number of members is small; however, when the member count is, say, 100 (this is the maximum number of users in my Google test instance), I get an error from Google that reads "quotaExceeded". According to Google's documentation, the limit for a batch request on their Admin SDK is 1000, and I've set my logic to execute when we reach a limit of 30.
The QUESTION is: how do I implement error handling to retry whenever I get this error? Their documentation suggests implementing 'exponential backoff' when a response contains a 'retry-able error' (I don't see this when I inspect my response).
So here's what I ended up doing to implement exponential backoff on my call to add members to a G Suite group. Since I'm using .NET Core, I was able to use Polly, a resilience and transient-fault-handling library that offers this functionality out of the box. There may be some need for refactoring, but here's what the code looks like for now:
public OperationResult<int> AddGroupMembers(Group group, IEnumerable<Member> members)
{
var result = new OperationResult<int>();
var memberList = members.ToList();
var batchRequestCount = 0;
if (memberList.Any())
{
var request = new BatchRequest(_directoryService);
foreach (var member in memberList)
{
retryRequest = false; // This variable needs to be declared at the class level to guarantee the value is available to the original thread running the process.
batchRequestCount++;
request.Queue<Members>(_directoryService.Members.Insert(member, group.Id), (content, error, i, message) =>
{
// If the error code is 'quotaExceeded' (HTTP 403), flag the batch for a retry.
// (You can add as many error codes as you'd like to retry here.)
// Note: error is null for items that succeed, so guard against that.
if (error != null && error.Code == 403)
{
retryRequest = true;
}
});
// Execute the batch request, adding members in batches of 30 members max
if (batchRequestCount == 30|| member.Equals(memberList.Last()))
{
// Below is what the retry code using Polly looks like
var response = Policy
.HandleResult<HttpResponseMessage>(message => message.StatusCode == HttpStatusCode.Conflict)
.WaitAndRetry(new[]
{
TimeSpan.FromSeconds(1),
TimeSpan.FromSeconds(2),
TimeSpan.FromSeconds(4)
}, (results, timeSpan, retryCount, context) =>
{
// Log Warn saying a retry was required.
})
.Execute(() =>
{
var httpResponseMsg = new HttpResponseMessage();
// Execute batch request Synchronously
request.ExecuteAsync().Wait();
if (retryRequest)
{
httpResponseMsg.StatusCode = HttpStatusCode.Conflict;
retryRequest = false;
}
else
{
httpResponseMsg.StatusCode = HttpStatusCode.OK;
}
return httpResponseMsg;
});
if (response.IsSuccessStatusCode)
{
// Log info
}
else
{
// Log warn
}
batchRequestCount = 0; // Reset the per-batch counter so the next batch of 30 can be queued
request = new BatchRequest(_directoryService);
batchCompletedCount++; // Class-level counter of completed batches
}
}
}
return result;
}
Using an HTTP GET request, how would you get only the classes that are active? Could you add a parameter to the Google API URL that returns only a list of active classes? Or do you have to search through the returned array and remove any classes that are archived using a for loop?
var classroom = new XMLHttpRequest();
var accessToken = localStorage.getItem('accessToken');
classroom.open('GET',
'https://classroom.googleapis.com/v1/courses');
classroom.setRequestHeader('Authorization',
'Bearer ' + accessToken);
classroom.send();
classroom.onload = function () {
if (classroom.readyState === classroom.DONE) {
if (classroom.status === 200) {
var response = JSON.parse(classroom.response);
vm.classes = response.courses;
console.log(response);
for (var i = 0; i < response.courses.length; i++){
var courses = response.courses[i];
console.log(courses.name);
}
} else {
console.log("Error Unknown");
}
}
};
Any help would be much appreciated.
Thanks!
There's no filter option yet like with User objects (at least none that's documented, as far as I can tell). So yes, you'll have to pull all of the courses and then filter out the archived ones. At https://developers.google.com/classroom/reference/rest/v1/courses there's a CourseState section that lists the 5 possible states a course can be in: [COURSE_STATE_UNSPECIFIED, ACTIVE, ARCHIVED, PROVISIONED, DECLINED]
Reading through the docs, courses.list returns a list of courses that the requesting user is permitted to view. It does not state a direct way of retrieving only active classes, so you may have to resort to the implementation you described.
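If you stay with the XMLHttpRequest approach from the question, a small sketch of that client-side filtering might look like this (it reuses the classroom and vm variables from the question's code):
classroom.onload = function () {
  if (classroom.readyState === classroom.DONE && classroom.status === 200) {
    var response = JSON.parse(classroom.response);
    // Keep only the courses whose courseState is ACTIVE.
    var activeCourses = (response.courses || []).filter(function (course) {
      return course.courseState === 'ACTIVE';
    });
    vm.classes = activeCourses;
    console.log(activeCourses);
  } else if (classroom.readyState === classroom.DONE) {
    console.log("Error " + classroom.status);
  }
};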
Try this:
function get_courses(student) {
var optionalArgs = {
studentId: student
};
var response = Classroom.Courses.list(optionalArgs);
var courses = response.courses;
var active_courses = [];
if (courses && courses.length > 0) {
for (i = 0; i < courses.length; i++) {
var course = courses[i];
if(course.courseState == "ACTIVE"){
active_courses.push(course);
Logger.log('%s (%s)', course.name, course.id);
}
}
} else {
Logger.log('No courses found.');
}
return active_courses;
}
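A possible way to call it (passing "me" lists the courses of the account running the script; you could also pass a student's email address or user ID):
var activeCourses = get_courses("me");
Logger.log(activeCourses.length + " active courses found");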
I do not really know what I am talking about, but here I go.
On some pages it filters the words, but on others, like YouTube comments, it doesn't work.
What code needs to change in order for it to work on these sites?
// ==UserScript==
// @name profanity_filter
// @namespace localhost
// @description Profanity filter
// @include *
// @version 1
// @grant none
// ==/UserScript==
function recursiveFindTextNodes(ele) {
var result = [];
result = findTextNodes(ele,result);
return result;
}
function findTextNodes(current,result) {
for(var i = 0; i < current.childNodes.length; i++) {
var child = current.childNodes[i];
if(child.nodeType == 3) {
result.push(child);
}
else {
result = findTextNodes(child,result);
}
}
return result;
}
var l = recursiveFindTextNodes(document.body);
for(var i = 0; i < l.length; i++) {
var t = l[i].nodeValue;
t = t.replace(/badword1|badword2|badword3/gi, "****");
t = t.replace(/badword4/gi, "******");
t = t.replace(/badword5|badword6|badword7/gi, "*****");
t = t.replace(/badword8/gi, "******");
l[i].nodeValue = t;
}
* Replaced the actual profanity in the code with badword placeholders
YouTube comments are loaded asynchronously, quite a long time after the page has loaded (userscripts are executed at the DOMContentLoaded event by default), so you need to wrap your code as a callback: either use waitForKeyElements with a selector for the comments container, or a MutationObserver, or setInterval.
replaceNodes(); // process the page
waitForKeyElements('.comment-text-content', replaceNodes);
function replaceNodes() {
..............
..............
}
Using setInterval instead of waitForKeyElements:
replaceNodes(); // process the page
var interval = setInterval(function() {
if (document.querySelector('.comment-text-content')) {
clearInterval(interval);
replaceNodes();
}
}, 100);
function replaceNodes() {
..............
..............
}
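And a sketch of the MutationObserver variant (replaceNodes is the same wrapper as above; observing the whole document body is simple but heavy, so you may want to narrow it to the comments container):
replaceNodes(); // process the page
var observer = new MutationObserver(function () {
  // Re-run the filter whenever new nodes (such as freshly loaded comments) are added.
  replaceNodes();
});
observer.observe(document.body, { childList: true, subtree: true });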
P.S. Don't blindly assign the value to the node, check first if it has changed to avoid layout recalculations:
if (l[i].nodeValue != t) {
l[i].nodeValue = t;
}
Hey, I have been able to write an nsIStreamListener that listens on responses and gets the response text, following the tutorial at nsitraceablechannel-intercept-http-traffic. But I am unable to modify the response that is sent to the browser. If I modify the response and send it back down the chain, the change shows up in Firebug but not in the browser.
My guess is that we have to replace the default listener rather than just listening in the chain. I can't find any docs anywhere that explain how to do this.
Could anyone give me some insight into this? This is mainly for educational purposes.
Thanks in advance.
Edit: As of now I have arrived at a partial solution; I am able to do this:
var old;
function TracingListener() {}
TracingListener.prototype = {
originalListener: null,
receivedData: null, //will be an array for incoming data.
//For the listener this is step 1.
onStartRequest: function (request, context) {
this.receivedData = []; //initialize the array
//Pass on the onStartRequest call to the next listener in the chain -- VERY IMPORTANT
//old.onStartRequest(request, context);
},
//This is step 2. This gets called every time additional data is available
onDataAvailable: function (request, context, inputStream, offset, count) {
var binaryInputStream = CCIN("#mozilla.org/binaryinputstream;1",
"nsIBinaryInputStream");
binaryInputStream.setInputStream(inputStream);
var storageStream = CCIN("#mozilla.org/storagestream;1",
"nsIStorageStream");
//8192 is the segment size in bytes, count is the maximum size of the stream in bytes
storageStream.init(8192, count, null);
var binaryOutputStream = CCIN("#mozilla.org/binaryoutputstream;1",
"nsIBinaryOutputStream");
binaryOutputStream.setOutputStream(storageStream.getOutputStream(0));
// Copy received data as they come.
var data = binaryInputStream.readBytes(count);
this.receivedData.push(data);
binaryOutputStream.writeBytes(data, count);
//Pass it on down the chain
//old.onDataAvailable(request, context,storageStream.newInputStream(0), offset, count);
},
onStopRequest: function (request, context, statusCode) {
try {
//QueryInterface into HttpChannel to access originalURI and requestMethod properties
request.QueryInterface(Ci.nsIHttpChannel);
//Combine the response into a single string
var responseSource = this.receivedData.join('');
//edit data as needed
responseSource = "test";
console.log(responseSource);
} catch (e) {
//standard function to dump a formatted version of the error to console
dumpError(e);
}
var stream = Cc["#mozilla.org/io/string-input-stream;1"]
.createInstance(Ci.nsIStringInputStream);
stream.setData(responseSource, -1);
//Pass it to the original listener
//old.originalListener=null;
old.onStartRequest(channel, context);
old.onDataAvailable(channel, context, stream, 0, stream.available());
old.onStopRequest(channel, context, statusCode);
},
QueryInterface: function (aIID) {
if (aIID.equals(Ci.nsIStreamListener) ||
aIID.equals(Ci.nsISupports)) {
return this;
}
throw components.results.NS_NOINTERFACE;
},
readPostTextFromRequest: function (request, context) {
try {
var is = request.QueryInterface(Ci.nsIUploadChannel).uploadStream;
if (is) {
var ss = is.QueryInterface(Ci.nsISeekableStream);
var prevOffset;
if (ss) {
prevOffset = ss.tell();
ss.seek(Ci.nsISeekableStream.NS_SEEK_SET, 0);
}
// Read data from the stream..
var charset = "UTF-8";
var text = this.readFromStream(is, charset, true);
if (ss && prevOffset == 0)
ss.seek(Ci.nsISeekableStream.NS_SEEK_SET, 0);
return text;
} else {
dump("Failed to Query Interface for upload stream.\n");
}
} catch (exc) {
dumpError(exc);
}
return null;
},
readFromStream: function (stream, charset, noClose) {
var sis = CCSV("#mozilla.org/binaryinputstream;1",
"nsIBinaryInputStream");
sis.setInputStream(stream);
var segments = [];
for (var count = stream.available(); count; count = stream.available())
segments.push(sis.readBytes(count));
if (!noClose)
sis.close();
var text = segments.join("");
return text;
}
}
httpRequestObserver = {
observe: function (request, aTopic, aData) {
if (typeof Cc == "undefined") {
var Cc = components.classes;
}
if (typeof Ci == "undefined") {
var Ci = components.interfaces;
}
if (aTopic == "http-on-examine-response") {
request.QueryInterface(Ci.nsIHttpChannel);
console.log(request.statusCode);
var newListener = new TracingListener();
request.QueryInterface(Ci.nsITraceableChannel);
channel = request;
//newListener.originalListener
//add new listener as default and save old one
old = request.setNewListener(newListener);
old.originalListener = null;
var threadManager = Cc["#mozilla.org/thread-manager;1"]
.getService(Ci.nsIThreadManager);
threadManager.currentThread.dispatch(newListener, Ci.nsIEventTarget.DISPATCH_NORMAL);
}
},
QueryInterface: function (aIID) {
if (typeof Cc == "undefined") {
var Cc = components.classes;
}
if (typeof Ci == "undefined") {
var Ci = components.interfaces;
}
if (aIID.equals(Ci.nsIObserver) ||
aIID.equals(Ci.nsISupports)) {
return this;
}
throw components.results.NS_NOINTERFACE;
},
};
var observerService = Cc["#mozilla.org/observer-service;1"]
.getService(Ci.nsIObserverService);
observerService.addObserver(httpRequestObserver,
"http-on-examine-response", false);
This example works for me on Firefox 34 (current nightly): https://github.com/Noitidart/demo-nsITraceableChannel
I downloaded the XPI and edited bootstrap.js (around line 132) to modify the stream:
// Copy received data as they come.
var data = binaryInputStream.readBytes(count);
data = data.replace(/GitHub/g, "TEST");
this.receivedData.push(data);
Then I installed the XPI and reloaded the GitHub page. It read "TEST" in the footer.
The version of the code you posted doesn't actually pass the results back to the old listener, so that's the first thing that ought to be changed.
It also may have interacted badly with Firebug or another extension. It's a good idea to try reproducing the problem in a clean profile (with only your extension installed).
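For reference, a sketch of what that forwarding could look like in the posted TracingListener (essentially the commented-out calls restored; old is the listener returned by setNewListener, as in the question):
// Inside TracingListener.prototype -- a sketch only, using the names from the question.
onStartRequest: function (request, context) {
  this.receivedData = [];
  // Keep the original listener in the chain.
  old.onStartRequest(request, context);
},
onStopRequest: function (request, context, statusCode) {
  var responseSource = this.receivedData.join('');
  // ...edit responseSource as needed...
  var stream = Cc["@mozilla.org/io/string-input-stream;1"]
    .createInstance(Ci.nsIStringInputStream);
  stream.setData(responseSource, responseSource.length);
  // Hand the (possibly modified) data to the original listener, then finish the request.
  old.onDataAvailable(request, context, stream, 0, stream.available());
  old.onStopRequest(request, context, statusCode);
},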