High quality book covers using Google Books API

According to the Google Books API documentation, each volume can yield six different imageLinks (smallThumbnail, thumbnail, small, medium, large, extraLarge).
Unfortunately, for all the queries I've tried (and I've tried a lot), only smallThumbnail and thumbnail were returned. (Example query)
Also, apart from being fairly small, those two images have this little fake dog-ear in the bottom-right corner.
Did they deprecate the high-quality image links? Is there another way to fetch those images? Other APIs I tried are either deprecated (Goodreads) or less extensive (Open Library).

I'm having the same issue. The search query only yields the smallThumbnail and thumbnail keys for the imageLinks. If you query the volume directly (like this), then you'll get all the options for the imageLinks.
This won't 100% solve your problem though. For some of the books, the small/medium/large links point to the first page of the book instead of the actual cover.
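To illustrate, here's a minimal sketch of that two-step approach in plain browser JavaScript: search first, then re-fetch the volume by its id to get the full imageLinks. (The fetch wiring and picking the first result are my own assumptions, not part of the original answer.)
async function getFullImageLinks(query) {
    // First request: the search endpoint only returns smallThumbnail/thumbnail.
    const searchRes = await fetch('https://www.googleapis.com/books/v1/volumes?q=' + encodeURIComponent(query));
    const searchData = await searchRes.json();
    const firstId = searchData.items[0].id; // assumption: at least one result exists

    // Second request: the single-volume endpoint returns the richer imageLinks
    // (small, medium, large, extraLarge - when Google has them).
    const volumeRes = await fetch('https://www.googleapis.com/books/v1/volumes/' + firstId);
    const volume = await volumeRes.json();
    return volume.volumeInfo.imageLinks; // may still be missing sizes for many books
}
getFullImageLinks('flowers').then(links => console.log(links));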

You might not be able to get high-resolution images all the time. Also, for some book results, you might not get any images at all.
I am working on a similar project, and I had to manually check the resolution of each image in the response, keeping only the 'good' ones.
Here's what I had to do:
let newArr = [];
let lowResImage = 0;
let dataWithoutImage = 0;
let highResImage = 0;

genreResults?.forEach((book) => {
    if (book?.volumeInfo?.imageLinks?.smallThumbnail) { // the current book has an image
        mypromise(book.volumeInfo.imageLinks.smallThumbnail).then(() => {
            ++highResImage;
            newArr.push(book);
        }).catch(() => {
            ++lowResImage;
        }).finally(() => {
            // once every book has been classified, publish the filtered list
            if (dataWithoutImage + highResImage + lowResImage === genreResults.length) {
                console.log("---------data populated----------");
                console.log("high resolution images:", highResImage);
                console.log("low resolution images:", lowResImage);
                console.log("books without images:", dataWithoutImage);
                console.log("genreResults size:", genreResults.length);
                setBooks(newArr);
            }
        });
    } else { // this book does not contain an image
        ++dataWithoutImage;
    }
});

// edge case: if no book had an image, no promise ever settles,
// so publish the (empty) list here
if (genreResults?.length && dataWithoutImage === genreResults.length) {
    setBooks(newArr);
}
The function to check the resolution looks like this:
let mypromise = function checkImageResolution(url) {
    return new Promise((resolve, reject) => {
        const img = new Image();
        img.onload = function () {
            console.log("image dimensions", this.width, this.height);
            if (this.width < 125 || this.height < 100) // if either is too small, reject the image
                reject("low quality image");
            else
                resolve("high quality, let's have this one");
        };
        img.onerror = () => reject("image failed to load"); // without this, the promise never settles on a broken URL
        img.src = url; // set src after attaching the handlers so no event is missed
    });
};

Try bumping the zoom parameter in the thumbnail URL (a blanket replaceAll("1", "10") would also corrupt any other 1 in the URL, such as in the volume id, so target the parameter explicitly):
thumbnail = thumbnail.replace("zoom=1", "zoom=10");
But it works only for some books; for the rest it will only load the first page of the book.
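As an aside, the fake dog-ear mentioned in the question seems to come from an edge=curl parameter in the returned thumbnail URLs, so a more targeted rewrite might look like this (both parameters are observations about the URLs Google currently returns, not documented API behavior):
function upgradeThumbnail(url) {
    return url
        .replace('zoom=1', 'zoom=10')  // ask for a larger rendition, as above
        .replace('&edge=curl', '');    // drop the fake page-curl effect
}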


How do you identify questions that the bot could not answer

My organisation is starting to experiment with the Microsoft Bot Framework. One of the questions our enterprise architect has asked is as follows:
How do we identify questions that the bot was unable to answer?
I've checked the documentation but I'm still unclear. Can anyone elaborate on the techniques that they use to identify unanswered questions? We feel this is important as it identifies opportunities for further growth.
You can achieve this using a number of techniques. Essentially, what you are trying to do is store, for analysis, any questions the bot has not been able to answer.
You can do this using the scoring mechanism in QnA Maker: if QnA Maker returns a score of zero, no answer exists, so we write that question back to storage for analysis.
You can use a number of storage solutions for this in the Azure stack, such as Application Insights, Cosmos, Blob, SharePoint Lists etc.
In the example below (code trimmed for brevity), I'm using Application Insights to store this information. I have imported the botbuilder-applicationinsights package and have created a simple custom event to capture any responses that score zero against the QnAMaker.
const {
    ApplicationInsightsTelemetryClient,
    ApplicationInsightsWebserverMiddleware
} = require('botbuilder-applicationinsights');
const { MessageFactory, CardFactory } = require('botbuilder');
const { QnAServiceHelper } = require('../helpers/qnAServiceHelper');
const { CardHelper } = require('../helpers/cardHelper');
const { FunctionDialogBase } = require('./functionDialogBase');

// Setup Application Insights
const settings = require('../settings').settings;
const appInsightsClient = new ApplicationInsightsTelemetryClient(settings.instrumentationKey);

class QnADialog extends FunctionDialogBase {
    constructor() {
        super('qnaDialog');
    }

    async processAsync(oldState, activity) {
        let newState = null;
        const query = activity.text;
        const qnaResult = await QnAServiceHelper.queryQnAService(query, oldState);
        const qnaAnswer = qnaResult[0].answer;
        const qnaScore = qnaResult[0].score;

        let prompts = null;
        if (qnaResult[0].context != null) {
            prompts = qnaResult[0].context.prompts;
        }

        let outputActivity = null;
        if (prompts == null || prompts.length < 1) {
            outputActivity = MessageFactory.text(qnaAnswer);
        } else {
            newState = {
                PreviousQnaId: qnaResult[0].id,
                PreviousUserQuery: query
            };
            outputActivity = CardHelper.GetHeroCard(qnaAnswer, prompts);
        }

        // A score of zero means QnA Maker found no answer: show a fallback card
        // and log the question to Application Insights for later analysis.
        if (qnaScore === 0) {
            const { NonResponseCard } = require('../dialogs/non-response');
            const quickNonResponseCard = CardFactory.adaptiveCard(NonResponseCard);
            outputActivity = ({ attachments: [quickNonResponseCard] });
            console.log('Cannot find QnA response for ' + query);
            appInsightsClient.trackEvent({
                name: 'Non-response',
                properties: { question: query }
            });
        }

        return [newState, outputActivity, null];
    }
}

module.exports.QnADialog = QnADialog;
I can then hook up the query in Application Insights to Power BI to surface those unanswered questions.
There are multiple ways to achieve this, but this is the one I ended up going with.
Depending on the size and complexity of your model, you will want to use LUIS or QnA Maker. If your model is very simple, QnA Maker will work; for something a bit more complex, especially if you want to make use of entities, LUIS is definitely the way to go. Each of them has its own technique, and #steviebleeds describes how to do it with QnA Maker. For LUIS, you are going to look at your confidence threshold, and you should record utterances that score below the threshold you have set. Each time you get a prediction from LUIS, it sends you a list of intents, each with a confidence score for the prediction. You should assess that confidence score and decide, based on your threshold, whether or not to answer your users. You also want to log all questions that return the None intent.
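Here's a minimal sketch of that thresholding idea in JavaScript (the response shape follows the LUIS v2 prediction format, with topScoringIntent = { intent, score }; the 0.5 threshold and the collection logic are assumptions you would tune for your own model):
const CONFIDENCE_THRESHOLD = 0.5; // assumption: tune this against your model

function isUnanswered(luisResult) {
    // Flag predictions that resolve to the built-in None intent
    // or that score below the confidence threshold.
    const top = luisResult.topScoringIntent;
    return top.intent === 'None' || top.score < CONFIDENCE_THRESHOLD;
}

// Example: collect low-confidence utterances for later review.
const unanswered = [];
const result = { topScoringIntent: { intent: 'BookFlight', score: 0.32 } };
if (isUnanswered(result)) {
    unanswered.push({ question: 'some user utterance', score: result.topScoringIntent.score });
}
console.log(unanswered); // these would go to App Insights / Cosmos / etc.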

How does the location cache work?

I have an application that gets the device's GPS location every time the user fills out a form. My problem is that capturing the GPS location takes too long: about 40 to 60 seconds before a fix is captured. I am using jamesmontemagno's Geolocator plugin.
GPS Parameters:
Accuracy: 100 meters
Timeout: 1 minute
Here is my code that I am using right now:
var defaultgpsaccuracy = Convert.ToDouble(Preferences.Get("gpsaccuracy", String.Empty, "private_prefs"));
var defaultgpstimeout = Convert.ToDouble(Preferences.Get("gpstimeout", String.Empty, "private_prefs"));
var locator = CrossGeolocator.Current;
locator.DesiredAccuracy = defaultgpsaccuracy;

position = await locator.GetLastKnownLocationAsync();

if (position != null)
{
    string location = position.Latitude + "," + position.Longitude;
    lblStartLocation.Text = location;
}
else
{
    position = await locator.GetPositionAsync(TimeSpan.FromMinutes(defaultgpstimeout), null, false);
    string location = position.Latitude + "," + position.Longitude;
    lblStartLocation.Text = location;
}
These are my questions:
I used locator.GetLastKnownLocationAsync(); how long before the location cache refreshes?
Does the last known location refresh when there is a change of location?
And does the cache refresh when the device moves outside the accuracy range? For example, with an accuracy of 100 meters, does the location cache refresh once the device is more than 100 meters from the last known location?
I would highly recommend looking through the documentation for that particular plugin. James Montemagno is a well-known and respected developer employed by Microsoft working on the Xamarin framework, so his plugins, extensions and toolkits tend to be highly optimised for use in cross-platform applications.
Looking at the documentation, it's clear that getting the last 'known' location reads from an internally cached location and is not necessarily suited to near real-time queries. However, it can be used to reduce the number of actual location queries your app has to make.
The full snippet from the linked documentation is as follows:
public static async Task<Position> GetCurrentPosition()
{
    Position position = null;
    try
    {
        var locator = CrossGeolocator.Current;
        locator.DesiredAccuracy = 100;

        position = await locator.GetLastKnownLocationAsync();
        if (position != null)
        {
            // got a cached position, so let's use it
            return position;
        }

        if (!locator.IsGeolocationAvailable || !locator.IsGeolocationEnabled)
        {
            // not available or enabled
            return null;
        }

        position = await locator.GetPositionAsync(TimeSpan.FromSeconds(20), null, true);
    }
    catch (Exception ex)
    {
        Debug.WriteLine("Unable to get location: " + ex);
    }

    if (position == null)
        return null;

    var output = string.Format("Time: {0} \nLat: {1} \nLong: {2} \nAltitude: {3} \nAltitude Accuracy: {4} \nAccuracy: {5} \nHeading: {6} \nSpeed: {7}",
        position.Timestamp, position.Latitude, position.Longitude,
        position.Altitude, position.AltitudeAccuracy, position.Accuracy, position.Heading, position.Speed);
    Debug.WriteLine(output);

    return position;
}
This method is designed to follow a hierarchical pattern of location retrieval: it starts with the last 'known' position and only queries the device if necessary. You could also take this a step further and add a timeout to the cached-location check if you wanted to.
As for how often this data is refreshed, I'd refer you to the documentation section titled 'Background Updates'.
James uses a driving app as an example of how this works. The refresh is handled differently across Android and iOS, but here's the snippet regarding Android:
For this you will want to integrate a foreground service that
subscribes to location changes and the user interface binds to. Please
read through the Xamarin.Android Services documentation
In his code example he shows you how to create a 'listener' that checks for changes to the location periodically. This might be a better fit for what you're trying to do, depending on the purpose of your application.

Flutter Image Compression

I wanted to do some image compression before uploading to my Firestore and ran into the following thread: Flutter & Firebase: Compression before upload image
The upload works totally fine, but I cannot see any reduction in the file size... Even if I decrease the quality from 85 to 1, the file still has the same size. Same thing if I upload the image without calling the compression method at all. Here is the code snippet:
void compressImage() async {
  print('starting compression');
  final tempDir = await getTemporaryDirectory();
  final path = tempDir.path;
  int rand = new Math.Random().nextInt(10000);

  Im.Image image = Im.decodeImage(file.readAsBytesSync());
  Im.copyResize(image, 500);
  // image.format = Im.Image.RGBA;
  // Im.Image newim = Im.remapColors(image, alpha: Im.LUMINANCE);
  Im.Image smallerImage = Im.copyResize(image, 500); // choose the size here, it will maintain aspect ratio

  var newim2 = new File('$path/img_$rand.jpg')
    ..writeAsBytesSync(Im.encodeJpg(smallerImage, quality: 85));

  setState(() {
    file = newim2;
  });
  print('done');
}
Any idea what I have to change to make the compression work?
Best regards!
You can use the plugin below; it works fine without any issues and is even faster than I expected.
https://github.com/btastic/flutter_native_image.git
Steps and methods are available at the link above.

bing maps showing large (grid)lines

I wanted to integrate Bing Maps into a website for one of our clients, but I get strange results. (Unfortunately I can't add an image.) There are these large grid(?) lines, up to some 10 pixels wide, and I do not know how to remove them. I use the following basic code:
var map = null;

function getMap() {
    map = new Microsoft.Maps.Map(document.getElementById('myMap'), {
        credentials: '...mycredentials....',
        enableClickableLogo: false,
        enableSearchLogo: false,
        showDashboard: false,
        zoom: 14,
        center: new Microsoft.Maps.Location(...thelatitude..., ...thelongitude...)
    });
    map.entities.clear();

    var pushpin = new Microsoft.Maps.Pushpin(map.getCenter(), null);
    map.entities.push(pushpin);
}

getMap();
getMap();

Filtering a loaded kml file in OpenLayers

I'm trying to create an interactive search engine (for finding event tickets), one of whose features is a visual map that shows related venues using OpenLayers. I have a plethora of venues (3000+) in a KML file, and I would like to selectively show a filtered subsection of them. Below is the code I have, but when I run it there is a JavaScript error. Running Firebug and the Chrome developer tools makes me think the parameters I give are not getting passed, because it says the variables are null. However, I cannot figure out why they are not getting passed. Any insight is greatly appreciated.
var map, drawControls, selectControl, selectedFeature, select;

$('#kml').load('venuesComplete.kml');
kml = $('#kml').html();

function showVenues(state, city, venue) {
    filterStrategy = new OpenLayers.Strategy.Filter({});
    var kmllayer = new OpenLayers.Layer.Vector("KML", {
        strategies: [filterStrategy, new OpenLayers.Strategy.Fixed()],
        protocol: new OpenLayers.Protocol.HTTP({
            url: "venuesComplete.kml",
            format: new OpenLayers.Format.KML({
                extractStyles: true,
                extractAttributes: true
            })
        })
    });

    select = new OpenLayers.Control.SelectFeature(kmllayer);
    kmllayer.events.on({
        "featureselected": onFeatureSelect,
        "featureunselected": onFeatureUnselect
    });
    map.addControl(select);
    select.activate();

    filter = new OpenLayers.Filter.Comparison({
        type: OpenLayers.Filter.Comparison.LIKE,
        property: "",
        value: ""
    });

    function clearFilter() {
        filterStrategy.setFilter(null);
    }

    function setFilter(property, value) {
        filter.value = value;
        filter.property = property;
        filterStrategy.setFilter(filter);
    }

    var vector_style = new OpenLayers.Style();

    if (venue != "") {
        setFilter('name', venue);
    } else if (city != "") {
        setFilter('description', city);
    } else if (state != "") {
        setFilter('description', state);
    }

    map.addLayer(kmllayer);

    function onPopupClose(evt) {
        select.unselectAll();
    }

    function onFeatureSelect(event) {
        var feature = event.feature;
        var selectedFeature = feature;
        var popup = new OpenLayers.Popup.FramedCloud("chicken",
            feature.geometry.getBounds().getCenterLonLat(),
            new OpenLayers.Size(100, 100),
            "<h2>" + feature.attributes.name + "</h2>" + feature.attributes.description + '<br>' + feature.attributes,
            null,
            true,
            onPopupClose
        );
        document.getElementById('venueName').value = feature.attributes.name;
        document.getElementById("output").innerHTML = event.feature.id;
        feature.popup = popup;
        map.addPopup(popup);
    }

    function onFeatureUnselect(event) {
        var feature = event.feature;
        if (feature.popup) {
            map.removePopup(feature.popup);
            feature.popup.destroy();
            delete feature.popup;
        }
    }
}

function init() {
    map = new OpenLayers.Map('map');
    var google_map_layer = new OpenLayers.Layer.Google(
        'Google Map Layer',
        { type: google.maps.MapTypeId.HYBRID }
    );
    map.addLayer(google_map_layer);

    state = "";
    state += document.getElementById('stateProvDesc').value;
    city = "";
    city += document.getElementById('cityZip').value;
    venue = "";
    venue += document.getElementById('venueName').value;

    showVenues(state, city, 'Michie Stadium');

    map.addControl(new OpenLayers.Control.LayerSwitcher({}));
    map.zoomToMaxExtent();
}
If I understand correctly, your KML does not load properly. If this is not the case, please disregard my answer.
It is very important to check whether your KML layer was properly loaded. I have a map that loads multiple dynamic (from PHP) KML layers, and it is not uncommon for a large layer simply not to load. When that happens, the operation is aborted, but, as far as OpenLayers is concerned, the layer was properly loaded.
So I do two things: I check whether the amount of loaded data meets the expected number of features from my original PHP KML parser (I use a jQuery/AJAX call for that), and then, in case there is a discrepancy, I try reloading (since this is a loop, I limit it to 5 attempts, so as not to loop infinitely).
Check out some of my code here.
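For what it's worth, here's a minimal sketch of that check-and-retry idea against OpenLayers 2 (the expectedCount value, the loadend event and the refresh({ force: true }) reload are assumptions based on my own setup; adapt them to yours):
var attempts = 0;
var expectedCount = 3000; // assumption: feature count reported by your server-side parser

kmllayer.events.register('loadend', null, function () {
    // If the layer came up short, force the protocol to re-read the KML,
    // but cap the retries so we never loop infinitely.
    if (kmllayer.features.length < expectedCount && attempts < 5) {
        attempts++;
        kmllayer.refresh({ force: true });
    }
});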
