Square Point of Sale web app: "auto_return" does not work

I am setting auto_return to true, but when I complete the payment on Square, the app does not redirect on its own.
{
"amount_money": {
"amount" : "100",
"currency_code" : "USD"
},
"auto_return":true,
"callback_url" : "https://floating-inlet-19449.herokuapp.com/redirect",
"client_id" : "sq0idp-U8x6mJyLFtHuhCfv9sqL5g",
"version": "1.3",
"notes": "notes for the transaction",
"options" : {
"supported_tender_types" : ["CREDIT_CARD"]
}
}

auto_return should be nested in the options object, like this:
{
"amount_money": { "amount" : 100, "currency_code" : "USD" },
"callback_url" : "https://floating-inlet-19449.herokuapp.com/redirect",
"client_id" : "sq0idp-U8x6mJyLFtHuhCfv9sqL5g",
"version": "1.3",
"notes": "notes for the transaction",
"options" : {
"supported_tender_types" : ["CREDIT_CARD"],
"auto_return": true
}
}
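If you build the data object in JavaScript before handing it to Square, a small guard can catch this mistake automatically. This is only a sketch; moveAutoReturn is a hypothetical helper, not part of any Square SDK:
// Hypothetical helper: relocate a stray top-level auto_return into options.
function moveAutoReturn(payload) {
  const { auto_return, options = {}, ...rest } = payload;
  if (auto_return === undefined) return payload;
  return { ...rest, options: { ...options, auto_return } };
}

const fixed = moveAutoReturn({
  amount_money: { amount: 100, currency_code: "USD" },
  auto_return: true, // misplaced at the top level
  options: { supported_tender_types: ["CREDIT_CARD"] },
});
console.log(fixed.options.auto_return); // true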


Ingest pipeline not preserving the date type field

Here is the JSON data that I am trying to send from Filebeat to the ingest pipeline "logpipeline.json" in OpenSearch.
JSON data
{
"#timestamp":"2022-11-08T10:07:05+00:00",
"client":"10.x.x.x",
"server_name":"example.stack.com",
"server_port":"80",
"server_protocol":"HTTP/1.1",
"method":"POST",
"request":"/example/api/v1/",
"request_length":"200",
"status":"500",
"bytes_sent":"598",
"body_bytes_sent":"138",
"referer":"",
"user_agent":"Java/1.8.0_191",
"upstream_addr":"10.x.x.x:10376",
"upstream_status":"500",
"gzip_ratio":"",
"content_type":"application/json",
"request_time":"6.826",
"upstream_response_time":"6.826",
"upstream_connect_time":"0.000",
"upstream_header_time":"6.826",
"remote_addr":"10.x.x.x",
"x_forwarded_for":"10.x.x.x",
"upstream_cache_status":"",
"ssl_protocol":"TLSv",
"ssl_cipher":"xxxx",
"ssl_session_reused":"r",
"request_body":"{\"date\":null,\"sourceType\":\"BPM\",\"processId\":\"xxxxx\",\"comment\":\"Process status: xxxxx: \",\"user\":\"xxxx\"}",
"response_body":"{\"statusCode\":500,\"reasonPhrase\":\"Internal Server Error\",\"errorMessage\":\"xxxx\"}",
"limit_req_status":"",
"log_body":"1",
"connection_upgrade":"close",
"http_upgrade":"",
"request_uri":"/example/api/v1/",
"args":""
}
Filebeat to OpenSearch log shipping
# ---------------------------- Elasticsearch Output ----------------------------
output.elasticsearch:
# Array of hosts to connect to.
hosts: ["192.168.29.117:9200"]
pipeline: logpipeline
#index: "filebeatelastic-%{[agent.version]}-%{+yyyy.MM.dd}"
index: "nginx_dev-%{+yyyy.MM.dd}"
# Protocol - either `http` (default) or `https`.
protocol: "https"
ssl.enabled: true
ssl.verification_mode: none
# Authentication credentials - either API key or username/password.
#api_key: "id:api_key"
username: "filebeat"
password: "filebeat"
I am transforming some of the "data" fields in the ingest pipeline by doing type conversion, which works perfectly. The only problem I am facing is with "#timestamp".
"#timestamp" is of type "date", and once the JSON data goes through the pipeline I map the JSON message to a root-level JSON object called "data". In the transformed data, "data.#timestamp" shows up as type "string" even though I haven't applied any transformation to it.
OpenSearch ingest pipeline - logpipeline.json
{
"description" : "Logging Pipeline",
"processors" : [
{
"json" : {
"field" : "message",
"target_field" : "data"
}
},
{
"date" : {
"field" : "data.#timestamp",
"formats" : ["ISO8601"]
}
},
{
"convert" : {
"field" : "data.body_bytes_sent",
"type": "integer",
"ignore_missing": true,
"ignore_failure": true
}
},
{
"convert" : {
"field" : "data.bytes_sent",
"type": "integer",
"ignore_missing": true,
"ignore_failure": true
}
},
{
"convert" : {
"field" : "data.request_length",
"type": "integer",
"ignore_missing": true,
"ignore_failure": true
}
},
{
"convert" : {
"field" : "data.request_time",
"type": "float",
"ignore_missing": true,
"ignore_failure": true
}
},
{
"convert" : {
"field" : "data.upstream_connect_time",
"type": "float",
"ignore_missing": true,
"ignore_failure": true
}
},
{
"convert" : {
"field" : "data.upstream_header_time",
"type": "float",
"ignore_missing": true,
"ignore_failure": true
}
},
{
"convert" : {
"field" : "data.upstream_response_time",
"type": "float",
"ignore_missing": true,
"ignore_failure": true
}
}
]
}
Is there any way I can preserve the "date" type of the "#timestamp" field even after the transformation carried out in the ingest pipeline?
Indexed document (screenshot omitted)
Edit 1: updated ingest pipeline simulate result
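For reference, this output comes from the ingest pipeline _simulate API. A minimal sketch of the request, assuming the pipeline is registered as logpipeline (the message body is abbreviated); the response it returned follows below:
POST _ingest/pipeline/logpipeline/_simulate
{
  "docs" : [
    {
      "_source" : {
        "message" : "{ \"#timestamp\": \"2022-11-08T10:07:05+00:00\", ... }"
      }
    }
  ]
}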
{
"docs" : [
{
"doc" : {
"_index" : "_index",
"_id" : "_id",
"_source" : {
"index_date" : "2022.11.08",
"#timestamp" : "2022-11-08T12:07:05.000+02:00",
"message" : """
{ "#timestamp": "2022-11-08T10:07:05+00:00", "client": "10.x.x.x", "server_name": "example.stack.com", "server_port": "80", "server_protocol": "HTTP/1.1", "method": "POST", "request": "/example/api/v1/", "request_length": "200", "status": "500", "bytes_sent": "598", "body_bytes_sent": "138", "referer": "", "user_agent": "Java/1.8.0_191", "upstream_addr": "10.x.x.x:10376", "upstream_status": "500", "gzip_ratio": "", "content_type": "application/json", "request_time": "6.826", "upstream_response_time": "6.826", "upstream_connect_time": "0.000", "upstream_header_time": "6.826", "remote_addr": "10.x.x.x", "x_forwarded_for": "10.x.x.x", "upstream_cache_status": "", "ssl_protocol": "TLSv", "ssl_cipher": "xxxx", "ssl_session_reused": "r", "request_body": "{\"date\":null,\"sourceType\":\"BPM\",\"processId\":\"xxxxx\",\"comment\":\"Process status: xxxxx: \",\"user\":\"xxxx\"}", "response_body": "{\"statusCode\":500,\"reasonPhrase\":\"Internal Server Error\",\"errorMessage\":\"xxxx\"}", "limit_req_status": "", "log_body": "1", "connection_upgrade": "close", "http_upgrade": "", "request_uri": "/example/api/v1/", "args": ""}
""",
"data" : {
"server_name" : "example.stack.com",
"request" : "/example/api/v1/",
"referer" : "",
"log_body" : "1",
"upstream_addr" : "10.x.x.x:10376",
"body_bytes_sent" : 138,
"upstream_header_time" : 6.826,
"ssl_cipher" : "xxxx",
"response_body" : """{"statusCode":500,"reasonPhrase":"Internal Server Error","errorMessage":"xxxx"}""",
"upstream_status" : "500",
"request_time" : 6.826,
"upstream_cache_status" : "",
"content_type" : "application/json",
"client" : "10.x.x.x",
"user_agent" : "Java/1.8.0_191",
"ssl_protocol" : "TLSv",
"limit_req_status" : "",
"remote_addr" : "10.x.x.x",
"method" : "POST",
"gzip_ratio" : "",
"http_upgrade" : "",
"bytes_sent" : 598,
"request_uri" : "/example/api/v1/",
"x_forwarded_for" : "10.x.x.x",
"args" : "",
"#timestamp" : "2022-11-08T10:07:05+00:00",
"upstream_connect_time" : 0.0,
"request_body" : """{"date":null,"sourceType":"BPM","processId":"xxxxx","comment":"Process status: xxxxx: ","user":"xxxx"}""",
"request_length" : 200,
"ssl_session_reused" : "r",
"server_port" : "80",
"upstream_response_time" : 6.826,
"connection_upgrade" : "close",
"server_protocol" : "HTTP/1.1",
"status" : "500"
}
},
"_ingest" : {
"timestamp" : "2023-01-18T08:06:35.335066236Z"
}
}
}
]
}
Finally, I was able to resolve my issue. I updated filebeat.yml with the following. Previously the template name and pattern were different, but the default template name "filebeat" and pattern "filebeat" seem to do the job for me.
setup.template.name: "filebeat"
setup.template.pattern: "filebeat"
setup.template.settings:
index.number_of_shards: 1
#index.codec: best_compression
#_source.enabled: false
But I still need to figure out how templates work.
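For what it's worth, the same effect can be made explicit with an index template that maps the field as a date. A minimal sketch, assuming the nginx_dev-* index pattern from the Filebeat config above (the template name nginx_dev is made up):
PUT _index_template/nginx_dev
{
  "index_patterns" : ["nginx_dev-*"],
  "template" : {
    "mappings" : {
      "properties" : {
        "data" : {
          "properties" : {
            "#timestamp" : { "type" : "date" }
          }
        }
      }
    }
  }
}
With a mapping like this in place, the indexed data.#timestamp field is typed as date regardless of which template Filebeat ships.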

AppointmentType resource

I need to return AppointmentTypes as a FHIR resource. Unfortunately, I couldn't find an official FHIR resource for it.
My best guess would be to create a Basic resource, like this:
{
"resourceType": "Basic",
"id" : "id-of-appointment-type",
"identifier" : [
{
"use" : "secondary",
"system" : "http://myUrl/myIdentifier",
"value" : "7"
}
],
"code" : {
"coding": [
{
"system": "http://myUrl/appointment-type",
"code": "appointment-type"
}
]
},
"text" : {
"status" : "generated",
"div" : "<div xmlns=\"http://www.w3.org/1999/xhtml\">AppointmentType</div>"
},
"extension": [
{
"url": "http://myUrl/appointment-type-name",
"valueString": "New Patient"
},
{
"url": "http://myUrl/appointment-type-availability",
"valueBoolean": true
}
],
"meta" : {
"lastUpdated" : "2020-05-27T00:00:00.000Z"
}
}
Would this be the right way to create the AppointmentType resource?
I don't see any obvious issues, but did you evaluate using CodeSystem? You can define properties on CodeSystem codes that can distinguish available from non-available appointment types, and that would work better with Appointment, where 'type' is expected to be a code.
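To make that concrete, here is a minimal sketch of such a CodeSystem; the URL, codes, and the "available" property are illustrative only:
{
  "resourceType": "CodeSystem",
  "id": "appointment-type",
  "url": "http://myUrl/appointment-type",
  "status": "active",
  "content": "complete",
  "property": [
    {
      "code": "available",
      "type": "boolean",
      "description": "Whether this appointment type can currently be booked"
    }
  ],
  "concept": [
    {
      "code": "new-patient",
      "display": "New Patient",
      "property": [
        { "code": "available", "valueBoolean": true }
      ]
    }
  ]
}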

Simple Schema populate default value from collection

How can I populate SimpleSchema's default value with a call to a collection in Meteor.js, instead of hard-coding the "tests" within defaultValue as below? Ideally the default value would return everything from TestList = new Mongo.Collection('testList').
StudentSchema = new SimpleSchema({
tests: {
type: [Object],
blackbox: true,
optional: true,
defaultValue:[
{
"_id" : "T2yfqWJ3a5rQz64WN",
"category_id" : "5",
"active" : "true",
"category" : "Cognitive/Intelligence",
"abbr" : "WJ-IV COG",
"name" : "Woodcock-Johnson IV, Tests of Cognitive Abilities",
"publisher" : "Riverside Publishing"
},
{
"_id" : "Ai8bT6dLYGQRDfvKe",
"category_id" : "5",
"active" : "true",
"category" : "Cognitive/Intelligence",
"abbr" : "WISC-IV",
"name" : "Wechsler Intelligence Scale for Children-Fourth Edition",
"publisher" : "The Psychological Corporation"
},
{
"_id" : "osAuaLrX97meRZuda",
"category_id" : "7",
"active" : "true",
"category" : "Speech and Language",
"abbr" : "WOJO",
"name" : "Wechsler Intelligence",
"publisher" : "The Psychological Corporation"
},
{
"_id" : "57c62a784b94c533b656dba8",
"category_id" : "5",
"active" : "true",
"category" : "Behavioral",
"abbr" : "CARS",
"name" : "CARS",
"publisher" : "The Psychological Corporation"
}
]
}
});
The following dynamically loads all entries from the "TestList" collection into the "tests" array:
TestList = new Mongo.Collection('testList');
StudentSchema = new SimpleSchema({
tests: {
type: [Object],
blackbox: true,
optional: true,
autoValue: function () {
return TestList.find().fetch();
}
}
});
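One caveat: as written, the autoValue runs on every insert and update and will overwrite any "tests" value the client submits. If the collection should only seed the field when nothing was provided, the autoValue can be guarded with SimpleSchema's documented this.isInsert and this.isSet helpers (a sketch):
autoValue: function () {
  // Seed from TestList only on insert, and only when the client
  // did not supply "tests" itself.
  if (this.isInsert && !this.isSet) {
    return TestList.find().fetch();
  }
}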

Google places API not returning address components

My program is trying to determine the city, state, and country based on some text. For example, for "New york yankee stadium" I want to get New York City, NY, USA. I am using the Google Places API to do this. According to the documentation, the API should return a list of address components (https://developers.google.com/places/web-service/details). However, right now it is only returning the formatted address "1 E 161st St, Bronx, NY 10451, United States".
Here is my web service URL:
https://maps.googleapis.com/maps/api/place/textsearch/json?key=MY_KEY&query=new%20york%20yankee%20stadium
Is anyone familiar with the Google Places API who can let me know whether I am using the right query or parameters?
{
"html_attributions" : [],
"results" : [
{
"formatted_address" : "1 E 161st St, Bronx, NY 10451, United States",
"geometry" : {
"location" : {
"lat" : 40.82964260000001,
"lng" : -73.9261745
},
"viewport" : {
"northeast" : {
"lat" : 40.83279975,
"lng" : -73.92236575000001
},
"southwest" : {
"lat" : 40.82643674999999,
"lng" : -73.93052034999999
}
}
},
"icon" : "https://maps.gstatic.com/mapfiles/place_api/icons/generic_business-71.png",
"id" : "3d78036d61d35f48650bda737226432b57d82511",
"name" : "Yankee Stadium",
"opening_hours" : {
"open_now" : true,
"weekday_text" : []
},
"photos" : [
{
"height" : 540,
"html_attributions" : [
"\u003ca href=\"https://maps.google.com/maps/contrib/101696810905045719819/photos\"\u003eYankee Stadium\u003c/a\u003e"
],
"photo_reference" : "CoQBdwAAAIxmCLrNS_XZ2FcJqVvRVtBUlNYMBVTVKppOWBu7sICj2q70cqJARBoJlTcZpydbMTzURKWWMVJhYpVCqsnia5pjmDhjvjsTirrEnAc6gvmRYKuUwgewB9Re--FulXzXZ5DY3P9fkwIwuc4U9BJVbqHD5O-N6SbbHcqn4XHUj_OdEhCoNPZ3kiNJhxOCGdYG5O4DGhTqVfUjdq7JzasqYATvQxkL1-H3xg",
"width" : 1242
}
],
"place_id" : "ChIJcWnnWiz0wokRCB6aVdnDQEk",
"rating" : 4.4,
"reference" : "CmRRAAAA5dHiw1YmLxW60_jITBZjMiUs48L4aVUqlPnPDpN_ySa7rw8kPp04WWk0qf8mG-kkMFSNzh39lP0YwfynW54tLcY4s_EYbAPvNWTMe6wXHm_FJiVbI0Lfenyxz4yOTzunEhDgI64EWoXkQe9k45y6qP3-GhSVSdCMPPZA3joFbnYGV-bqo2e0lw",
"types" : [ "stadium", "point_of_interest", "establishment" ]
}
],
"status" : "OK"
}
It's a two-step process: first, search and get the place_id from the Google Places search service; then pass the returned place_id in another call to the details endpoint to receive the individual address components:
https://maps.googleapis.com/maps/api/place/details/json?place_id=ChIJcWnnWiz0wokRCB6aVdnDQEk&key=
{"html_attributions": [],
"result": {
"address_components": [
{
"long_name": "1",
"short_name": "1",
"types": [
"street_number"
]
},
{
"long_name": "East 161st Street",
"short_name": "E 161st St",
"types": [
"route"
]
},
{
"long_name": "Concourse",
"short_name": "Concourse",
"types": [
"neighborhood",
"political"
]
},
{
"long_name": "Bronx",
"short_name": "Bronx",
"types": [
"sublocality_level_1",
"sublocality",
"political"
]
},
{
"long_name": "Bronx County",
"short_name": "Bronx County",
"types": [
"administrative_area_level_2",
"political"
]
},
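Putting the two calls together in JavaScript might look like the sketch below (fetch-based; MY_KEY is a placeholder, and the sublocality fallback reflects the Bronx result above, where no locality component is guaranteed):
// Sketch: text search for the place_id, then place details for components.
const KEY = "MY_KEY"; // placeholder API key

async function cityStateCountry(text) {
  const search = await fetch(
    "https://maps.googleapis.com/maps/api/place/textsearch/json?key=" +
      KEY + "&query=" + encodeURIComponent(text)
  ).then((r) => r.json());
  const placeId = search.results[0].place_id;

  const details = await fetch(
    "https://maps.googleapis.com/maps/api/place/details/json?key=" +
      KEY + "&place_id=" + placeId
  ).then((r) => r.json());

  // Standard component types for city, state, and country.
  const byType = (t) =>
    details.result.address_components.find((c) => c.types.includes(t));
  return {
    city: byType("locality") || byType("sublocality_level_1"),
    state: byType("administrative_area_level_1"),
    country: byType("country"),
  };
}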

Server logs to Dojo front end: appropriate data storage method?

Good morning all, I am currently building a tool which scrapes a number of figures from various logs on several different servers, using AUX and shell scripts, on a minute-by-minute basis. Currently I have scripts that also produce HTML files every minute so that I can look back and see the system health for any given minute. So essentially these HTML files are being used as the archiving medium, which obviously is very strange.
Taking this tool forward, I am looking to build a front end on the Dojo platform which will support widgets and graphs for each of the different data sets gathered from the logs. I haven't used Dojo much in the past and was hoping that somebody more experienced with the technology could tell me what storage medium I should be using for these simple metrics produced by the scripts every minute. I have heard of the JSON approach but am not sure.
Thanks for your time!
Jonny
Just to follow up on this in the rare chance it may be stumbled upon by another user: I decided that JSON was definitely the way forward. Each minute I have a system that produces a JSON file like the one below.
{
"DataDTS" : "2014.05.02-09:00",
"System" : "Performance Test",
"SourceServer" : "gb02qws122debx7",
"OldestData" : "2014.05.02-09:00",
"MetricData" : {
"FredHopper Direct" : {
"Small" : "FH",
"Type" : "ByMinute",
"HeaderLevels" : 1,
"Data": { "Failures":0, "Avg":"0.318", "Min":"0.306", "Max":"0.340" }
},
"Performance By Instance" : {
"Small" : "RATE",
"Type" : "ByMinute",
"HeaderLevels" : 2,
"Sections" : {
"102.01" : { "Attributes" : ["(20)"], "Data": { "Req":"999", "Avg Resp":"0.254" } },
"102.02" : { "Attributes" : ["(20)"], "Data": { "Req":"144", "Avg Resp":"0.376" } },
"103.01" : { "Attributes" : ["(20)"], "Data": { "Req":"168", "Avg Resp":"0.199" } },
"103.02" : { "Attributes" : ["(20)"], "Data": { "Req":"152", "Avg Resp":"0.283" } },
"Unallocated" : { "Attributes": [], "Data": {"Req":"87", "Avg Resp":"0.154"} },
"qws122" : { "Attributes": [], "Data": {"Tot Req":"460", "Rate PPS":"7.7"} }
}
},
"HTML Status" : {
"Small" : "HTML",
"Type" : "Status",
"HeaderLevels" : 1,
"Data" : [
{ "Code":"200", "Req":447 },
{ "Code":"206", "Req":1 },
{ "Code":"301", "Req":7 },
{ "Code":"302", "Req":5 },
{ "Code":"TOT", "Req":460 }
]
},
"Page Breakdown" : {
"Small" : "PB",
"Type" : "Status",
"HeaderLevels" : 2,
"Sections" : {
"Top 15 Average Response Times" : {
"Attributes": [],
"Data" : [
{ "URL":"men", "Req":1, "Avg Resp":"3.597", "Cached":"0.0%" },
{ "URL":"jacques-vert", "Req":1, "Avg Resp":"3.335", "Cached":"0.0%" },
{ "URL":"OrderOKView", "Req":1, "Avg Resp":"1.615", "Cached":"0.0%" },
{ "URL":"warehouse", "Req":1, "Avg Resp":"1.050", "Cached":"0.0%" }
]
},
"Top 15 Requests" : {
"Attributes": [],
"Data" : [
{ "URL":"Navigate", "Req":220, "Avg Resp":"0.372", "Cached":"26.4%" },
{ "URL":"prod", "Req":42, "Avg Resp":"0.186", "Cached":"13.5%" },
{ "URL":"sureroute-test-object.html", "Req":24, "Avg Resp":"0.001", "Cached":"0.0%" },
{ "URL":"TopCategories1", "Req":8, "Avg Resp":"0.032", "Cached":"0.0%" }
]
}
}
},
"App server CPU" : {
"Small" : "CPU",
"Type" : "ByMinute",
"HeaderLevels" : 2,
"Sections" : {
"qap302" : { "Attributes" : ["Ent:7.0", "Max:8.0", "Mem:32768MB", "Wgt:200"], "Data": { "usr%":"16.8", "iow%":"1.4", "phy":"1.8", "ent%":"25.1" } },
"qap312" : { "Attributes" : ["Ent:7.0", "Max:8.0", "Mem:32768MB", "Wgt:200"], "Data": { "usr%":"8.1", "iow%":"0.1", "phy":"0.8", "ent%":"11.0" } }
}
}
}
}
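On the front end, each per-minute JSON file can then be fetched and handed straight to widgets. A minimal sketch using dojo/request (the file path is made up):
require(["dojo/request"], function (request) {
  // Fetch one minute's snapshot and parse it as JSON.
  request.get("/metrics/2014.05.02-0900.json", { handleAs: "json" })
    .then(function (snapshot) {
      // e.g. feed the CPU figures into a chart widget
      var cpu = snapshot.MetricData["App server CPU"].Sections;
      console.log(cpu["qap302"].Data["usr%"]);
    });
});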
