Here is the JSON data that I am trying to send from Filebeat to the ingest pipeline "logpipeline.json" in OpenSearch.
JSON data:
{
"#timestamp":"2022-11-08T10:07:05+00:00",
"client":"10.x.x.x",
"server_name":"example.stack.com",
"server_port":"80",
"server_protocol":"HTTP/1.1",
"method":"POST",
"request":"/example/api/v1/",
"request_length":"200",
"status":"500",
"bytes_sent":"598",
"body_bytes_sent":"138",
"referer":"",
"user_agent":"Java/1.8.0_191",
"upstream_addr":"10.x.x.x:10376",
"upstream_status":"500",
"gzip_ratio":"",
"content_type":"application/json",
"request_time":"6.826",
"upstream_response_time":"6.826",
"upstream_connect_time":"0.000",
"upstream_header_time":"6.826",
"remote_addr":"10.x.x.x",
"x_forwarded_for":"10.x.x.x",
"upstream_cache_status":"",
"ssl_protocol":"TLSv",
"ssl_cipher":"xxxx",
"ssl_session_reused":"r",
"request_body":"{\"date\":null,\"sourceType\":\"BPM\",\"processId\":\"xxxxx\",\"comment\":\"Process status: xxxxx: \",\"user\":\"xxxx\"}",
"response_body":"{\"statusCode\":500,\"reasonPhrase\":\"Internal Server Error\",\"errorMessage\":\"xxxx\"}",
"limit_req_status":"",
"log_body":"1",
"connection_upgrade":"close",
"http_upgrade":"",
"request_uri":"/example/api/v1/",
"args":""
}
Filebeat to OpenSearch log shipping (filebeat.yml):
# ---------------------------- Elasticsearch Output ----------------------------
output.elasticsearch:
  # Array of hosts to connect to.
  hosts: ["192.168.29.117:9200"]
  pipeline: logpipeline

  #index: "filebeatelastic-%{[agent.version]}-%{+yyyy.MM.dd}"
  index: "nginx_dev-%{+yyyy.MM.dd}"

  # Protocol - either `http` (default) or `https`.
  protocol: "https"
  ssl.enabled: true
  ssl.verification_mode: none

  # Authentication credentials - either API key or username/password.
  #api_key: "id:api_key"
  username: "filebeat"
  password: "filebeat"
I am transforming some of the "data" fields in the ingest pipeline with type conversions, and that works perfectly. The only problem I am facing is with "#timestamp".
The "#timestamp" field is of type "date". Once the JSON message goes through the pipeline I map it to a root-level JSON object called "data", and in the transformed data "data.#timestamp" shows up as type "string", even though I haven't applied any transformation to it.
OpenSearch ingest pipeline - logpipeline.json:
{
"description" : "Logging Pipeline",
"processors" : [
{
"json" : {
"field" : "message",
"target_field" : "data"
}
},
{
"date" : {
"field" : "data.#timestamp",
"formats" : ["ISO8601"]
}
},
{
"convert" : {
"field" : "data.body_bytes_sent",
"type": "integer",
"ignore_missing": true,
"ignore_failure": true
}
},
{
"convert" : {
"field" : "data.bytes_sent",
"type": "integer",
"ignore_missing": true,
"ignore_failure": true
}
},
{
"convert" : {
"field" : "data.request_length",
"type": "integer",
"ignore_missing": true,
"ignore_failure": true
}
},
{
"convert" : {
"field" : "data.request_time",
"type": "float",
"ignore_missing": true,
"ignore_failure": true
}
},
{
"convert" : {
"field" : "data.upstream_connect_time",
"type": "float",
"ignore_missing": true,
"ignore_failure": true
}
},
{
"convert" : {
"field" : "data.upstream_header_time",
"type": "float",
"ignore_missing": true,
"ignore_failure": true
}
},
{
"convert" : {
"field" : "data.upstream_response_time",
"type": "float",
"ignore_missing": true,
"ignore_failure": true
}
}
]
}
Is there any way I can preserve the "date" type of "#timestamp" even after the transformation carried out in the ingest pipeline?
indexed document image:
Edit 1: Updated ingest pipeline simulate result
{
"docs" : [
{
"doc" : {
"_index" : "_index",
"_id" : "_id",
"_source" : {
"index_date" : "2022.11.08",
"#timestamp" : "2022-11-08T12:07:05.000+02:00",
"message" : """
{ "#timestamp": "2022-11-08T10:07:05+00:00", "client": "10.x.x.x", "server_name": "example.stack.com", "server_port": "80", "server_protocol": "HTTP/1.1", "method": "POST", "request": "/example/api/v1/", "request_length": "200", "status": "500", "bytes_sent": "598", "body_bytes_sent": "138", "referer": "", "user_agent": "Java/1.8.0_191", "upstream_addr": "10.x.x.x:10376", "upstream_status": "500", "gzip_ratio": "", "content_type": "application/json", "request_time": "6.826", "upstream_response_time": "6.826", "upstream_connect_time": "0.000", "upstream_header_time": "6.826", "remote_addr": "10.x.x.x", "x_forwarded_for": "10.x.x.x", "upstream_cache_status": "", "ssl_protocol": "TLSv", "ssl_cipher": "xxxx", "ssl_session_reused": "r", "request_body": "{\"date\":null,\"sourceType\":\"BPM\",\"processId\":\"xxxxx\",\"comment\":\"Process status: xxxxx: \",\"user\":\"xxxx\"}", "response_body": "{\"statusCode\":500,\"reasonPhrase\":\"Internal Server Error\",\"errorMessage\":\"xxxx\"}", "limit_req_status": "", "log_body": "1", "connection_upgrade": "close", "http_upgrade": "", "request_uri": "/example/api/v1/", "args": ""}
""",
"data" : {
"server_name" : "example.stack.com",
"request" : "/example/api/v1/",
"referer" : "",
"log_body" : "1",
"upstream_addr" : "10.x.x.x:10376",
"body_bytes_sent" : 138,
"upstream_header_time" : 6.826,
"ssl_cipher" : "xxxx",
"response_body" : """{"statusCode":500,"reasonPhrase":"Internal Server Error","errorMessage":"xxxx"}""",
"upstream_status" : "500",
"request_time" : 6.826,
"upstream_cache_status" : "",
"content_type" : "application/json",
"client" : "10.x.x.x",
"user_agent" : "Java/1.8.0_191",
"ssl_protocol" : "TLSv",
"limit_req_status" : "",
"remote_addr" : "10.x.x.x",
"method" : "POST",
"gzip_ratio" : "",
"http_upgrade" : "",
"bytes_sent" : 598,
"request_uri" : "/example/api/v1/",
"x_forwarded_for" : "10.x.x.x",
"args" : "",
"#timestamp" : "2022-11-08T10:07:05+00:00",
"upstream_connect_time" : 0.0,
"request_body" : """{"date":null,"sourceType":"BPM","processId":"xxxxx","comment":"Process status: xxxxx: ","user":"xxxx"}""",
"request_length" : 200,
"ssl_session_reused" : "r",
"server_port" : "80",
"upstream_response_time" : 6.826,
"connection_upgrade" : "close",
"server_protocol" : "HTTP/1.1",
"status" : "500"
}
},
"_ingest" : {
"timestamp" : "2023-01-18T08:06:35.335066236Z"
}
}
}
]
}
Finally able to resolve my issue. I updated filebeat.yml with the following. Previously the template name and pattern were different, but the default template name "filebeat" and pattern "filebeat" seem to be doing the job for me:
setup.template.name: "filebeat"
setup.template.pattern: "filebeat"
setup.template.settings:
  index.number_of_shards: 1
  #index.codec: best_compression
  #_source.enabled: false
But I still need to figure out how templates work, though.
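In case it helps anyone else: the type that "data.#timestamp" ends up with is decided by the index mapping, not by the ingest pipeline, so another way to pin it down is an explicit index template. A minimal sketch (the template name, the _index_template request, and the exact mapping path are assumptions based on my index pattern, not something Filebeat generates for you):

PUT _index_template/nginx_dev_template
{
  "index_patterns": ["nginx_dev-*"],
  "template": {
    "mappings": {
      "properties": {
        "data": {
          "properties": {
            "#timestamp": { "type": "date" }
          }
        }
      }
    }
  }
}

With something like this in place, every new "nginx_dev-*" index maps "data.#timestamp" as "date", whatever the pipeline emits.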
How is it possible to populate a SimpleSchema field's default value with a call to a collection in Meteor JS, instead of defining the "tests" within defaultValue as below? Ideally the defaultValue would return everything from TestList = new Mongo.Collection('testList').
StudentSchema = new SimpleSchema({
tests: {
type: [Object],
blackbox: true,
optional: true,
defaultValue:[
{
"_id" : "T2yfqWJ3a5rQz64WN",
"category_id" : "5",
"active" : "true",
"category" : "Cognitive/Intelligence",
"abbr" : "WJ-IV COG",
"name" : "Woodcock-Johnson IV, Tests of Cognitive Abilities",
"publisher" : "Riverside Publishing"
},
{
"_id" : "Ai8bT6dLYGQRDfvKe",
"category_id" : "5",
"active" : "true",
"category" : "Cognitive/Intelligence",
"abbr" : "WISC-IV",
"name" : "Wechsler Intelligence Scale for Children-Fourth Edition",
"publisher" : "The Psychological Corporation"
},
{
"_id" : "osAuaLrX97meRZuda",
"category_id" : "7",
"active" : "true",
"category" : "Speech and Language",
"abbr" : "WOJO",
"name" : "Wechsler Intelligence",
"publisher" : "The Psychological Corporation"
},
{
"_id" : "57c62a784b94c533b656dba8",
"category_id" : "5",
"active" : "true",
"category" : "Behavioral",
"abbr" : "CARS",
"name" : "CARS",
"publisher" : "The Psychological Corporation"
}
    ]
  }
});
Dynamically loading all entries from the "TestList" collection into the "tests" array:
TestList = new Mongo.Collection('testList');
StudentSchema = new SimpleSchema({
tests: {
type: [Object],
blackbox: true,
optional: true,
autoValue: function () {
return TestList.find().fetch();
    }
  }
});
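Keep in mind that autoValue only runs when a document is actually inserted or updated, so TestList must already contain the documents you expect at that moment (on the server, or published to the client if the insert happens there). A tiny usage sketch, where the Students collection and the attachSchema call are assumptions for illustration (attachSchema comes from the aldeed:collection2 package):

Students = new Mongo.Collection('students');
Students.attachSchema(StudentSchema); // requires aldeed:collection2

// On insert, autoValue runs and copies every TestList document into "tests".
Students.insert({ name: 'Jane Doe' });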
I am attempting to provision a few EC2 instances that need multiple EBS drives, defining the root volume and four other volumes through BlockDeviceMappings.
Problem:
As far as I can tell, the code below conforms to every example I have seen online. But when Windows boots up, it dies instantly. Looking at the EC2 console (screenshot), I can see that the instance has seven EBS volumes attached (instead of five) and that /dev/xda is set as root instead of /dev/sda1.
"Mappings" : {
"AWSRegionToAMI" : {
"us-east-1" : { "Windows2012R2" : "ami-5d1b984a" },
"us-west-1" : { "Windows2012R2" : "ami-07713767" },
"us-west-2" : { "Windows2012R2" : "ami-241bd844" }
},
"VolumeSize" : {
"DataDrive" : { "Size" : "50" },
"LogDrive" : { "Size" : "50" },
"TempDrive" : { "Size" : "400" },
"BackupDrive" : { "Size" : "100" }
},
"stackmap" : {
"sqlha" : {
"Name": "MS SQL Server 2014 Always On",
"chefjson" : "https://s3.amazonaws.com/[redacted]",
"os" : "win",
"bootstrapurl" : "https://s3.amazonaws.com/[redacted]"
}
}
},
"WSFCNode1": {
"Type": "AWS::EC2::Instance",
"Properties": {
"ImageId" : { "Fn::FindInMap" : [ "AWSRegionToAMI", { "Ref" : "AWS::Region" }, "Windows2012R2" ] },
"InstanceType": { "Ref": "InstanceType" },
"EbsOptimized": "true",
"NetworkInterfaces": [
{
"DeleteOnTermination": "true",
"DeviceIndex": 0,
"SubnetId": { "Ref": "PrivateSubnet1ID" },
"SecondaryPrivateIpAddressCount": 2,
"GroupSet": [
{ "Ref": "WSFCSecurityGroup" },
{ "Ref": "WSFCClientSecurityGroup" }
]
}
],
"BlockDeviceMappings": [
{
"DeviceName": "/dev/sda1",
"Ebs" : {"VolumeSize": "60"}
},
{
"DeviceName": "/dev/xvdb",
"Ebs" : {"VolumeSize": { "Fn::FindInMap" : [ "VolumeSize", "DataDrive", "Size" ]} }
},
{
"DeviceName": "/dev/xvdc",
"Ebs" : {"VolumeSize": { "Fn::FindInMap" : [ "VolumeSize", "LogDrive", "Size" ]} }
},
{
"DeviceName": "/dev/xvdd",
"Ebs" : {"VolumeSize": { "Fn::FindInMap" : [ "VolumeSize", "TempDrive", "Size" ]} }
},
{
"DeviceName": "/dev/xvde",
"Ebs" : {"VolumeSize": { "Fn::FindInMap" : [ "VolumeSize", "BackupDrive", "Size" ]} }
}
],
"KeyName": { "Ref": "KeyPairName" },
"UserData" : { "Fn::Base64" : { "Fn::Join" : ["", [
"<powershell>\n",
"# Disable UAC and allow scripts to run\n",
"New-ItemProperty -Path HKLM:Software\\Microsoft\\Windows\\CurrentVersion\\policies\\system -Name EnableLUA -PropertyType DWord -Value 0 -Force\n",
"Set-ExecutionPolicy Unrestricted -force\n",
"c:\\windows\\System32\\WindowsPowershell\\v1.0\\powershell.exe -noninteractive -noprofile Set-ExecutionPolicy unrestricted -force\n",
"c:\\windows\\syswow64\\windowspowershell\\v1.0\\powershell.exe -noninteractive -noprofile Set-ExecutionPolicy unrestricted -force\n",
"#Change TimeZone\n",
"tzutil /s ", {"Ref" : "Timezone"}, "\n",
"#Run Bootstrap PS1\n",
"$newname = '", { "Fn::Join" : ["", [{"Ref" : "Environment"}, {"Ref" : "Location"}, {"Ref" : "Stack"}, {"Ref" : "Role"} ]]},"'\n",
"$region = '", {"Ref" : "VPCRegion"}, "'\n",
"$role = '", {"Ref" : "Role"}, "'\n",
"$chef_rb = '", { "Fn::FindInMap" : [ "stackmap", { "Ref" : "Role" }, "chefjson"]}, "'\n",
"mkdir 'c:\\temp' -force\n",
"(new-object System.Net.WebClient).DownloadFile( 'https://s3.amazonaws.com/[redacted]', 'c:\\temp\\bootstrap.ps1')\n",
"powershell c:\\temp\\bootstrap.ps1 -newname $newname -region $region -role $role -chef_rb $chef_rb -logfile c:\\temp\\bootstrap.log -verbose true\n",
"#Reboot if needed\n",
"Start-Sleep -s 10\n",
"Restart-Computer\n",
"mkdir 'c:\\temp\\cf_reboot_cmd_ran' -force\n",
"shutdown -r\n",
"mkdir 'c:\\temp\\cf_shut_cmd_ran' -force\n",
"Start-Sleep -s 10\n",
"mkdir 'c:\\temp\\cf_ran_again' -force\n",
"</powershell>"
] ] }
},
"Tags": [
{ "Key": "Name", "Value": "SQL Node 1" }
]
}
},
Confusingly, even when I drop all the extra drives and just do a block device mapping of one disk
"BlockDeviceMappings": [
{
"DeviceName": "/dev/sda1",
"Ebs" : {"VolumeSize": "60"}
}
],
...I still end up with three volumes, and the wrong one (/dev/xda) assigned as root. Screenshot.
Is this a Windows thing? What do my block device mappings need to look like to mount correctly as root (or C:, in this case)?
Never mind. The root problem was in the AMI I chose. Once I selected a proper Windows AMI, everything worked perfectly.
For anybody else running into this problem, double-check your AMI selection.
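One way to sanity-check an AMI before launching from it is to ask EC2 for its root device name and the block device mappings it already registers; a Windows AMI whose root device is not /dev/sda1, or that carries extra mappings of its own, can produce exactly this kind of surprise. A rough sketch using the AWS CLI (the AMI ID is just the us-east-1 one from the mapping above):

aws ec2 describe-images --image-ids ami-5d1b984a \
    --query "Images[0].{Root:RootDeviceName,Devices:BlockDeviceMappings[].DeviceName}"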
I have been trying to run a simple test with Nightwatch.js and I keep running into issues.
I believe I have set up my JSON file correctly:
{
"src_folder" : ["./smoketests"],
"output_folder" : "./reports",
"selenium" : {
"start_process" : true,
"start_session" : true,
"server_path" : "M:/nightwatch/lib/selenium-server-standalone-2.48.2.jar",
"log_path" : false,
"host" : "127.0.0.1",
"port" : 4444,
"cli_args" : {
"webdriver.chrome.driver" : "./lib/chromedriver.exe"
}
},
"test_settings" : {
"default" : {
"launch_url" : "http://www.google.com/",
"selenium_port" : 4444,
"selenium_host" : "localhost",
"silent" : true,
"screenshots" : {
"enabled" : false,
"path" : "./screenshots/smoketests"
}
},
"desiredCapabilities" : {
"browserName" : "firefox",
"javascriptEnabled" : true,
"acceptSslCerts" : true
},
"chrome" : {
"desiredCapabilities" : {
"browserName" : "chrome",
"javascriptEnabled" : true,
"acceptSslCerts" : true
}
}
}
}
and my test is pretty simple:
module.exports = {
beforeEach : function(browser) {
browser.maximizeWindow();
},
'Test title' : function(browser) {
browser
.url('http://www.google.com/')
.waitForElementVisible('body', 1000)
.assert.title("Google")
browser.end();
}
};
Yet, when I run the test:
nightwatch -c smoketests/homepage.json
I receive the following error:
M:\nightwatch>nightwatch -c projects/smoketests/homepage.json
Starting selenium server... started - PID: 6448
C:\Users\jomartinez\AppData\Roaming\npm\node_modules\nightwatch\lib\runner\run.js:116
      var fullPaths = testSource.map(function (p) {
                                 ^
TypeError: Cannot read property 'map' of undefined
    at module.exports.readPaths (C:\Users\jomartinez\AppData\Roaming\npm\node_modules\nightwatch\lib\runner\run.js:116:31)
    at runner [as run] (C:\Users\jomartinez\AppData\Roaming\npm\node_modules\nightwatch\lib\runner\run.js:182:10)
    at Object.CliRunner.runner (C:\Users\jomartinez\AppData\Roaming\npm\node_modules\nightwatch\lib\runner\cli\clirunner.js:345:16)
    at C:\Users\jomartinez\AppData\Roaming\npm\node_modules\nightwatch\lib\runner\cli\clirunner.js:321:12
    at SeleniumServer.onStarted (C:\Users\jomartinez\AppData\Roaming\npm\node_modules\nightwatch\lib\runner\cli\clirunner.js:281:9)
    at SeleniumServer.checkProcessStarted (C:\Users\jomartinez\AppData\Roaming\npm\node_modules\nightwatch\lib\runner\selenium.js:140:10)
    at SeleniumServer.onStderrData (C:\Users\jomartinez\AppData\Roaming\npm\node_modules\nightwatch\lib\runner\selenium.js:120:8)
    at emitOne (events.js:77:13)
    at Socket.emit (events.js:169:7)
    at readableAddChunk (_stream_readable.js:146:16)
Has anybody else encountered this issue as well?
I think I figured out my initial issue. I had a syntax error in "src_folders" in my JSON file. After fixing it, my test seems to run fine.
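For reference, the setting Nightwatch expects is the plural "src_folders" (the config above has "src_folder"), which would explain why testSource ends up undefined when the runner calls .map on it, so the fix is presumably just:

{
  "src_folders" : ["./smoketests"],
  "output_folder" : "./reports"
}

with the rest of the file left unchanged.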
I am new to Ext JS.
I have an Ajax call. I can see the response text in the alert, but the next line, which is supposed to decode the responseText, does not produce any result in the alert box.
My function goes like this:
function openToRecipients()
{
Ext.Ajax.request({
url: "Redirector?id=ClinicalInitiateForm&wfid=CLINICALONGOINGWFINITIATE",
method: 'POST',
success: function(response, opts)
{
alert(response.responseText);
var dataCurrent = Ext.util.JSON.decode(response.responseText);
alert(dataCurrent );
var jsonStr = dataCurrent.cData;
recipientJsonResponse = dataCurrent.dataGrid;
var myObject = eval('(' + jsonStr + ')');
gridStore = new Ext.data.JsonStore({
id : 'gridStore',
autoLoad : true,
data : myObject,
root : 'data',
fields:['NAME',
'CLIENT',
'DESCRIPTION'
],
listeners :{
load : gridDisplay
}
});
},
failure: function(response, opts) {
alert("fail");
}
});
}
This is my JSON after converting it to a string:
"formFields" : [ {
"id" : "NAME",
"set" : "",
"label" : "Name",
"dataType" : "string",
"editType" : "static",
"clientConfig" : "",
"hide" : "False",
"required" : "",
"mask" : "",
"maxValue" : "",
"maxLength" : "",
"minValue" : "",
"value" : "",
"showIf" : "",
"options" : "",
"prePopulate" : "",
"shortForm" : "",
"comments" : "",
"optionsValue" : "",
"currentValue" : "",
"disabled" : "",
"qTip" : "",
"hover" : ""
}, {
"id" : "CLIENT",
"set" : "",
"label" : "Client",
"dataType" : "string",
"editType" : "static",
"clientConfig" : "",
"hide" : "False",
"required" : "",
"mask" : "",
"maxValue" : "",
"maxLength" : "",
"minValue" : "",
"value" : "",
"showIf" : "",
"options" : "",
"prePopulate" : "",
"shortForm" : "",
"comments" : "",
"optionsValue" : "",
"currentValue" : "",
"disabled" : "",
"qTip" : "",
"hover" : ""
}, {
"id" : "DESCRIPTION",
"set" : "",
"label" : "Description",
"dataType" : "string",
"editType" : "static",
"clientConfig" : "",
"hide" : "False",
"required" : "",
"mask" : "",
"maxValue" : "",
"maxLength" : "",
"minValue" : "",
"value" : "",
"showIf" : "",
"options" : "",
"prePopulate" : "",
"shortForm" : "",
"comments" : "",
"optionsValue" : "",
"currentValue" : "",
"disabled" : "",
"qTip" : "",
"hover" : ""
} ],
And this is my data
{'data':[{"NAME":"Shan","CLIENT":"CSC","DESCRIPTION":"Computer science"}]}
How can I load this data into my grid?
Here is the code that you can use:
var myStore = Ext.create( "Ext.data.JsonStore", {
fields: [ "firstname", "lastname" ], // the fields of each item (table line)
proxy: {
type: "ajax", // the proxy uses ajax
actionMethods: { // this config is not necessary for you; I needed it to work with the jsFiddle echo service (if you want to use POST, as in your code, you can skip it)
create: "POST",
read: "POST",
update: "POST",
destroy: "POST"
},
url: "/echo/json/", // here goes your URL that returns your JSON (in your case "Redirector?id...")
reader: {
type: "json", // this store reads data in json format
root: "items" // the items to be read live in an "items" array; in your case "formFields"
}
}
});
// in jsFiddle, we need to send the JSON that we want to read. In your case, you will just call .load() or set the autoLoad config of the store to true. If you want to send additional parameters, you can use the syntax below.
myStore.load({
params: {
// everything inside the encode method will be encoded in json (this format that you must send to the store)
json: Ext.encode({
items: [{
"firstname": "foo",
"lastname": "bar"
}, {
"firstname": "david",
"lastname": "buzatto"
}, {
"firstname": "douglas",
"lastname": "adams"
}]
})
}
});
// creating the grid, setting its columns and the store
Ext.create( "Ext.grid.Panel", {
title: "My Grid",
columns: [{
header: "First Name",
dataIndex: "firstname" // the dataIndex config is used to bind the column with the json data of each item
}, {
header: "Last Name",
dataIndex: "lastname"
}],
store: myStore, // the store created above
renderTo: Ext.getBody() // render the grid to the body
});
You can access a fiddle here: http://jsfiddle.net/cYwhK/1/
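As for the alert that never fires in your original handler: depending on your Ext JS version, Ext.util.JSON.decode may not exist any more (Ext JS 4 exposes it as Ext.JSON.decode / Ext.decode), and decode also throws if the response text is not valid JSON; either of those would stop the code before the second alert. A small sketch of that part of the success handler, assuming the response really should be JSON:

try {
    var dataCurrent = Ext.decode(response.responseText); // Ext.decode works in Ext JS 3 and 4+
    alert(dataCurrent);
    var myObject = Ext.decode(dataCurrent.cData);        // cData is itself a JSON string, so decode it too
} catch (e) {
    alert('decode failed: ' + e);                        // surfaces the parse error instead of failing silently
}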
The documentation:
JsonStore: http://dev.sencha.com/deploy/ext-4.1.0-gpl/docs/index.html#!/api/Ext.data.JsonStore
Ajax proxy: http://dev.sencha.com/deploy/ext-4.1.0-gpl/docs/index.html#!/api/Ext.data.proxy.Ajax
Grid: http://dev.sencha.com/deploy/ext-4.1.0-gpl/docs/index.html#!/api/Ext.grid.Panel
Another thing that I forgot to mention is that you can use Models in your store instead of an array of fields. Models are like classes in an OO language. Take a look: http://dev.sencha.com/deploy/ext-4.1.0-gpl/docs/index.html#!/api/Ext.data.Model
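A minimal sketch of that idea, reusing the same two fields as above (the model name "Person" is just an example):

Ext.define( "Person", {
    extend: "Ext.data.Model",
    fields: [ "firstname", "lastname" ]
});

var myModelStore = Ext.create( "Ext.data.JsonStore", {
    model: "Person", // replaces the fields array
    proxy: {
        type: "ajax",
        url: "/echo/json/", // same jsFiddle echo URL as above; point it at your own endpoint
        reader: { type: "json", root: "items" }
    }
});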