JSON_value function not returning exact value - oracle

I was trying to extract a value in SQL Developer from the JSON below, but it gives me the wrong output:
select
json_value('{"ABC": {
"CFF": 90,
"coord": {
"x1": 27.4,
"x2": 31.6,
"y1": 61.4,
"y2": 62.4
},
"value": "\\"
}}','$."ABC".value') COL from dual
Kindly suggest ..

The backslash symbol \ is also an escape character - so \\ in a quoted string "\\" becomes a single unescaped \
If you want the value to be \\ you have to double the amount of backslashes to \\\\

Related

How to pass arguments inside select and test function?

I've this JSON data, extracted from qbittorrent:
[
{
"hash": "333333333333333333333333333",
"name": "testtosearchcaseinsensitive",
"magnet_uri": "magnet:somedata",
"size": 1243989552,
"progress": 1.0,
"dlspeed": 0,
"upspeed": 0,
"priority": 0,
"num_seeds": 0,
"num_complete": 2,
"num_leechs": 0,
"num_incomplete": 32,
"ratio": 0.0,
"eta": "1.01:11:52",
"state": "stalledUP",
"seq_dl": false,
"f_l_piece_prio": false,
"category": "category",
"tags": "",
"super_seeding": false,
"force_start": false,
"save_path": "/data/path/",
"added_on": 1567358333,
"completion_on": 1567366287,
"tracker": "somedata",
"dl_limit": null,
"up_limit": null,
"downloaded": 1244073666,
"uploaded": 0,
"downloaded_session": 0,
"uploaded_session": 0,
"amount_left": 0,
"completed": 1243989552,
"ratio_limit": 1.0,
"seen_complete": 1567408837,
"last_activity": 1567366979,
"time_active": "1.01:00:41",
"auto_tmm": true,
"total_size": 1243989552,
"max_ratio": 1,
"max_seeding_time": 2880,
"seeding_time_limit": 2880
},
{
"hash": "44444444444444",
"name": "dontmatch",
"magnet_uri": "magnet:somedata",
"size": 2996838603,
"progress": 1.0,
"dlspeed": 0,
"upspeed": 0,
"priority": 0,
"num_seeds": 0,
"num_complete": 12,
"num_leechs": 0,
"num_incomplete": 0,
"ratio": 0.06452786606740063,
"eta": "100.00:00:00",
"state": "stalledUP",
"seq_dl": false,
"f_l_piece_prio": false,
"category": "category",
"tags": "",
"super_seeding": false,
"force_start": false,
"save_path": "/data/path/",
"added_on": 1566420155,
"completion_on": 1566424710,
"tracker": "some data",
"dl_limit": null,
"up_limit": null,
"downloaded": 0,
"uploaded": 193379600,
"downloaded_session": 0,
"uploaded_session": 0,
"amount_left": 0,
"completed": 2996838603,
"ratio_limit": -2.0,
"seen_complete": 4294967295,
"last_activity": 1566811636,
"time_active": "10.23:07:42",
"auto_tmm": true,
"total_size": 2996838603,
"max_ratio": -1,
"max_seeding_time": -1,
"seeding_time_limit": -2
}
]
So I want to match all the data where the name contains some text; in Bash I wrote the following, but I can't make it work.
Some declaration to start, actually I pass data via arguments, so I use $1:
TXTIWANT="test"
MYJSONDATA= Here I put my JSON data
Then this jq equation that doesn't work for me is this:
RESULTS=$(echo "$MYJSONDATA" | jq --raw-output --arg TOSEARCH "$TXTIWANT" '.[] | select(.name|test("$TOSEARCH.";"i")) .name')
But I always get an error, or all of the data back — I think because $TOSEARCH is not being expanded.
Maybe there's a better way to search for a string inside a value?
What am I doing wrong?
The right syntax for variable (or filter) interpolation with jq looks like this:
"foo \(filter_or_var) bar"
In your case:
jq --raw-output --arg TOSEARCH "$TXTIWANT" '.[] | select(.name|test("\($TOSEARCH).";"i")) | .name'
side-note: By convention, environment variables (PAGER, EDITOR, ...) and internal shell variables (SHELL, BASH_VERSION, ...) are capitalized. All other variable names should be lower case.
If (as suggested by the name TXTIWANT and by the example, as well as by the wording of the question) the value of "$TXTIWANT" is supposed to be literal text, then using test is problematic, as test will search for a regular expression.
Since it is not clear from the question why you are adding a period to TOSEARCH, in the remainder of this first section, I will ignore whatever requirement you have in mind regarding that.
So if you simply want to find the .name values that contain $TXTIWANT literally (ignoring case), then you could convert both .name and the value of $TXTIWANT to the same case, and then check for containment.
In jq, ignoring the mysterious ".", this could be done like so:
jq --raw-output --arg TOSEARCH "$TXTIWANT" '
($TOSEARCH|ascii_upcase) as $up
| .[]
| .name
| select(ascii_upcase|index($up))'
Search for non-terminal occurrence of $TXTIWANT ignoring case
If the "." signifies there must be an additional character after $TXTIWANT, then you could just add another select as follows:
($TOSEARCH|length) as $len
| ($TOSEARCH|ascii_upcase) as $up
| .[]
| .name
| (ascii_upcase|index($up)) as $ix
| select($ix)
| select($ix + $len < length)

INSERT on Oracle with error in AdonisJS. What is the right way to do this?

When attempting to insert into Oracle using the Adonis Database, an error is generated for all fields of type string.
I am using AdonisJS 4.1 and Oracle 11.
JSON Submitted:
{
"codepi": "1",
"codoem": "5",
"datcer": "01/01/2007",
"cerapr": "2586",
"unimed": "Size",
"medepi": "P",
"desepi": "Ear Protector 01"
}
Code in Controller:
'use strict'
const Database = use('Database')

/**
 * Controller that inserts EPI stock records into the Oracle table USU_T096EPI.
 */
class EstoqueEpiController {
  /**
   * Persists the posted EPI fields to Oracle.
   * @param {{response: object, request: object}} ctx - AdonisJS HTTP context.
   * @returns {Promise<*>} the result returned by the insert query builder.
   */
  async store ({response, request}) {
    const data = request.only ([
      'codepi',
      'codoem',
      // BUG FIX: was the bare identifier `datcer` (a ReferenceError at call
      // time); it must be the string 'datcer' so request.only picks the field,
      // otherwise USU_DatCer is sent as undefined (see the ORA-00904 error).
      'datcer',
      'cerapr',
      'unimed',
      'medepi',
      'desepi',
    ])
    response = await Database.connection ('oracle')
      .table ('USU_T096EPI')
      .insert ({
        USU_CodEpi: data.codepi,
        // BUG FIX: was `date.codoem` — a typo for `data.codoem`
        // (`date` is not defined anywhere in this scope).
        USU_CodOem: data.codoem,
        USU_DatCer: data.datcer,
        USU_CerApr: data.cerapr,
        USU_UniMed: data.unimed,
        USU_MedEpi: data.medepi,
        USU_DesEpi: data.desepi,
      })
    Database.close ()
    return response
  }
}
// BUG FIX: was `module.exports = EpiController`, which is an undefined
// name — the class declared above is EstoqueEpiController.
module.exports = EstoqueEpiController
Error returned:
"message": "insert into \" USU_T096EPI \ "(\" USU_CerApr \ ", \" USU_CodEpi \ ", \" USU_CodOem \ ", \" USU_DatCer \ ", \" USU_DesEpi \ ", \" USU_MedEpi \ ", \ "USU_UniMed \") values ​​(: 1,: 2,: 3,: 4,: 5,: 6,: 7) - ORA-00904: \ "USU_UniMed \": invalid identifier "
ORA-00904 oracle documentation :
You tried to execute a SQL statement that included an invalid column name or the column name is missing. This commonly occurs when you reference an invalid alias in a SELECT statement.
Check the column names
&
I don't know if it has an impact (line 4) :
const data = request.only ([
'codepi',
'codoem',
datcer, <-- missing quotes; this should be the string 'datcer'
'cerapr',
'unimed',
'medepi',
'desepi',
])

Assign variable with JSON string

I have a JSON string and I want to assign it to a variable. How can I do that?
my_var = 'test text'
my_json = '{"text": "#{my_var}", "info": "great", "username": "Testuser", "avatar": "green"}'
does not work. Thanks
"#{this works}" - double quotes allow interpolation
'#{does not work}' - single quotes do not allow interpolation
' "#{does not work}" ' - outer single quotes still prevent interpolation
" '#{this works}' " - outer double quotes still allow interpolation
Ref

sed how to delete text and symbols between

I have sql file with this strings :
(17, 14, '2015-01-20 10:38:40', 211, 'Just text\n\nFrom: Support <support#domain.com>\n Send: 20 Jan 2015 year. 10:33\n To: Admin\n Theme: [TST #0000014] Just text \n\nJust text: Text\n Test text test text\n\nJust text:\n Text\n\n-- \n Test\n Text.\n Many text words 0.84.2', 0, 2);
I want to remove all the text between the symbols \n\ and ', 0, 2);
I want get this result:
(17, 14, '2015-01-20 10:38:40', 211, 'Just text', 0, 2);
How can I do it with sed?
I tried to use this example - cat file | sed 's/<b>.*</b>//g'. I changed <b> to \n\ and </b> to ', 0, 2); but it doesn't work — I get an error in the console.
Thanks in advance!
You can try this command
sed 's/\\n\\.*\('\'', 0, 2);\)/\1/g' FileName
Output :
(17, 14, '2015-01-20 10:38:40', 211, 'Just text', 0, 2);
You have to escape the single quotes like '\'' as well as back slash \\
If you can find it, you can replace it with nothing.
So, depending on what you mean by \n and what you need to escape, you want something like sed 's/\\n\\.*'\''//g' (the '\'' sequence is how a literal single quote is embedded in a single-quoted shell string).
Obviously, take care that this is really what you want to replace on every line. It might be worth searching for the target \\n\\.*' first, to make sure it doesn't accidentally grab too much on an unexpected line.

Split complex string with a regex

I have a string:
(3592, -1, 7, N'SUNWopensp-root', N'1.5,REV=10.0.3.2004.12.15.14.19', N'Sun Microsystems, Inc.', N'The OpenJade Group''s SGML and XML parsing tools - platfowrm independent files, / filesystem', N'SunPackage', abc, 83)
I need to split this on commas, but NOT the ones within N' ... ' substrings.
I managed to extract all the content of N' ... ' strings with this:
N\'(.*?)(?:\',|\)|\'\))
But that does not split on commas "3592, -1, 7" and the like, while I cannot split on commas separately because that breaks up N' ... ' strings with commas. The ultimate goal is having all fields split on commas EXCEPT the ones within N' ... ' strings (i.e. N'.. , ..' should be a complete field too).
given_string.scan(/(?:(?:N'.*?')|[^,])+/)
gives:
[
  "(3592",
  " -1",
  " 7",
  " N'SUNWopensp-root'",
  " N'1.5,REV=10.0.3.2004.12.15.14.19'",
  " N'Sun Microsystems, Inc.'",
  " N'The OpenJade Group''s SGML and XML parsing tools - platfowrm independent files",
  " / filesystem'",
  " N'SunPackage'",
  " abc",
  " 83)"
]
This looks unusual as it contains spaces and parentheses, and a ' character inside a word works as a delimiter for the field N'...', but since that is what is mentioned in the question, this is what I give. If this is not exactly what you want, blame the sloppiness of the question.
Since that is close to CSV format, here's one way to parse it.
#remove parens and N's
csv = str.gsub(/^\(|\)$/, "").gsub(/, N/, ",")
CSV.parse_line(csv, {:quote_char => "'"})
Output:
[
"3592",
" -1",
" 7",
"SUNWopensp-root",
"1.5,REV=10.0.3.2004.12.15.14.19",
"Sun Microsystems, Inc.",
"The OpenJade Group's SGML and XML parsing tools - platfowrm independent files,
/ filesystem",
"SunPackage",
" abc",
" 83"
]
Note: This is the only solution that handles the doubled apostrophe correctly.
You already extracted the N' fields; now you can gsub them into a placeholder like X, then split on commas and substitute the X's back with your N' fields. It's not an elegant solution, but it works.

Resources