ForgeRock OpenIDM sync for custom user object - OpenDJ

I created a custom user object in ForgeRock OpenIDM. When a user is created in OpenIDM, it is synced to OpenDJ, but the user data is not synced when I update it.
When I remove the mappings between OpenIDM and OpenDJ and create new mappings, sync works fine for some time and later stops working, so I have to remove the old mappings and create new ones again. Is there any way to solve this issue?
My sync.json mapping for managed/user to system/ldap/account in OpenIDM looks like this:
{
  "enableSync" : true,
  "source" : "managed/user",
  "onCreate" : {
    "source" : "target.dn='uid='+source.loginId+','+source.ou",
    "type" : "text/javascript"
  },
  "name" : "managedUser_sourceLdapAccount",
  "target" : "system/ldap/account",
  "properties" : [
    {
      "source" : "givenName",
      "target" : "givenName"
    },
    {
      "source" : "description",
      "target" : "description"
    },
    {
      "source" : "familyName",
      "target" : "familyName"
    },
    {
      "source" : "gender",
      "target" : "gender"
    },
    {
      "source" : "mobilePhone",
      "target" : "mobilePhone"
    },
    {
      "source" : "emailAddress",
      "target" : "mail"
    },
    {
      "source" : "homePhone",
      "target" : "homePhone"
    },
    {
      "source" : "workPhone",
      "target" : "workPhone"
    },
    {
      "source" : "_id",
      "target" : "userUUID"
    },
    {
      "source" : "middleName",
      "target" : "sn"
    },
    {
      "source" : "birthDay",
      "target" : "birthDay"
    },
    {
      "source" : "country",
      "target" : "pcountry"
    },
    {
      "target" : "cn",
      "source" : "",
      "transform" : {
        "source" : "source.displayName||(source.givenName+' '+source.familyName)",
        "type" : "text/javascript"
      }
    },
    {
      "target" : "userPassword",
      "transform" : {
        "source" : "openidm.decrypt(source)",
        "type" : "text/javascript"
      },
      "source" : "password",
      "condition" : {
        "source" : "object.password!=null",
        "type" : "text/javascript"
      }
    },
    {
      "source" : "displayName",
      "target" : "displayName"
    }
  ],
  "policies" : [
    {
      "action" : "CREATE",
      "situation" : "ABSENT"
    },
    {
      "action" : "IGNORE",
      "situation" : "ALL_GONE"
    },
    {
      "action" : "EXCEPTION",
      "situation" : "AMBIGUOUS"
    },
    {
      "action" : "UPDATE",
      "situation" : "CONFIRMED"
    },
    {
      "action" : "UPDATE",
      "situation" : "FOUND"
    },
    {
      "action" : "EXCEPTION",
      "situation" : "FOUND_ALREADY_LINKED"
    },
    {
      "action" : "EXCEPTION",
      "situation" : "LINK_ONLY"
    },
    {
      "action" : "UNLINK",
      "situation" : "MISSING"
    },
    {
      "action" : "IGNORE",
      "situation" : "SOURCE_IGNORED"
    },
    {
      "action" : "EXCEPTION",
      "situation" : "SOURCE_MISSING"
    },
    {
      "action" : "IGNORE",
      "situation" : "TARGET_IGNORED"
    },
    {
      "action" : "EXCEPTION",
      "situation" : "UNASSIGNED"
    },
    {
      "action" : "DELETE",
      "situation" : "UNQUALIFIED"
    }
  ]
}

Try adding an "onUpdate" script to your mapping; it should look like this:
"onUpdate" : {
  "source" : "target.dn='uid='+source.loginId+','+source.ou",
  "type" : "text/javascript"
},
The IDM documentation states that onCreate is only called when a new target object is created, so an update never runs your script.
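For reference, a minimal sketch of how the top of the mapping looks with both handlers in place (only the relevant keys shown; everything else in your sync.json stays as above):
{
  "name" : "managedUser_sourceLdapAccount",
  "source" : "managed/user",
  "target" : "system/ldap/account",
  "onCreate" : {
    "source" : "target.dn='uid='+source.loginId+','+source.ou",
    "type" : "text/javascript"
  },
  "onUpdate" : {
    "source" : "target.dn='uid='+source.loginId+','+source.ou",
    "type" : "text/javascript"
  },
  ...
}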

Related

Using the Ingest Attachment plugin within an Elasticsearch index template

I am trying to update my current Elasticsearch schema, which is on 1.3.2, to the latest version. For one of the indexes, the current schema looks something like this:
curl -XPOST localhost:9200/_template/<INDEXNAME> -d '{
  "template" : "*-<INDEXNAME_TYPE>",
  "index.mapping.attachment.indexed_chars": -1,
  "mappings" : {
    "post" : {
      "properties" : {
        "sub" : { "type" : "string" },
        "sender" : { "type" : "string" },
        "dt" : { "type" : "date", "format" : "EEE, d MMM yyyy HH:mm:ss Z" },
        "body" : { "type" : "string" },
        "attachments" : {
          "type" : "attachment",
          "path" : "full",
          "fields" : {
            "attachments" : {
              "type" : "string",
              "term_vector" : "with_positions_offsets",
              "store" : true
            },
            "name" : { "store" : "yes" },
            "title" : { "store" : "yes" },
            "date" : { "store" : "yes" },
            "content_type" : { "store" : "yes" },
            "content_length" : { "store" : "yes" }
          }
        }
      }
    }
  }
}'
My old version of Elasticsearch has the "mapper-attachment" plugin installed. I am aware that the "mapper-attachment" plugin has been replaced by the Ingest Attachment processor, and following the examples from the plugin's website I understand how to create a pipeline:
PUT _ingest/pipeline/attachment
{
  "description" : "Extract attachment information from arrays",
  "processors" : [
    {
      "foreach": {
        "field": "attachments",
        "processor": {
          "attachment": {
            "target_field": "_ingest._value.attachment",
            "field": "_ingest._value.data",
            "indexed_chars" : -1
          }
        }
      }
    }
  ]
}
PUT my-index-000001/_doc/my_id?pipeline=attachment
{
  "sub" : "This is a test post",
  "sender" : "jane.doe@gmail.com",
  "dt" : "Sat, 15 Jan 2022 08:50:00 AEST",
  "body" : "Test Body",
  "fromaddr": "jane.doe@gmail.com",
  "toaddr": "larne.jones@gmail.com",
  "attachments" : [
    {
      "filename" : "ipsum.txt",
      "data" : "dGhpcyBpcwpqdXN0IHNvbWUgdGV4dAo="
    },
    {
      "filename" : "test.txt",
      "data" : "VGhpcyBpcyBhIHRlc3QK"
    }
  ]
}
How do I make use of this new attachment processor to create the index template I had before?
Note: with my index and schema, each "post" will have one or many attachments.
The answer is that, unlike in the previous version, I cannot use the attachment data type. Following the example from the elastic.co website and from my own question, the answer was in my question itself:
1st: Create the pipeline as in the question.
2nd: Create the schema [see below].
3rd: Insert the data as shown in the question. When inserting the data into the index, pass pipeline=attachment as the name of the pipeline, and the plugin parses the given attachments into the schema below.
curl -XPOST localhost:9200/_template/<INDEXNAME> -d '{
  "template" : "*-<INDEXNAME_TYPE>",
  "index.mapping.attachment.indexed_chars": -1,
  "mappings" : {
    "post" : {
      "properties" : {
        "sub" : { "type" : "string" },
        "sender" : { "type" : "string" },
        "dt" : { "type" : "date", "format" : "EEE, d MMM yyyy HH:mm:ss Z" },
        "body" : { "type" : "string" },
        "attachments" : {
          "properties" : {
            "attachment" : {
              "properties" : {
                "content" : {
                  "type" : "text",
                  "store": true,
                  "term_vector": "with_positions_offsets"
                },
                "content_length" : { "type" : "long" },
                "content_type" : { "type" : "keyword" },
                "language" : { "type" : "keyword" },
                "date" : { "type" : "date", "format" : "EEE, d MMM yyyy HH:mm:ss Z" }
              }
            },
            "content" : { "type": "keyword" },
            "name" : { "type" : "keyword" }
          }
        }
      }
    }
  }
}'
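As a quick sanity check (using the index name, document id, and pipeline from the example above), fetching the document back should show the extracted text on each array element, e.g. attachments[0].attachment.content containing the decoded "this is / just some text":
GET my-index-000001/_doc/my_id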

Kibana index pattern mapping conflict

I am tired of reindexing; every 2-3 weeks I have to reindex because of a mapping conflict.
{
  "winlogbeat_sysmon" : {
    "order" : 0,
    "index_patterns" : [
      "log-wlb-sysmon-*"
    ],
    "settings" : {
      "index" : {
        "lifecycle" : {
          "name" : "winlogbeat_sysmon_policy",
          "rollover_alias" : "log-wlb-sysmon"
        },
        "refresh_interval" : "1s",
        "number_of_shards" : "1",
        "number_of_replicas" : "1"
      }
    },
    "mappings" : {
      "properties" : {
        "thread_id" : {
          "type" : "long"
        },
        "z_elastic_ecs.event.code" : {
          "type" : "long"
        },
        "geoip" : {
          "type" : "object",
          "properties" : {
            "ip" : {
              "type" : "ip"
            },
            "latitude" : {
              "type" : "half_float"
            },
            "location" : {
              "type" : "geo_point"
            },
            "longitude" : {
              "type" : "half_float"
            }
          }
        },
        "dst_ip_addr" : {
          "type" : "ip"
        }
      }
    },
    "aliases" : { }
  }
}
This is the template I set earlier, and I haven't changed anything since then. In the current and previous indices of log-wlb-sysmon, dst_ip_addr is an ip field, but in older indices it is a text field. I didn't see any warning about this issue in Logstash.
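One way to confirm which concrete indices still carry the old text mapping is the field-mapping API (the index pattern is the one from the template above):
GET log-wlb-sysmon-*/_mapping/field/dst_ip_addr
Indices that return "type" : "text" here are the ones causing the Kibana index pattern conflict; since a field's type cannot be changed in place, those older indices have to be reindexed (or allowed to age out) before the conflict disappears.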

Elasticsearch Suggestions Multi Index and Multi Fields

I have different indexes that contain different fields, and I am trying to figure out how to get suggestions from all indexes and all fields. I know that with GET /_all/_search I can search across all indexes, but how can I get suggestions from all indexes and all fields? I want a feature like Google's "Did you mean" suggestions.
So, I tried this:
GET /_all/_search
{
  "query" : {
    "multi_match" : {
      "query" : "berlin"
    }
  },
  "suggest" : {
    "text" : "berlin",
    "my-suggest-1" : {
      "term" : {
        "field" : "street"
      }
    },
    "my-suggest-2" : {
      "term" : {
        "field" : "city"
      }
    },
    "my-suggest-3" : {
      "term" : {
        "field" : "description"
      }
    }
  }
}
"my-suggest-1" and "-2" belongs to Index address (see below) and "my-suggest-3" belongs to Index product. I get the following error:
"error" : {
"root_cause" : [
{
"type" : "illegal_argument_exception",
"reason" : "no mapping found for field [street]"
},
{
"type" : "illegal_argument_exception",
"reason" : "no mapping found for field [city]"
},
{
"type" : "illegal_argument_exception",
"reason" : "no mapping found for field [description]"
}
]
}
But if I use only the fields of one index, I get suggestions:
GET /_all/_search
{
  "query" : {
    "multi_match" : {
      "query" : "berlin"
    }
  },
  "suggest" : {
    "text" : "berlin",
    "my-suggest-1" : {
      "term" : {
        "field" : "street"
      }
    },
    "my-suggest-2" : {
      "term" : {
        "field" : "city"
      }
    }
  }
}
Response:
...
"failures" : {
  ...
},
"hits" : {
  ...
}
"suggest" : {
  "my-suggest-1" : [
    {
      "text" : "berlin",
      "offset" : 0,
      "length" : 10,
      "options" : [
        {
          "text" : "berliner",
          "score" : 0.9,
          "freq" : 12
        },
        {
          "text" : "berlinger",
          "score" : 0.9,
          "freq" : 1
        }
      ]
    }
  ],
  "my-suggest-2" : [
    {
      "text" : "berlin",
      "offset" : 0,
      "length" : 10,
      "options" : []
    }
  ]
...
How can I get suggestions from both the address and product indexes? I would be happy if someone could help me.
Index 1 - Address:
"address" : {
"aliases" : {
....
},
"mappings" : {
"dynamic" : "strict",
"properties" : {
"_entity_type" : {
"type" : "keyword",
"index" : false
},
"street" : {
"type" : "text"
},
"city" : {
"type" : "text"
}
}
},
"settings" : {
...
}
}
Index 2 - Product:
"product" : {
"aliases" : {
...
},
"mappings" : {
"dynamic" : "strict",
"properties" : {
"_entity_type" : {
"type" : "keyword",
"index" : false
},
"description" : {
"type" : "text"
}
}
},
"settings" : {
...
}
}
You can add multiple indices to your search, but then you need to suggest over fields that exist in all of those indices. So in your case, you need to define all three fields in both indices: "street" and "city" are filled only in the first index, and "description" is filled only in the second. Below is the mapping for the address index; here the "description" field exists but holds no data. In the product index, "street" and "city" would exist but hold no data (see the sketch after the mapping).
"address" : {
"aliases" : {
....
},
"mappings" : {
"dynamic" : "strict",
"properties" : {
"_entity_type" : {
"type" : "keyword",
"index" : false
},
"street" : {
"type" : "text"
},
"city" : {
"type" : "text"
},
"description" : {
"type" : "text"
}
}
},
"settings" : {
...
}
}
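And for completeness, a corresponding sketch of the product index with the empty "street" and "city" fields added (the answer describes this side but does not show it):
"product" : {
  "aliases" : {
    ...
  },
  "mappings" : {
    "dynamic" : "strict",
    "properties" : {
      "_entity_type" : {
        "type" : "keyword",
        "index" : false
      },
      "description" : {
        "type" : "text"
      },
      "street" : {
        "type" : "text"
      },
      "city" : {
        "type" : "text"
      }
    }
  },
  "settings" : {
    ...
  }
}
With both mappings aligned, the combined suggest request over all three fields no longer fails with "no mapping found".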

Elasticsearch ILM error: policy does not exist

So this is my index template:
{
  "net-stat-template" : {
    "order" : 0,
    "index_patterns" : [
      "net-stat-*"
    ],
    "settings" : {
      "index" : {
        "lifecycle" : {
          "name" : "net-stat",
          "rollover_alias" : "net-stat"
        },
        "routing" : {
          "allocation" : {
            "require" : {
              "data" : "hot"
            }
          }
        },
        "refresh_interval" : "15s",
        "number_of_shards" : "1",
        "number_of_replicas" : "0"
      }
    },
    "mappings" : { },
    "aliases" : { }
  }
}
and this is my ILM policy:
"net-stat" : {
"version" : 1,
"modified_date" : "2020-05-10T19:20:18.979Z",
"policy" : {
"phases" : {
"hot" : {
"min_age" : "0ms",
"actions" : {
"rollover" : {
"max_size" : "50gb",
"max_age" : "5d"
},
"set_priority" : {
"priority" : 50
}
}
},
"delete" : {
"min_age" : "10d",
"actions" : {
"delete" : { }
}
},
"warm" : {
"min_age" : "0ms",
"actions" : {
"allocate" : {
"number_of_replicas" : 0,
"include" : { },
"exclude" : { },
"require" : {
"data" : "warm"
}
},
"set_priority" : {
"priority" : 50
}
}
}
}
}
}
but it doesn't delete indexes that are more than 10 days old, and when I try GET net-stat-2020.04.20/_ilm/explain it returns:
{
  "indices" : {
    "net-stat-2020.04.20" : {
      "index" : "net-stat-2020.04.20",
      "managed" : true,
      "policy" : "netstat",
      "step_info" : {
        "type" : "illegal_argument_exception",
        "reason" : "policy [netstat] does not exist"
      }
    }
  }
}
I'm not sure where this netstat came from. Also, when I try POST /net-stat-2020.04.20/_ilm/retry it returns an error:
"type": "illegal_argument_exception",
"reason": "cannot retry an action for an index [net-stat-2020.04.20] that has not encountered an error when running a Lifecycle Policy"
Is there something I'm missing, or are my settings somehow wrong?
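Not an authoritative diagnosis, but one way to trace the stale name is to read the lifecycle settings stored on the index itself and, if they still point at netstat, repoint them at the existing policy (both are standard settings APIs; the index name is the one from the question):
GET net-stat-2020.04.20/_settings?filter_path=*.settings.index.lifecycle
PUT net-stat-2020.04.20/_settings
{
  "index.lifecycle.name" : "net-stat"
}
A template only applies at index-creation time, so an index created while the settings (or an older template) referenced netstat keeps that name until it is changed explicitly.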

aggregation fails on nested aggregation field

I have this mapping for the fuas type:
curl -XGET 'http://localhost:9201/living_team/_mapping/fuas?pretty'
{
  "living_v1" : {
    "mappings" : {
      "fuas" : {
        "properties" : {
          "backlogStatus" : {
            "type" : "long"
          },
          "comment" : {
            "type" : "string"
          },
          "dueTimestamp" : {
            "type" : "date",
            "format" : "strict_date_optional_time||epoch_millis"
          },
          "matter" : {
            "type" : "string"
          },
          "metainfos" : {
            "properties" : {
              "category 1" : {
                "type" : "string"
              },
              "key" : {
                "type" : "string"
              },
              "null" : {
                "type" : "string"
              },
              "processos" : {
                "type" : "string"
              }
            }
          },
          "resources" : {
            "properties" : {
              "noteId" : {
                "type" : "string"
              },
              "resourceId" : {
                "type" : "string"
              }
            }
          },
          "status" : {
            "type" : "long"
          },
          "timestamp" : {
            "type" : "date",
            "format" : "strict_date_optional_time||epoch_millis"
          },
          "user" : {
            "type" : "string",
            "index" : "not_analyzed"
          }
        }
      }
    }
  }
}
I'm trying to perform this aggregation:
curl -XGET 'http://ESNode01:9201/living_team/fuas/_search?pretty' -d '
{
  "aggs" : {
    "demo" : {
      "nested" : {
        "path" : "metainfos"
      },
      "aggs" : {
        "key" : { "terms" : { "field" : "metainfos.key" } }
      }
    }
  }
}
'
ES gives me:
"error" : {
"root_cause" : [ {
"type" : "aggregation_execution_exception",
"reason" : "[nested] nested path [metainfos] is not nested"
} ],
"type" : "search_phase_execution_exception",
"reason" : "all shards failed",
"phase" : "query_fetch",
"grouped" : true,
"failed_shards" : [ {
"shard" : 3,
"index" : "living_v1",
"node" : "HfaFBiZ0QceW1dpqAnv-SA",
"reason" : {
"type" : "aggregation_execution_exception",
"reason" : "[nested] nested path [metainfos] is not nested"
}
} ]
},
"status" : 500
}
Any ideas?
You're missing "type":"nested" from your metainfos mapping.
Should have been:
"metainfos" : {
"type":"nested",
"properties" : {
"category 1" : {
"type" : "string"
},
"key" : {
"type" : "string"
},
"null" : {
"type" : "string"
},
"processos" : {
"type" : "string"
}
}
}
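Note that an existing field cannot simply be switched to nested in place; the index has to be recreated (or the data reindexed) with the corrected mapping. A minimal sketch under that assumption, using a hypothetical new index name and only the changed field:
curl -XPUT 'http://localhost:9201/living_v2' -d '{
  "mappings" : {
    "fuas" : {
      "properties" : {
        "metainfos" : {
          "type" : "nested",
          "properties" : {
            "key" : { "type" : "string" },
            "processos" : { "type" : "string" }
          }
        }
      }
    }
  }
}'
After moving the documents over, the nested aggregation above should run without the aggregation_execution_exception.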
