MongoDB connections time out from OpenWhisk action - openwhisk

I am running OpenWhisk via a jar on my development box. I can connect to Mongo from Node and from the command line. However, the connection times out when I try to connect from an OpenWhisk action.
curl -u $tok "http://172.17.0.1:3233/api/v1/namespaces/_/actions/openwhiskmongo?blocking=true&result=true" -X POST -H "Content-Type: application/json" -d '{"username":"jonboy"}'

{"error":{"message":"Operation `users.findOne()` buffering timed out after 10000ms","name":"MongooseError","stack":"MongooseError: Operation `users.findOne()` buffering timed out after 10000ms\n    at Timeout.<anonymous> (/nodejsAction/hTFYlZze/node_modules/mongoose/lib/drivers/node-mongodb-native/collection.js:153:23)\n    at listOnTimeout (internal/timers.js:557:17)\n    at processTimers (internal/timers.js:500:7)"}}
Mongo connects fine from the command line.
mongo mongodb://172.17.0.1
MongoDB shell version v4.4.17
connecting to: mongodb://172.17.0.1:27017/?compressors=disabled&gssapiServiceName=mongodb
Implicit session: session { "id" : UUID("9777739e-0a81-4ef4-852a-a19a515ac376") }
MongoDB server version: 4.4.17
---
The server generated these startup warnings when booting:
2022-11-21T21:32:14.551-05:00: Using the XFS filesystem is strongly recommended with the WiredTiger storage engine. See http://dochub.mongodb.org/core/prodnotes-filesystem
2022-11-21T21:32:16.601-05:00: Access control is not enabled for the database. Read and write access to data and configuration is unrestricted
---
>
172.17.0.1 (the docker0 bridge gateway) should be reachable from inside Docker containers.
ifconfig
docker0: flags=4163<UP,BROADCAST,RUNNING,MULTICAST> mtu 1500
inet 172.17.0.1 netmask 255.255.0.0 broadcast 172.17.255.255
inet6 fe80::42:efff:fecc:f630 prefixlen 64 scopeid 0x20<link>
ether 02:42:ef:cc:f6:30 txqueuelen 0 (Ethernet)
RX packets 153 bytes 11410 (11.4 KB)
RX errors 0 dropped 0 overruns 0 frame 0
TX packets 278 bytes 7650054 (7.6 MB)
TX errors 0 dropped 0 overruns 0 carrier 0 collisions 0
Here's my action code.
openmongowhisk.js
const mongoose = require('mongoose');

async function main(args) {
    require('./config/database');
    require('./models/user');
    const User = mongoose.model('User');

    const name = args && args.username;
    let result = 'not initialized';

    const user = await User.findOne({ username: name }).exec();
    if (user) {
        result = 'found';
    } else {
        result = 'not found';
    }
    return { user: user, result: result };
}

exports.main = main;
database.js
const mongoose = require('mongoose');

const devConnection = "mongodb://172.17.0.1";

mongoose.connect(devConnection, {
    useNewUrlParser: true,
    useUnifiedTopology: true
});

mongoose.connection.on('connected', () => {
    console.log('Database connected');
});
user.js
const mongoose = require('mongoose');

const UserSchema = new mongoose.Schema({
    id: String,
    username: String,
});

mongoose.model('User', UserSchema);
Edit: added Docker network info.
[
{
"Name": "bridge",
"Id": "441a18a8d66ef7e29aaae1f2d5fee8b5e2f2edf39200b54a91f0df8b17407a46",
"Created": "2022-11-19T09:03:56.78026462-05:00",
"Scope": "local",
"Driver": "bridge",
"EnableIPv6": false,
"IPAM": {
"Driver": "default",
"Options": null,
"Config": [
{
"Subnet": "172.17.0.0/16",
"Gateway": "172.17.0.1"
}
]
},
"Internal": false,
"Attachable": false,
"Ingress": false,
"ConfigFrom": {
"Network": ""
},
"ConfigOnly": false,
"Containers": {
"13d0e2d487f03d2c806a917199857eefd77d9fb216eb9c68647ec5d7e93810e0": {
"Name": "wsk0_243_prewarm_nodejs14",
"EndpointID": "938de84937bfe214486713b4490645145a76f85cbff06f7ac49904beba9d8699",
"MacAddress": "02:42:ac:11:00:03",
"IPv4Address": "172.17.0.3/16",
"IPv6Address": ""
}
},
"Options": {
"com.docker.network.bridge.default_bridge": "true",
"com.docker.network.bridge.enable_icc": "true",
"com.docker.network.bridge.enable_ip_masquerade": "true",
"com.docker.network.bridge.host_binding_ipv4": "0.0.0.0",
"com.docker.network.bridge.name": "docker0",
"com.docker.network.driver.mtu": "1500"
},
"Labels": {}
}
]
Any ideas?

I disabled the proxy settings on my computer, and now it is working. Can't believe I didn't think of trying that earlier.
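For anyone who hits the same buffering timeout: the 10-second delay comes from Mongoose buffering model calls while it waits for a connection that never succeeds. A minimal sketch of a database.js that fails fast instead, so the underlying connection problem is visible right away (the URI and timeout value are assumptions, not taken from the original post):

const mongoose = require('mongoose');

// Fail fast: with buffering disabled, User.findOne() rejects immediately when
// the driver cannot reach the server, which makes proxy/DNS problems visible.
mongoose.set('bufferCommands', false);

mongoose.connect('mongodb://172.17.0.1', {
    useNewUrlParser: true,
    useUnifiedTopology: true,
    serverSelectionTimeoutMS: 5000 // assumed value; surfaces selection errors after 5s
});

mongoose.connection.on('connected', () => console.log('Database connected'));
mongoose.connection.on('error', (err) => console.error('Mongo connection error:', err));

With this in place the failure shows up as a connection or server-selection error instead of a generic buffering timeout.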

Related

How to register multiple service instances in consul on one machine

I have Consul running locally on a dev machine. I also have one Golang service running on two different ports on the same machine. Is there a way to register them as one service with two instances in Consul using the Golang API (for example, is it possible to specify the node name when registering)?
Here's a very basic example which registers two instances of a service named my-service. Each instance is configured to listen on a different port, 8080 and 8081 respectively.
The key thing to note is that the service instances are also registered with a unique service ID in order to disambiguate between instance A and instance B of my-service which are running on the same agent.
package main

import (
    "fmt"

    "github.com/hashicorp/consul/api"
)

func main() {
    // Get a new client
    client, err := api.NewClient(api.DefaultConfig())
    if err != nil {
        panic(err)
    }

    service_name := "my-service"
    service_ports := [2]int{8080, 8081}

    for idx, port := range service_ports {
        svc_reg := &api.AgentServiceRegistration{
            // A unique ID per instance lets two instances of the same
            // service coexist on one agent.
            ID:   fmt.Sprintf("%s-%d", service_name, idx),
            Name: service_name,
            Port: port,
        }
        if err := client.Agent().ServiceRegister(svc_reg); err != nil {
            panic(err)
        }
    }
}
After running go mod init consul-register (or any module name), fetching the dependency (for example with go mod tidy), and executing the code with go run main.go, you can see that the service has been registered in the catalog.
$ consul catalog services
consul
my-service
Both service instances are correctly being returned for service discovery queries over DNS or HTTP.
$ dig @127.0.0.1 -p 8600 -t SRV my-service.service.consul +short
1 1 8080 b1000.local.node.dc1.consul.
1 1 8081 b1000.local.node.dc1.consul.
$ curl localhost:8500/v1/health/service/my-service
[
{
"Node": {
"ID": "11113853-a8e0-5787-7482-538078db855a",
"Node": "b1000.local",
"Address": "127.0.0.1",
"Datacenter": "dc1",
"TaggedAddresses": {
"lan": "127.0.0.1",
"lan_ipv4": "127.0.0.1",
"wan": "127.0.0.1",
"wan_ipv4": "127.0.0.1"
},
"Meta": {
"consul-network-segment": ""
},
"CreateIndex": 11,
"ModifyIndex": 13
},
"Service": {
"ID": "my-service-0",
"Service": "my-service",
"Tags": [],
"Address": "",
"Meta": null,
"Port": 8080,
"Weights": {
"Passing": 1,
"Warning": 1
},
"EnableTagOverride": false,
"Proxy": {
"Mode": "",
"MeshGateway": {},
"Expose": {},
"TransparentProxy": {}
},
"Connect": {},
"CreateIndex": 14,
"ModifyIndex": 14
},
"Checks": [
{
"Node": "b1000.local",
"CheckID": "serfHealth",
"Name": "Serf Health Status",
"Status": "passing",
"Notes": "",
"Output": "Agent alive and reachable",
"ServiceID": "",
"ServiceName": "",
"ServiceTags": [],
"Type": "",
"Definition": {},
"CreateIndex": 11,
"ModifyIndex": 11
}
]
},
{
"Node": {
"ID": "11113853-a8e0-5787-7482-538078db855a",
"Node": "b1000.local",
"Address": "127.0.0.1",
"Datacenter": "dc1",
"TaggedAddresses": {
"lan": "127.0.0.1",
"lan_ipv4": "127.0.0.1",
"wan": "127.0.0.1",
"wan_ipv4": "127.0.0.1"
},
"Meta": {
"consul-network-segment": ""
},
"CreateIndex": 11,
"ModifyIndex": 13
},
"Service": {
"ID": "my-service-1",
"Service": "my-service",
"Tags": [],
"Address": "",
"Meta": null,
"Port": 8081,
"Weights": {
"Passing": 1,
"Warning": 1
},
"EnableTagOverride": false,
"Proxy": {
"Mode": "",
"MeshGateway": {},
"Expose": {},
"TransparentProxy": {}
},
"Connect": {},
"CreateIndex": 15,
"ModifyIndex": 15
},
"Checks": [
{
"Node": "b1000.local",
"CheckID": "serfHealth",
"Name": "Serf Health Status",
"Status": "passing",
"Notes": "",
"Output": "Agent alive and reachable",
"ServiceID": "",
"ServiceName": "",
"ServiceTags": [],
"Type": "",
"Definition": {},
"CreateIndex": 11,
"ModifyIndex": 11
}
]
}
]

Do Consul sidecars support WebSocket upstreams?

Is it possible to configure a Consul agent sidecar with a WebSocket upstream? I have tried the following configuration, but it doesn't work:
{
"bind_addr": "172.17.0.2",
"data_dir": "/consul/data",
"datacenter": "dc1",
"node_id" : "98dc3bf4-a364-46d9-8b72-624963064ab2",
"node_name": "socket-client-agent",
"leave_on_terminate": true,
"ports": [
{
"grpc": 8502
}
],
"server": false,
"service": [
{
"address": "172.17.0.3",
"connect": [
{
"sidecar_service": [
{
"checks": [
{
"interval": "10s",
"name": "socket-client-sidecar-proxy",
"tcp": "172.17.0.3:21000"
}
],
"port": 21000,
"proxy": [
{
"config": [
{
"bind_address": "0.0.0.0",
"bind_port": 21000,
"protocol": "tcp"
}
],
"upstreams": [
{
"destination_name": "sockets-server",
"local_bind_port": 5001,
"config": {
"protocol": "tcp"
}
}
]
}
]
}
]
}
],
"id": "socket-client-0",
"name": "socket-client",
"port": 5000
}
],
"ui_config": [
{
"enabled": false
}
]
}
In this configuration I'm trying to connect to the sockets-server service, which uses the WebSocket protocol. I'm using Envoy as the sidecar proxy.
Currently Consul does not configure Envoy correctly to support WebSocket upgrades. This GitHub issue has more detail and a potential fix: https://github.com/hashicorp/consul/issues/9473.

Failure to connect to proxy "Certificate signed by unknown authority"

I'm attempting to connect to a CloudSQL instance via a cloudsql-proxy container on my Kubernetes deployment. I have the cloudsql credentials mounted and the value of GOOGLE_APPLICATION_CREDENTIALS set.
However, I'm still receiving the following error in my logs:
2018/10/08 20:07:28 Failed to connect to database: Post https://www.googleapis.com/sql/v1beta4/projects/[projectID]/instances/[appName]/createEphemeral?alt=json&prettyPrint=false: oauth2: cannot fetch token: Post https://oauth2.googleapis.com/token: x509: certificate signed by unknown authority
My connection string looks like this:
[dbUser]:[dbPassword]@cloudsql([instanceName])/[dbName]?charset=utf8&parseTime=True&loc=Local
And the proxy dialer is blank-imported (for its side effects) as:
_ github.com/GoogleCloudPlatform/cloudsql-proxy/proxy/dialers/mysql
Anyone have an idea what might be missing?
EDIT:
Deployment Spec looks something like this (JSON formatted):
{
"replicas": 1,
"selector": {
...
},
"template": {
...
"spec": {
"containers": [
{
"image": "[app-docker-imager]",
"name": "...",
"env": [
...
{
"name": "MYSQL_PASSWORD",
...
},
{
"name": "MYSQL_USER",
...
},
{
"name": "GOOGLE_APPLICATION_CREDENTIALS",
"value": "..."
}
],
"ports": [
{
"containerPort": 8080,
"protocol": "TCP"
}
],
"volumeMounts": [
{
"mountPath": "/secrets/cloudsql",
"name": "[secrets-mount-name]",
"readOnly": true
}
]
},
{
"command": [
"/cloud_sql_proxy",
"-instances=...",
"-credential_file=..."
],
"image": "gcr.io/cloudsql-docker/gce-proxy:1.11",
"name": "...",
"ports": [
{
"containerPort": 3306,
"protocol": "TCP"
}
],
"volumeMounts": [
{
"mountPath": "/secrets/cloudsql",
"name": "[secrets-mount-name]",
"readOnly": true
}
]
}
],
"volumes": [
{
"name": "[secrets-mount-name]",
"secret": {
"defaultMode": 420,
"secretName": "[secrets-mount-name]"
}
}
]
}
}
}
The error message indicates that your client is not able to trust the certificate of https://www.googleapis.com. There are two possible causes for this:
Your client does not know what root certificates to trust. The official cloudsql-proxy Docker image includes root certificates, so if you are using that image, this is not your problem. If you are not using that image, you should use it (or at least install CA certificates in your image).
Your outbound traffic is being intercepted by a proxy server that is using a different, untrusted certificate. This might be malicious (in which case you need to investigate who is intercepting your traffic). More benignly, you might be in an organization that uses an outbound proxy to inspect traffic according to policy. If this is the case, you should build a new Docker image that includes the CA certificate used by your organization's outbound proxy.
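As a quick way to tell these two causes apart, you can inspect the certificate chain that is actually presented on that network path. The following is a generic check sketched in Node, not part of the original answer; run it wherever you can reproduce the failure. If the reported issuer is Google's CA, the runtime is missing root certificates (cause 1); if it is your organization's proxy CA, the traffic is being intercepted (cause 2).

const tls = require('tls');

// rejectUnauthorized:false lets the handshake complete so the chain can be
// inspected; never use it in real application code.
const socket = tls.connect(443, 'www.googleapis.com', { rejectUnauthorized: false }, () => {
    console.log('trusted by this runtime:', socket.authorized);
    console.log('authorization error:', socket.authorizationError);
    console.log('presented issuer:', socket.getPeerCertificate().issuer);
    socket.end();
});

socket.on('error', (err) => console.error('TLS error:', err.message));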

Way to properly set up Laravel Echo authentication for private channels

I have tried a lot but still cannot set up Laravel Echo properly. Specifically, I am unable to set up the authentication step: the private channels are not authenticating. Here is the laravel-echo CLI result.
bootstrap.js
/**
* Echo exposes an expressive API for subscribing to channels and listening
* for events that are broadcast by Laravel. Echo and event broadcasting
* allows your team to easily build robust real-time web applications.
*/
import Echo from "laravel-echo"
// window.Pusher = require('pusher-js');
window.io = require('socket.io-client');
window.Echo = new Echo({
    broadcaster: 'socket.io',
    host: 'http://localhost:6001'
});
Layout.blade.php
Echo.private('App.User.' + id)
    .notification((notification) => {
        console.log(notification);
    });
laravel-echo-server.json
{
"authHost": "http://bigplan.com",
"authEndpoint": "/broadcasting/auth",
"clients": [
{
"appId": "*********",
"key": "*************************"
}
],
"database": "redis",
"databaseConfig": {
"redis": {
"port": "6379",
"host": "localhost"
},
"sqlite": {
"databasePath": "/database/laravel-echo-server.sqlite"
}
},
"devMode": false,
"host": "localhost",
"port": "6001",
"protocol": "http",
"referrers": [],
"sslCertPath": "",
"sslKeyPath": "",
"verifyAuthPath": true,
"verifyAuthServer": false
}
You might be missing the channel authorization routes; by default they are located in routes/channels.php. The broadcast callback should return true if the user is authorized to listen on the channel.
Here is an example:
Broadcast::channel('notify.{employeeId}', function ($user, $employeeId) {
    $employee = $user->employee;
    if (!$employee) {
        return false;
    }
    return $employee->id == $employeeId;
});
notify.{employeeId} is the private channel name that you have defined in your Event class.
$employeeId here is the value that you send from the Echo client; it can be anything, and in my case it's the employee ID.
$user is the currently authenticated user.
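On the client side, the Echo instance must also send credentials with the request to authEndpoint so that /broadcasting/auth can identify the user. A minimal sketch, assuming token-based authentication (the window.apiToken variable and the auth options shown here are illustrative, not taken from the original question):

import Echo from "laravel-echo";

window.io = require('socket.io-client');

window.Echo = new Echo({
    broadcaster: 'socket.io',
    host: 'http://localhost:6001',
    // With session/cookie auth the CSRF token from the csrf-token meta tag is
    // usually enough; with API tokens, pass the Authorization header explicitly.
    auth: {
        headers: {
            Authorization: 'Bearer ' + window.apiToken // hypothetical token source
        }
    }
});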
Docs:
https://laravel.com/docs/5.5/broadcasting#authorizing-channels

Gossip encryption not working as expected

I have created a gossip encryption key using the command below:
$ consul keygen
G74SM8N9NUc4meaHfA7CFg==
Then, I bootstrapped the server with the following config.json:
{
"server": true,
"datacenter": "consul",
"data_dir": "/var/consul",
"log_level": "INFO",
"enable_syslog": true,
"disable_update_check": true,
"client_addr": "0.0.0.0",
"bootstrap": true,
"leave_on_terminate": true,
"encrypt": "G74SM8N9NUc4meaHfA7CFg=="
}
The output of the bootstrap server is as follows:
Node name: 'abcd'
Datacenter: 'consul'
Server: true (bootstrap: true)
Client Addr: 0.0.0.0 (HTTP: 8500, HTTPS: -1, DNS: 8600, RPC: 8400)
Cluster Addr: x.x.x.x (LAN: 8301, WAN: 8302)
Gossip encrypt: true, RPC-TLS: false, TLS-Incoming: false
Atlas: <disabled>
Then, I added a new server as a regular consul server which has the following config.json:
{
"server": true,
"datacenter": "consul",
"data_dir": "/var/consul",
"log_level": "INFO",
"enable_syslog": true,
"disable_update_check": true,
"client_addr": "0.0.0.0",
"bootstrap": false,
"leave_on_terminate": true,
"ui_dir": "/usr/local/bin/consul_ui",
"check_update_interval": "0s",
"ports": {
"dns": 8600,
"http": 8500,
"https": 8700,
"rpc": 8400,
"serf_lan": 8301,
"serf_wan": 8302,
"server": 8300
},
"dns_config": {
"allow_stale": true,
"enable_truncate": true,
"only_passing": true,
"max_stale": "02s",
"node_ttl": "30s",
"service_ttl": {
"*": "10s"
}
},
"advertise_addr": "y.y.y.y",
"encrypt": "G74SM8N9NUc4meaHfA7CFg==",
"retry_join": [
"x.x.x.x",
"y.y.y.y"
]
}
Note: here, x.x.x.x is the IP address of the bootstrap server and y.y.y.y is the IP address of the regular server.
For testing purposes, I changed the encrypt key on one of the servers. When I run consul members, I can still see all the IPs, which means the servers are still able to communicate even with different encrypt keys. It seems that gossip encryption is not working.
A Consul instance will cache the initial key and re-use it. It is stored in the serf folder in the file local.keyring.
This is counter-intuitive, but it is documented at least in one place together with the encrypt option.
You'll need to delete this file and restart Consul in order to get the expected behaviour.
