I am trying to run Behat inside an Nginx container in a Laravel app.
My docker-compose.yml
version: '2'
services:
  load_balancer:
    image: tutum/haproxy
    links:
      - nginx
    ports:
      - "8888:80"
  nginx:
    image: andrewmclagan/nginx-hhvm
    volumes:
      - ./www:/var/www
      - ./sites:/etc/nginx/sites-enabled
    links:
      - mysql:mysql
      - redis:predis
    networks:
      - front-tier
      - back-tier
    environment:
      - APP_ENV=local
      - DB_DATABASE=homestead
      - DB_PASSWORD=secret
      - DB_HOST=mysql
  artisan:
    image: spiralout/dartisan
    volumes:
      - ./www:/var/www
    links:
      - mysql:mysql
      - redis:predis
    networks:
      - front-tier
      - back-tier
  mysql:
    image: spiralout/alpine-mariadb
    ports:
      - "3306:3306"
    volumes:
      - ./database:/var/lib/mysql
    networks:
      - back-tier
  composer:
    image: spiralout/dcomposer
    volumes:
      - ./www:/var/www
    networks:
      - back-tier
  nodejs:
    image: spiralout/dnodejs
    volumes:
      - ./www:/var/www
    networks:
      - back-tier
  redis:
    image: spiralout/alpine-redis
    volumes:
      - ./redis-data:/data
    networks:
      - back-tier
volumes:
  www:
  sites:
  redis-data:
networks:
  front-tier:
  back-tier:
What I do after is
docker-compose up -d
and then
docker exec -it <nginx_name> /bin/bash
I then try to run Behat:
vendor/behat/behat/bin/behat
My test simply tries to visit and print the homepage.
FeatureContext.php
<?php

use Behat\Behat\Context\Context;
use Behat\Behat\Context\SnippetAcceptingContext;
use Behat\Gherkin\Node\PyStringNode;
use Behat\Gherkin\Node\TableNode;

/**
 * Defines application features from the specific context.
 */
class FeatureContext extends Behat\MinkExtension\Context\MinkContext implements Context, SnippetAcceptingContext
{
    /**
     * Initializes context.
     *
     * Every scenario gets its own context instance.
     * You can also pass arbitrary arguments to the
     * context constructor through behat.yml.
     */
    public function __construct()
    {
    }

    /**
     * @Given that I am on the homepage
     */
    public function thatIAmOnTheHomepage()
    {
        $this->iAmOnHomepage();
        $this->visit("https://localhost:80");
        $this->printLastResponse();
    }

    /**
     * @Given that name is :arg1
     */
    public function thatNameIs($arg1)
    {
        $this->fillField('name', $arg1);
    }

    /**
     * @Given that email is :arg1
     */
    public function thatEmailIs($arg1)
    {
        $this->fillField('email', $arg1);
    }

    /**
     * @When you try to register
     */
    public function youTryToRegister()
    {
        $this->pressButton('registerButton');
    }

    /**
     * @Then you remain on the homepage
     */
    public function youRemainOnTheHomepage()
    {
        $this->assertUrlRegExp('#http://localhost#');
    }
}
The problems are that I can't access the web app; I only get the default nginx page. And secondly, when my test tries to visit the app it just gets "page not found".
Any help appreciated.
The solution was to create a new project with the dev-develop version constraint:
composer create-project laravel/laravel new dev-develop
For some reason MinkExtension wasn't working correctly for me without it.
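For reference, MinkExtension reads its base URL from behat.yml; a minimal sketch of such a configuration (the URL and the Goutte session are assumptions, not taken from the question):
# behat.yml (sketch; adjust base_url and driver to your setup)
default:
  extensions:
    Behat\MinkExtension:
      base_url: http://localhost
      sessions:
        default:
          goutte: ~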
Related
Hello, I have a problem: I cannot receive the data sent from the backend through broadcastWith() in Laravel.
Dealers.js
async mounted() {
    await this.loadDealers()

    this.echo.connector.socket.on('connect', () => {
        console.log('Connected')
    })

    console.log(this.echo)

    this.echo.channel('Dealers')
        .subscribed(() => {
            console.log("Echo connected to Dealers channel!");
        })
        .listen('Dealers', (data) => {
            console.log(data)
        })

    this.echo.connector.socket.on('disconnect', () => {
        console.log('Disconnect')
    })
},
docker-compose.yml
version: '3.8'
services:
  back-office:
    build:
      context: ./
      dockerfile: docker/etc/nodejs/Dockerfile
    working_dir: /app
    command: ["npm", "run", "serve"]
    ports:
      - 3000:8080
      #- 8080:8080
    environment:
      NODE_ENV: "development"
    volumes:
      - ./back-office:/app
    networks:
      - default
    depends_on:
      - cineonic-api-backend

  cineonic-api-backend:
    env_file: .env
    build:
      context: ./
      dockerfile: docker/etc/php-fpm/Dockerfile
    volumes:
      - ./api-backend:/var/www
    expose:
      - 8000
    ports:
      - "8000:8000"
    networks:
      default:
        aliases:
          - ${APP_HOST}
    working_dir: /var/www
    command: php artisan serve --host=0.0.0.0 --port=8000
    depends_on:
      - mysql
      - redis
      - mailhog

  echo:
    image: node:latest
    env_file:
      - .env
      - echo/.env
    volumes:
      - ./echo:/app
    working_dir: /app
    command: bash -c "
      rm -f laravel-echo-server.lock
      && npm run start"
    ports:
      - 6001:6001
    depends_on:
      - redis
      - cineonic-api-backend
    networks:
      - default
UpdatingDealersLatLonEvent.php
<?php

namespace App\Events;

use Illuminate\Broadcasting\Channel;
use Illuminate\Broadcasting\InteractsWithSockets;
use Illuminate\Broadcasting\PresenceChannel;
use Illuminate\Broadcasting\PrivateChannel;
use Illuminate\Contracts\Broadcasting\ShouldBroadcast;
use Illuminate\Foundation\Events\Dispatchable;
use Illuminate\Queue\SerializesModels;

class UpdatingDealersLatLonEvent implements ShouldBroadcast
{
    use Dispatchable, InteractsWithSockets, SerializesModels;

    public $data;
    public mixed $lon;

    public function __construct($data)
    {
        $this->data = $data;
    }

    public function broadcastAs()
    {
        return 'Dealers';
    }

    public function broadcastOn(): Channel
    {
        return new Channel('Dealers');
    }

    public function broadcastWith()
    {
        return ['data' => $this->data];
    }
}
I receive the WebSocket events on the frontend, but with empty data.
In my Echo server logs I can only see the event being sent to the channel from the backend:
cineonic-echo-1 | Server ready!
cineonic-echo-1 |
cineonic-echo-1 | Channel: Dealers
cineonic-echo-1 | Event: Dealers
But on the frontend I can't even see the following messages:
console.log('Connected')
console.log("Echo connected to Dealers channel!")
Only this one: console.log('Disconnect')
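For context, an event like this is fired from the backend with Laravel's standard event dispatcher, roughly as in the following sketch (the payload is hypothetical, for illustration only):
// Somewhere in a controller or job on the backend
event(new \App\Events\UpdatingDealersLatLonEvent([
    'lat' => 48.85,
    'lon' => 2.35,
]));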
After several hours of trying things, I think I'm too confused to understand what's going wrong... The title explains exactly what I'm trying to get working.
My docker-compose.yml:
version: '3'
services:
  mysite.test:
    build:
      context: ./docker/8.1
      dockerfile: Dockerfile
      args:
        WWWGROUP: '${WWWGROUP}'
    image: sail-8.1/app
    extra_hosts:
      - 'host.docker.internal:host-gateway'
    ports:
      - '${APP_PORT:-80}:80'
      - '443:443' # added for test but not working...
      - '${HMR_PORT:-8080}:8080'
      - '5173:5173' # Vite port
    environment:
      WWWUSER: '${WWWUSER}'
      LARAVEL_SAIL: 1
      XDEBUG_MODE: '${SAIL_XDEBUG_MODE:-off}'
      XDEBUG_CONFIG: '${SAIL_XDEBUG_CONFIG:-client_host=host.docker.internal}'
    volumes:
      - '.:/var/www/html'
    networks:
      - sail
    depends_on:
      - mysql
      - minio
  mysql:
    image: 'mysql/mysql-server:8.0'
    ports:
      - '${FORWARD_DB_PORT:-3306}:3306'
    environment:
      MYSQL_ROOT_PASSWORD: '${DB_PASSWORD}'
      MYSQL_ROOT_HOST: "%"
      MYSQL_DATABASE: '${DB_DATABASE}'
      MYSQL_USER: '${DB_USERNAME}'
      MYSQL_PASSWORD: '${DB_PASSWORD}'
      MYSQL_ALLOW_EMPTY_PASSWORD: 1
    volumes:
      - 'sail-mysql:/var/lib/mysql'
      - './vendor/laravel/sail/database/mysql/create-testing-database.sh:/docker-entrypoint-initdb.d/10-create-testing-database.sh'
    networks:
      - sail
    healthcheck:
      test: ["CMD", "mysqladmin", "ping", "-p${DB_PASSWORD}"]
      retries: 3
      timeout: 5s
networks:
  sail:
    driver: bridge
volumes:
  sail-nginx:
    driver: local
  sail-mysql:
    driver: local
My vite.config.js:
import { defineConfig } from 'vite';
import laravel from 'laravel-vite-plugin';

export default defineConfig({
    server: {
        https: true,
        host: '0.0.0.0'
    },
    plugins: [
        laravel({
            input: [
                'resources/css/app.css',
                'resources/js/app.js',
            ],
            refresh: true
        }),
    ],
});
My .env:
(...)
APP_URL=https://mysite.test
APP_SERVICE=mysite.test
(...)
The result of this configuration is that it works with http://mysite.test but not over HTTPS, which returns:
This site can’t be reached
mysite.test unexpectedly closed the connection.
Does anyone have any tips for me? 🙏
Thank you!
I managed to get this working with Caddy using the following gist:
https://gist.github.com/gilbitron/36d48eb751875bebdf43da0a91c9faec
After all of the above I added the Vite port to docker-compose.yml under the laravel.test service:
ports:
  - '5173:5173'
Also, Vite itself needs an SSL certificate.
vite.config.js:
import { defineConfig } from 'vite';
import laravel from 'laravel-vite-plugin';
import vue from '@vitejs/plugin-vue';
import basicSsl from '@vitejs/plugin-basic-ssl';

export default defineConfig({
    server: {
        https: true,
        host: '0.0.0.0',
        hmr: {
            host: 'localhost'
        },
    },
    plugins: [
        basicSsl(),
        laravel({
            input: 'resources/js/app.js',
            refresh: true,
        }),
        vue({
            template: {
                transformAssetUrls: {
                    base: null,
                    includeAbsolute: false,
                },
            },
        }),
    ],
});
For some reason my app generates http links, not https, so I added the following to the boot method of my AppServiceProvider.php:
\URL::forceScheme('https');
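In context, that call lives in the provider's boot method; a minimal sketch of the full file:
<?php

namespace App\Providers;

use Illuminate\Support\Facades\URL;
use Illuminate\Support\ServiceProvider;

class AppServiceProvider extends ServiceProvider
{
    public function boot(): void
    {
        // Force generated URLs (routes, assets) to use the https scheme.
        URL::forceScheme('https');
    }
}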
Using the share command you can run an HTTPS tunnel through Expose. Your domain will look like
https://[something].expose.dev
Getting a real, globally trusted SSL certificate can only be done through a certificate authority such as Let's Encrypt. They need to verify that you own the domain, either via an HTTP challenge or a DNS challenge.
As you cannot really own a .test domain, your only remaining option is to sign a certificate yourself and add it, along with its root certificate, to your own machine. If you do this, only your computer will show the connection as secure, though.
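For example, a locally trusted certificate for a .test domain can be generated with mkcert (a sketch, assuming mkcert is installed; the file names are what mkcert prints by default):
# One-time: create a local root CA and add it to the system trust stores
mkcert -install
# Issue a certificate for the local domain
mkcert mysite.test
# Produces mysite.test.pem and mysite.test-key.pem, to be referenced
# by whatever terminates TLS (Caddy, Traefik, nginx, ...)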
I have a .NET Core Web API server and an OpenTelemetry Collector running as Docker containers. With my docker-compose, no events from the web server are sent to the otel collector container, although it works when using "http://localhost:4318" as the OTLP endpoint without Docker.
My docker-compose.yml:
version: "2.1"
services:
web:
build:
context: ..
dockerfile: ./MyWebServer/Dockerfile
container_name: web
networks:
- test_net
ports:
- "8080:80"
otel:
image: "otel/opentelemetry-collector:latest"
container_name: otel
command: ["--config=/etc/otel-collector-config.yml"]
volumes:
- ./OpenTelemetry/otel-collector-config.yml:/etc/otel-collector-config.yml
networks:
test_net:
ipv4_address: 172.23.10.4
ports:
- "4318:4318"
networks:
test_net:
ipam:
config:
- subnet: 172.23.0.0/16
My Program.cs:
using System.Diagnostics;
using OpenTelemetry.Exporter; // for OtlpExportProtocol
using OpenTelemetry.Resources;
using OpenTelemetry.Trace;

const string serviceName = "MyWebServer";
const string serviceVersion = "1.0.0";

var builder = WebApplication.CreateBuilder(args);

builder.Services.AddControllers();
builder.Services.AddOpenTelemetryTracing(tracerProviderBuilder =>
{
    tracerProviderBuilder
        .AddOtlpExporter(opt =>
        {
            opt.Endpoint = new Uri("http://172.23.10.4:4318");
            opt.Protocol = OtlpExportProtocol.HttpProtobuf;
        })
        .AddSource(serviceName)
        .SetResourceBuilder(
            ResourceBuilder.CreateDefault()
                .AddService(serviceName: serviceName, serviceVersion: serviceVersion))
        .AddHttpClientInstrumentation()
        .AddAspNetCoreInstrumentation()
        .AddSqlClientInstrumentation();
});

var app = builder.Build();

app.UseRouting();
app.UseEndpoints(endpoints => { endpoints.MapControllers(); });

app.Run();
You don't need to use an IP address here; you can use the service name and Docker will resolve it automatically for you. Instead of:
opt.Endpoint = new Uri("http://172.23.10.4:4318");
in your case it should be:
opt.Endpoint = new Uri("http://otel:4318");
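It's also worth checking that the collector's OTLP/HTTP receiver listens on all interfaces. A minimal otel-collector-config.yml sketch (the logging exporter is only for illustration):
receivers:
  otlp:
    protocols:
      http:
        endpoint: 0.0.0.0:4318

exporters:
  logging:

service:
  pipelines:
    traces:
      receivers: [otlp]
      exporters: [logging]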
I'd like to use Traefik as a reverse proxy in front of a Ratchet WebSocket server (the 3rd option suggested in the deploy section).
The goal is to manage HTTPS and wss with the reverse proxy while keeping plain HTTP and ws on the Ratchet server.
My WebSocket server listens on port 8080, like in this example:
public function run()
{
    $loop = React\EventLoop\Factory::create();
    $pusher = new Pusher();

    // Listen for the web server to make a ZeroMQ push after an AJAX request
    $context = new React\ZMQ\Context($loop);
    $pull = $context->getSocket(ZMQ::SOCKET_PULL);
    $pull->bind('tcp://0.0.0.0:5555');
    $pull->on('message', array($pusher, 'onEntry'));

    // Set up our WebSocket server for clients wanting real-time updates
    $webSock = new React\Socket\Server('0.0.0.0:8443', $loop);
    $webServer = new IoServer(
        new HttpServer(
            new WsServer(
                new WampServer(
                    $pusher
                )
            )
        ),
        $webSock
    );

    $loop->run();
}
Following this post, I have been able to configure HTTPS via Traefik.
Here is my simplified docker-compose.yml:
nginx:
  image: wodby/nginx:$NGINX_TAG
  container_name: "${PROJECT_NAME}_nginx"
  depends_on:
    - php
  environment:
    NGINX_STATIC_OPEN_FILE_CACHE: "off"
    NGINX_ERROR_LOG_LEVEL: debug
    NGINX_BACKEND_HOST: php
    NGINX_SERVER_ROOT: /var/www/html/webroot
    NGINX_VHOST_PRESET: $NGINX_VHOST_PRESET
  volumes:
    - ./html:/var/www/html:cached
  labels:
    - "traefik.http.routers.${PROJECT_NAME}_nginx.rule=Host(`${PROJECT_BASE_URL}`)"
    - "traefik.http.middlewares.${PROJECT_NAME}_nginx_https.redirectscheme.scheme=https"
    - "traefik.http.routers.${PROJECT_NAME}_nginx.entrypoints=web"
    - "traefik.http.routers.${PROJECT_NAME}_nginx.middlewares=${PROJECT_NAME}_nginx_https@docker"
    - "traefik.http.routers.${PROJECT_NAME}_nginx_https.rule=Host(`${PROJECT_BASE_URL}`)"
    - "traefik.http.routers.${PROJECT_NAME}_nginx_https.tls=true"
    - "traefik.http.routers.${PROJECT_NAME}_nginx_https.entrypoints=websecure"

php:
  build:
    context: .
    dockerfile: docker/php-fpm/Dockerfile
  container_name: "${PROJECT_NAME}_php"
  volumes:
    - ./html:/var/www/html
  labels:
    - "traefik.http.routers.php.rule=Host(`${PROJECT_BASE_URL}`)"

traefik:
  image: traefik:v2.0
  container_name: "${PROJECT_NAME}_traefik"
  command:
    - "--api.insecure=true"
    - "--entrypoints.web.address=:80"
    - "--entrypoints.websecure.address=:443"
    - "--providers.docker=true"
    - "--providers.file.filename=/etc/traefik/dynamic_conf/config.yml"
    - "--providers.file.watch=true"
  ports:
    - "80:80"
    - "443:443"
  volumes:
    - "/var/run/docker.sock:/var/run/docker.sock"
    - "./docker/traefik/config.yml:/etc/traefik/dynamic_conf/config.yml" # used to define the certificate path
    - "./docker/certs:/tools/certs"
However, how can I now forward HTTPS/wss traffic as plain HTTP/ws to the php service?
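For what it's worth, Traefik 2 proxies WebSocket upgrade requests on a normal HTTP router, so terminating TLS at Traefik and forwarding plain ws to the container should only need labels along these lines (a sketch; the ratchet service name, host rule, and port 8080 are illustrative assumptions, untested):
ratchet:
  # ... build/image for the Ratchet container ...
  labels:
    - "traefik.http.routers.ratchet.rule=Host(`ws.${PROJECT_BASE_URL}`)"
    - "traefik.http.routers.ratchet.tls=true"
    - "traefik.http.routers.ratchet.entrypoints=websecure"
    - "traefik.http.services.ratchet.loadbalancer.server.port=8080"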
I am working on a Laravel project and started writing browser tests using Dusk. I am using Docker as my development environment. When I run the tests, I get a "Connection refused" error.
This is my docker-compose.yaml file.
version: '3'
services:
  apache:
    container_name: res_apache
    image: webdevops/apache:ubuntu-16.04
    environment:
      WEB_DOCUMENT_ROOT: /var/www/public
      WEB_ALIAS_DOMAIN: restaurant.localhost
      WEB_PHP_SOCKET: php-fpm:9000
    volumes: # Only shared dirs to apache (to be served)
      - ./public:/var/www/public:cached
      - ./storage:/var/www/storage:cached
    networks:
      - res-network
    ports:
      - "8081:80"
      - "443:443"
  php-fpm:
    container_name: res_php
    image: jguyomard/laravel-php:7.3
    volumes:
      - ./:/var/www/
      - ./ci:/var/www/ci:cached
      - ./vendor:/var/www/vendor:delegated
      - ./storage:/var/www/storage:delegated
      - ./node_modules:/var/www/node_modules:cached
      - ~/.ssh:/root/.ssh:cached
      - ./composer.json:/var/www/composer.json
      - ./composer.lock:/var/www/composer.lock
      - ~/.composer/cache:/root/.composer/cache:delegated
    networks:
      - res-network
  db:
    container_name: res_db
    image: mariadb:10.2
    environment:
      MYSQL_ROOT_PASSWORD: root
      MYSQL_DATABASE: restaurant
      MYSQL_USER: restaurant
      MYSQL_PASSWORD: secret
    volumes:
      - res-data:/var/lib/mysql
    networks:
      - res-network
    ports:
      - "33060:3306"
  chrome:
    image: robcherry/docker-chromedriver
    networks:
      - res-network
    environment:
      CHROMEDRIVER_WHITELISTED_IPS: ""
      CHROMEDRIVER_PORT: "9515"
    ports:
      - 9515:9515
    cap_add:
      - "SYS_ADMIN"
networks:
  res-network:
    driver: "bridge"
volumes:
  res-data:
    driver: "local"
The following is the driver function in the DuskTestCase.php class.
/**
 * Create the RemoteWebDriver instance.
 *
 * @return \Facebook\WebDriver\Remote\RemoteWebDriver
 */
protected function driver()
{
    $options = (new ChromeOptions)->addArguments([
        '--disable-gpu',
        '--headless',
        '--window-size=1920,1080',
    ]);

    return RemoteWebDriver::create(
        'http://localhost:9515', DesiredCapabilities::chrome()->setCapability(
            ChromeOptions::CAPABILITY, $options
        )
    );
}
I run the tests with the following command.
docker-compose exec php-fpm php artisan dusk
Then I get the following error.
Facebook\WebDriver\Exception\WebDriverCurlException: Curl error thrown for http POST to /session with params: {"capabilities":{"firstMatch":[{"browserName":"chrome","goog:chromeOptions":{"args":["--disable-gpu","--headless","--window-size=1920,1080"]}}]},"desiredCapabilities":{"browserName":"chrome","platform":"ANY","chromeOptions":{"args":["--disable-gpu","--headless","--window-size=1920,1080"]}}}
Failed to connect to localhost port 9515: Connection refused
/var/www/vendor/php-webdriver/webdriver/lib/Remote/HttpCommandExecutor.php:331
What is wrong with my configuration and how can I fix it?
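As with the OTLP endpoint earlier in this thread, localhost inside the php-fpm container refers to that container itself, not to the chrome container, so the usual approach is to target the compose service name instead. A sketch of the change in driver() (untested):
// In DuskTestCase::driver(): target the "chrome" compose service, not localhost
return RemoteWebDriver::create(
    'http://chrome:9515', DesiredCapabilities::chrome()->setCapability(
        ChromeOptions::CAPABILITY, $options
    )
);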