I'm trying to run this on a Windows machine and keep getting a timeout. I have Selenium running, but it seems like it's not able to connect to the URL.
var client = require('webdriverjs').remote({
desiredCapabilities: {
browserName: 'chrome'
},
logLevel: 'verbose'
});
var expect = require('chai').expect;
describe('Test example.com', function(){
before(function(done) {
client.init().url('http://example.com', done);
});
describe('Check homepage', function(){
it('should see the correct title', function(done) {
client.getTitle(function(err, title){
expect(title).to.have.string('Example Domain');
done();
});
});
});
after(function(done) {
client.end();
done();
});
});
This is the error I receive:
1) Test example.com "before all" hook:
Error: timeout of 10000ms exceeded
at null.<anonymous> (C:\Users\sandy_000\AppData\Roaming\npm\node_modules\mocha\lib\runnable.js:139:19)
at Timer.listOnTimeout [as ontimeout] (timers.js:110:15)
I found this website: http://aboutcode.net/2013/12/02/automating-chrome-on-windows-with-javascript-using-selenium-webdriverjs.html
I needed to download the associated driver executable (chromedriver) and start Selenium with it like so:
c:\myproject>java -jar selenium-server-standalone-2.35.0.jar -Dwebdriver.chrome.driver=chromedriver.exe
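For completeness, the client can also be pointed explicitly at the standalone server. This is just a sketch; the host/port values below are the Selenium defaults, so they are optional:
var client = require('webdriverjs').remote({
    host: 'localhost',   // Selenium standalone server host (default)
    port: 4444,          // Selenium standalone server port (default)
    desiredCapabilities: {
        browserName: 'chrome'
    },
    logLevel: 'verbose'
});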
My Protractor test fails with the error below:
Error: Timeout - Async callback was not invoked within timeout specified by jasmine.DEFAULT_TIMEOUT_INTERVAL.
This is my spec file.
let common=require('./Objects/common.js')
describe('Cloud testing', function() {
it('Cloudtest1', function() {
let EC=protractor.ExpectedConditions;
browser.waitForAngularEnabled(false);
browser.get(common.loginURL);
common.txtUserName.sendKeys('aqaasdas#hkm.com');
common.txtPword.sendKeys('asdasd##$');
common.btnLogin.click();
browser.wait(EC.visibilityOf(element(by.xpath("//a[@class='btn btn-success']"))));
element(by.xpath("//a[@class='btn btn-success']")).click();
common.btnCrtPcr.click();
});
});
Any help is appreciated. I tried the answers to similar questions posted here, but nothing works for me. I'm running the latest Protractor and Chrome versions.
Have you tried an async test? Sometimes backend response times or browser performance can affect the test cases.
Refer: https://jasmine.github.io/2.0/introduction.html#section-Asynchronous_Support
describe("Your module", function() {
var originalTimeout;
beforeEach(function() {
originalTimeout = jasmine.DEFAULT_TIMEOUT_INTERVAL;
jasmine.DEFAULT_TIMEOUT_INTERVAL = 10000;
});
it("takes a long time", function(done) {
let EC=protractor.ExpectedConditions;
browser.waitForAngularEnabled(false);
browser.get(common.loginURL);
common.txtUserName.sendKeys('aqaasdas#hkm.com');
common.txtPword.sendKeys('asdasd##$');
common.btnLogin.click();
browser.wait(EC.visibilityOf(element(by.xpath("//a[@class='btn btn-success']"))));
element(by.xpath("//a[@class='btn btn-success']")).click();
common.btnCrtPcr.click().then(() => done()); // call done() once the last action resolves so Jasmine doesn't time out
});
afterEach(function() {
jasmine.DEFAULT_TIMEOUT_INTERVAL = originalTimeout;
});
});
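If the whole suite needs more time, the same timeout can also be raised globally in the Protractor config instead of per spec file. A rough sketch, assuming a standard protractor.conf.js (the 60000 values are just examples):
// protractor.conf.js (excerpt)
exports.config = {
    // ...
    jasmineNodeOpts: {
        // Default time in ms Jasmine waits for an async spec to finish.
        defaultTimeoutInterval: 60000
    },
    // Time in ms Protractor waits for scripts on the page to finish.
    allScriptsTimeout: 60000
};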
Your callback was taking longer than Jasmine's default timeout. You might want to use async/await in your it block, even though you have used an expected condition.
it('Cloudtest1', async () => {
let EC=protractor.ExpectedConditions;
await browser.waitForAngularEnabled(false);
await browser.get(common.loginURL);
await common.txtUserName.sendKeys('aqaasdas#hkm.com');
await common.txtPword.sendKeys('asdasd##$');
await common.btnLogin.click();
await browser.wait(EC.visibilityOf(element(by.xpath("//a[@class='btn btn-success']"))));
await element(by.xpath("//a[@class='btn btn-success']")).click();
await common.btnCrtPcr.click();
});
Suggestion: perform actions in a page object or in beforeEach/beforeAll, and use the it block for expect statements.
Example:
describe('When user logged Into account ', () => {
beforeAll(async () => {
await browser.waitForAngularEnabled(false);
await loginPO.login();
});
it('Browser title should be displayed.', async () => {
expect(await browser.getTitle()).toBe('test');
});
});
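Here loginPO stands for a small page object; a hypothetical sketch along these lines (the file name, locators, and credentials are placeholders, reusing the common object from the question):
// loginPO.js (hypothetical page object)
let common = require('./Objects/common.js');

module.exports = {
    login: async function () {
        await browser.get(common.loginURL);
        await common.txtUserName.sendKeys('user@example.com'); // placeholder credentials
        await common.txtPword.sendKeys('password');
        await common.btnLogin.click();
    }
};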
I'm testing an Angular App with Cypress.
I'm running my test with the Cypress dashboard, that I open using this command:
$(npm bin)/cypress open
I'm calling an API with my test: it works.
But when I change my code, Cypress reruns the tests, which causes my first (and only my first) test to fail: the request calling the API is aborted.
The only way to make it work again is to manually end the process and then start it again.
Has anyone got an idea what is causing this strange behaviour?
Here is my test code:
beforeEach(() => {
cy.visit('/');
cy.server();
cy.route('POST', `myUrl`).as('apiCall');
});
it('should find a doctor when user searches doctor with firstName', () => {
cy.get('#myInput').type('foo');
cy.get('#submitButton').click();
cy.wait('@apiCall').then((xhr) => {
expect(xhr.status).to.eq(200);
});
});
You can prepare an XHR stub like this:
describe('', () => {
let requests = {}; // for store sent request
beforeEach(() => {
cy.server({ delay: 500 }); // cypress will answer for mocked xhr after 0.5s
cy.route({
url: '<URL>',
method: 'POST',
response: 'fixture:response',
onRequest: ({ request }) => {
Object.assign(requests, { someRequest: request.body }); // mutate the shared object so the test can read the request body later
},
});
});
And then in the test:
it('', () => {
  cy
    .doSomeSteps()
    .assertEqual(requests, 'someRequest', { data: 42 });
});
}); // closes the describe block from the previous snippet
There are two advantages to this solution. First, the 0.5s delay makes the test more realistic, because a real backend doesn't answer immediately. Second, you can verify that the application sends the proper payload after the doSomeSteps() step.
assertEqual is just a utility command to make the assertion more readable:
Cypress.Commands.add('assertEqual', (obj, key, value) =>
cy
.wrap(obj)
.its(key)
.should('deep.equal', value)
);
I have 2 describe blocks in a spec file.
The first describe visits xyz.com and the second describe visits abc.com, and I need these 2 describes in one spec only. The weird behavior I see is that it runs the tests smoothly, but after visiting abc.com from the 2nd describe it starts running the 1st describe again: an infinite loop of tests.
var signedOutArtifactID = null;
describe('WEB APP E2E tests', function() {
var token = null;
before(function() {
cy.visit('/');
// Login
cy.get('#username')
.type(auth.geneticist.username);
cy.get('#password')
.type(auth.geneticist.password);
cy.get('button')
.contains('Login')
.click()
.should(function() {
token = localStorage.getItem('token');
expect(token).not.to.be.null;
});
});
beforeEach(function() {
localStorage.setItem('token', token);
cy.contains('Logout')
.should('exist');
expect(localStorage.getItem('token'));
});
it('should land on home page', function() {
cy.url()
.should('include', '/home');
});
it('should save and generate and end up on signout page', function() {
cy.contains('Save and Generate Report')
.click();
cy.url()
.should('include', '/sign-out');
});
it('should signout and send successfully', function() {
cy.url()
.should(function(currentURL) {
signedOutArtifactID = currentURL.match(/2-([0-9]+)/)[0];
expect(signedOutArtifactID).not.to.be.null;
});
// Make sure interpretation was updated
cy.get('.card-body pre')
.should('contain', 'test interpretation added by cypress');
cy.contains('Sign Out and Send')
.click();
cy.contains('Yes, sign out and send')
.click();
});
});
describe('2nd WEB APP E2E tests', function() {
before(function () {
cy.visit({
url:`https://webappurl.com/search?scope=All&query=${signedOutArtifactID}`,
failOnStatusCode: false
})
})
it('Review Completed step in clarity', async () => {
cy.get('#username').type(auth.clarity_creds.username)
cy.get('#password').type(auth.clarity_creds.password)
cy.get('#sign-in').click()
cy.get('.result-name').click()
cy.get('.view-work-link').contains('QWERTYU-IDS').click()
cy.get('.download-file-link ')
.should(($downloads) => {
expect($downloads).to.have.length(2)
})
});
});
describe defines a test suite. You can only have one top-level test suite per file, and only one domain per test.
I would just change your describes to contexts and wrap both contexts in a single describe, like so:
var signedOutArtifactID = null;
describe('e2e tests', function() {
context('WEB APP E2E tests', function() {
var token = null;
before(function() {
cy.visit('/');
// Login
cy.get('#username')
.type(auth.geneticist.username);
cy.get('#password')
.type(auth.geneticist.password);
cy.get('button')
.contains('Login')
.click()
.should(function() {
token = localStorage.getItem('token');
expect(token).not.to.be.null;
});
});
beforeEach(function() {
localStorage.setItem('token', token);
cy.contains('Logout')
.should('exist');
expect(localStorage.getItem('token'));
});
it('should land on home page', function() {
cy.url()
.should('include', '/home');
});
it('should save and generate and end up on signout page', function() {
cy.contains('Save and Generate Report')
.click();
cy.url()
.should('include', '/sign-out');
});
it('should signout and send successfully', function() {
cy.url()
.should(function(currentURL) {
signedOutArtifactID = currentURL.match(/2-([0-9]+)/)[0];
expect(signedOutArtifactID).not.to.be.null;
});
// Make sure interpretation was updated
cy.get('.card-body pre')
.should('contain', 'test interpretation added by cypress');
cy.contains('Sign Out and Send')
.click();
cy.contains('Yes, sign out and send')
.click();
});
});
context('2nd WEB APP E2E tests', function() {
before(function () {
cy.visit({
url:`https://webappurl.com/search?scope=All&query=${signedOutArtifactID}`,
failOnStatusCode: false
})
})
it('Review Completed step in clarity', async () => {
cy.get('#username').type(auth.clarity_creds.username)
cy.get('#password').type(auth.clarity_creds.password)
cy.get('#sign-in').click()
cy.get('.result-name').click()
cy.get('.view-work-link').contains('QWERTYU-IDS').click()
cy.get('.download-file-link ')
.should(($downloads) => {
expect($downloads).to.have.length(2)
})
});
});
})
There should be one describe block per suite (specification file). Therefore, when I need to wrap multiple related tests in one specification file, I use context. This is also what the Cypress documentation says:
The test interface, borrowed from Mocha, provides describe(),
context(), it() and specify().
context() is identical to describe() and specify() is identical to
it(), so choose whatever terminology works best for you
However, I believe the test structure and the describe/context/it hierarchy are a little off track. Here is how I write tests:
describe('User Authentication Using Custom Auth Token', () => {
beforeEach(() => {
cy.on('uncaught:exception', err => {
console.log('cypress has detected uncaught exception', err);
return false;
});
});
context('when not authenticated', () => {
it('Redirects to /login and Stays on /Login', () => {
cy.visit('/');
cy.location('pathname').should('equal', '/login');
// more on your logic
});
});
context('when authenticated', () => {
it('Successful login using Custom Auth Token', ()=>{
cy.visit('/')
cy.login();
// more on your logic
});
});
});
I'm trying to get a TDD workflow going with koa2/mocha/chai/chai-http, but my problem is that when I run the tests, the koa2 server keeps running after the tests are finished, so I have to Ctrl+C (kill) it every time.
Can anyone tell me how to set up a TDD workflow where the server gets stopped after all tests are run?
Also, I'd like to watch the test files for changes and re-run the tests as soon as changes are detected. Can anyone help with that? I can't find anything on the net.
What I currently have (simplified):
package.json:
"scripts": {
"watch-server": "nodemon --watch 'src/**/*' -e ts,tsx --exec 'ts-node' ./src/server.ts",
"test": "./node_modules/mocha/bin/mocha --compilers ts:ts-node/register test/**/*.ts"
},
server.ts:
app.use(routes_v1.routes());
export const server = app.listen(3000, () => {
console.log('Server running on port 3000');
});
test:
process.env.NODE_ENV = 'test';
import * as chai from 'chai';
const chaiHttp = require('chai-http');
const should = chai.should();
chai.use(chaiHttp);
import { server } from '../../../src/server';
describe('routes : login / register', () => {
describe('POST /sign_in', () => {
it('should return unauthorized for invalid user', (done) => {
chai.request(server)
.post('/sign_in')
.send({email: "test#test.de", password: "somePassword"})
.end((err, res) => {
res.status.should.eql(401);
should.exist(err);
done();
});
});
it('should return authorized for valid user', (done) => {
chai.request(server)
.post('/sign_in')
.send({email: 'authorized#test.de', password: "authorizedPassword"})
.end((err, res) => {
res.status.should.eql(200);
should.exist(res.body.token);
done();
});
});
});
});
Thank you.
Starting from version 4.0, Mocha no longer forces the process to exit once all tests complete. You can use the CLI parameter --exit to exit the process when the tests are finished:
"test": "mocha ... --exit"
Another option, which gives you more control over the process, is to use hooks: you can start the server before running the tests and stop it after:
describe('...', () => {
let server;
before(() => {
server = app.listen()
});
after(() => {
server.close()
});
...
})
As an example, you can take a look at this test. It uses Jest and supertest, but the idea is the same.
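Applied to the setup in your question, a minimal sketch: it assumes server.ts is split so the Koa app is exported without calling listen() (the app.ts file name and the routes import path are hypothetical):
// app.ts (hypothetical file: export the app without listening)
import * as Koa from 'koa';
import * as routes_v1 from './routes_v1'; // placeholder path for the existing router

export const app = new Koa();
app.use(routes_v1.routes());

// in the test file, start and stop the server around the suite
import { Server } from 'http';
import { app } from '../../../src/app';

describe('routes : login / register', () => {
  let server: Server;

  before(() => {
    server = app.listen(); // ephemeral port, started only for the tests
  });

  after(() => {
    server.close(); // lets the mocha process exit cleanly
  });

  // ...the existing chai.request(server) specs go here...
});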
I am getting my head around Protractor and PhantomJS. The test looks like this; it works fine with just Chrome:
describe('angularjs homepage', function () {
beforeEach(function () {
browser.driver.manage().window().setSize(1124, 850);
});
it('should greet the named user', function () {
browser.driver.get('https://angularjs.org/');
element(by.model('yourName')).sendKeys('Julie');
var greeting = element(by.binding('yourName'));
expect(greeting.getText()).toEqual('Hello Julie!');
});
});
The protractor.config looks like this:
// An example configuration file.
exports.config = {
// Capabilities to be passed to the webdriver instance.
capabilities: {
'browserName': 'phantomjs',
'phantomjs.binary.path': 'C:/ptor_testing/node_modules/phantomjs/lib/phantom/phantomjs.exe'
},
// For speed, let's just use the Chrome browser.
//chromeOnly: true,
// Spec patterns are relative to the current working directory when
// protractor is called.
specs: ['example.spec.js'],
// Options to be passed to Jasmine-node.
jasmineNodeOpts: {
// onComplete will be called just before the driver quits.
onComplete: null,
// If true, display spec names.
isVerbose: true,
// If true, print colors to the terminal.
showColors: true,
// If true, include stack traces in failures.
includeStackTrace: true,
// Default time to wait in ms before a test fails.
defaultTimeoutInterval: 30000
},
seleniumAddress: 'http://localhost:4444/wd/hub'
};
When I run this, I get:
Error: Error while waiting for Protractor to sync with the page: {"message":"Can't find variable: angular","name":"ReferenceError","line":4,"stack":"ReferenceError: Can't find variable: angular\n at :4\n at anonymous (:13)\n at Na (phantomjs://webpage.evaluate():14)\n at phantomjs://webpage.evaluate():15\n at phantomjs://webpage.evaluate():15\n at phantomjs://webpage.evaluate():16\n at phantomjs://webpage.evaluate():16\n at phantomjs://webpage.evaluate():16","stackArray":[{"sourceURL":"","line":4},{"sourceURL":"","line":13,"function":"anonymous"},{"sourceURL":"phantomjs://webpage.evaluate()","line":14,"function":"Na"},{"sourceURL":"phantomjs://webpage.evaluate()","line":15},{"sourceURL":"phantomjs://webpage.evaluate()","line":15},{"sourceURL":"phantomjs://webpage.evaluate()","line":16},{"sourceURL":"phantomjs://webpage.evaluate()","line":16},{"sourceURL":"phantomjs://webpage.evaluate()","line":16}],"sourceId":81819056}
How can I fix this?