samedi 29 février 2020

End-to-end testing for endpoint that handles login

I'm trying to test a login endpoint using supertest. I know that the code for the API is correct because when I do the test on Postman, it works. However, when I do it using automated testing, I am getting an error. Specifically, I make a .post request to my login endpoint, pass in a user, expect to get a 200 status code. I end up getting a 401 status code, which per my code, indicates 'invalid credentials.' Can someone help me figure out why this is happening, and lead me in the right direction to doing my testing correctly?

my testing file (auth-router.spec.js):

const request = require('supertest');
const server = require('../api/server');
const users = require('./users-model');
const db = require('../database/dbConfig');

// Sanity suite: the rest of the file assumes it is talking to the test DB,
// so fail fast if DB_ENV was not set to 'testing' by the test runner.
describe('auth router', () => {
    describe('test environment', () => {
        it('should use the testing environment', () => {
            expect(process.env.DB_ENV).toBe('testing');
        });
    });
});

describe('POST /api/auth/register tests', function() {
    // Start every test from an empty users table so results are independent.
    beforeEach(async () => {
        await db('users').truncate();
    });

    describe('insert()', function() {
        it('adds a new user to the database', async function() {
            await users.add({ username: 'jevon', password: 'cochran' });

            const table = await db('users');

            expect(table).toHaveLength(1);
        });

        it('should insert provided user into the database', async function() {
            let newUser = await users.add({ username: 'leroy', password: 'gatlin' });

            expect(newUser.username).toBe('leroy');
        });
    });

    describe('POST /api/auth/login tests', function() {
        it('returns 200 OK', async function() {
            // BUG FIX: the user must be created through the /register endpoint so
            // the password gets bcrypt-hashed. Inserting via users.add() stores the
            // plain-text password, so the login route's bcrypt.compareSync() check
            // fails and the test received 401 instead of 200. Also note beforeEach
            // truncates the table, so the user must be created inside this test.
            await request(server)
                .post('/api/auth/register')
                .send({ username: 'leroy', password: 'gatlin' });

            const res = await request(server)
                .post('/api/auth/login')
                .send({ username: 'leroy', password: 'gatlin' });

            expect(res.status).toBe(200);
        });
    });
});

auth-router.js:

const router = require('express').Router();
const bcrypt = require('bcryptjs');
const jwt = require('jsonwebtoken');

const users = require('./users-model');

// Health-check endpoint: confirms the auth router is mounted and responding.
router.get('/', (req, res) => {
  return res.status(200).json({ api: 'up' });
});

// POST /api/auth/register — creates a user, storing a bcrypt hash of the password.
router.post('/register', (req, res) => {
  let user = req.body;

  // Guard against a missing body or credentials: bcrypt.hashSync throws on
  // undefined input, which previously crashed the request instead of
  // returning a client error.
  if (!user || !user.username || !user.password) {
    return res.status(400).json({ errorMessage: 'username and password required' });
  }

  const hash = bcrypt.hashSync(user.password, 8);
  user.password = hash;
  // NOTE: the original console.log(user) calls are removed — the first one
  // logged the plain-text password.

  users.add(user)
    .then(added => {
      res.status(201).json(added);
    })
    .catch(error => {
      console.log(error);
      res.status(500).json({ errorMessage: 'unable to register user' });
    });
});

// POST /api/auth/login — verifies credentials and returns a signed JWT.
router.post('/login', (req, res) => {
  let { username, password } = req.body;

  users.findBy({ username })
    .first()
    .then(user => {
      // Issue a token only when the user exists AND the bcrypt hash matches.
      if (user && bcrypt.compareSync(password, user.password)) {
        const token = generateToken(user);

        res.status(200).json({
          message: `Welcome ${user.username}`, // BUG FIX: key was misspelled "messsage"
          token
        });
      } else {
        // Same response for unknown user and wrong password, so the endpoint
        // does not reveal which usernames exist.
        res.status(401).json({ message: 'Invalid credentials' });
      }
    })
    .catch(error => {
      console.log(error);
      res.status(500).json({ errorMessage: 'unable to login' });
    });
});

/**
 * Builds a signed JWT for the given user.
 * The payload carries only the username; the token expires after one hour.
 * Secret comes from JWT_SECRET, falling back to a development default.
 */
function generateToken(user) {
  const secret = process.env.JWT_SECRET || 'top secret';
  return jwt.sign({ username: user.username }, secret, { expiresIn: '1h' });
}

module.exports = router;

Here is what's happening when I run my tests: enter image description here

React: Trying to mirror __tests__/ with src/

I want to have my __tests__/ mirror the src/. I am able to run the tests, but they are all failing with the same error below:

/application/src/setup.js:1
    ({"Object.<anonymous>":function(module,exports,require,__dirname,__filename,global,jest){import Enzyme from 'enzyme';
                                                                                             ^^^^^^

    SyntaxError: Cannot use import statement outside a module

setup.js

// Jest/Enzyme test setup: registers the React 16 adapter so Enzyme's
// renderers work in this test environment. This file must be executed by the
// test runner before any test (e.g. via Jest's setupFiles/setupFilesAfterEach).
import Enzyme from 'enzyme'
import Adapter from 'enzyme-adapter-react-16'

Enzyme.configure({ adapter: new Adapter() })

The file structure for my project is as follows (__tests__/ mirrors src/) :

application/
  __tests__/
   App.test.js
   components/
    c.test.js
  src/
   components/
    c.js
   styles/
   App.js
   jest.config.js
   setup.js

I specifically do not want to move my __tests__/ into src/ and want to keep the structure as is; any help is appreciated!

Cyclomatic Complexity of multiple if statements

if (k == 1)
        r += a;
else if (k == 2)
        r += b;
else if (k == 3)
        r += c;
else
        r += d;


switch (k) {
    case 1:
        r += a;
        break;
    case 2:
        r += b;
        break;
    case 3:
        r += c;
        break;
    default:
        r += d;
        break;
}

I am trying to understand whether for both the multiple if statement and the switch case the sample control flow diagram is the below diagram. I am sure that it is true for the switch case but i am unable to draw one for the multiple if statement

Control flow graph

Dcoder Challenge "Love For Mathematics": Only one out of three testcases correct

I just completed a challenge in Dcoder using C. I got one correct test case out of three, and I can't seem to find the problem that prevents me from getting the other two test cases right as well. I'm still new to C, so please excuse if my code may look inexperienced. Any help is appreciated.

Here is the task:

Problem Statement
Students of Dcoder school love Mathematics. They love to read a variety of Mathematics books. To make sure they remain happy,their Mathematics teacher decided to get more books for them. A student would become happy if there are at least X Mathematics books in the class and not more than Y books because they know "All work and no play makes Jack a dull boy".The teacher wants to buy a minimum number of books to make the maximum number of students happy.

Input
The first line of input contains an integer N indicating the number of students in the class. This is followed up by N lines where every line contains two integers X and Y respectively.

Output
Output two space-separated integers that denote the minimum number of mathematics books required and the maximum number of happy students. Explanation: The teacher could buy 5 books and keep student 1, 2, 4 and 5 happy.

Constraints
1<=N<=10000 1<=X,Y<=10^9

Sample Input

5
3 6
1 6
7 11
2 15
5 8

Sample Output

5 4

And here is my code:

#include <limits.h>   /* INT_MAX */
#include <stdio.h>
#include <stdlib.h>   /* malloc, calloc, free — previously missing */

//Compiler version gcc  6.3.0

/* One student's acceptable range: the student is happy when the class owns
 * between minBooks and maxBooks books, inclusive. */
typedef struct{
    int minBooks;
    int maxBooks;
} Student;

/* One candidate outcome: buying `books` books makes `happyStudents` happy. */
typedef struct{
    int books;
    int happyStudents;
} Happiness;

/* Reads `len` whitespace-separated (min, max) pairs from stdin into arr. */
void fillarray(Student *arr, int len){
    for(int i = 0; i < len; i++){
        scanf(" %d %d", &arr[i].minBooks, &arr[i].maxBooks);
    }
}

/* Returns the largest maxBooks value found in arr (0 when len <= 0). */
int getmaximum(Student *arr, int len){
    int best = 0;
    for(Student *s = arr; s < arr + len; ++s){
        if(s->maxBooks > best){
            best = s->maxBooks;
        }
    }
    return best;
}

/*
 * For every candidate book count 1..max, counts how many students would be
 * happy (minBooks <= count <= maxBooks). Returns a heap array of `max`
 * entries that the caller must free, or NULL on allocation failure.
 *
 * BUG FIX: happyStudents was incremented without ever being initialised, so
 * each tally started from whatever garbage malloc() returned. calloc()
 * zero-initialises the array.
 *
 * NOTE(review): this is O(max * len) in time and O(max) in memory; with
 * maxBooks up to 10^9 (per the stated constraints) that is far too slow and
 * large — likely why the remaining test cases fail. A sweep over the sorted
 * interval endpoints would be needed for full marks.
 */
Happiness *calchappiness(Student *arr, int len, int max){
    Happiness *re = calloc((size_t)max, sizeof(Happiness));
    if(re == NULL)
        return NULL;
    for(int i = 1; i <= max; i++){
        re[i - 1].books = i;

        for(int j = 0; j < len; j++){
            if(i >= arr[j].minBooks && i <= arr[j].maxBooks)
                re[i - 1].happyStudents++;
        }
    }
    return re;
}

/*
 * Returns the entry with the most happy students (first such entry on ties,
 * i.e. the one with the fewest books since the array is ordered by books).
 *
 * BUG FIX: `re` was only partially initialised; when every entry had zero
 * happy students, the returned .books field was indeterminate. Start from
 * the first element instead (zeroed struct for an empty array).
 */
Happiness getmaxhappiness(Happiness *arr, int len){
    Happiness re = {0, 0};
    if(len > 0)
        re = arr[0];
    for(int i = 1; i < len; i++)
        if(arr[i].happyStudents > re.happyStudents)
            re = arr[i];
    return re;
}

/*
 * Returns the entry with the fewest books.
 *
 * BUG FIX: the sentinel was 1000, but book counts can reach 10^9 per the
 * constraints, so any array whose smallest entry exceeded 1000 returned a
 * half-initialised struct. Use INT_MAX and zero-initialise the whole struct.
 */
Happiness getminbooks(Happiness *arr, int len){
    Happiness re = {INT_MAX, 0};
    for(int i = 0; i < len; i++)
        if(arr[i].books < re.books)
            re = arr[i];
    return re;
}

/*
 * Picks the entry to report: starts from the cheapest option, then replaces
 * it whenever a strictly happier entry appears. Because the array is ordered
 * by ascending book count, the first maximum found also has the fewest books.
 */
Happiness besthappiness(Happiness *arr, int len){
    Happiness best = getminbooks(arr, len);
    int i = 0;
    while(i < len){
        if(arr[i].happyStudents > best.happyStudents)
            best = arr[i];
        i++;
    }
    return best;
}

int main(void)
{
    /* Number of students; bail out on malformed input instead of reading
     * an indeterminate value (scanf was previously unchecked). */
    int len;
    if(scanf(" %d", &len) != 1 || len <= 0)
        return 1;

    Student *students = malloc((size_t)len * sizeof(Student));
    if(students == NULL)
        return 1;
    fillarray(students, len);

    /* Evaluate every candidate book count up to the largest upper bound. */
    int happylen = getmaximum(students, len);
    Happiness *happy = calchappiness(students, len, happylen);
    if(happy == NULL){
        free(students);
        return 1;
    }
    Happiness output = besthappiness(happy, happylen);

    printf("%d %d", output.books, output.happyStudents);

    free(students);
    free(happy);
    return 0;
}

How to test a cartesian coordinates input (int[][]) to a Java class in Sublime Text 3?

I'm running a correct algorithm for Max Points on a Line on Java using Sublime Text 3 "Command + B".

// Max Points on a Line: for each anchor point i, groups every later point j
// by the line through (i, j) and reports the size of the largest group.
// NOTE(review): HashMap and Pair are unresolved in this file — it needs
// `import java.util.HashMap;` and a Pair class on the classpath
// (javafx.util.Pair is not available to a plain javac build) — confirm.
class Solution{
    int[][] points;   // input point set, cached by maxPoints()
    int n;            // number of points
    // slope bucket -> number of points on that line for the current anchor
    HashMap<Double, Integer> lines = new HashMap<Double, Integer>();
    int horizontalLines;  // separate counter for y1 == y2 (ratio below would divide by zero)

    // Classifies the pair (i, j) as duplicate point, horizontal line, or a
    // sloped line keyed by the ratio (x1-x2)/(y1-y2) — the reciprocal of the
    // usual slope, which is consistent per line and avoids the vertical-line
    // division by zero. Returns the updated (count, duplicates) pair.
    // NOTE(review): double-keyed slope buckets can collide/round — TODO confirm
    // precision is adequate for the intended inputs.
    public Pair<Integer, Integer> addLine(int i, int j, int count, int duplicates) {
        int x1 = points[i][0];
        int y1 = points[i][1];
        int x2 = points[j][0];
        int y2 = points[j][1];

        if ((x1 == x2) && y1 == y2)
            duplicates++;
        else if (y1 == y2) {
            horizontalLines += 1;
            count = Math.max(horizontalLines, count);
        } else {
            // "+ 0.0" normalises -0.0 to +0.0 so both key the same bucket.
            double slope = 1.0 * (x1 - x2) / (y1 - y2) + 0.0;
            lines.put(slope, lines.getOrDefault(slope, 1) + 1);
            count = Math.max(lines.get(slope), count);
        }
        return new Pair(count, duplicates);
    }

    // Best line count using point i as the anchor; duplicates of the anchor
    // are added at the end since they lie on every line through it.
    public int maxPointsWithPointI(int i) {
        lines.clear();
        horizontalLines = 1;
        int count = 1;
        int duplicates = 0;

        for (int j = i + 1; j < n; j++) {
            Pair<Integer, Integer> p = addLine(i, j, count, duplicates);
            count = p.getKey();
            duplicates = p.getValue();
        }
        return count + duplicates;
    }

    // Entry point: returns the maximum number of collinear points.
    public int maxPoints(int[][] points) {
        this.points = points;
        n = points.length;
        if (n < 3) return n;  // 0, 1 or 2 points are trivially collinear

        int maxCount = 1;
        for (int i = 0; i < n - 1; i++)
            maxCount = Math.max(maxPointsWithPointI(i), maxCount);
        return maxCount;
    }
}

Errors

b.java:4: error: cannot find symbol
    HashMap<Double, Integer> lines = new HashMap<Double, Integer>();
    ^
  symbol:   class HashMap
  location: class Solution
b.java:7: error: cannot find symbol
    public Pair<Integer, Integer> addLine(int i, int j, int count, int duplicates) {
           ^
  symbol:   class Pair
  location: class Solution
b.java:4: error: cannot find symbol
    HashMap<Double, Integer> lines = new HashMap<Double, Integer>();
                                         ^
  symbol:   class HashMap
  location: class Solution
b.java:23: error: cannot find symbol
        return new Pair(count, duplicates);
                   ^
  symbol:   class Pair
  location: class Solution
b.java:33: error: cannot find symbol
            Pair<Integer, Integer> p = addLine(i, j, count, duplicates);
            ^
  symbol:   class Pair
  location: class Solution
5 errors

I think maybe I should import some packages on the top of the file? What should I do?

Also, I'd like to test the algorithm using class b:

// Driver class for Solution.maxPoints.
class b{
    public static void main(String[] args){
        Solution myTest = new Solution();
        // BUG FIX: maxPoints(int[][]) requires the point set as an argument;
        // calling it with no arguments does not compile. Pass the input as a
        // 2-D array literal.
        int[][] points = {{1, 1}, {2, 2}, {3, 3}};
        System.out.println(myTest.maxPoints(points)); // expected output: 3
    }
}

How do I pass variables to maxPoints()?

Inputs and Outputs:

Input: [[1,1],[2,2],[3,3]]
Output: 3

Input: [[1,1],[3,2],[5,3],[4,1],[2,3],[1,4]]
Output: 4

$ which java:

/usr/bin/java

$ java -version

java version "1.8.0_60"
Java(TM) SE Runtime Environment (build 1.8.0_60-b27)
Java HotSpot(TM) 64-Bit Server VM (build 25.60-b23, mixed mode)

Sublime JavaC Build System

{
  "cmd": ["javac \"$file_name\" && java \"$file_base_name\""],
  "file_regex": "^(...*?):([0-9]*):?([0-9]*)",
  "selector": "source.java",
  "shell": true,
  "quiet":true
}

Thanks! I'm new to Java.

I am passing SQL queries as an object in my web app. I wish to do a test bed for testing those objects. Any logical idea or method for it?

I am passing queries in the form of object in my web app. Those objects are created with my own class definition. It will be converted to DB executable query strings. I wish to do a test bed which tests those query object. My present idea is to get the query objects from my old build and my new build and compare. I'm getting those objects by serializing them while application is running. But it makes the application very slow. Is there any method to achieve this?

Restsharp addFile() can't read file path correctly

I am trying to test response after uploading 3 .csv files, as part of integration tests for API on project I am currently on.

// Uploads the CUSTOMERS.csv fixture as multipart form data with a bearer token.
public void UploadFile(string token)
    {
        request = new RestRequest(Method.POST);
        endpoint.Timeout = -1;
        request.AlwaysMultipartFormData = true;
        request.AddHeader("Authorization", $"Bearer {token}");
        request.AddHeader("Content-Type", "multipart/form-data");
        // BUG FIX: AddFile resolves a relative path against the test runner's
        // working directory (...\bin\Debug\netcoreapp3.1\), which produced the
        // FileNotFoundException. Build an absolute path from the user profile
        // folder instead of hard-coding "Users/Dell/...".
        string customersPath = System.IO.Path.Combine(
            System.Environment.GetFolderPath(System.Environment.SpecialFolder.UserProfile),
            "Downloads", "ProRecoFiles", "CUSTOMERS.csv");
        request.AddFile("Customers", customersPath);
        //request.AddFile(Constants.customers, Constants.customersPath);
        //request.AddFile(Constants.orders, Constants.ordersPath);
        //request.AddFile(Constants.products, Constants.productsPath);
        endpoint.Execute(request);
    }

Although I'm relatively new to Restsharp and C#, I would say this is pretty straight forward request. But apparently something is wrong as I get:

Message: 
System.IO.FileNotFoundException : Could not find file 'C:\Users\Dell\Source\Repos\proreco-client-api\ProReco.Client.API\Tests\bin\Debug\netcoreapp3.1\Users\Dell\Downloads\ProRecoFiles\CUSTOMERS.csv'.

In the request.AddFile() I am passing name of file and path to the file. Somehow it concatenate value of file path to location of my project. Any idea how to fix that? Any suggestion would be great.

What is the advantage of using structs for UOMs like Temperature?

I ran the code below to test speed of some common uses in my program for UOM's and was quite surprised. With the results: All tests were run 1 million times in debug mode (run mode gave similar differences). It seems there is no real advantage in using structs for UOMs from a speed point of view so can anyone tell what any advantages would be? (I have a big number crunching program and this is of huge interest to me). If the example code is wrong in any way below, please let me know also. What I really want is the most convenient way to handle UOMs without making the code verbose (UOM * UOM rather than UOM.Value * UOM.Value would be best but its apparently not the most speed efficient). All times in ms.

Multply doubles 7

Multply struct fields 8

Multply struct property 232

Multply temperature struct using overloaded * operator 141

Multply class fields 7

Multiply & Load doubles into object array 692

Multiply struct fields & Load into object array 719

Multiply struct fields & Load new struct into object array 926

Multiply structs with overloaded operator a load struct into object array 906

Multiply class fields & load into object array 697

Multiply class fields & load new class into object array 964

Multiply class using overloaded * operator & load class into object array 948

public class TestSpeed
{
    // Reference-type UOM used to time class field access and operator overloads.
    public class TempClass
    {
        // BUG FIX: the constructor stored its argument in an unused private
        // field `v`, leaving `value` stuck at its initializer (100). Every
        // TempClass produced by the * operator therefore carried the wrong
        // temperature. Store the argument in `value` itself.
        public double value;

        public TempClass(double v)
        {
            this.value = v;
        }

        public static TempClass operator *(TempClass t1, TempClass t2)
        {
            return new TempClass(t1.value * t2.value);
        }
    }

    // Value-type UOM used to time struct field/property access and operator overloads.
    public struct TempStruct
    {
        public double value;

        public TempStruct(double initial)
        {
            this.value = initial;
        }

        // Property wrapper around the public field; the setter parameter
        // (`value`) shadows the field, hence the `this.` qualification.
        public double GetTemp
        {
            get { return this.value; }
            set { this.value = value; }
        }

        public static TempStruct operator *(TempStruct left, TempStruct right)
        {
            return new TempStruct(left.value * right.value);
        }
    }


    // Micro-benchmark: times 10M multiplications for plain doubles, struct
    // fields, a struct property, an overloaded struct operator, and class
    // fields, writing each elapsed time to the debug output for manual comparison.
    // NOTE(review): run in a release build outside the debugger; debug builds
    // likely prevent inlining of the property/operator calls, which would
    // explain the large reported gaps — confirm before drawing conclusions.
    [TestMethod]
    public void TestDouble()
    {
        double doubleValue = 100;
        TempStruct t = new TempStruct();
        TempStruct Tres= new TempStruct(100);
        TempClass tclass = new TempClass(100);
        double res;

        var watch = Stopwatch.StartNew();

        // Baseline: multiply two local doubles. `res` is never read, so the
        // optimizer may eliminate this loop entirely in release builds.
        for (int i = 0; i < 10000000; i++)
        {
            res = doubleValue*doubleValue;
        }

        watch.Stop();

        var elapsedMs = watch.ElapsedMilliseconds;
        Debug.WriteLine("Multply doubles "+ elapsedMs.ToString());

        watch = Stopwatch.StartNew();

        // Direct struct field reads and write.
        for (int i = 0; i < 10000000; i++)
        {
            Tres.value = t.value * t.value;
        }

        watch.Stop();


        elapsedMs = watch.ElapsedMilliseconds;
        Debug.WriteLine("Multply struct fields " + elapsedMs.ToString());

        watch = Stopwatch.StartNew();

        // Same computation through the GetTemp property accessors.
        for (int i = 0; i < 10000000; i++)
        {
            Tres.GetTemp = t.GetTemp * t.GetTemp;
        }

        watch.Stop();


        elapsedMs = watch.ElapsedMilliseconds;
        Debug.WriteLine("Multply struct property " + elapsedMs.ToString());


        watch = Stopwatch.StartNew();

        // Overloaded struct * operator (constructs a new TempStruct per iteration).
        for (int i = 0; i < 10000000; i++)
        {
            Tres = t * t;
        }

        watch.Stop();

        elapsedMs = watch.ElapsedMilliseconds;
        Debug.WriteLine("Multply temperature struct using overloaded * operator " + elapsedMs.ToString());

        watch = Stopwatch.StartNew();

        // Direct class field reads.
        for (int i = 0; i < 10000000; i++)
        {
            res = tclass.value * tclass.value;
        }

        watch.Stop();
        elapsedMs = watch.ElapsedMilliseconds;
        Debug.WriteLine("Multply class fields " + elapsedMs.ToString());
    }


    // Micro-benchmark variant: same multiplications as TestDouble, but each
    // result is stored into an object[] slot, so every iteration also pays for
    // boxing (doubles/structs) or a heap allocation (new TempStruct/TempClass).
    // NOTE(review): the timings presumably measure boxing/allocation plus GC
    // pressure more than the multiplication itself — confirm in release mode.
    [TestMethod]
    public void TestDoubleArray()
    {
        double doublevalue = 100;
        TempStruct t = new TempStruct();
        TempStruct Tres = new TempStruct(100);
        TempClass tclass = new TempClass(100);
        object[] res = new object[10000000];

        var watch = Stopwatch.StartNew();

        // Double multiply; storing into object[] boxes the double.
        for (int i = 0; i < 10000000; i++)
        {
            res[i] = doublevalue * doublevalue;
        }

        watch.Stop();

        var elapsedMs = watch.ElapsedMilliseconds;
        Debug.WriteLine("Multiply & Load doubles into object array " + elapsedMs.ToString());

        watch = Stopwatch.StartNew();

        // Struct field multiply; result is a double, boxed on store.
        for (int i = 0; i < 10000000; i++)
        {
            res[i] = t.value * t.value;
        }

        watch.Stop();


        elapsedMs = watch.ElapsedMilliseconds;
        Debug.WriteLine("Multiply struct fields & Load into object array " + elapsedMs.ToString());


        watch = Stopwatch.StartNew();

        // Explicit new struct per iteration, boxed on store.
        for (int i = 0; i < 10000000; i++)
        {
            res[i] = new TempStruct(t.value * t.value);
        }

        watch.Stop();


        elapsedMs = watch.ElapsedMilliseconds;
        Debug.WriteLine("Multiply struct fields & Load new struct into object array " + elapsedMs.ToString());



        watch = Stopwatch.StartNew();

        // Overloaded struct operator (new struct inside the operator), boxed on store.
        for (int i = 0; i < 10000000; i++)
        {
            res[i] = t * t;
        }

        watch.Stop();

        elapsedMs = watch.ElapsedMilliseconds;
        Debug.WriteLine("Multiply structs with overloaded operator a load struct into object array " + elapsedMs.ToString());

        watch = Stopwatch.StartNew();

        // Class field multiply; result is a double, boxed on store.
        for (int i = 0; i < 10000000; i++)
        {
            res[i] = tclass.value * tclass.value;
        }

        watch.Stop();
        elapsedMs = watch.ElapsedMilliseconds;
        Debug.WriteLine("Multiply class fields & load into object array " + elapsedMs.ToString());

        watch = Stopwatch.StartNew();

        // New class instance per iteration (heap allocation, no boxing needed).
        for (int i = 0; i < 10000000; i++)
        {
            res[i] = new TempClass(tclass.value * tclass.value);
        }

        watch.Stop();
        elapsedMs = watch.ElapsedMilliseconds;
        Debug.WriteLine("Multiply class fields & load new class into object array " + elapsedMs.ToString());

        watch = Stopwatch.StartNew();

        // Overloaded class operator (new instance inside the operator).
        for (int i = 0; i < 10000000; i++)
        {
            res[i] = tclass * tclass;
        }

        watch.Stop();
        elapsedMs = watch.ElapsedMilliseconds;
        Debug.WriteLine("Multiply class using overloaded * operator & load class into object array " + elapsedMs.ToString());
    }
}    

How to fix NetworkError: Failed to execute 'send' on 'XMLHttpRequest': Failed to load

  • Include details about your goal

I want to visit website and click an element this is the 1st test then the 2nd test will visit the same website again and click an element. the two tests in one describe

  • Describe expected and actual results open website and click element then in the 2nd test open same website and click element

  • Include any error messages

NetworkError: Failed to execute 'send' on 'XMLHttpRequest': Failed to load 'https://gcpsmapi.example.com/sec/submit-events': Synchronous XHR in page dismissal.

I tried searching Google with no luck or solution. I used beforeEach, and I tried each test alone and it worked, but together they are not working.

describe('Checkout', () => {
  // Both specs walk exactly the same path; the two test bodies were
  // byte-for-byte duplicates, so the steps live in one helper and the
  // selectors only need to be maintained in one place.
  const verifyEmptyCart = () => {
    cy.visit('https://www.example.com');
    cy.get('.lgi_btn_3').click();
    cy.get('.xo-header__cart > .xo-header__navigation-button > .xo-core-header-icon').click();
    cy.url().should('include', 'https://www.example.com/shop/winkelwagen/index.shtml');
    cy.contains('empty').should('be.visible');
  };

  it('Checkout with empty cart', () => {
    verifyEmptyCart();
  });

  it('2nd', () => {
    verifyEmptyCart();
  });
});

useEffect() unmount process

I've been wondering how the function returned from useEffect is able to run before the effect's function body; sample below

enter image description here

Laravel testing migrating pollutes assertions

While writing tests for my Laravel package I came across something strange. My empty tests were passing instead of marked as "Risky".

Further investigation led me to the PendingCommand class that has a run() method which makes an assertion on the exit code of the command. This PendingCommand was instantiated by calling $this->artisan('migrate:fresh')->run(). I was able to skip this assertion by calling assertExitCode(null) before running the command. It worked but there is still an assertion happening.

Anybody had this problem before and/or was able to prevent assertions from happening before the actual test is executed?

It would be nice to see which assertions are being made, but I was unable to find this. The only thing I could find was that the Assert class keeps a $count of all the assertions done, not which one.

I will continue my search for a solution and post my findings to this question.

Robolectric.setupActivity() is deprecated in Android unit test

I'm trying to use Robolectric framework to run a simple Unit test, although I got that Robolectric.setupActivity() is deprecated.

@RunWith(RobolectricTestRunner.class)
public class MainActivityFragmentTest {

    MainActivity mainActivity;

    @Before
    public void setUp() {
        mainActivity = Robolectric.setupActivity(MainActivity.class);
    }

    // Rest of Test

How can I solve that? Thanks for help in advance..

Vue-apollo-graphql. Web testing with different users to test reactivity

I am developing a web service. Debugging the reactivity is proving a pain in the neck.

Is there a library to test a web to see that reactivity works among different users?

Thanks.

Can I run test by condition .net

I want to run a test only if a certain condition is matched. This condition is evaluated only at runtime.

I can't use args or preprocessor condition.

I'm open to any c# friendly tool.

What is the best way to achieve that ?

Is Cypress (really) an End to End (E2E) testing framework?

Given the fact that Cypress tests run on the development environment, how can it be considered an E2E tool?

It seems to me that the essence of E2E testing is to do so for pre/production environment, whereas Cypress does it only on our local environment. Puppeteer and recently developed by Microsoft tool called Playwright seems to me the real E2E testing tools.

Would be great to hear from someone with experience across E2E tools.

Tools for testing microservices

I would like to test my microservices and have found https://github.com/SpectoLabs/hoverfly and https://docs.pact.io/. The former is component tests and the later is contract tests.

What is the difference between those two? What is more suitable for testing microservices? What about, when my microservice use GRPC for communication.

vendredi 28 février 2020

How could we store urls and timestamps with a singleton pattern?

I am doing the following programming exercise: URL Timestamps. The statement is:

For my web app, I need a class that lets me store timestamps for URLs. For URLs that were never accessed, it should return -1. The class should be able to handle about a million calls in a few seconds.

I have tried:

import java.net.URL;
import java.util.*;

public class UrlMap {

  private static final Map<URL,Long> map = new HashMap<URL,Long>();

  public void setTimestamp(URL url, long timestamp) {
    map.put(url,timestamp);
  }

  public long getTimestamp(URL url) {
    System.out.println("map: "+map);
    return map.getOrDefault(url,-1L);
  }

}

I have a lot of curiosity because it does not pass the execution tests but it does pass the example tests. The example tests are:

import static org.junit.Assert.*;
import org.junit.Test;
import java.net.URL;

public class UrlMapTest {
  // Stores two timestamps and reads them straight back.
  @Test
  public void testCodewars() throws Exception {
    UrlMap timestamps = new UrlMap();
    URL codewarsHome = new URL("http://www.codewars.com/");
    URL kataPage = new URL("http://www.codewars.com/kata/url-timestamps/");
    long homeTime = 12345L;
    long kataTime = 67890L;
    timestamps.setTimestamp(codewarsHome, homeTime);
    timestamps.setTimestamp(kataPage, kataTime);
    assertEquals(homeTime, timestamps.getTimestamp(codewarsHome));
    assertEquals(kataTime, timestamps.getTimestamp(kataPage));
  }

  // A fresh map must report -1 for URLs that were never stored.
  @Test
  public void testNew() throws Exception {
    UrlMap timestamps = new UrlMap();
    URL codewarsHome = new URL("http://www.codewars.com/");
    URL kataPage = new URL("http://www.codewars.com/kata/url-timestamps/");
    assertEquals(-1, timestamps.getTimestamp(codewarsHome));
    assertEquals(-1, timestamps.getTimestamp(kataPage));
  }
}

I will explain the difficulty as best as I can. The execution tests create a "url1" with timestamp 12345L. Then, in the next tests, they create a url1 without a timestamp. So they would expect to get a -1, because it should not have a timestamp stored; however, it does have the initial timestamp, as the map is static.

As an image is worth more than a thousand words: enter image description here

I think this code passes the example tests because in each of the test methods a new UrlMap is being created. However, in the execution tests I suppose the exact same UrlMap instance is being reused.

To try to fix this issue I have read: How can I initialise a static Map? How to update a value, given a key in a hashmap? What is an efficient way to implement a singleton pattern in Java?

How could we store urls and timestamps with a singleton pattern?

How to get Jest test name and result statistics

I have a number of Jest test files and in each of them, I have something like this:

const dName = 'A';

// BUG FIX: every beforeAll/afterAll/test/describe call below was missing its
// closing `)`, so the snippet did not parse. Balanced and terminated here.
describe(dName, () => {
  // Record the wall-clock duration of the whole describe block.
  beforeAll(() => {
    _.timings[dName] = Date.now();
  });
  afterAll(() => {
    _.timings[dName] = Date.now() - _.timings[dName];
  });

  const tName1 = 'B';
  test(tName1, () => {
    const startTime = Date.now();

    // do test here and set isPassed=true if the test passed

    _.timings[tName1] = Date.now() - startTime;
    _.result[tName1].passed = isPassed;
  });
});

// File-level afterAll: emit the collected timing/result report once all
// describes in this file have finished.
afterAll(() => {
  generateReport(_);
});

Each test file contains multiple describes and in each describe there are multiple tests.

Firstly, is there a better way to retrieve the describe (or test) name, rather than what I am doing currently which is defining a variable (dName, tName1) and passing that through? Ideally I could do something like:

test( 'B', () => {
  _.timings[ getTestName() ] = ... // where getTestName() would return B somehow.

I was hoping I could use Javascripts arguments variable to see the contents of the first argument (since that should be the name), however arguments doesn't seem to contain the information that I can extract ... or at least I don't know how to decipher the info stored in arguments.

Secondly, is there a better way to get the results of a test run? As it is now, I need to define a variable (isPassed) and populate it accordingly in each test so that at the end I can record this information in the _.results[testName] object. This object is used at the very end in generateReport to create a file containing the names of all the tests and the status of whether each test passed or failed.

What are the disadvantages of Screenplay Pattern

I've been looking at the screen play pattern and all the great things about it for UI testing. It sounds great and can't wait to use it but want to be aware of the pitfalls. Searching online I can't find any content that talks about the disadvantages or pitfalls of the screen play pattern. Does anyone know the downsides to this pattern?

I found this stack overflow post but there is no talk about disadvantages...

Java test Intelij IDEA vs Jenkins(selenium server) encoding

I have Selenium tests in Java and if I run it on my Intelij IDEA it works fine, but when it runs via Jenkins on Selenium server it has a problem with encoding.

There is code which loads text file on the disc:

// Loads the expected spell lines from the test fixture file.
// BUG FIX: FileReader uses the platform default charset, so the en-dash
// (U+2013) in the file is mangled when Jenkins runs with a different default
// encoding than the IDE — producing the "damaged dash" mismatch. Read the
// file with an explicit charset instead (assumes the fixture is saved as
// UTF-8 — confirm against the file's actual encoding).
public static List<String> getExpectedSpells() throws FileNotFoundException {
   java.io.InputStreamReader fileReader = new java.io.InputStreamReader(
       new java.io.FileInputStream(new File("src/test/resources/testData/spells.txt")),
       java.nio.charset.StandardCharsets.UTF_8);
   BufferedReader br = new BufferedReader(fileReader);
   return br.lines().collect(Collectors.toList());
}

It returns a list of strings to compare with text on the page. One line returns this demaged dash

  1. Actual: controls a person - unforgivable
  2. Expected: controls a person – unforgivable // comes from text file

EDIT: It looks like a Jenkins problem, because everything works fine if I run it directly via the Selenium server.

Can somebody tell me please what is wrong? I have no idea. Thanks.

What is the best way to collect thermal data on my Android device?

I am trying to collect thermal data on my Android tablet (running Android 9) while running various apps/benchmarks etc. Specifically, I want to be able to both measure and log core CPU temperatures and frequencies while other apps are running on my tablet. I want to collect CPU temps and freq because I want to know the extent of thermal throttling on my device. There are lots of great apps out there but I can't find one that will log information while running a different app (such as the Geekbench Benchmark). Here is what I've tried so far:

  1. I have already tried collecting information from the thermal zones in /sys/class/thermal but am unable to make sense of the names the vendor has given (the vendor in my case is MTK).

  2. I have tried using CpuFloat. This app provides a nice overlay on the device display and includes all CPU frequencies (updating in real time) as well as the core temp of the processor. The only problem is CpuFloat doesn't log data over time and I have resorted to taking screenshots periodically to track temperatures. This is really tedious though since I have to sit and record temperatures throughout benchmark testing.

I have considered developing an app that tracks this information, but would like to track it through adb or an existing app if possible. Any help/advice would be appreciated!

Bug in Jest js tests: react-modal: No elements were found for selector #root

I am trying to create SelectColumn.test.js similar to Summary.test.js

https://gitlab.com/sosy-lab/software/benchexec/-/blob/SelectColumn.test/benchexec/tablegenerator/react-table/src/tests/SelectColumn.test.js

But unfortunately not successful, I tried all solutions from code forums. But seems I am doing something wrong.

Result: failed

Expected result should be: passed

Error:

react-modal: No elements were found for selector #root.

      180 | 
      181 |   render() {
    > 182 |     ReactModal.setAppElement("#root");
          |                ^
      183 |     return (
      184 |       <ReactModal
      185 |         className="overlay"

@QuarkusTest - implementing an interface with annotation @RegisterRestClient, in test - fails the test

I'm using Quarkus 1.2.0.Final.

I have the following REST client:

@Produces(MediaType.APPLICATION_JSON)
@Consumes(MediaType.APPLICATION_JSON)
@RegisterRestClient(configKey = "<some key>")
public interface SomeClient {

    @POST
    @Path("<some path>")
    SomeResponse someMethod(SomeRequest request);

}

This bean is used in my other beans as a dependency.

And I have the following test case:

@QuarkusTest
class SomeTest {
    @Test
    void testGetTransactions() { }
}

class SomeClientImpl implements SomeClient {

    @Override
    public SomeResponse someMethod(SomeRequest request) {
        // <the implementation doesn't matter>
        return null;
    }
}

The test is failing with the following exception:

Suppressed: javax.enterprise.inject.UnsatisfiedResolutionException: Unsatisfied dependency for type com.example.client.SomeClient and qualifiers [@RestClient]
...

Why the test is failing? If I remove the class class SomeClientImpl implements SomeClient {...} the test is passing. So implementing an interface leads to test failing, which is weird.


Update 1:

I tried the next code and I'm getting the same exception:

@QuarkusTest
class TransactionServiceImplTest {

    @Test
    void testGetTransactions() {
        new SomeClient() {

            @Override
            public SomeResponse someMethod(SomeRequest request) {
                // <the implementation doesn't matter>
                return null;
            }
        };
    }
}

Override properties for specific test class

Is it possible to override properties just for one test class? I already have a specific application.properties for tests and I would like to override some of them just for this one test class. Something similar to @SpringBootTest(properties = {"myprop1=0, myprop2=hello"}) in Spring? Or is it possible to set a profile at class level? @Profile("myProfileJustForThisTestClass") MyTestClass {...}

Best Boban

How to select a framework when building a website?

I have got some questions about Dotnetnuke:

  • Can i create/run multiple websites having only VS 2010 installed or do i need IIS(Express) ?
  • Does a website create a physical map on your harddrive ?
  • Can DNN be seen as a SharePoint "alternative" ?
  • Once a site is in production can it handle a big workload ?

Thx for any answer.

How to connect to open chrome browser instance? selenium, AutoIT

I need to write with my selenium test in popup log Username and password, for this I use Auto it script:

Sleep(3000)
Send("DZC\dev_sp_install") // Username 
Sleep(1000)
Send("{Tab}")
Sleep(1000)
Send("BcX9bvfP") // Password
Sleep(1000)
Send("{Enter}")

It works if I open the browser by hand and then run the script, but it doesn't work if I run my Selenium test: the link is opened automatically, and after that I run my AutoIt script, but it doesn't work. I think browsers opened by hand and browsers opened automatically have differences. So, in my opinion, I need to add to my script a way to connect/navigate to the already opened browser, or something like that. Maybe someone can help me with that?

How can I test the performance of mqueue?

I want to know the performance of mqueue, but I don't have test cases. Is there any mqueue performance test cases to know the latency and so on?

Thanks in advance!

Test Methods With No Assert result

Can anyone help me in test class......................................

I have an error in test method assert ............................

Error message :

test method assert

test class :

@isTest
public class TwnxTest {
    //this method for testing with names as "Twilio"
    public static testmethod void testSmsFromTwnxWithNameAsTwilio(){        

        Contact con = new Contact(LastName='test',Phone='+19343213321');
        insert con;

        ApexPages.StandardController sc = new ApexPages.StandardController(con);

        Twnx twInstance = new Twnx(sc);

        configuration_setting__c configSettings= new configuration_setting__c();
        configSettings.Name='Twilio';
        configSettings.AccountSid__c = 'accountId';
        configSettings.Active__c = true;
        configSettings.AuthToken__c ='auth002';
        configSettings.Bulk_SMS__c='+18559331384';
        configSettings.Contact_Phone_Number__c='+18559172384';
        configSettings.Lead_Phone_Number__c='+14154633840';        
        insert configSettings;
        Test.setMock(HttpCalloutMock.class, new Twilio_MockClass());

        System.Test.startTest();

        Twilio.sendfromtwilio(configSettings.Contact_Phone_Number__c,'test',con.Phone);
        twInstance.sendfromtwnx();
        twInstance.ok();

        System.Test.stopTest();
    }

    //this method for testing with names as "Nexmo"
    public static testmethod void testSmsFromTwnxWithNameAsNexmo(){        
        Contact con = new Contact(LastName='test',Phone='+19343213321');
        insert con;
        ApexPages.StandardController sc = new ApexPages.StandardController(con);
        Twnx twInstance = new Twnx(sc);
        configuration_setting__c configSettings= new configuration_setting__c();
        configSettings.Name='Nexmo';
        configSettings.AccountSid__c = 'accountId';
        configSettings.Active__c = true;
        configSettings.AuthToken__c ='auth002';
        configSettings.Bulk_SMS__c='+18559331384';
        configSettings.Contact_Phone_Number__c='+18559172384';
        configSettings.Lead_Phone_Number__c='+14154633840';      
        insert configSettings; 

        Test.setMock(HttpCalloutMock.class, new Nexmo_MockClass());

        System.Test.startTest();
        Nexmo.sendMessage( con.Phone,  configSettings.Contact_Phone_Number__c,  'text',  'sms');
        twInstance.sendfromtwnx();
        twInstance.ok();
        System.Test.stopTest();
    } }

Thanks inadvance.......................................

Class not registered error while Executing ALM Scripts using Jenkins

Am trying to execute some ALM Test scripts using Jenkins, but am getting below error,

Started by user admin Running as SYSTEM Building in workspace C:\Program Files (x86)\Jenkins210\workspace\ALMConnect [ALMConnect] $ "C:\Program Files (x86)\Jenkins210\workspace\ALMConnect\HpToolsLauncher.exe" -paramfile props27022020181747552.txt "Started..." Timeout is set to: 300 Run mode is set to: RUN_REMOTE

Unable to retrieve test set folder: Node not found.

Starting test set execution Test set name: Appiantesting TestSet, Test set id: 402 Class not registered

Class not registered Could not create scheduler, please verify ALM client installation on run machine by downloading and in installing the add-in form: https://.saas.microfocus.com/qcbin/TDConnectivity_index.html Build step 'Execute Micro Focus functional tests from Micro Focus ALM' changed build result to FAILURE Recording test results RunResultRecorder: didn't find any test results to record Finished: FAILURE

jeudi 27 février 2020

Exporting Scripts

I'm a freelancer and if I create the scripts in Test Cafe can they be ported to another application or repository?

I ask because if my client decides for me not to continue running their scripts they will want them back.

Thanks Bret

Webdriver.io - can not get firefox to run

I am trying to get firefox to run in webdriver.io 5. This is on a Linux cloud.

I get:

2020-02-28T02:32:33.472Z INFO @wdio/cli:launcher: Run onPrepare hook
2020-02-28T02:32:33.475Z INFO @wdio/local-runner: Start worker 0-0 with arg: wdio.conf.js
[0-0] 2020-02-28T02:32:33.818Z INFO @wdio/local-runner: Run worker command: run
[0-0] RUNNING in firefox - /build/main.js
[0-0] 2020-02-28T02:32:33.881Z INFO webdriverio: Initiate new session using the webdriver protocol
[0-0] 2020-02-28T02:32:33.883Z INFO webdriver: [POST] http://127.0.0.1:4444/session
[0-0] 2020-02-28T02:32:33.883Z INFO webdriver: DATA {
  capabilities: {
    alwaysMatch: { browserName: 'firefox', 'moz:firefoxOptions': [Object] },
    firstMatch: [ {} ]
  },
  desiredCapabilities: { browserName: 'firefox', 'moz:firefoxOptions': { args: [Array] } }
}
[0-0] 2020-02-28T02:32:33.899Z WARN webdriver: Request failed due to connect ECONNREFUSED 127.0.0.1:4444
[0-0] 2020-02-28T02:32:33.899Z INFO webdriver: Retrying 1/3

Here is my config:

exports.config = {
    runner: 'local',
    path: '/',
    specs: [
        './build/**/*.js'
    ],
    maxInstances: 10,
    capabilities: [
      {   
          "browserName": "firefox",
          "moz:firefoxOptions": {
            "args": ["-headless"],
          }
      }
    ],
    logLevel: 'info',
    bail: 0,
    baseUrl: 'http://localhost',
    waitforTimeout: 10000,
    connectionRetryTimeout: 90000,
    connectionRetryCount: 3,
    reporters: ['spec'],
    framework: 'mocha',
    mochaOpts: {
        ui: 'bdd',
        timeout: 60000
    },
    services: [
    ],
}```

Any ideas what I am doing wrong? 

Is there a way to run all BeforeMethods(even when it fails) only if BeforeSuite succeeds in TestNG?

Currently, if an exception occurs in BeforeSuite, I don't want tests in my testlist to run. So, it will stop execution and BeforeMethods aren't run.

If the BeforeSuite succeeds and then BeforeMethod throw an exception, I want it to keep executing BeforeMethod for all tests in my test list.

Is this possible?

Currently, I have configfailurepolicy=continue. It'll successfully stop after BeforeSuite exception, but if BeforeMethod exception occurs it will only run once.

Thanks

Is there a way to get the html report from Istanbul while it is running?

I have a test suite that takes some time to run (i.e., half an hour). I would like to check progress in the html report while the tests are running (i.e., by refreshing the page in the browser).

How to find code which was never executed in coverage.py despite a 100% coverage report

Consider the following code:

import math

def dumb_sqrt(x):
    result = math.sqrt(x) if x >= 0 else math.sqrt(-x)*j
    return result


def test_dumb_sqrt():
    assert dumb_sqrt(9.) == 3.

The test can be executed like this:

$ pip install pytest pytest-cov
$ pytest test_thing.py --cov=test_thing --cov-report=html --cov-branch

The coverage report will consider all lines 100% covered, even with branch coverage enabled:

inline

However, this code has a bug, and those of you with a keen eye may have seen it already. Should you ever go into the "else" branch, there will be an exception:

NameError: global name 'j' is not defined

It's easy to fix the bug: change the undefined j name into a literal 1j. It's also easy to add another test which will reveal the bug: assert dumb_sqrt(-9.) == 3j. Neither is what this question is asking about. I want to know how to find sections of code which were never actually executed despite a 100% code coverage report.

Using conditional expressions is one such culprit, but there are similar cases anywhere that Python can short-circuit an evaluation (x or y, x and y are other examples).

Preferably, the line 4 above could be colored as yellow in the coverage report, similar to how the "if" line would have been rendered if you did not use an inline conditional expression in the first place:

long

Does coverage.py support such a feature? If so, how can you enable "inline branch coverage" in your cov reporting? If not, are there any other approaches to identify "hidden" code that was never actually executed by your test suite?

Jest + puppeteer best architecture practices

I just entered the world of testing with puppeteer and jest, and I was wondering what the best practice was in terms of folder architecture and logic.

I've never done testing before and I think I'm getting a little lost in the different principles and concepts and how it all fits together.

I learned to do my tests based on the page-object model, so I have classes for each of my pages, but also for each of my modules ( or component ). For example, in my application, the header or the login modal are components.

Then I have a test file per page or per component. (for example the landingPage.tests.js file, which uses the model of the LandingPage class in the LandingPage.js file)

Here is a concrete example: I have different login cases and I'd like to test them all. For example I want to test to connect with a "normal" user, for which the process is simply login then password. Then I need to test with a user who has activated 2FA, or with a user from a company that uses SSO.

I first thought about putting my different tests in authentication.tests.js, in different "describe" blocks, thinking it would open a new tab each time, but it doesn't... I use puppeteer in incognito mode to make sure each tab is an isolated session.


So my questions are:

  • Where is the best place to do these test suites?

  • Am I supposed to have test files that "describe" the pages ( for example, the button must be present, such text must be here etc) and also have "scenario type" test file ( a set of contextual actions to a user, like for my different login cases) ?

Here is authentication.tests.js, in which I would like to tests all my different ways of logging in :

import HeaderComponent from "../../../pages/components/HeaderComponent";
import AuthenticationComponent from "../../../pages/components/AuthenticationComponent";
import LandingPage from "../../../pages/landing/LandingPage";

import {
    JEST_TIMEOUT,
    CREDENTIALS
} from "../../../config";


describe('Component:Authentication', () => {
    let headerComponent;
    let authenticationComponent;
    let landingPage;

    beforeAll(async () => {
        jest.setTimeout(JEST_TIMEOUT);
        headerComponent = new HeaderComponent;
        authenticationComponent = new AuthenticationComponent;
        landingPage = new LandingPage;
    });


    describe('Normal login ', () => {

        it('should click on login and open modal', async () => {
            await landingPage.visit();
            await headerComponent.isVisible();
            await headerComponent.clickOnLogin();
            await authenticationComponent.isVisible();
        });

        it('should type a normal user email adress and validate', async () => {
            await authenticationComponent.typeUsername(CREDENTIALS.normal.username);
            await authenticationComponent.clickNext();
        });

        it('should type the correct password and validate', async () => {
            await authenticationComponent.typePassword(CREDENTIALS.normal.password);
            await authenticationComponent.clickNext();
        });

        it('should be logged in', async () => {
            await waitForText(page, 'body', 'Success !');
        });

    });

    describe('SSO login ', () => {
        // todo ...
    });


});

Thank you and sorry if it sounds confusing, like I said I'm trying to figure out how it all fits together.

How to evaluate efficiency of performance optimisations?

I've made some optimizations to an API, and I want to get the average response time from calling some endpoint (actually it is a large graphql query). In the end, I want to know whether the optimizations to the API improved the response time, and by how much.

Locally, I've written some function that calls graphql-request in loop serially (one request per iteration) for specified number of times and sums up response timing from each request made (and calculates other stats).

I'd like to know:

  1. If there already a library, that does the same thing, that I could use (that makes serial requests and gathers total request time)?
  2. Is it a good practice to gather request time, making serial requests (maybe parallel queries will provide the same info, but in shorter time)?
  3. What is this kind of testing called (it is definitely not load testing)?
  4. If it is a valid way to test API, then how many requests should I make to an API (right now I am making 10)
  5. Are there other ways to evaluate efficiency of performance optimizations?

AppIntegrationTest is not using src/test/resources but src/main/resources for the gretty server test instance

So I have a properties file in my project in src/main/resources which has some configuration options for my web application. For testing I made some changes in the properties file and put it in src/test/resources. But the problem is that the server instance from the AppBeforeIntegrationTestTask is using the resources of the main folder.

Since this is obviously not working: How is the workflow to start the server instance for integration testing with a modified properties file?

Gretty 3.0.1

Autocomplete Mui Testing, simulate change doesn't work

I need to simulate an onChange event with enzyme to update a state's component that is not working, I share the component's code in order to be helped.

Component:

import React, { useState } from 'react';
import TextField from '@material-ui/core/TextField';
import Autocomplete from '@material-ui/lab/Autocomplete';

const top100Films = [
  { title: 'The Shawshank Redemption', year: 1994 },
  { title: 'The Godfather', year: 1972 },
  { title: 'The Godfather: Part II', year: 1974 },
];

const Counter = () => {
  const [value, setValue] = useState({ title: 'The Godfather', year: 1972 });

  const handleAutocomplete = (e, item) => {
    setValue(item);
  }

  return (
    <>
      {value && (
        <p id="option">{value.title}</p>
      )}
      <Autocomplete
        id="combo-box-demo"
        name="tags"
        debug
        options={top100Films}
        getOptionLabel={option => option.title}
        onChange={handleAutocomplete}
        style=
        renderInput={params => <TextField {...params} label="Combo box" variant="outlined" />}
      />
    </>
  )
}

test component.

import React from 'react';
import { mount } from 'enzyme';
import Counter from '../components/Counter';

it("shoult update component", () => {
  const wrapper = mount(<Counter />);
  const autocomplete = wrapper.find("input");

  console.log(autocomplete.debug());

Normality test for split data

Maybe it's a trivial question, but I couldn't find anything so far. I have a dataset with about 23,200 returns split into groups of 50 returns each via: split(LogRenditen, sample(rep(1:462, 50)))

Is it possible to perform a normality test (Jarque-Bera) for every subperiod?

thanks in advance

How to test Laravel package routes without the Laravel framework

I’m developing an admin package for Laravel. I installed the orchestra/testbench package. How do I test routes with its help?

How to initialize manually next.js app (for testing purpose)?

I try to test my web services, hosted in my Next.js app and I have an error with not found Next.js configuration.

My web service are regular one, stored in the pages/api directory. My API test fetches a constant ATTACKS_ENDPOINT thanks to this file:

/pages/api/tests/api.spec.js

import { ATTACKS_ENDPOINT } from "../config"

...

describe("endpoints", () => {
   beforeAll(buildOptionsFetch)

   it("should return all attacks for attacks endpoint", async () => {
      const response = await fetch(API_URL + ATTACKS_ENDPOINT, headers)

config.js

import getConfig from "next/config"

const { publicRuntimeConfig } = getConfig()

export const API_URL = publicRuntimeConfig.API_URL

My next.config.js is present and is used properly by the app when started.

When the test is run, this error is thrown

    TypeError: Cannot destructure property `publicRuntimeConfig` of 'undefined' or 'null'.

  1 | import getConfig from "next/config"
  2 | 
> 3 | const { publicRuntimeConfig } = getConfig()

I looked for solutions and I found this issue, which talks about manually initialising the Next.js app.

How to do that, given that I am not testing React components but API web services?

Understanding order of execution in testcafe fixture

I am looking to better understand the order of execution within TestCafe fixture test when using await.

In the example below will action 1 always precede action2 and likewise will action2 precede action3 - i.e., is it guaranteed that both typeTexts will precede the click action?

class Page {
    ....
}

const page = new Page()

await t
  .typeText(page.login.email, 'emailaddress') //action 1
  .typeText(page.login.password, "password")  //action 2
  .click(page.login.submit)                   //action 3

In snippet below, am I correct that section 1 will be executed before section 2

await t //section 1
  .typeText(page.login.email, 'emailaddress') 
  .typeText(page.login.password, "password")
  .click(page.login.submit)

await t //section 2
  .typeText(page.login.something, 'bblah')
  .click(page.dosomething.submit)

Change Rails session for tests with Capybara

I need to test a page in my Rails 5.2 app that displays the current cart if the user is not logged with Capybara system test. The app retrieves the cart by the card_id saved in the rails session (session[:cart_id] = @cart.id)

My test looks like this:

require 'application_system_test_case'

class CartUiTest < ApplicationSystemTestCase
  def setup
    @cart = create(:cart)
  end

  test 'show cart also if user is not logged' do
    visit edit_cart_path

    assert_text "Some text"
  end
end

How can I set the session in this test? I've tried the gem rack_session_access but I'm looking for another solution that not use this gem because it conflicts with other tests.

Test if methods from a ResultSet is called when doing a request

I want to test whether these 2 methods are called when I make a request. How can I do this?

sub ordered_by {
  my ($self, $column, $type) = @_;
  my $order = $type eq 'asc' ? '-asc' : '-desc';
  return $self->search(undef, { order_by => [{ $order => $column },
    { -asc => 'username' }] });
}
sub with_num_comments {
  my $self = shift;
  return $self->search(undef, {
    '+select' => [
        {
          coalesce => $self->correlate('comments')->count_rs->as_query,
              -as => 'num_comments'
        }
   ],
   '+as' => [ 'num_comments' ],
 });
}

Visual Studio Multiuser load testing reports at API call level

I am currently writing and running load test cases in Visual Studio. These test cases are making api calls and recording the time it takes to get the response. When i run the test cases individually i get what i am looking for. These are things like (for every Url Request)

Request Url, Status, TotalTime, Request Bytes, Response.

However if i run the same test cases on multiuser scenario via load test i only get summary reports like Top Five Slowest Pages, Top Five Slowest tests and average time taken to complete the requests. Is there anyway of getting exactly the same report (mentioned on top) on multiuser scenarios at api call level?

How to mock a database connection with JUnit?

I simply can't find a concrete example where a DB connection is mocked. Preferably with Mockito.

public Connection getCon() throws SQLException, Exception {



        Properties login = new Properties();

        TimeZone timeZone = TimeZone.getTimeZone("GMT+2:00");
        TimeZone.setDefault(timeZone);

        Connection conn = null;
        conn = DriverManager.getConnection(url, username, password);

        return conn;
    }

Want to test this method...

Access object property that is outside of array

My page object is structured so that I have all of the elements in an object and then an array of objects containing data about the fields that can be looped over to test max char length and error texts.

I would like the locator to reference a property that is outside the array so that the value does not need to be updated twice if the element changed.

Snippet from page object as an example...

module.exports = {
    siteName: element(by.id('P662_NAME')),
    fields: [
        {
            name: 'site name',
            max: 45,
            locator: element(by.id('P662_NAME'))
        }
    ]
}

I have tried using the following with no luck...

this.siteName, this.siteName, module.exports.siteName

Is there a way to do this?

How to exclude a Test package from running in parallel JUNIT 5?

I am using the new parallel-execution feature of JUnit 5, and I want to exclude a Controller folder — where all the classes use MockMvc (not thread-safe) — so that those tests run sequentially. I tried to use Surefire, but it's not working. So is there any solution with JUnit 5 Jupiter to exclude some packages from running in parallel? I use junit-platform.properties:

junit.jupiter.execution.parallel.enabled=true
junit.jupiter.execution.parallel.config.strategy=dynamic
junit.jupiter.execution.parallel.config.fixed.parallelism=8

What is the best way to automate Tests for Apps

I'm working in software testing and we want to start automating our tests for our mobile Android and iOS Apps. The Apps are native and I have access to the source code.

What do you think of Appium? Firebase TestLab? for iOS: XCUITest or EarlGrey, for Android: Espresso / UIAutomator? Alternatives?

Maximize sum of array multiplied by array index number by dropping any unrequired number

A chef prepares food for his customers, and he gets the review ratings. So he multiplies the rating and order of dish to maximize the review-rating sum. So the rating comes in as {-1,3,4}.Therefore he calculates as below: 1(-1)+2(3)+3(4)=17. If he would have dropped the first order it would be 1(3)+2(4)=11. So he decides not to drop it. Likewise test case 2 be {4,-9,0,5,-7}: He drops dish with rating -9,-7 and calculates: 1(4)+2(0)+3(5)=19. So write program logic where he can maximize the sum.

Any testing methods to improve the system stability?

I am looking for some testing methods to improve my OS's stability, and to improve my own testing skills along the way. So far I have found some fuzzing methods, such as syzkaller, libFuzzer, and OSS-Fuzz. I would be grateful if anyone could give suggestions for improvement. Thanks!

mercredi 26 février 2020

Do Prometheus metrics for Robot Framework exist?

Is there any support for Prometheus metrics (RED) for Robot Framework or Robot-based test cases?

global:

scrape_interval: 15s # By default, scrape targets every 15 seconds.

# Attach these labels to any time series or alerts when communicating with # external systems (federation, remote storage, Alertmanager). external_labels: monitor: 'codelab-monitor'

A scrape configuration containing exactly one endpoint to scrape:

Here it's Prometheus itself.

scrape_configs: # The job name is added as a label job=<job_name> to any timeseries scraped from this config. - job_name: 'prometheus'

# Override the global default and scrape targets from this job every 5 seconds.
scrape_interval: 5s

static_configs:
  - targets: ['localhost:9090']

CodedUI test logs in .txt file

Currently I am working in CodedUI testing to record test cases for a calculator. I want logs for CodedUI testing so that I can get all the steps I have performed during the course of my testing, like "click button 2", "click button add". I get these logs in HTML format, but I want them in .txt format so that I can perform further processing. For that I have tried log4net, and it creates an external txt file and stores entries, but only those statements that we hard-code by writing log.debug("click 2 button");. I want it to write the log automatically each time I run a test case, without explicit statements, like it does in the HTML case. My code location: https://github.com/Afsah-k/CodedUI-testing. Please help if anyone can. I have added a picture of my HTML CodedUI test-case log.

Jest: Cannot spy the property because it is not a function; undefined given instead

I'm trying to write a Jest test for a simple React component to confirm that a function has been called when I simulate a click.

However, when I use spyOn method, I keep getting TypeError: Cannot read property 'validateOnSave' of undefined. My code looks like this:

OptionsView.js

class OptionsView extends React.Component {
  constructor(props) {
    super(props);
    this.state = {
      reasonCode: null,
      remarkCode: null,
      otherCode: null,
      codeSelectionIsInvalid: [false, false, false],
    };
    this.validateOnSave = this.validateOnSave.bind(this);
    this.saveOptions = this.saveOptions.bind(this);

validateOnSave() {
    const copy = this.state.codeSelectionIsInvalid;
    copy[0] = !this.state.reasonCode;
    copy[1] = !this.state.remarkCode;
    copy[2] = !this.state.otherCode;
    this.setState({ codeSelectionIsInvalid: copy });

   if (!copy[0] && !copy[1] && !copy[2]) {
      this.saveOptions();
    }
  }

  saveOptions() {
    const { saveCallback } = this.props;
    if (saveCallback !== undefined) {
      saveCallback({ reasonCode: this.state.reasonCode, remarkCode: this.state.remarkCode, otherCode: this.state.otherCode,
      });
    }
  }
render() {
const cx = classNames.bind(styles);
const reasonCodes = this.props.reasonCodeset.map(reasonCode => (
      <Select.Option
        value={reasonCode.objectIdentifier}
        key={reasonCode.objectIdentifier}
        display={`${reasonCode.name}`}
      />
    ));
const remarkCodes = this.props.remarkCodeset.map(remarkCode => (
      <Select.Option
        value={remarkCode.objectIdentifier}
        key={remarkCode.objectIdentifier}
        display={`${remarkCode.name}`}
      />
    ));
const otherCodes = this.props.otherCodeset.map(otherCode => (
      <Select.Option
        value={otherCode.objectIdentifier}
        key={otherCode.objectIdentifier}
        display={`${otherCode.name}`}
      />
    ));
return (
      <ContentContainer fill>
        <Spacer marginTop="none" marginBottom="large+1" marginLeft="none" marginRight="none" paddingTop="large+2" paddingBottom="none" paddingLeft="large+2" paddingRight="large+2">
          <Fieldset legend="Code sets">
            <Grid>
              <Grid.Row>
                <Grid.Column tiny={3}>
                  <SelectField selectId="reasons" required placeholder="Select" label="Reasons:" error="Required field is missing" value={this.state.reasonCode} onChange={this.updateReasonCode} isInvalid={this.state.codeSelectionIsInvalid[0]}>
                    {reasonCodes}
                  </SelectField>
                </Grid.Column>
              </Grid.Row>
              <Grid.Row>
                <Grid.Column tiny={3}>
                  <SelectField selectId="remarks" required placeholder="Select" label="Remarks:" error="Required field is missing" value={this.state.remarkCode} onChange={this.updateRemarkCode} isInvalid={this.state.codeSelectionIsInvalid[1]}>
                    {remarkCodes}
                  </SelectField>
                </Grid.Column>
              </Grid.Row>
              <Grid.Row>
                <Grid.Column tiny={3}>
                  <SelectField selectId="other-codes" required placeholder="Select" label="Other Codes:" error="Required field is missing" value={this.state.otherCode} onChange={this.updateOtherCode} isInvalid={this.state.codeSelectionIsInvalid[2]}>
                    {otherCodes}
                  </SelectField>
</Grid.Column>
              </Grid.Row>
</Grid>

</Fieldset>
        </Spacer>
        <ActionFooter
          className={cx(['action-header-footer-color'])}
          end={(
            <React.Fragment>
              <Spacer isInlineBlock marginRight="medium">
                <Button text="Save" onClick={this.validateOnSave} />
              </Spacer>
            </React.Fragment>
          )}
        />
      </ContentContainer>
    );
  }
}

OptionsView.propTypes = propTypes;

export default injectIntl(OptionsView);

OptionsView.test

describe('RemittanceOptions View', () => {
let defaultProps = {...defined...}
beforeAll(() => {  
    Object.defineProperty(window, "matchMedia", {
      value: jest.fn(() => { 
        return { 
          matches: true,
          addEventListener: jest.fn(),
          removeEventListener: jest.fn(),
          addEventListener: jest.fn(),
          removeEventListener: jest.fn(),
          dispatchEvent: jest.fn(),
        } 
      })
    });
  });

it('should validate remit codes on save', () => {
    const wrapper = mountWithIntl(<OptionsView
      {...defaultProps}
    />); 
    const instance = wrapper.instance();
    const spy = jest.spyOn(instance, "validateOnSave");
    wrapper.setState({
      reasonCode: 84,
      remarkCode: 10,
      otherCode: null
    });
    console.log(wrapper.find('Button[text="Save"]').debug()); 
    const button = wrapper.find('Button[text="Save"]').at(0);
    expect(button.length).toBe(1);
    button.simulate('click');
    expect(spy).toHaveBeenCalled();
    expect(wrapper.state('codeSelectionIsInvalid')).toEqual([false,false,true]);
  });
});

Ultimate goal is to test two cases when save is clicked:

  1. When state.codeSelectionIsInvalid: [false,false,true]

  2. When state.codeSelectionIsInvalid: [false,false,false]

Where am I going wrong here? Any help is appreciated!

Go's httptest.NewRecorder() isn't registering hits in coverage report

Our goal is to have good integration tests on our api, so we use gin's router capabilities along with httptest.NewRecorder() to set up a dummy in-process endpoint we can simulate http calls to. This ensures we test the actual path bindings, middleware, etc.

Our issue is with Go's built-in coverage reports from, say: go test -coverprofile=coverage.txt ./... It does not register hits against the apis. The full script we use to get coverage and upload to codecov.io is similar to this: https://github.com/codecov/example-go We loop per Go package and aggregate a coverage.txt.

Has anyone experienced this? How do we fix it? Is the indirect nature of invoking the code via simulated http requests expected to cause this gap?

How to reproduce:

http := gin.New()
api := http.Group("/api")
api.GET("/", apis.WelcomeHandler)

etc...

func PerformRequest(r http.Handler, method, path string, headers map[string]string, jsonPost string) *httptest.ResponseRecorder {
var payload io.Reader
if len(jsonPost) > 0 {
payload = bytes.NewBufferString(jsonPost)
}
req, err := http.NewRequest(method, path, payload)

if err != nil {
    fmt.Printf("Problem sending http request, %e", err)
    return nil
}
if headers != nil && len(headers) > 0 {
    for k, v := range headers {
        req.Header.Add(k, v)
    }
}
if len(jsonPost) > 0 {
    req.Header.Set("Content-Type", "application/json") // This makes it work
}
w := httptest.NewRecorder()
r.ServeHTTP(w, req)
return w
}

Environment MacOS X go version go1.13.4 darwin/amd64

Testing Flink window

I have a simple Flink application, which sums up the events with the same id and timestamp within the last minute:

DataStream<String> input = env
                .addSource(consumerProps)
                .uid("app");

DataStream<Pixel> pixels = input.map(record -> mapper.readValue(record, Pixel.class));

pixels
        .keyBy("id", "timestampRoundedToMinutes")
        .timeWindow(Time.minutes(1))
        .sum("constant")
        .addSink(dynamoDBSink);

env.execute(jobName);

I am trying to test this application with the recommended approach in the documentation. I have also looked at this stackoverflow question, but adding the sink hasn't helped.

I do have a @ClassRule as recommended in my test class. The function looks like this:

StreamExecutionEnvironment env=StreamExecutionEnvironment.getExecutionEnvironment();
env.setParallelism(2);

CollectSink.values.clear();

Pixel testPixel1 = Pixel.builder().id(1).timestampRoundedToMinutes("202002261219").constant(1).build();
Pixel testPixel2 = Pixel.builder().id(2).timestampRoundedToMinutes("202002261220").constant(1).build();
Pixel testPixel3 = Pixel.builder().id(1).timestampRoundedToMinutes("202002261219").constant(1).build();
Pixel testPixel4 = Pixel.builder().id(3).timestampRoundedToMinutes("202002261220").constant(1).build();

env.fromElements(testPixel1, testPixel2, testPixel3, testPixel4)
    .keyBy("id","timestampRoundedToMinutes")
    .timeWindow(Time.minutes(1))
    .sum("constant")
    .addSink(new CollectSink());

JobExecutionResult result = env.execute("AggregationTest");
assertNotEquals(0, CollectSink.values.size());

CollectSink is copied from documentation.

What am I doing wrong? Is there also a simple way to test the application with embedded kafka?

Thanks!

R: How to suppress warning message when testing for result AND warning

I want to test in R if a function

  1. returns the correct value
  2. throws the right warning during calculation

For that, I created a reproducible example. There a two scripts, the first one (e.g. test-warning-and-result.R) works fine and without any errors:

library(testthat)

f <- function(x) {
  if (x < 0) {
    warning("*x* is already negative")
    return(x)
  }
  -x
}

test_that("warning and result", {
  x = f(-1)
  expect_that(x, equals(-1))
  expect_warning(f(-1), "already negative")
})

However, when I run the tests from an external script (e.g. run-test.R), it logically throws a warning at "x = f(-1)"

library(testthat)
test_dir(".")

Picture of test results

Since I know there will be a warning and am testing for it, I'm searching for a way to suppress the warning which occurs when I'm checking for the result ("x = f(-1)")

Any Ideas would be appreciated

Automation Tool for a Kiosk(medicine vending machine)

I just wanted to know the best automation tools for QA that can be used for a product that has both hardware and software components. In short, how can we automate such products? Also, the software component is developed using C#. I am looking to write automation scripts for the product but am not sure how to start. Looking for recommendations and suggestions.

Stripe webhook test returns empty table on execution, but works on stripe's webhook tester

We're having a problem with our test suite. When we run this using the test suite we get a 'The table is empty...' response from PHPUnit.

We know it works as we've also tested using Stripe's 'Send a web hook' test function which works, and the response is stored as expected.

Our code is here:

public function test_webhook_received()
{
    $this->expectsJobs([StoreStripeWebHookJob::class]);
    $this->postJson('/stripeHook', [
        'created' => 1326853478,
        'livemode' => false,
        'id' => 'evt_00000000000000',
        'type' => 'account.external_account.created',
        'object' => 'event',
        'request' => NULL,
        'pending_webhooks' => 1,
        'api_version' => '2019-12-03',
        'data' => [
            'object' => [
                'id' => 'ba_00000000000000',
                'object' => 'bank_account',
                'account' => 'acct_00000000000000',
                'account_holder_name' => 'Jane Austin',
                'account_holder_type' => 'individual',
                'bank_name' => 'STRIPE TEST BANK',
                'country' => 'US',
                'currency' => 'gbp',
                'fingerprint' => '8JXtPxqbdX5GnmYz',
                'last4' => '6789',
                'metadata' => [],
                'routing_number' => '110000000',
                'status' => 'new',
            ],
            ],
        ]);

        $this->assertDatabaseHas('stripe_webhooks', [
            'stripe_created_at' => 1326853478,
            'type' => 'account.external_account.created',
        ]);
}

The response received is:

Failed asserting that a row in the table [stripe_webhooks] matches the attributes { "stripe_created_at": 1326853478, "type": "account.external_account.created" }.

The table is empty..

If we remove the

$this->expectsJobs([StoreStripeWebHookJob::class]);

tests succeed. Obviously the expectsJobs() call should be where it is, though.

Benefit of Protractor (or mocha etc.) over action recording tools like Selenium IDE

I am automating some apps for learning purposes. A friend of mine told me that all my efforts are in vain, as there are tools that record the actions of a user and generate test cases accordingly. I am not sure what I should do. Can anyone tell me which one is the best for test automation and WHY? I will be thankful.

Tools for bluetooth devices debug in remote test bed

I know it may sound odd, but are there any kind of jump-boxes or transparent proxy to work with remote Bluetooth devices (i.e. I'm in Ukraine and remote devices are in Germany).

Some kind of tunnel for bluetooth data through http...

We got heavy distributed team and lack of real HW devices is becoming pain.

Sample schema:
+------------+     +---------+       +---------+     +-----------+        +------+  +----+
| Remote     | Phy | Magic   | HTTPS | Backend | API |Custom BLE | Kernel |BLE   |  |Test|
| Device     |-----| BLE Box |-------| service |-----|Driver     |--------|Bundle|--|app |
+------------+     +---------+ MQTT? +---------+     +-----------+        +------+  +----+

If there is no such tool, will it be useful in open-source?

I am testing an API for a website by using JMeter and there is an error message, so I want a solution for this problem

I am doing API testing for a website by using JMeter and there is an error message, so I want a solution for this problem. I put the base URL and path, write the parameters, and then choose Listener >> View Results Tree.

enter image description here — this image includes my error message

How to test interaction of micro-services in gitlab ci?

I have 4 services that are all NodeJS API's, 1 is the 'main-service' and I want to test the routing between the services.

Locally I can do that, I start up the other 3 services and run the test and they succeed.
Now I want to do the same thing, only on gitlab ci, but I can't figure out how.

I have build 4 docker images of the services and in test I pull them all, run the other 3 detached and run the 4th with the test command, but all the routing test fail with for example: 'RequestError: Error: connect ECONNREFUSED 0.0.0.0:9016'.
Locally when I set the adresses of the services with docker it works, but it looks like I can't reach them in gitlab ci.

Does anyone have an idea of how to do this?

How to test this perl function?

So I have this function: customer_statistics() and I want to test it in 2 ways:

  1. test if the function gets the correct parameters for a certain request
  2. test if the function returns the correct result

I need some guidance, thanks!

sub customer_statistics {
  my $self = shift;
  my @all_access_rights = $self->db->resultset('AccessRights')->ids();

  my $selected_access_rights = $self->param('access_rights') // '';
  my $user_access_rights =
    $self->db->resultset('UserAccessRights')->active();
  if ($selected_access_rights ne '') {
    return unless validate_access_rights_existence(
      $self, $selected_access_rights);
    $user_access_rights =
      $user_access_rights->by_access_rights_id($selected_access_rights);
  }
  my $no_subscriptions = $user_access_rights->count;
  my $user_login_statistics = $user_access_rights->last_logins()
    ->with_num_comments;

  my $items_per_page = $self->param('items_per_page') // 30;
  my $page = $self->param('page') // 1;
  my $total_pages = ceil($no_subscriptions / $items_per_page);

  my $column = $self->param('column') // 'date_begin';
  my $type = $self->param('type') // 'desc';
  $user_login_statistics =
    $user_login_statistics->by_page($page, $items_per_page)
      ->ordered_by($column, $type);

  return $self->render(
    all_access_rights => \@all_access_rights,
    items_per_page => $items_per_page,
    no_subscriptions => $no_subscriptions,
    page => $page,
    selected_access_rights => $selected_access_rights,
    total_pages => $total_pages,
    user_login_statistics => $user_login_statistics,
    column => $column,
    type => $type
  );
}

Java and JUnit, find missing @Test decorators

Let's say we have a project full of unit tests (thousands) and they all should look like this

@Test
public void testExceptionInBla() {
   // some test
}

But in one case someone forgot to put an @Test decorator on top of the test.

What would be an easy way to spot those tests, without looking through all the code manually?

I want to find code like this, it's a test without @Test:

public void testExceptionInBla() {
   // some test
}

How can I hover over an element then click using Cypress

I want to hover over the "My Account" button and then click the "Login" button in the opened popup. I have tried the below code but it did not work. Does anyone know a way to handle this situation? Thanks,

 Cypress.Commands.add('loginol', (email, password) => { 
     cy.get('#myAccount').click()
     cy.get('#myAccount').trigger('mouseover')
     cy.wait(3000)
     cy.get('#login').click()
     cy.get('#email').type(email)
     cy.get('#password').type(password)
     cy.get('.btn.full.btn-login-submit').click()

 })

I have uploaded the pictures in case it helps:

"Giriş Yap(My Account)" Button

After it is hovered below "Giriş Yap(Login)" Button

Website I'm working on: https://www.hepsiburada.com/

what is the difference between concurrency thread group and ultimate thread group

concurrency thread group

Ultimate thread group

Nightwatch: .elements() returns different result for geckodriver

I'm using Nightwatch.js. Here's this piece of code:

.elements("css selector", ".inputField", (inputFields: any) => {
  console.log("input fields", inputFields);
})

Using chromedriver it prints:

{
  sessionId: '5d160b0b2808e96e181a67e347760c51',
  status: 0,
  value: [{
      ELEMENT: '0.31906899492724383-12'
    },
    {
      ELEMENT: '0.31906899492724383-13'
    },
  ]
}

But using geckodriver:

{
  value: [{
      'element-6066-11e4-a52e-4f735466cecf': 'f53da795-6871-4
    },
    {
      'element-6066-11e4-a52e-4f735466cecf': '38acacec-17ae-4
    },
  ]
}

What's the problem? The return type should be the same. I've tried to set w3c: false in chromeOptions but nothing changed.

Should I unit test *what* or *how* for composite functions?

I have functions f and g, which already have a collection of unit tests ensuring that their behavior is correct for some known inputs + outputs pairs (plus exception handling, etc).

Now I'm creating the function h(), as follows:

def h(x):
    return f(x) + g(2*x)

What's a good approach for unit testing this function? Would this change if h() was substantially more complex?


My thoughts so far:

I see two possibilities for testing h().

1. Testing if h() is doing the correct "plumbing"

Mock out f() so that it returns y_0 when called with input x_0; mock g() so that it returns z_0 when called with input 2*x_0. Then check that calling h(x_0) returns y_0+z_0.

Advantages:

  • Very simple to implement the test.
  • Can quickly find bugs where I incorrectly connected the outputs of f and g, or where I called them with wrong arguments (say, calling g(x) instead of g(2*x) in h()).

Disadvantages:

  • This is testing how, not what h() should do. If later I want to refactor h(), then I'll probably need to rewrite these types of tests.
  • If the plumbing specified by the test does not produce the intended high-level behavior for h(), then these tests won't catch this error. For example, maybe the correct plumbing was supposed to be f(-x) + g(2*x), and I made it wrong both in the function definition and in the test definition.

1. Testing what h() should do

Let's say that the purpose of h() is to compute the sum of primes below the given argument. In this case, a natural test suite for h() would involve testing it with known input and output pairs. Something that makes sure that, for instance, h(1)=2, h(2)=5, h(5)=28, etc, not caring about how h() is computing these numbers.

Advantages:

  • This type of test checks that h() is indeed following its intended high-level behavior. Any plumbing mistakes that alter this will be caught.
  • Refactoring h() will probably not necessitate changing the test suite, and will even be easier, since the tests help us guarantee that the behavior of the function doesn't change.

Disadvantages:

  • In this simple example it's easy to produce such pairs because the mapping that h() performs is not very complicated (just sum the n first primes). However, for a very complicated h(), my only option for producing such pairs might be me to come up with an input x and compute the correct output by hand. This doesn't seem reasonable if h is very complicated.
  • Since coming up with known input-output pairs requires me to compute what f() and g() will produce given a certain input, there will probably be some duplication of effort, since I already spent some time doing that when creating the unit tests for these functions.

Related question: Unit testing composite functions.

This question is at first glance very similar to mine. However, the two most voted answers present completely different approaches to the problem (the two approaches I mentioned above). My question is an attempt in clarifying the pros/cons of each approach (and perhaps learn other approaches), and potentially establish which is best overall. If no approach is best in all cases, I would like to understand in which cases one should use each of them.

Does TestCafe support soft assertions?

I have a scenario that is an end-to-end test where I have multiple assertion points. I observed that when an assertion fails, the test stops. But I need to just report a failed step in the test results and proceed further with the test execution. Does TestCafe support soft assertions?

Open Source Projects Using Google Mock and Google Test?

I have started learning automation testing. Since I was a beginner, I started to learn from open source projects like opencv. I noticed that they have not used any mocks like gmock and have used only google test. My questions:

Is it possible to do unit testing with only googletest? If not, how many open source projects use only googletest?

Is there any open source projects which uses gmock testing?

mardi 25 février 2020

How to add a c language file to the ttcn as a SUT for testing using ttcn3

I was trying to test C language code using ttcn3, but I was not sure where to include the code that I need to test (where the SUT code is placed), as I am new to ttcn3. Can anyone help me regarding this?

For loop pure function jest test using expect... cannot read property

I am trying to make a pure function using a for loop pass a jest test/npm test in the terminal... I am getting an error that it cannot read the property of toBe...

My function:

const syntax = {
   for1: (a,b) => {
      for(let a=1; a<10; a++){
         for(let b=1; b<10; b++){
             return a+b;
         }
      }
   }
}

My Test.js file: I want it to test that 1+2 does not equal 0 making this test passing for the function

test('FORLOOP', () => {
    expect(syntax.for1(1,2).not.toBe(0));
});

TypeError in terminal:

TypeError: Cannot read property 'toBe' of undefined
      45 | test('FORLOOP', () => {
    > 46 |     expect(syntax.for1(1+3).not.toBe(0));
         |            ^
      47 | });

How can I access these fields in testing (Flutter)?

**I have these fields below in a state class and I want to test whether these fields work correctly. So I am writing a testing dart file; how can I access these fields in the testing file? **

class _LoginPageState extends State<LoginPage> {
      String _phoneNumber = "";
  String _smsCode = "";
  String _verificationId = "";
  int _currentStep = 0;

  List<StepState> _stepsState = [
    StepState.editing,
    StepState.indexed,
    StepState.indexed
  ];

  bool _showProgress = false;
  String _displayName = '';
  File _imageFile;

  bool _labeling = false;

  List<ImageLabel> _labels = List();

}

How to test ODBC driver conformance?

Say there is an open-source ODBC driver for a specific database and one wants to test its level of support of the ODBC standard — what would you recommend?

Is there an easy way (hell, even a hard way is fine too) to do that ?

The best would be some sort of test suite that will show which ODBC APIs are implemented, not implemented, or implemented incorrectly. Preferably cross-platform (Linux, Windows and MacOS), but Linux-only is fine too.

I've tried to go with unixODBC-Test (http://www.unixodbc.org/test/, https://sourceforge.net/p/unixodbc-test/code/HEAD/tree/) but it seems unmaintained and a bit outdated, to the point that I was unable to compile it from source for various reasons.

iODBC (http://www.iodbc.org https://github.com/openlink/iODBC) seems to not provide any compliance-testing tools at all, which makes me sad.

I was able to google some books and software magazines, which mention that it is really important and should be done, but still there is no unified way of doing such a fundamental thing.

Enzyme tests(snapshot) do not work if there is a @observer(mobx) annotation in the component being tested

People! I have a problem with writing tests for the project. The project has a mobx store. Enzyme tests(snapshot) do not work if there is a @observer(mobx) annotation in the component being tested.

Getting an error:

Test suite failed to run TypeError: Cannot read property 'componentWillReact' of undefined

Test:

const props = {
  store: {...store},
};

describe('ViewModal component testing', () => {

  it('mount to dom', async () => {
    const component = shallow(
            <Provider {...props}>
                <ViewModal />
            </Provider>);
    expect(component).toMatchSnapshot();
  });
});

Component for test:

@inject('store')
@observer
export default class ViewModal extends React.Component<IProps> {
   componentDidMount() {
     this.props.store.getItem();
   }

  render() {
    const {item} =  this.props.store;
    return (
        <Row>
            {item}
        </Row>
    );
  }

}

If you remove @observer, the test is successful. How do I make the test run successfully with @observer in the component?

Sorry, my English is very bad.))

Controlling Desktop Applications via Web Application (Admin Panel)

I want to build a service that is controlled via web based application (admin panel) that connects and automatically updates the desktop applications in real time on other computers in their network based on specific privileges set by the admin (web application). How would I go about doing that? I will also be building mobile apps to go with it too.

p.s. I apologize if this is a stupid question. I just started learning web development a couple months ago and have a project in mind. The reason I want to do this as a web application instead of solely a desktop application is to make it easier for the admin to monitor and make changes to specific users desktop applications from anywhere they can access the internet.

P.s. I am learning nodejs right now and will use that with Electron or Nw.js to get the beta version of my project built as quickly as possible.

Thanks in advance.

React test Error: Failed to execute 'appendChild' on 'Node': parameter 1 is not of type 'Node'

I have this file

import React, { RefObject } from 'react';

interface Props {
  domNodeId: string;
}

export default class Portal extends React.Component<Props> {
  elementRef: RefObject<HTMLDivElement> = React.createRef();

  componentDidMount() {
    const { domNodeId } = this.props;

    if (document.getElementById(domNodeId)) {
      document.getElementById(domNodeId).appendChild(this.elementRef.current);
    }
  }

  componentDidUpdate(prevProps) {
    const { domNodeId } = this.props;

    if (prevProps.domNodeId !== domNodeId && document.getElementById(domNodeId)) {
      const domNodeIdElement = document.getElementById(domNodeId);
      while (domNodeIdElement.firstChild) {
        domNodeIdElement.removeChild(domNodeIdElement.firstChild);
      }

      document.getElementById(domNodeId).appendChild(this.elementRef.current);
    }
  }

  render() {
    const { domNodeId } = this.props;

    return (
      <>
        {domNodeId && (
          <div className="portal" ref={this.elementRef}>
            {this.props.children}
          </div>
        )}
      </>
    );
  }
}

And my test file is

import { shallow } from 'enzyme';
import { mount } from 'enzyme';
import Portal from '~/portal';

const props = {
  domNodeId: 'domNodeId'
};


describe('<Portal />', () => {
beforeAll(() => {
    const div = document.createElement('div');
    div.setAttribute('id', 'domNodeId');
    div.innerHTML = '<div></div>';
    document.body.appendChild(div);
    });

  it('matches snapshot', () => {
    const tree = renderer.create(<Portal {...props} />).toJSON();

    expect(tree).toMatchSnapshot();
  });



  it('triggers componentDidUpdate', () => {
    const wrapper = shallow(<Portal {...props} />) as any;
    wrapper.setProps({ domNodeId: 'newdomNodeId' });
    const tree = renderer.create(<Portal {...props} />).toJSON();

    expect(tree).toMatchSnapshot();
  });

  it('componentWillUnmount should be called on unmount', () => {
    const component = shallow(<Portal {...props} />) as any;
    const componentWillUnmount = jest.spyOn(component.instance(), 'componentWillUnmount');
    component.unmount();
    expect(componentWillUnmount).toHaveBeenCalled();
  });

  it('componentWillUnmount will remove the portal', () => {
    const window = {
      document: {
        body: {
          removeChild: jest.fn()
        }
      }
    };
  const wrapper = shallow(<Portal {...props} />) as any;
  wrapper.setProps({ domNodeId: 'newdomNodeId' });
  });

I need to get 100% coverage on my test files but I get the following error Error: Failed to execute 'appendChild' on 'Node': parameter 1 is not of type 'Node'

How can I mock the react reference so as to be and HTML Div element? I think that only then I won't get this error. Or should I use the expect function to check if the code works properly?

Do you have any other suggestion?

How to run Kubernetes E2E performance/density tests and obtain latency and throughput measurements?

I wanted to replicate the exact measurements and tests realized here or here (API responsiveness, pod startup time, networking/scheduling throughput ...etc.) and obtain the results to make visual graphs out of them but there is no clear guidance how to do it.

I compiled kubetest, kubectl, ginkgo and e2e.test and ran the e2e tests, but I can't find any way to do what has been done in the previous articles. Running with -ginkgo.dryRun shows the full list of possible tests and apparently there are no tests that match the ones I want to do (using -ginkgo.focus="Performance|Density" or other combinations doesn't yield any interesting results either).

I just want to know how the previous articles performed their tests and obtained the measurements of latency, throughput, and pod startup times, as well as other metrics. What benchmark did they use and how did they apply it?

PS: I use a Kubernetes v1.17.0 cluster created with Kubespray on a bare-metal infrastructure (high-end servers). Used OS is Debian 10.

CLion Run configuration just gone

I was just using CLion for C++ and everything was fine. I was also testing the program.

Suddenly, I wasn't able to access the standard library anymore (also no auto completion for that) and the run configuration was gone. I don't know why, but it's just gone. I tried making a new one, but there, the target is not showing up.

Why does $form->isValid() return false when testing although it's true in production?

I'm using Symfony 3.4 and I'm writing tests for my actions

And as mentioned in the title $form->isValid() returns false although it works fine when I test the form in production.

In my tests I try to simulate POST request like so:

$crawler = $this->_client->request('GET', $this->_router->generate('product_new'));
$form = $crawler->filter('form')->form();
$values = $form->getPhpValues();
$values['product']['code'] = 'PRD';
$values['product']['name'] = 'TEST PRODUCT';
$values['product']['price'] = 550;
$this->_client->request($form->getMethod(), $form->getUri(), $values, $form->getPhpFiles());
$response = $this->_client->getResponse();
// the response should return 302 because I'm doing redirection after submitting the form
// so this one should return true
$this->assertEquals(Response::HTTP_FOUND, $response->getStatusCode());

And this is the action in ProductController class:

/**
 * @Route("/new", name="product_new")
 * @Method({"GET", "POST"})
 */
public function createAction(){
      $product = new Product();
      $form = $this->createForm(ProductType::class, $product);

      $form->handleRequest($request);
      // when I remove $form->isValid() the test works fine
      if ($form->isSubmitted() && $form->isValid()) {
          $em = $this->getDoctrine()->getManager();
          $em->persist($product);
          $em->flush();
          return $this->redirectToRoute('product_show', array('code' => $product->getCode()));
      }

      return $this->render('@Commerce/product/new.html.twig', array(
        'product' => $product,
        'form' => $form->createView()
      )); 
}

What am I missing here?