[0] | 1 | import os
|
---|
[26] | 2 | import pytest
|
---|
[0] | 3 |
|
---|
[26] | 4 | from pyenvjasmine.runner import Runner, get_environment
|
---|
[0] | 5 |
|
---|
| 6 |
|
---|
class TestsRunner(object):
    """Integration tests for pyenvjasmine's ``Runner`` and ``get_environment``.

    Most tests here actually invoke the envjasmine/Rhino test runner against
    the bundled sample specs, so they exercise the full subprocess pipeline.
    """

    @staticmethod
    def _sample_paths():
        """Return ``(sample_dir, envjasmine_dir)`` for the bundled fixtures.

        ``sample_dir`` holds the JS config files and specs used by these
        tests; ``envjasmine_dir`` is the sibling ``envjasmine`` checkout.
        """
        here = os.path.dirname(__file__)
        sample = os.path.join(here, 'sample')
        envjasmine_dir = os.path.join(os.path.dirname(here), 'envjasmine')
        return sample, envjasmine_dir

    def _browser_runner(self):
        """Build a Runner configured with the sample browser config file."""
        sample, envjasmine_dir = self._sample_paths()
        browser_conf_file = os.path.join(sample, 'browser.configfile.js')
        return Runner(
            rootdir=envjasmine_dir,
            testdir=sample,
            browser_configfile=browser_conf_file,
        )

    def test_runner_defaults(self):
        """
        Test the runner, using default values (which will run the demo specs)
        """
        jstests = Runner()
        success, stdout = jstests.run()
        assert success
        assert 'Failed: 0' in stdout
        assert 'Passed: 5' in stdout

    def test_runner_params(self):
        """
        Test the runner, giving it some parameters
        """
        sample, envjasmine_dir = self._sample_paths()
        conf_file = os.path.join(sample, 'configfile.js')
        jstests = Runner(
            rootdir=envjasmine_dir,
            testdir=sample,
            configfile=conf_file,
        )
        success, stdout = jstests.run(spec='tests/specs/test_demo.spec.js')
        lines = stdout.splitlines()
        # First output line echoes the spec being run; second is the
        # Envjs/Rhino banner.
        assert lines[0].endswith('specs/test_demo.spec.js')
        assert lines[1].startswith('[ Envjs/1.6 (Rhino;')
        # Membership in `lines` requires an exact whole-line match of the
        # summary report, not a substring match.
        assert 'Passed: 4' in lines
        assert 'Failed: 0' in lines
        assert 'Total : 4' in lines

    def test_write_browser_htmlfile_markup_is_correct(self):
        """
        Test the created markup
        """
        jstests = self._browser_runner()
        expected = jstests.create_testRunnerHtml()
        success, stdout = jstests.run(spec='tests/specs/test_demo.spec.js')
        # The sample browser spec is expected to fail two tests.
        assert not success
        assert 'Failed: 2' in stdout
        # The runner writes the HTML page into the current working directory;
        # it must match the markup create_testRunnerHtml() returned.
        with open("browser.runner.html", 'r') as html_file:
            actual = html_file.read()
        assert expected == actual

    def test_runner_with_browser_configfile(self):
        """
        Test the runner, giving it some parameters incl the browser config file
        """
        jstests = self._browser_runner()
        success, stdout = jstests.run(spec='tests/specs/test_demo.spec.js')
        assert not success
        assert 'Failed: 2' in stdout

    def test_get_environment(self):
        """
        Testing the OS specific code
        Could not figure out how to do this using mock,
        so monkey patching the old way.
        """
        old_os_name = os.name
        try:
            os.name = 'nt'
            res = get_environment()
            assert res == '--environment=WIN'
            os.name = 'posix'
            res = get_environment()
            assert res == '--environment=UNIX'
        finally:
            # Always restore the real value so later tests see the actual
            # platform, even if an assertion above fails.
            os.name = old_os_name

    def test_did_test_pass(self):
        """
        did_test_pass() must report success only when the output contains
        'Failed: 0' and no 'FAILED' marker. Some coverage comes from the
        tests above; this also covers outputs where a test failed but no
        'Failed:' report line is present.
        """
        jstests = Runner()
        # Outputs that must all be treated as failures.
        failing_outputs = [
            '',
            'some random data ' * 50,
            'some data FAILED some more data',
            'some data FAILEDsome more data',
            'Failed: 0 FAILED',
            'FAILEDFailed: 0',
            'Failed: 0FAILED',
            'Failed: 1',
            'Failed: -11',
            'Failed: something-not-a-number',
        ]
        for output in failing_outputs:
            assert not jstests.did_test_pass(output), output
        # The only passing shape: zero failures and no FAILED marker.
        assert jstests.did_test_pass('Failed: 0')