1 | import os
|
---|
2 | import sys
|
---|
3 | import subprocess
|
---|
4 | import threading
|
---|
5 | import signal
|
---|
6 |
|
---|
7 |
|
---|
def get_environment():
    """
    Return the envjasmine ``--environment=...`` flag for the current OS:
    WIN on Windows ('nt'), UNIX everywhere else.
    """
    platform = 'WIN' if os.name == 'nt' else 'UNIX'  # 'nt' branch not tested!
    return '--environment=%s' % platform
|
---|
17 |
|
---|
18 |
|
---|
def print_no_newline(string):
    """
    Write *string* to stdout without a trailing newline and flush
    immediately, so partial lines show up in real time.
    """
    stream = sys.stdout
    stream.write(str(string))
    stream.flush()
|
---|
22 |
|
---|
23 |
|
---|
def run_popen_with_timeout(
        command, timeout, input_data, stdin, stdout, stderr, env=None):
    """
    Run a sub-program in subprocess.Popen, pass it the input_data,
    kill it if the specified timeout has passed.
    returns a tuple of (success, stdout, stderr) -- success is False on
    timeout, on a negative exit status, or if the program could not be
    started at all.

    parameters:
    command - program + arguments; a list is run without a shell,
        anything else is handed to the shell as-is.
    timeout - seconds before the watchdog SIGTERMs the child.
    input_data - text fed to the child's stdin (encoded to UTF-8,
        the pipes are binary).
    stdin/stdout/stderr - passed straight through to Popen
        (e.g. subprocess.PIPE or None).
    env - optional environment dict for the child.

    sample usage:

    timeout = 60 # seconds
    path = '/path/to/event.log'
    command = ['/usr/bin/tail', '-30', path]
    input_data = ''
    success, stdout, stderr = run_popen_with_timeout(command, timeout,
                                                     input_data, None,
                                                     subprocess.PIPE,
                                                     subprocess.PIPE)
    if not success:
        print('timeout on tail event.log output')
    tail_output = stdout
    """
    kill_check = threading.Event()

    def _kill_process_after_a_timeout(pid):
        # Runs on the watchdog timer's thread once `timeout` elapses.
        try:
            os.kill(pid, signal.SIGTERM)
        except OSError:
            # catch a possible race condition, the process terminated normally
            # between the timer firing and our kill
            return
        kill_check.set()  # tell the main routine that we had to kill
        # use SIGKILL if hard to kill...
        return

    stdout_l = []

    # don't use shell if command/options come in as list
    use_shell = not isinstance(command, list)
    try:
        p = subprocess.Popen(command, bufsize=1, shell=use_shell,
                             stdin=stdin, stdout=stdout,
                             stderr=stderr, env=env)
    except OSError as error_message:
        # program missing or not executable: report instead of raising
        return (False, '', 'OSError: ' + str(error_message))

    watchdog = threading.Timer(timeout, _kill_process_after_a_timeout,
                               args=(p.pid, ))
    watchdog.start()

    # Echo the child's output live (byte by byte) while collecting it for
    # the caller.  Only possible when the caller asked for a stdout pipe;
    # p.stdout is None otherwise and this loop must be skipped.
    if p.stdout is not None:
        while True:
            output = p.stdout.readline(1).decode('utf-8')
            if output == '' and p.poll() is not None:
                break
            if output == '\n':
                print(output)
            else:
                # inline echo-without-newline + flush so partial lines
                # appear immediately
                sys.stdout.write(output)
                sys.stdout.flush()
            stdout_l.append(output)

    try:
        # The pipes are binary, so communicate() needs bytes, not str.
        if isinstance(input_data, str):
            input_data = input_data.encode('utf-8')
        (stdout, stderr) = p.communicate(input_data or None)
        if isinstance(stderr, bytes):
            # keep the stderr return type (str) consistent with the
            # error branches above/below
            stderr = stderr.decode('utf-8', 'replace')
    except OSError as error_message:
        stdout = ''
        stderr = 'OSError: ' + str(error_message)
        p.returncode = -666  # sentinel: communicate() itself failed

    watchdog.cancel()  # if it's still waiting to run
    # if it timed out, success is False
    success = (not kill_check.is_set()) and p.returncode >= 0
    kill_check.clear()
    return (success, ''.join(stdout_l), stderr)
|
---|
95 |
|
---|
class Runner(object):
    """
    Setup to run envjasmine "specs" (tests).

    To use it, probably best to put it inside a normal python
    unit test suite, then just print out the output.
    """

    def __init__(self, rootdir=None, testdir=None, configfile=None,
                 browser_configfile=None, testing_enviroment='phantomjs'):
        """
        Set up paths, by default everything is
        inside the "envjasmine" folder right here.
        Giving no paths, the sample specs from envjasmine will be run.
        XXX: it would be more practical if this raised an exception
        and you know you're not running the tests you want.

        parameters:
        testdir - the directory that holds the "mocks", "specs"
            and "include" directories for the actual tests.
        rootdir - the directory where the envjasmine code lives in.
        configfile - path to an extra js config file that is run for the tests.
        browser_configfile - path to an extra js config file for running
            the tests in browser.
        testing_enviroment - key into self.envs selecting the js engine,
            'phantomjs' (default) or 'rhino'.
            NOTE(review): the parameter keeps its historical misspelling
            ("enviroment") so existing callers do not break.
        """
        here = os.path.dirname(__file__)
        self.libdir = here
        self.rootdir = rootdir or os.path.join(here, 'envjasmine')
        self.testdir = testdir or self.rootdir
        self.configfile = configfile
        self.browser_configfile = browser_configfile
        self.runner_js = os.path.join(here, 'run-jasmine3.js')
        self.testing_enviroment = testing_enviroment
        environment = get_environment()
        rhino_path = os.path.join(self.rootdir, 'lib', 'rhino', 'js.jar')
        envjasmine_js_path = os.path.join(self.rootdir, 'lib', 'envjasmine.js')
        rootdir_param = '--rootDir=%s' % self.rootdir
        testdir_param = '--testDir=%s' % self.testdir

        # using a dictionary to parameterize the different engines
        self.envs = {
            'phantomjs': {
                'command': [
                    'phantomjs',
                    '--debug=true',
                    self.runner_js,
                    'browser.runner.html'
                ],
                'runner_html': 'runner3.html',
                'failed_mark': 'FAILED',
                'success_mark': '0 failures'
            },
            'rhino': {
                'command': [
                    'java',
                    '-Duser.timezone=US/Eastern',
                    '-Dfile.encoding=utf-8',
                    '-jar',
                    rhino_path,
                    envjasmine_js_path,
                    '--disableColor',
                    environment,
                    rootdir_param,
                    testdir_param
                ],
                'runner_html': 'runner.html',
                'failed_mark': 'FAILED',
                'success_mark': 'Failed: 0',
                'command_params_func': self.command_params_rhino
            }
        }
        self.runner_html = os.path.join(
            here, self.envs[self.testing_enviroment]['runner_html'])

    def command_params_rhino(self, command):
        """
        Function specific to Rhino to add eventual arguments to the command line.
        The function is referenced in the dictionary "envs" with key
        "command_params_func", and for every engine that needs the same type of
        manipulation there will be a similar function with relative reference in
        the dictionary.

        Appends to and returns the *command* list it is given.
        """
        if self.configfile and os.path.exists(self.configfile):
            command.append('--configFile=%s' % self.configfile)
        return command

    def run(self, spec=None, timeout=None):
        """
        Run the js tests with envjasmine, return success (true/false) and
        the captured stdout data

        spec: (relative) path to a spec file (run only that spec)
        timeout: Set it to a given number of seconds and the process running
        the js tests will be killed passed that time
        """
        if self.browser_configfile and os.path.exists(self.browser_configfile):
            self.write_browser_htmlfile()
        # Work on a COPY of the engine's command template: both the
        # command_params_func and the spec handling below append to this
        # list, and mutating the list stored in self.envs would leak
        # those extra arguments into every subsequent run() call.
        command = list(self.envs[self.testing_enviroment]['command'])
        # Add eventual other parameters to the command by calling a
        # function specific for the selected engine (rhino, phantomjs, etc.)
        if 'command_params_func' in self.envs[self.testing_enviroment]:
            command = self.envs[self.testing_enviroment]['command_params_func'](command)
        # if we were asked to test only some of the spec files,
        # add them to the command line:
        if spec is not None:
            if not isinstance(spec, list):
                spec = [spec]
            command.extend(spec)

        stdin = None
        stdout = subprocess.PIPE
        stderr = subprocess.PIPE
        input_data = ''
        success, stdout, stderr = run_popen_with_timeout(
            command, timeout, input_data, stdin, stdout, stderr
        )

        # success will be true if the subprocess did not timeout, now look
        # for actual failures if there was not a timeout
        if success:
            success = self.did_test_pass(stdout)
        return success, stdout

    def did_test_pass(self, stdout):
        """
        Decide pass/fail from the captured runner output: any occurrence
        of the engine's failed_mark fails the run; otherwise a line
        containing the engine's success_mark passes it; anything else
        (e.g. truncated output) is treated as a failure.
        """
        if self.envs[self.testing_enviroment]['failed_mark'] in stdout:
            # it can happen that a test fails because of some timing issues
            # (timer error). In such case it may happen that the test does
            # not appear in the "Failed" report at the end, even if it
            # failed, because the execution is interrupted there (no more
            # tests are even run afterwards)
            #
            # in such case, we consider tests failed
            return False
        # Otherwise, look for a "Failed: 0" status, which we consider as
        # tests passing ok
        for line in stdout.splitlines():
            if self.envs[self.testing_enviroment]['success_mark'] in line:
                return True
        return False

    def write_browser_htmlfile(self):
        """
        Render the browser runner template and write it out as
        browser.runner.html in the current working directory (the name
        the phantomjs command expects).
        """
        markup = self.create_testRunnerHtml()
        with open("browser.runner.html", 'w') as file:
            file.write(markup)

    def create_testRunnerHtml(self):
        """
        Read the engine's runner HTML template and interpolate the lib
        dir, test dir and browser config file via %-formatting.
        """
        with open(self.runner_html, 'r') as runner_html:
            html = runner_html.read()
        return html % {"libDir": os.path.normpath(self.libdir),
                       "testDir": os.path.normpath(self.testdir),
                       "browser_configfile": self.browser_configfile}
|
---|