forked from skupperproject/skupper-router
-
Notifications
You must be signed in to change notification settings - Fork 0
/
system_test.py
executable file
·1448 lines (1217 loc) · 51.9 KB
/
system_test.py
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
231
232
233
234
235
236
237
238
239
240
241
242
243
244
245
246
247
248
249
250
251
252
253
254
255
256
257
258
259
260
261
262
263
264
265
266
267
268
269
270
271
272
273
274
275
276
277
278
279
280
281
282
283
284
285
286
287
288
289
290
291
292
293
294
295
296
297
298
299
300
301
302
303
304
305
306
307
308
309
310
311
312
313
314
315
316
317
318
319
320
321
322
323
324
325
326
327
328
329
330
331
332
333
334
335
336
337
338
339
340
341
342
343
344
345
346
347
348
349
350
351
352
353
354
355
356
357
358
359
360
361
362
363
364
365
366
367
368
369
370
371
372
373
374
375
376
377
378
379
380
381
382
383
384
385
386
387
388
389
390
391
392
393
394
395
396
397
398
399
400
401
402
403
404
405
406
407
408
409
410
411
412
413
414
415
416
417
418
419
420
421
422
423
424
425
426
427
428
429
430
431
432
433
434
435
436
437
438
439
440
441
442
443
444
445
446
447
448
449
450
451
452
453
454
455
456
457
458
459
460
461
462
463
464
465
466
467
468
469
470
471
472
473
474
475
476
477
478
479
480
481
482
483
484
485
486
487
488
489
490
491
492
493
494
495
496
497
498
499
500
501
502
503
504
505
506
507
508
509
510
511
512
513
514
515
516
517
518
519
520
521
522
523
524
525
526
527
528
529
530
531
532
533
534
535
536
537
538
539
540
541
542
543
544
545
546
547
548
549
550
551
552
553
554
555
556
557
558
559
560
561
562
563
564
565
566
567
568
569
570
571
572
573
574
575
576
577
578
579
580
581
582
583
584
585
586
587
588
589
590
591
592
593
594
595
596
597
598
599
600
601
602
603
604
605
606
607
608
609
610
611
612
613
614
615
616
617
618
619
620
621
622
623
624
625
626
627
628
629
630
631
632
633
634
635
636
637
638
639
640
641
642
643
644
645
646
647
648
649
650
651
652
653
654
655
656
657
658
659
660
661
662
663
664
665
666
667
668
669
670
671
672
673
674
675
676
677
678
679
680
681
682
683
684
685
686
687
688
689
690
691
692
693
694
695
696
697
698
699
700
701
702
703
704
705
706
707
708
709
710
711
712
713
714
715
716
717
718
719
720
721
722
723
724
725
726
727
728
729
730
731
732
733
734
735
736
737
738
739
740
741
742
743
744
745
746
747
748
749
750
751
752
753
754
755
756
757
758
759
760
761
762
763
764
765
766
767
768
769
770
771
772
773
774
775
776
777
778
779
780
781
782
783
784
785
786
787
788
789
790
791
792
793
794
795
796
797
798
799
800
801
802
803
804
805
806
807
808
809
810
811
812
813
814
815
816
817
818
819
820
821
822
823
824
825
826
827
828
829
830
831
832
833
834
835
836
837
838
839
840
841
842
843
844
845
846
847
848
849
850
851
852
853
854
855
856
857
858
859
860
861
862
863
864
865
866
867
868
869
870
871
872
873
874
875
876
877
878
879
880
881
882
883
884
885
886
887
888
889
890
891
892
893
894
895
896
897
898
899
900
901
902
903
904
905
906
907
908
909
910
911
912
913
914
915
916
917
918
919
920
921
922
923
924
925
926
927
928
929
930
931
932
933
934
935
936
937
938
939
940
941
942
943
944
945
946
947
948
949
950
951
952
953
954
955
956
957
958
959
960
961
962
963
964
965
966
967
968
969
970
971
972
973
974
975
976
977
978
979
980
981
982
983
984
985
986
987
988
989
990
991
992
993
994
995
996
997
998
999
1000
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
#
"""System test library, provides tools for tests that start multiple processes,
with special support for qdrouter processes.
Features:
- Create separate directories for each test.
- Save logs, sub-process output, core files etc.
- Automated clean-up after tests: kill sub-processes etc.
- Tools to manipulate qdrouter configuration files.
- Sundry other tools.
"""
from typing import Callable
import errno
import logging
import sys
import time
from typing import List, Optional, Tuple
import __main__
import os
import random
import re
import shutil
import socket
import subprocess
from copy import copy
from datetime import datetime
from subprocess import PIPE, STDOUT
import queue as Queue
from threading import Thread
from threading import Event
import json
import uuid
import unittest
import proton
import proton.utils
from proton import Message
from proton import Delivery
from proton.handlers import MessagingHandler
from proton.reactor import AtLeastOnce, Container
from proton.reactor import AtMostOnce
from qpid_dispatch.management.client import Node
from qpid_dispatch.management.error import NotFoundStatus
# Optional modules
MISSING_MODULES = []
try:
import qpidtoollibs
except ImportError as err:
qpidtoollibs = None # pylint: disable=invalid-name
MISSING_MODULES.append(str(err))
try:
import qpid_messaging as qm
except ImportError as err:
qm = None # pylint: disable=invalid-name
MISSING_MODULES.append(str(err))
def find_exe(program):
    """Locate an executable, searching the system PATH if needed.

    If *program* contains a directory component only that exact path is
    considered; otherwise each directory on $PATH is tried in order.
    Returns the full path of the executable, or None if not found.
    """
    def runnable(candidate):
        # A usable executable is a regular file with the execute bit set.
        return os.path.isfile(candidate) and os.access(candidate, os.X_OK)

    directory = os.path.split(program)[0]
    if directory:
        if runnable(program):
            return program
    else:
        for prefix in os.environ["PATH"].split(os.pathsep):
            candidate = os.path.join(prefix, program)
            if runnable(candidate):
                return candidate
    return None
# The directory where this module lives. Used to locate static configuration files etc.
DIR = os.path.dirname(__file__)
def _check_requirements():
    """If requirements are missing, return a message, else return None.

    Checks both the optional Python modules recorded in MISSING_MODULES and
    the presence of required executables on the PATH.
    """
    # BUGFIX: copy the list.  The original aliased the module-level
    # MISSING_MODULES and `+=` extended it in place, so every call appended
    # the executable messages to the global list again.
    missing = list(MISSING_MODULES)
    required_exes = ['qdrouterd']
    # BUGFIX: corrected typo "exectuable" -> "executable" in the message.
    missing += ["No executable %s" % e for e in required_exes if not find_exe(e)]
    if missing:
        return "%s: %s" % (__name__, ", ".join(missing))
MISSING_REQUIREMENTS = _check_requirements()
def retry_delay(deadline, delay, max_delay):
    """Sleep as required between retries (internal helper for retry).

    Sleeps for *delay*, capped at the time remaining until *deadline*.
    Returns the doubled delay (capped at *max_delay*) for the next attempt,
    or None when the deadline has already passed and retrying should stop.
    """
    time_left = deadline - time.time()
    if time_left <= 0:
        return None
    time.sleep(delay if delay < time_left else time_left)
    doubled = delay * 2
    return doubled if doubled < max_delay else max_delay
# Valgrind significantly slows down the response time of the router, so use a
# long default timeout
TIMEOUT = float(os.environ.get("QPID_SYSTEM_TEST_TIMEOUT", 60))
def retry(function: Callable[[], bool], timeout: float = TIMEOUT, delay: float = .001, max_delay: float = 1):
    """Poll *function* until it returns a truthy value or *timeout* expires.

    The delay between attempts doubles on every retry, up to *max_delay*.
    Returns the truthy value returned by *function*, or None on timeout.
    """
    give_up_at = time.time() + timeout
    result = function()
    while not result:
        delay = retry_delay(give_up_at, delay, max_delay)
        if delay is None:
            return None
        result = function()
    return result
def retry_exception(function, timeout=TIMEOUT, delay=.001, max_delay=1, exception_test=None):
    """Call *function* until it returns without raising, or *timeout* expires.

    The delay between attempts doubles on every retry, up to *max_delay*.
    *exception_test*, if given, is called with each exception raised by
    *function*; it may itself raise to terminate the retry loop early.
    Returns whatever *function* returns on its first clean call; re-raises
    the last exception from *function* once the timeout is exceeded.
    """
    give_up_at = time.time() + timeout
    while True:
        try:
            outcome = function()
        except Exception as exc:  # pylint: disable=broad-except
            if exception_test:
                exception_test(exc)
            delay = retry_delay(give_up_at, delay, max_delay)
            if delay is None:
                raise
        else:
            return outcome
def get_local_host_socket(socket_address_family='IPv4'):
    """Return a (socket, loopback-host) pair for the given address family.

    :param socket_address_family: 'IPv4' or 'IPv6'
    :return: tuple of a fresh unconnected TCP socket and the matching
        loopback address string
    :raises ValueError: if the address family is not recognized
    """
    if socket_address_family == 'IPv4':
        return socket.socket(socket.AF_INET, socket.SOCK_STREAM), '127.0.0.1'
    if socket_address_family == 'IPv6':
        return socket.socket(socket.AF_INET6, socket.SOCK_STREAM), '::1'
    # BUGFIX: an unknown family previously fell through to an
    # UnboundLocalError at "return s, host"; fail fast with a clear error.
    raise ValueError("unknown socket_address_family: %r" % (socket_address_family,))
def check_port_refuses_connection(port, socket_address_family='IPv4'):
    """Return True if connecting to the local port yields 'connection refused'.

    A refused connection indicates nothing is currently listening there.
    Any other outcome (success or a different error) returns False.
    """
    sock, host = get_local_host_socket(socket_address_family)
    try:
        sock.connect((host, port))
        return False
    except OSError as exc:
        return exc.errno == errno.ECONNREFUSED
    finally:
        sock.close()
def check_port_permits_binding(port, socket_address_family='IPv4'):
    """Return True if binding a local socket to the port succeeds."""
    sock, _ = get_local_host_socket(socket_address_family)
    try:
        # SO_REUSEADDR so that follow-up binders are not blocked by sockets
        # lingering in TIME_WAIT.
        sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
        sock.bind(("", port))
        return True
    except OSError:
        return False
    finally:
        sock.close()
def is_port_available(port, socket_address_family='IPv4'):
    """Return True if a new server will be able to bind to the port.

    The port must both refuse connections (nothing listening on it) and
    permit binding (not otherwise reserved by the OS).
    """
    if not check_port_refuses_connection(port, socket_address_family):
        return False
    return check_port_permits_binding(port, socket_address_family)
def wait_port(port, socket_address_family='IPv4', **retry_kwargs):
    """Wait up to timeout for the local port to be connectable.

    Takes the same keyword arguments as retry to control the timeout.
    :raises Exception: on timeout, reporting the host and port that failed.
    """
    host = None

    def check(e):
        """Only retry while the error is 'connection refused'."""
        if not isinstance(e, socket.error) or not e.errno == errno.ECONNREFUSED:
            raise

    def connect():
        # macOS gives EINVAL for all connection attempts after an ECONNREFUSED,
        # and POSIX leaves a failed socket's state unspecified
        # (man 3 connect: "If connect() fails, the state of the socket is
        # unspecified. [...]"), so create a fresh socket for every attempt.
        nonlocal host  # BUGFIX: was a local; the timeout message printed "host None"
        s, host = get_local_host_socket(socket_address_family)
        try:
            s.connect((host, port))
        finally:
            s.close()

    try:
        retry_exception(connect, exception_test=check, **retry_kwargs)
    except Exception as e:
        raise Exception("wait_port timeout on host %s port %s: %s" % (host, port, e))
def wait_ports(ports, **retry_kwargs):
    """Wait up to timeout for every port to be connectable.

    :param ports: mapping of port number -> socket address family string
    Takes the same keyword arguments as retry to control the timeout.
    """
    for port_number, family in ports.items():
        wait_port(port=port_number, socket_address_family=family, **retry_kwargs)
def message(**properties):
    """Convenience constructor: build a proton.Message with attributes set.

    Each keyword is applied as a Message attribute; reading the attribute
    first ensures an invalid name raises instead of silently creating a
    brand-new attribute on the object.
    """
    msg = Message()
    for attr, val in properties.items():
        getattr(msg, attr)  # Raise exception if not a valid message attribute.
        setattr(msg, attr, val)
    return msg
def skip_test_in_ci(environment_var):
    """Return True when *environment_var* is set to a truthy marker value.

    Recognized truthy values (case-insensitive): 'true', '1', 't', 'y', 'yes'.
    Returns False when the variable is unset or holds any other value.
    """
    value = os.environ.get(environment_var)
    return value is not None and value.lower() in ('true', '1', 't', 'y', 'yes')
class Process(subprocess.Popen):
    """
    Popen that can be torn down at the end of a TestCase and stores its output.

    stdout/stderr are captured to a uniquely named file (<name>-<n>.out) in
    the current working directory; the command line and pid are recorded in a
    matching .cmd file.
    """

    # Expected states of a Process at teardown
    RUNNING = -1    # Still running
    EXIT_OK = 0     # Exit status 0
    EXIT_FAIL = 1   # Exit status 1

    # Class-wide counter used to make output-file names unique
    unique_id = 0

    @classmethod
    def unique(cls, name):
        # Return "<name>-<n>" so concurrent processes with the same base name
        # get distinct output files.
        cls.unique_id += 1
        return "%s-%s" % (name, cls.unique_id)

    def __init__(self, args, name=None, expect=EXIT_OK, **kwargs):
        """
        Takes same arguments as subprocess.Popen. Some additional/special args:
        @param expect: Raise error if process status not as expected at end of test:
            L{RUNNING} - expect still running.
            L{EXIT_OK} - expect process to have terminated with 0 exit status.
            L{EXIT_FAIL} - expect process to have terminated with exit status 1.
            integer - expected return code
        @keyword stdout: Defaults to the file name+".out"
        @keyword stderr: Defaults to be the same as stdout
        """
        self.name = name or os.path.basename(args[0])
        self.args = args
        self.expect = expect
        # Remember the launch directory: teardown may run after a chdir.
        self.outdir = os.getcwd()
        self.outfile = os.path.abspath(self.unique(self.name))
        self.torndown = False
        with open(self.outfile + '.out', 'w') as out:
            kwargs.setdefault('stdout', out)
            kwargs.setdefault('stderr', subprocess.STDOUT)
            try:
                super(Process, self).__init__(args, **kwargs)
                # Record the exact command line and pid for post-mortem use.
                with open(self.outfile + '.cmd', 'w') as f:
                    f.write("%s\npid=%s\n" % (' '.join(args), self.pid))
            except Exception as e:
                raise Exception("subprocess.Popen(%s, %s) failed: %s: %s" %
                                (args, kwargs, type(e).__name__, e))

    def assert_running(self):
        """Assert that the process is still running"""
        assert self.poll() is None, "%s: exited" % ' '.join(self.args)

    def teardown(self):
        """Check process status and stop the process if necessary.

        Raises RuntimeError (with the captured output attached) when the
        process state does not match the `expect` value given at creation.
        Idempotent: subsequent calls are no-ops.
        """
        if self.torndown:
            return
        self.torndown = True

        def error(msg):
            # Raise with command line, .cmd path and full captured output
            # included for diagnosis.
            with open(self.outfile + '.out') as f:
                raise RuntimeError("Process %s error: %s\n%s\n%s\n>>>>\n%s<<<<" % (
                    self.pid, msg, ' '.join(self.args),
                    self.outfile + '.cmd', f.read()))

        status = self.poll()
        if status is None:              # Still running
            self.terminate()
            if self.expect is not None and self.expect != Process.RUNNING:
                error("still running")
            self.expect = 0             # Expect clean exit after terminate
        # Reap the process (also collects an already-exited child).
        status = self.wait()
        if self.expect is not None and self.expect != status:
            error("exit code %s, expected %s" % (status, self.expect))
class Config:
    """Base class for configuration objects.

    Subclasses render their content via str(); write() persists that
    rendering as a configuration file.
    """

    def write(self, name, suffix=".conf"):
        """Write str(self) to the file *name* + *suffix*; return that path."""
        path = name + suffix
        with open(path, 'w') as out:
            out.write(str(self))
        return path
class HttpServer(Process):
    """An HTTP server subprocess.

    Defaults to expect=Process.RUNNING: servers are expected to stay up
    until torn down at the end of the test.
    """

    def __init__(self, args, name=None, expect=Process.RUNNING):
        super().__init__(args, name=name, expect=expect)
class Http2Server(HttpServer):
    """A HTTP2 Server that will respond to requests made via the router."""

    def __init__(self, name=None, listen_port=None, wait=True,
                 perform_teardown=True, cl_args=None,
                 server_file=None,
                 expect=Process.RUNNING):
        self.name = name
        self.listen_port = listen_port
        # Single listener; presumably IPv4 only — the family map feeds wait_ports().
        self.ports_family = {self.listen_port: 'IPv4'}
        self.cl_args = cl_args
        self.perform_teardown = perform_teardown
        self.server_file = server_file
        self._wait_ready = False
        # Run the server script (located next to this module) with the
        # current interpreter, forwarding any extra command-line arguments.
        script = os.path.join(os.path.dirname(os.path.abspath(__file__)), self.server_file)
        self.args = [sys.executable, script]
        if self.cl_args:
            self.args += self.cl_args
        super(Http2Server, self).__init__(self.args, name=name, expect=expect)
        if wait:
            self.wait_ready()

    def wait_ready(self, **retry_kwargs):
        """Wait (at most once) for the server's ports to accept connections."""
        if self._wait_ready:
            return
        self._wait_ready = True
        self.wait_ports(**retry_kwargs)

    def wait_ports(self, **retry_kwargs):
        """Block until every listener port is connectable."""
        wait_ports(self.ports_family, **retry_kwargs)
class Qdrouterd(Process):
    """Run a Qpid Dispatch Router Daemon"""

    class Config(list, Config):  # type: ignore[misc]  # Cannot resolve name "Config" (possible cyclic definition) # mypy#10958
        """
        A router configuration.
        The Config class is a list of tuples in the following format:
            [ ('section-name', {attribute-map}), ...]
        where attribute-map is a dictionary of key+value pairs. Key is an
        attribute name (string), value can be any of [scalar | string | dict]
        When written to a configuration file to be loaded by the router:
            o) there is no ":' between the section-name and the opening brace
            o) attribute keys are separated by a ":" from their values
            o) attribute values that are scalar or string follow the ":" on the
               same line.
            o) attribute values do not have trailing commas
            o) The section-name and attribute keywords are written
               without enclosing quotes
            o) string type attribute values are not enclosed in quotes
            o) attribute values of type dict are written in their JSON representation.
        Fills in some default values automatically, see Qdrouterd.DEFAULTS
        """

        # Default attributes merged into matching sections by defaults()
        DEFAULTS = {
            'listener': {'host': '0.0.0.0', 'saslMechanisms': 'ANONYMOUS', 'idleTimeoutSeconds': '120',
                         'authenticatePeer': 'no', 'role': 'normal'},
            'connector': {'host': '127.0.0.1', 'saslMechanisms': 'ANONYMOUS', 'idleTimeoutSeconds': '120'},
            'router': {'mode': 'standalone', 'id': 'QDR'}
        }

        def sections(self, name):
            """Return list of sections named name"""
            return [p for n, p in self if n == name]

        @property
        def router_id(self): return self.sections("router")[0]["id"]

        def defaults(self):
            """Fill in default values in configuration"""
            for name, props in self:
                if name in Qdrouterd.Config.DEFAULTS:
                    for n, p in Qdrouterd.Config.DEFAULTS[name].items():
                        props.setdefault(n, p)

        def __str__(self):
            """Generate config file content. Calls defaults() first."""
            def tabs(level):
                # Indentation string for the given nesting level.
                if level:
                    return "    " * level
                return ""

            def value(item, level):
                # Render one attribute value: dicts become brace-enclosed
                # maps with JSON keys/values; everything else is verbatim.
                if isinstance(item, dict):
                    result = "{\n"
                    result += "".join(["%s%s: %s,\n" % (tabs(level + 1),
                                                        json.dumps(k),
                                                        json.dumps(v))
                                       for k, v in item.items()])
                    result += "%s}" % tabs(level)
                    return result
                return "%s" % item

            def attributes(e, level):
                assert(isinstance(e, dict))
                # k = attribute name
                # v = string | scalar | dict
                return "".join(["%s%s: %s\n" % (tabs(level),
                                                k,
                                                value(v, level + 1))
                                for k, v in e.items()])

            self.defaults()
            # top level list of tuples ('section-name', dict)
            return "".join(["%s {\n%s}\n" % (n, attributes(p, 1)) for n, p in self])

    def __init__(self, name=None, config=Config(), pyinclude=None, wait=True,
                 perform_teardown=True, cl_args=None, expect=Process.RUNNING):
        """
        @param name: name used for output files, defaults to id from config.
        @param config: router configuration
        @keyword wait: wait for router to be ready (call self.wait_ready())
        """
        cl_args = cl_args or []
        self.config = copy(config)
        self.perform_teardown = perform_teardown
        if not name:
            name = self.config.router_id
        assert name
        # setup log and debug dump files
        self.dumpfile = os.path.abspath('%s-qddebug.txt' % name)
        self.config.sections('router')[0]['debugDumpFile'] = self.dumpfile
        default_log = [l for l in config if (l[0] == 'log' and l[1]['module'] == 'DEFAULT')]
        if not default_log:
            # No DEFAULT log section supplied: add one with verbose logging
            # so test failures can be diagnosed from the log file.
            self.logfile = "%s.log" % name
            config.append(
                ('log', {'module': 'DEFAULT', 'enable': 'trace+',
                         'includeSource': 'true', 'outputFile': self.logfile}))
        else:
            self.logfile = default_log[0][1].get('outputfile')
        args = ['qdrouterd', '-c', config.write(name)] + cl_args
        env_home = os.environ.get('QPID_DISPATCH_HOME')
        if pyinclude:
            args += ['-I', pyinclude]
        elif env_home:
            args += ['-I', os.path.join(env_home, 'python')]
        # QPID_DISPATCH_RUNNER may prefix the command, e.g. with valgrind.
        args = os.environ.get('QPID_DISPATCH_RUNNER', '').split() + args
        super(Qdrouterd, self).__init__(args, name=name, expect=expect)
        self._management = None
        self._wait_ready = False
        if wait:
            self.wait_ready()

    @property
    def management(self):
        """Return a management agent proxy for this router"""
        if not self._management:
            # Lazily connect on first use via the router's first listener.
            self._management = Node.connect(self.addresses[0], timeout=TIMEOUT)
        return self._management

    def teardown(self):
        """Stop the router and dump diagnostic state.

        Closes the management connection, runs the base Process teardown,
        reports any non-empty output/debug-dump files to stderr, and on
        teardown failure additionally dumps the command line, output and
        log-file tail before re-raising the original error.
        """
        if self._management:
            try:
                self._management.close()
            except:
                pass
            self._management = None
        if not self.perform_teardown:
            return

        teardown_exc = None
        try:
            super(Qdrouterd, self).teardown()
        except Exception as exc:
            # re-raise _after_ dumping all the state we can
            teardown_exc = exc

        def check_output_file(filename, description):
            """check router's debug dump file for anything interesting (should be
            empty) and dump it to stderr for perusal by organic lifeforms"""
            try:
                if os.stat(filename).st_size > 0:
                    with open(filename) as f:
                        sys.stderr.write("\nRouter %s %s:\n>>>>\n" %
                                         (self.config.router_id, description))
                        sys.stderr.write(f.read())
                        sys.stderr.write("\n<<<<\n")
                        sys.stderr.flush()
            except OSError:
                # failed to open file. This can happen when an individual test
                # spawns a temporary router (i.e. not created as part of the
                # TestCase setUpClass method) that gets cleaned up by the test.
                pass

        check_output_file(filename=self.outfile + '.out', description="output file")
        check_output_file(filename=self.dumpfile, description="debug dump file")

        if teardown_exc:
            # teardown failed - possible router crash?
            # dump extra stuff (command line, output, log)
            def tail_file(fname, line_count=50):
                """Tail a file to a list"""
                out = []
                with open(fname) as f:
                    line = f.readline()
                    while line:
                        out.append(line)
                        if len(out) > line_count:
                            out.pop(0)
                        line = f.readline()
                return out

            try:
                for fname in [("output", self.outfile + '.out'),
                              ("command", self.outfile + '.cmd')]:
                    with open(fname[1]) as f:
                        sys.stderr.write("\nRouter %s %s file:\n>>>>\n" %
                                         (self.config.router_id, fname[0]))
                        sys.stderr.write(f.read())
                        sys.stderr.write("\n<<<<\n")
                if self.logfile:
                    sys.stderr.write("\nRouter %s log file tail:\n>>>>\n" %
                                     self.config.router_id)
                    tail = tail_file(os.path.join(self.outdir, self.logfile))
                    for ln in tail:
                        sys.stderr.write("%s" % ln)
                    sys.stderr.write("\n<<<<\n")
                sys.stderr.flush()
            except OSError:
                # ignore file not found in case test never opens these
                pass
            raise teardown_exc

    @property
    def ports_family(self):
        """
        Return a dict of listener ports and the respective port family
        Example -
            { 23456: 'IPv4', 243455: 'IPv6' }
        """
        ports_fam = {}
        for l in self.config.sections('listener'):
            if l.get('socketAddressFamily'):
                ports_fam[l['port']] = l['socketAddressFamily']
            else:
                # socketAddressFamily defaults to IPv4 when unspecified
                ports_fam[l['port']] = 'IPv4'
        return ports_fam

    @property
    def ports(self):
        """Return list of configured ports for all listeners"""
        return [l['port'] for l in self.config.sections('listener')]

    def _cfg_2_host_port(self, c):
        # Render a listener/connector section as "host:port", wrapping
        # IPv6 addresses in brackets.
        host = c['host']
        port = c['port']
        socket_address_family = c.get('socketAddressFamily', 'IPv4')
        if socket_address_family == 'IPv6':
            return "[%s]:%s" % (host, port)
        elif socket_address_family == 'IPv4':
            return "%s:%s" % (host, port)
        raise Exception("Unknown socket address family: %s" % socket_address_family)

    @property
    def http_addresses(self):
        """Return http://host:port addresses for all http listeners"""
        cfg = self.config.sections('httpListener')
        return ["http://%s" % self._cfg_2_host_port(l) for l in cfg]

    @property
    def addresses(self):
        """Return amqp://host:port addresses for all listeners"""
        cfg = self.config.sections('listener')
        return ["amqp://%s" % self._cfg_2_host_port(l) for l in cfg]

    @property
    def connector_addresses(self):
        """Return list of amqp://host:port for all connectors"""
        cfg = self.config.sections('connector')
        return ["amqp://%s" % self._cfg_2_host_port(c) for c in cfg]

    @property
    def hostports(self):
        """Return host:port for all listeners"""
        return [self._cfg_2_host_port(l) for l in self.config.sections('listener')]

    def is_connected(self, port, host='127.0.0.1'):
        """Return True if the router has a connection to host:port,
        False otherwise (including on any management-query error)."""
        try:
            ret_val = False
            response = self.management.query(type="org.apache.qpid.dispatch.connection")
            index_host = response.attribute_names.index('host')
            for result in response.results:
                outs = '%s:%s' % (host, port)
                if result[index_host] == outs:
                    ret_val = True
            return ret_val
        except:
            return False

    def wait_address(self, address, subscribers=0, remotes=0, count=1, **retry_kwargs):
        """
        Wait for an address to be visible on the router.
        @keyword subscribers: Wait till subscriberCount >= subscribers
        @keyword remotes: Wait till remoteCount >= remotes
        @keyword count: Wait until >= count matching addresses are found
        @param retry_kwargs: keyword args for L{retry}
        """
        def check():
            # TODO aconway 2014-06-12: this should be a request by name, not a query.
            # Need to rationalize addresses in management attributes.
            # endswith check is because of M/L/R prefixes
            addrs = self.management.query(
                type='org.apache.qpid.dispatch.router.address',
                attribute_names=['name', 'subscriberCount', 'remoteCount']).get_entities()
            addrs = [a for a in addrs if a['name'].endswith(address)]
            return (len(addrs) >= count
                    and addrs[0]['subscriberCount'] >= subscribers
                    and addrs[0]['remoteCount'] >= remotes)
        assert retry(check, **retry_kwargs)

    def wait_address_unsubscribed(self, address, **retry_kwargs):
        """
        Block until address has no subscribers
        """
        a_type = 'org.apache.qpid.dispatch.router.address'

        def check():
            # Sum local and remote subscriber counts over every matching
            # address; zero means fully unsubscribed.
            addrs = self.management.query(a_type).get_dicts()
            rc = [a for a in addrs if a['name'].endswith(address)]
            count = 0
            for a in rc:
                count += a['subscriberCount']
                count += a['remoteCount']
            return count == 0
        assert retry(check, **retry_kwargs)

    def get_host(self, socket_address_family):
        """Map an address family string to the loopback address to connect to.

        Unknown/None families fall back to IPv4 loopback.
        """
        if socket_address_family == 'IPv4':
            return '127.0.0.1'
        elif socket_address_family == 'IPv6':
            return '::1'
        else:
            return '127.0.0.1'

    def wait_ports(self, **retry_kwargs):
        """Wait until all configured listener ports accept connections."""
        wait_ports(self.ports_family, **retry_kwargs)

    def wait_connectors(self, **retry_kwargs):
        """
        Wait for all connectors to be connected
        @param retry_kwargs: keyword args for L{retry}
        """
        for c in self.config.sections('connector'):
            assert retry(lambda: self.is_connected(port=c['port'], host=self.get_host(c.get('socketAddressFamily'))),
                         **retry_kwargs), "Port not connected %s" % c['port']

    def wait_ready(self, **retry_kwargs):
        """Wait for ports and connectors to be ready"""
        if not self._wait_ready:
            # Only perform the (potentially slow) checks once per router.
            self._wait_ready = True
            self.wait_ports(**retry_kwargs)
            self.wait_connectors(**retry_kwargs)
        return self

    def is_router_connected(self, router_id, **retry_kwargs):
        """Return truthy if this router is connected to router_id and that
        router answers a management query end-to-end, False otherwise."""
        node = None
        try:
            self.management.read(identity="router.node/%s" % router_id)
            # TODO aconway 2015-01-29: The above check should be enough, we
            # should not advertise a remote router in management till it is fully
            # connected. However we still get a race where the router is not
            # actually ready for traffic. Investigate.
            # Meantime the following actually tests send-thru to the router.
            node = Node.connect(self.addresses[0], router_id, timeout=1)
            return retry_exception(lambda: node.query('org.apache.qpid.dispatch.router'))
        except (proton.ConnectionException, NotFoundStatus, proton.utils.LinkDetached):
            # proton.ConnectionException: the router is not yet accepting connections
            # NotFoundStatus: the queried router is not yet connected
            # TODO(DISPATCH-2119) proton.utils.LinkDetached: should be removed, currently needed for DISPATCH-2033
            return False
        finally:
            if node:
                node.close()

    def wait_router_connected(self, router_id, **retry_kwargs):
        """Wait until is_router_connected(router_id) returns truthy."""
        retry(lambda: self.is_router_connected(router_id), **retry_kwargs)

    @property
    def logfile_path(self):
        """Absolute path of this router's DEFAULT log file."""
        return os.path.join(self.outdir, self.logfile)
class Tester:
    """Tools for use by TestCase
    - Create a directory for the test.
    - Utilities to create processes and servers, manage ports etc.
    - Clean up processes on teardown"""

    # Top level directory above any Tester directories.
    # CMake-generated configuration may be found here.
    top_dir = os.getcwd()

    # The root directory for Tester directories, under top_dir
    root_dir = os.path.abspath(__name__ + '.dir')

    def __init__(self, id):
        """
        @param id: module.class.method or False if no directory should be created
        """
        # One directory per dotted id component: root_dir/module/class/method
        self.directory = os.path.join(self.root_dir, *id.split('.')) if id else None
        self.cleanup_list = []

    def rmtree(self):
        """Remove old test class results directory"""
        if self.directory:
            # Remove the parent (class-level) directory so stale results from
            # sibling test methods are cleared as well.
            shutil.rmtree(os.path.dirname(self.directory), ignore_errors=True)

    def setup(self):
        """Called from test setup and class setup."""
        if self.directory:
            os.makedirs(self.directory)
            # NOTE: changes the process working directory for the test's
            # duration; Process output files land here.
            os.chdir(self.directory)

    def teardown(self):
        """Clean up (tear-down, stop or close) objects recorded via cleanup()"""
        # Tear down in reverse creation order: later objects may depend on
        # earlier ones.
        self.cleanup_list.reverse()
        errors = []
        for obj in self.cleanup_list:
            try:
                # Invoke the first cleanup-style method the object provides.
                for method in ["teardown", "tearDown", "stop", "close"]:
                    cleanup = getattr(obj, method, None)
                    if cleanup:
                        cleanup()
                        break
            except Exception as exc:
                # Collect errors so every object still gets a cleanup attempt.
                errors.append(exc)
        if errors:
            raise RuntimeError("Errors during teardown: \n\n%s" % "\n\n".join([str(e) for e in errors]))

    def cleanup(self, x):
        """Record object x for clean-up during tear-down.
        x should have one of the methods teardown, tearDown, stop or close"""
        self.cleanup_list.append(x)
        return x

    def popen(self, *args, **kwargs):
        """Start a Process that will be cleaned up on teardown"""
        return self.cleanup(Process(*args, **kwargs))

    def qdrouterd(self, *args, **kwargs):
        """Return a Qdrouterd that will be cleaned up on teardown"""
        return self.cleanup(Qdrouterd(*args, **kwargs))

    def http2server(self, *args, **kwargs):
        """Return an Http2Server that will be cleaned up on teardown"""
        return self.cleanup(Http2Server(*args, **kwargs))

    # Pool of candidate test ports; start from a random point to reduce the
    # chance of collisions between concurrent test runs.
    port_range = (20000, 30000)
    next_port = random.randint(port_range[0], port_range[1])

    @classmethod
    def get_port(cls, socket_address_family='IPv4'):
        """Get an unused port"""
        def advance():
            """Advance with wrap-around"""
            cls.next_port += 1
            if cls.next_port >= cls.port_range[1]:
                cls.next_port = cls.port_range[0]

        start = cls.next_port
        while not is_port_available(cls.next_port, socket_address_family):
            advance()
            if cls.next_port == start:
                # Wrapped all the way around: nothing free in the range.
                raise Exception("No available ports in range %s", cls.port_range)
        p = cls.next_port
        advance()
        return p
class TestCase(unittest.TestCase, Tester):  # pylint: disable=too-many-public-methods
    """A TestCase that sets up its own working directory and is also a Tester."""

    def __init__(self, methodName='runTest'):
        unittest.TestCase.__init__(self, methodName)
        # The Tester directory for this test is derived from its full id.
        Tester.__init__(self, self.id())

    @classmethod
    def setUpClass(cls):
        super().setUpClass()
        # Show full diffs on assertion failures.
        cls.maxDiff = None
        # Class-level Tester holds resources shared by every test method.
        cls.tester = Tester('.'.join([cls.__module__, cls.__name__, 'setUpClass']))
        cls.tester.rmtree()
        cls.tester.setup()

    @classmethod
    def tearDownClass(cls):
        if hasattr(cls, 'tester'):
            cls.tester.teardown()
            del cls.tester
        super().tearDownClass()

    def setUp(self):
        super().setUp()
        Tester.setup(self)

    def tearDown(self):
        Tester.teardown(self)
        super().tearDown()

    def assert_fair(self, seq):
        """Assert that no value in seq is less than half the average."""
        avg = sum(seq) / len(seq)
        for i in seq:
            assert i > avg / 2, "Work not fairly distributed: %s" % seq

    if not hasattr(unittest.TestCase, 'assertRegex'):
        def assertRegex(self, text, regexp, msg=None):
            """Backport for unittest versions lacking assertRegex."""
            assert re.search(regexp, text), msg or "Can't find %r in '%s'" % (regexp, text)

    if not hasattr(unittest.TestCase, 'assertNotRegex'):
        def assertNotRegex(self, text, regexp, msg=None):
            """Backport for unittest versions lacking assertNotRegex."""
            assert not re.search(regexp, text), msg or "Found %r in '%s'" % (regexp, text)
def main_module():
    """
    Return the module name of the __main__ module - i.e. the filename with the
    path and .py extension stripped. Useful to run the tests in the current
    file but using the proper module prefix instead of '__main__', as follows:
        if __name__ == '__main__':
            unittest.main(module=main_module())
    """
    filename = os.path.basename(__main__.__file__)
    module_name, _ = os.path.splitext(filename)
    return module_name
class AsyncTestReceiver(MessagingHandler):
"""
A simple receiver that runs in the background and queues any received
messages. Messages can be retrieved from this thread via the queue member.
:param wait: block the constructor until the link has been fully
established.
:param recover_link: restart on remote link detach
"""
Empty = Queue.Empty
class MyQueue(Queue.Queue):
def __init__(self, receiver):
self._async_receiver = receiver
super(AsyncTestReceiver.MyQueue, self).__init__()
def get(self, timeout=TIMEOUT):
self._async_receiver.num_queue_gets += 1
msg = super(AsyncTestReceiver.MyQueue, self).get(timeout=timeout)
self._async_receiver._logger.log("message %d get"
% self._async_receiver.num_queue_gets)
return msg
def put(self, msg):
self._async_receiver.num_queue_puts += 1
super(AsyncTestReceiver.MyQueue, self).put(msg)
self._async_receiver._logger.log("message %d put"
% self._async_receiver.num_queue_puts)
def __init__(self, address, source, conn_args=None, container_id=None,
wait=True, recover_link=False, msg_args=None, print_to_console=False):
if msg_args is None:
msg_args = {}
super(AsyncTestReceiver, self).__init__(**msg_args)
self.address = address
self.source = source
self.conn_args = conn_args
self.queue = AsyncTestReceiver.MyQueue(self)
self._conn = None
self._container = Container(self)
cid = container_id or "ATR-%s:%s" % (source, uuid.uuid4())
self._container.container_id = cid
self._ready = Event()
self._recover_link = recover_link
self._recover_count = 0
self._stop_thread = False
self._thread = Thread(target=self._main)
self._logger = Logger(title="AsyncTestReceiver %s" % cid, print_to_console=print_to_console)
self._thread.daemon = True
self._thread.start()
self.num_queue_puts = 0
self.num_queue_gets = 0
if wait and self._ready.wait(timeout=TIMEOUT) is False:
raise Exception("Timed out waiting for receiver start")
self.queue_stats = "self.num_queue_puts=%d, self.num_queue_gets=%d"
def get_queue_stats(self):
return self.queue_stats % (self.num_queue_puts, self.num_queue_gets)
def _main(self):
self._container.timeout = 0.5
self._container.start()
self._logger.log("AsyncTestReceiver Starting reactor")
while self._container.process():
if self._stop_thread:
if self._conn:
self._conn.close()
self._conn = None
self._logger.log("AsyncTestReceiver reactor thread done")
def on_connection_error(self, event):
self._logger.log("AsyncTestReceiver on_connection_error=%s" % event.connection.remote_condition.description)
def on_link_error(self, event):
self._logger.log("AsyncTestReceiver on_link_error=%s" % event.link.remote_condition.description)
def stop(self, timeout=TIMEOUT):
self._stop_thread = True
self._container.wakeup()
self._thread.join(timeout=TIMEOUT)
self._logger.log("thread done")
if self._thread.is_alive():
raise Exception("AsyncTestReceiver did not exit")
del self._conn
del self._container
def on_start(self, event):
kwargs = {'url': self.address}
if self.conn_args:
kwargs.update(self.conn_args)
self._conn = event.container.connect(**kwargs)
def on_connection_opened(self, event):
self._logger.log("Connection opened")
kwargs = {'source': self.source}
event.container.create_receiver(event.connection, **kwargs)
def on_link_opened(self, event):
self._logger.log("link opened")
self._ready.set()
def on_link_closing(self, event):
self._logger.log("link closing")
event.link.close()
if self._recover_link and not self._stop_thread:
# lesson learned: the generated link name will be the same as the
# old link (which is bad) so we specify a new one
self._recover_count += 1
kwargs = {'source': self.source,
'name': "%s:%s" % (event.link.name, self._recover_count)}