Coverage for CIResults/run_import.py: 89%

567 statements  


from django.utils.functional import cached_property
from django.core.exceptions import ValidationError
from django.core.validators import URLValidator
from django.db import transaction, connection
from django.db.models import Q
from django.utils import timezone

from CIResults.models import TextStatus, TestResult, TestsuiteRun, RunConfig, RunConfigTag
from CIResults.models import Machine, Test, TestSuite, Build
from CIResults.models import UnknownFailure, KnownFailure, Issue
from CIResults.models import IssueFilterAssociated, RunFilterStatistic

from collections import defaultdict
import configparser
import traceback
import datetime
import copy
import pytz
import time
import sys
import os

cur_dir = os.path.dirname(os.path.realpath(__file__))
sys.path.append(os.path.join(cur_dir, 'piglit'))
from framework import backends  # noqa


def str_to_list(string, separator=' '):
    if string is None:
        return []
    else:
        return [v.strip() for v in string.split(separator) if len(v) > 0]
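# Illustrative examples (not part of the original module):
#   str_to_list("a  b c") == ["a", "b", "c"]
#   str_to_list(None) == []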


def validated_url(url):
    if url is None:
        return None

    try:
        URLValidator()(url)
        return url
    except ValidationError:
        return None
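# Illustrative examples (not part of the original module):
#   validated_url("https://example.com") == "https://example.com"
#   validated_url("not a url") is None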


class TestsuiteTestResult:
    def __init__(self, name, status, start_time, duration, command=None,
                 stdout=None, stderr=None, dmesg=None, url=None):
        self._name = name
        self._status = status
        self._start_time = start_time
        self._duration = duration
        self._command = command
        self._stdout = stdout
        self._stderr = stderr
        self._dmesg = dmesg
        self._url = url

    @property
    def name(self):
        return self._name

    @property
    def status(self):
        return self._status

    @property
    def start_time(self):
        return self._start_time

    @property
    def duration(self):
        return self._duration

    @property
    def command(self):
        return self._command

    @property
    def stdout(self):
        return self._stdout

    @property
    def stderr(self):
        return self._stderr

    @property
    def dmesg(self):
        return self._dmesg

    @property
    def url(self):
        return self._url


class TestsuiteRunResults:
    def __init__(self, testsuite, machine_name, run_id, test_results, start_time, duration):
        self._testsuite = testsuite
        self._machine_name = machine_name
        self._run_id = run_id
        self._test_results = test_results
        self._start_time = start_time
        self._duration = duration

    @classmethod
    def __result_url__(cls, testsuite, run_id, machine_name, test_name):
        # Generate the test result's external URL
        url_pattern = testsuite.result_url_pattern
        url = url_pattern.format(runconfig=testsuite.runconfig.name,
                                 testsuite_build=testsuite.build,
                                 run_id=run_id, test=test_name,
                                 machine=machine_name)
        return validated_url(url)
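    # Illustrative (hypothetical) result_url_pattern:
    #   "https://ci.example.com/{runconfig}/{testsuite_build}/{machine}/{run_id}/{test}"
    # Each placeholder is filled in by __result_url__() and the expanded URL is
    # then checked by validated_url().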

    @property
    def testsuite(self):
        return self._testsuite

    @property
    def machine_name(self):
        return self._machine_name

    @property
    def run_id(self):
        return self._run_id

    @property
    def test_results(self):
        return self._test_results

    @property
    def start_time(self):
        return self._start_time

    @property
    def duration(self):
        return self._duration

    @cached_property
    def tests_set(self):
        s = set()
        for test_result in self._test_results:
            s.add(test_result.name)
        return s

    @cached_property
    def statuses_set(self):
        s = set()
        for test_result in self._test_results:
            s.add(test_result.status)
        return s


class PiglitResult(TestsuiteRunResults):
    def __init__(self, testsuite, machine_name, run_id, dir_name):
        testresults = []

        try:
            results = backends.load(dir_name)
            test_duration_sum = datetime.timedelta()
            for test_name in results.tests:
                test = results.tests[test_name]

                url = self.__result_url__(testsuite, run_id, machine_name, test_name)
                start_time = datetime.datetime.fromtimestamp(test.time.start, tz=pytz.utc)
                duration = datetime.timedelta(seconds=test.time.total)
                test_duration_sum += duration
                testresults.append(TestsuiteTestResult(name=test_name, status=test.result,
                                                       start_time=start_time,
                                                       duration=duration,
                                                       command=test.command,
                                                       stdout=str(test.out),
                                                       stderr=str(test.err),
                                                       dmesg=test.dmesg,
                                                       url=url))

            start = datetime.datetime.fromtimestamp(results.time_elapsed.start, tz=pytz.utc)
            duration = datetime.timedelta(seconds=results.time_elapsed.total)

            # Make sure the total duration is at least as long as the sum of all the test executions
            if duration < test_duration_sum:
                duration = test_duration_sum
        except IndexError:
            start = datetime.datetime.fromtimestamp(0, tz=pytz.utc)
            duration = datetime.timedelta(seconds=0)

        super().__init__(testsuite, machine_name, run_id, testresults,
                         start, duration)


class JsonResult(TestsuiteRunResults):
    def __init__(self, testsuite, machine_name: str, run_id: int, test_results: list[TestsuiteTestResult]):
        test_run_duration_sum = datetime.timedelta()
        for test_result in test_results:
            test_run_duration_sum += test_result.duration

        start = datetime.datetime.fromtimestamp(0, tz=pytz.utc)
        super().__init__(testsuite, machine_name, run_id, test_results, start, test_run_duration_sum)
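# Illustrative (hypothetical) use: wrap results that were parsed out-of-band,
# assuming `tsp` is a TestsuiteResults using the "json" format:
#   parsed = [TestsuiteTestResult(name="some-test", status="pass", start_time=None,
#                                 duration=datetime.timedelta(seconds=1))]
#   run_results = JsonResult(tsp, "some-machine", 0, parsed)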


class TestsuiteResults:
    def __init__(self, runconfig, name, build, format, version, result_url_pattern):
        self._runconfig = runconfig
        self._name = name
        self._build = build
        self._format = format
        self._version = version
        self._result_url_pattern = result_url_pattern

        # Check if the database contains the build, and then fetch the TestSuite
        # associated with it
        build = Build.objects.get(name=build)
        self._db_object = TestSuite.objects.get(name=build.component)

        # Now check the result format
        if format == "piglit":
            if version != 1:
                msg = "The version {} of the testsuite result format '{}' is unsupported"
                raise ValueError(msg.format(version, format))
            self._result_type = PiglitResult
        elif format == "json":
            self._result_type = JsonResult
        else:
            raise ValueError("The testsuite result format '{}' is unsupported".format(format))

    @property
    def runconfig(self):
        return self._runconfig

    @property
    def name(self):
        return self._name

    @property
    def build(self):
        return self._build

    @property
    def format(self):
        return self._format

    @property
    def version(self):
        return self._version

    @property
    def result_url_pattern(self):
        return self._result_url_pattern

    @property
    def db_object(self):
        return self._db_object

    def read_results(self, machine, run_id, path):
        return self._result_type(self, machine, run_id, path)


class TestSuiteRunDef:
    def __to_int__(self, field_name, value):
        try:
            return int(value)
        except Exception:
            raise ValueError("The parameter {} '{}' should be an integer".format(field_name, value))

    def __init__(self, testsuite_build, results_format, results_format_version,
                 machine, ts_run_id, ts_run_path):
        self.testsuite_build = testsuite_build
        self.results_format = results_format
        self.results_format_version = self.__to_int__("results_format_version", results_format_version)
        self.machine = machine
        self.ts_run_id = self.__to_int__("ts_run_id", ts_run_id)
        self.ts_run_path = ts_run_path

        for field in ["testsuite_build", "results_format", "machine", "ts_run_id", "ts_run_path"]:
            if getattr(self, field) is None:
                raise ValueError("The parameter {} cannot be None".format(field))
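# Illustrative (hypothetical) definition of one testsuite run to import;
# integer-like strings are accepted and converted by __to_int__():
#   TestSuiteRunDef(testsuite_build="some-build", results_format="json",
#                   results_format_version="1", machine="some-machine",
#                   ts_run_id="0", ts_run_path="/path/to/results")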


# WARNING: This function is tailored for the purpose of adding results!
# DO NOT USE AS A LIGHTWEIGHT REPLACEMENT FOR Issue.update_statistics()
def issue_simple_stats_recomputing(issue, runconfig, stats_changes):
    # Iterate through all the filters associated with this issue and check if
    # a filter's statistics have changed from not covered/affected to
    # covered/affected. If so, and if the issue was not already
    # covered/affected, update the statistics by directly adding 1 to the
    # relevant stats field. Since we are only adding results, we can never be
    # in a situation where we need to subtract 1 from an issue.
    was_covered = False
    has_new_filter_covering = False
    was_affected = False
    has_new_filter_matched = False
    for issuefilter in issue.filters.all():
        prev, new = stats_changes.get(issuefilter, (None, None))

        # If we have no previous stats for the filter, just fake empty ones
        if prev is None:
            prev = RunFilterStatistic(filter=issuefilter, runconfig=runconfig,
                                      matched_count=0, covered_count=0)

        # Check if the issue was covered/affected for this runconfig before
        if prev.covered_count > 0:
            was_covered = True
        if prev.matched_count > 0:
            was_affected = True
            # OPTIMIZATION: The issue was already affected, so no changes in
            # statistics could come from adding more results
            return

        if new is not None:
            if prev.covered_count == 0 and new.covered_count > 0:
                has_new_filter_covering = True
            if prev.matched_count == 0 and new.matched_count > 0:
                has_new_filter_matched = True

    # Update the covered/affected count if necessary
    changed = False
    if not was_covered and has_new_filter_covering:
        issue.runconfigs_covered_count += 1
        changed = True

    if not was_affected and has_new_filter_matched:
        issue.runconfigs_affected_count += 1
        issue.last_seen = runconfig.added_on
        issue.last_seen_runconfig = runconfig
        changed = True

    if changed:
        issue.save()
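# Note: stats_changes maps an IssueFilter to a (previous, new) pair of
# RunFilterStatistic objects, either of which may be None; commit_to_db()
# below builds this mapping before calling this function.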


class RunConfigResults:
    def __error__(self, msg):
        raise ValueError(self._error_prefix + msg)

    def __init__(self, runconfig_dir=None, name=None, url=None,
                 result_url_pattern=None, environment=None, builds=None, tags=None,
                 results=None, temporary=False, parsed_results=None):
        self._run_results = []

        if runconfig_dir is not None:
            self.__parse_run_info__(runconfig_dir)
            if self._testsuites.get("CIRESULTS_TESTSUITE") is not None:
                # DEPRECATED: Do not use this mode, as it will be removed
                self.__load_results_single_testsuite__(runconfig_dir)
            else:
                self.__load_results_multiple_testsuites__(runconfig_dir)
        else:
            self._error_prefix = ""
            self._name = name
            self._url = url
            self._result_url_pattern = result_url_pattern
            self._environment = environment
            self._builds = builds if builds is not None else []
            self._tags = tags if tags is not None else []
            self._temporary = temporary

            if self._name is None:
                self.__error__("runconfig name unspecified")

            if results:
                self.__import_results_from_args__(results)
            elif parsed_results:
                self.__import_parsed_results__(parsed_results)

    def __import_results_from_args__(self, results):
        self._testsuites = dict()
        build_to_testsuite = dict()
        ts_runs = set()

        for r in results:
            # Check that the testsuite build is in the list of builds of the runconfig
            if r.testsuite_build not in self.builds:
                raise ValueError("The build named '{}' is not part of the list of "
                                 "builds of the runconfig".format(r.testsuite_build))

            # Get the testsuite associated with the build name, or create it
            ts_key = (r.testsuite_build, r.results_format, r.results_format_version)
            tsp = build_to_testsuite.get(ts_key)
            if tsp is None:
                try:
                    testsuite_name = Build.objects.get(name=r.testsuite_build).component.name

                    # Create the testsuite results object
                    tsp = TestsuiteResults(self, testsuite_name, r.testsuite_build, r.results_format,
                                           r.results_format_version, self._result_url_pattern)
                    self._testsuites[ts_key] = tsp

                    build_to_testsuite[ts_key] = tsp
                except Build.DoesNotExist:
                    raise ValueError("The build named '{}' does not exist".format(r.testsuite_build))

            # Check that the testsuite run has not been added already
            ts_run_key = (tsp.name, r.ts_run_id, self._name, r.machine)
            if ts_run_key in ts_runs:
                msg = "Trying to import {}'s run ID {} twice on the runconfig '{}' for the machine '{}'"
                raise ValueError(msg.format(tsp.name, r.ts_run_id, self._name, r.machine))
            else:
                ts_runs.add(ts_run_key)

            # Import the results
            try:
                self._run_results.append(tsp.read_results(r.machine, r.ts_run_id, r.ts_run_path))
            except FileNotFoundError:
                pass
            except Exception:
                traceback.print_exc()

    def __import_parsed_results__(self, results: list[TestsuiteRunResults]):
        self._run_results = results

    def __parse_run_info__(self, runconfig_dir):
        conf_path = os.path.join(runconfig_dir, "runconfig.ini")
        self._error_prefix = "The RunConfig file {} is invalid: ".format(conf_path)

        config = configparser.ConfigParser()
        config.read(conf_path)

        if not config.has_section("CIRESULTS_RUNCONFIG"):
            self.__error__("missing the section CIRESULTS_RUNCONFIG")

        # Parse the main section
        section = config["CIRESULTS_RUNCONFIG"]
        self._name = section.get('name')
        self._url = validated_url(section.get('url'))
        self._result_url_pattern = section.get('result_url_pattern', '')
        self._environment = section.get('environment')
        self._builds = str_to_list(section.get("builds"))
        self._tags = str_to_list(section.get("tags"))
        self._temporary = section.getboolean('temporary', False)

        # Check that, if CIRESULTS_TESTSUITE is set, no other section is present
        if "CIRESULTS_TESTSUITE" in config.sections() and len(config.sections()) > 2:
            msg = "If the section CIRESULTS_TESTSUITE exists, then no additional section/testsuite can be added"
            self.__error__(msg)

        # Parse the testsuite sections
        self._testsuites = dict()
        for section in config.sections():
            # Ignore the main section
            if section == "CIRESULTS_RUNCONFIG":
                continue

            build = config[section]['build']
            if build not in self._builds:
                msg = "The build '{}' of the testsuite '{}' is not found in the list of builds of the runconfig {}"
                self.__error__(msg.format(build, section, self._name))
            format = config[section]['format']
            version = config[section].getint('version', 1)
            result_url_pattern = config[section].get('result_url_pattern', '')
            self._testsuites[section] = TestsuiteResults(self, section, build,
                                                         format, version,
                                                         result_url_pattern)

        if self._name is None:
            self.__error__("runconfig name unspecified")
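    # Illustrative (hypothetical) runconfig.ini, matching the parser above:
    #
    #   [CIRESULTS_RUNCONFIG]
    #   name = some-runconfig
    #   url = https://ci.example.com/some-runconfig
    #   builds = build-1 build-2
    #   tags = nightly
    #
    #   [some-testsuite]
    #   build = build-1
    #   format = piglit
    #   version = 1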

    def __load_testsuite_results__(self, tsp, testsuite_path):
        # Since we have a valid testsuite, we can now continue and look for
        # all the machines that ran any test
        for machine in os.listdir(testsuite_path):
            machine_path = os.path.join(testsuite_path, machine)
            if os.path.isfile(machine_path):
                continue

            for ts_run_id in os.listdir(machine_path):
                ts_run_path = os.path.join(machine_path, ts_run_id)
                if os.path.isfile(ts_run_path):
                    continue

                # Convert the run_id to an integer, or ignore the result
                try:
                    ts_run_id = int(ts_run_id)
                except Exception:
                    print("RunConfigResults: testsuite run ID '{}' should be an integer".format(ts_run_id))
                    continue

                try:
                    self._run_results.append(tsp.read_results(machine, ts_run_id, ts_run_path))
                except FileNotFoundError:
                    pass
                except Exception:
                    traceback.print_exc()

    def __load_results_single_testsuite__(self, runconfig_dir):
        tsp = self._testsuites.get("CIRESULTS_TESTSUITE")
        self.__load_testsuite_results__(tsp, runconfig_dir)

    def __load_results_multiple_testsuites__(self, runconfig_dir):
        for testsuite_name in os.listdir(runconfig_dir):
            testsuite_path = os.path.join(runconfig_dir, testsuite_name)
            if os.path.isfile(testsuite_path):
                continue

            tsp = self._testsuites.get(testsuite_name)
            if tsp is None:
                print("Ignoring the testsuite '{}' because it is not listed in the runconfig file".format(
                    testsuite_name))
                continue

            self.__load_testsuite_results__(tsp, testsuite_path)
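    # Expected on-disk layout, implied by the two loaders above:
    #   <runconfig_dir>/<testsuite_name>/<machine>/<ts_run_id>/...
    # where <ts_run_id> must be an integer directory name holding the results.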

    @property
    def name(self):
        return self._name

    @property
    def url(self):
        return self._url

    @property
    def environment(self):
        return self._environment

    @property
    def builds(self):
        return self._builds

    @property
    def tags(self):
        return self._tags

    @property
    def temporary(self):
        return self._temporary

    @property
    def testsuites(self):
        return self._testsuites

    @cached_property
    def tests(self):
        tests = dict()
        for run in self._run_results:
            db_testsuite = run.testsuite.db_object
            if db_testsuite not in tests:
                tests[db_testsuite] = set()
            tests[db_testsuite] |= run.tests_set
        return tests

    @cached_property
    def machines(self):
        machines = set()
        for run in self._run_results:
            machines.add(run.machine_name)
        return machines

    @cached_property
    def text_statuses(self):
        statuses = dict()
        for run in self._run_results:
            db_testsuite = run.testsuite.db_object
            if db_testsuite not in statuses:
                statuses[db_testsuite] = set()
            statuses[db_testsuite] |= run.statuses_set
        return statuses

    def __preload_resources__(self, obj_manager, names):
        ret = dict()
        if isinstance(names, str):
            names = [names]
        for name in names or []:
            try:
                ret[name] = obj_manager.get(name=name)
            except Exception as e:
                raise ValueError("The object {} does not exist in the database".format(name)) from e
        return ret


    def __add_missing__(self, obj_type, obj_str, objs, key_field, args, filter=None):
        if filter is None:
            filter = {}

        # Fetch the current list of objects
        db_objs = set([getattr(o, key_field) for o in obj_type.objects.filter(**filter)])

        # Add the missing objects
        to_add = []
        for obj in (objs - db_objs):
            args[key_field] = obj
            to_add.append(obj_type(**args))
        if len(to_add) > 0:
            print("adding {} missing {}".format(len(to_add), obj_str))
            obj_type.objects.bulk_create(to_add)

        # Now fetch all the objects and index them, making sure we do not add
        # unnecessary objects
        ret = dict()
        for entry in obj_type.objects.filter(**filter):
            key = getattr(entry, key_field)
            if key in objs:
                ret[key] = entry
        return ret
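    # __add_missing__() returns a dict mapping each requested key_field value
    # to its model instance, letting callers resolve names to database objects
    # without extra queries.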


    def __ts_runs_to_dict__(self, runconfig):
        ret = dict()
        for r in TestsuiteRun.objects.filter(runconfig=runconfig).prefetch_related(
            "machine__tags", "runconfig__tags", "testsuite"
        ):
            ret[(r.testsuite.name, r.machine.name, r.run_id)] = r
        return ret


    @transaction.atomic
    def commit_to_db(self, new_machines_public=False, new_tests_public=False,
                     new_machines_vetted=False, new_tests_vetted=False,
                     new_statuses_vetted=False, quiet=False):
        now = timezone.now()

        # Pre-fetch all the resources
        db_builds = self.__preload_resources__(Build.objects, self._builds)
        db_tags = self.__preload_resources__(RunConfigTag.objects, self._tags)

        # Create the runconfig if it does not exist
        try:
            runconfig = RunConfig.objects.get(name=self._name)
            cur_builds = set(runconfig.builds.all())
        except RunConfig.DoesNotExist:
            # Create the run with the proper URL
            runconfig = RunConfig(name=self._name, url=self.url,
                                  environment=self.environment,
                                  temporary=self._temporary)
            runconfig.save()

            # Add it to all the tags
            for tag in db_tags.values():
                runconfig.tags.add(tag)

            # We have no current builds
            cur_builds = set()

        # Verify that we do not have two builds for the same component
        components = dict()
        for build in set(db_builds.values()) | cur_builds:
            if build.component not in components:
                components[build.component] = build
            else:
                msg = "ERROR: Two builds ({} and {}) cannot be from the same component ({})"
                self.__error__(msg.format(build, components[build.component], build.component))

        # Add all the new builds
        for new_build in set(db_builds.values()) - cur_builds:
            runconfig.builds.add(new_build)

        # Abort early if there is nothing else to do
        if len(self._run_results) == 0:
            print("No results to add, exiting...")
            return


        # Load all the existing testsuite runs on this runconfig
        db_ts_runs = dict()
        tsr_filter = TestsuiteRun.objects.filter(runconfig__name=self._name)
        for ts_run in tsr_filter.prefetch_related("machine", "testsuite"):
            if ts_run.machine.name not in db_ts_runs:
                db_ts_runs[ts_run.machine.name] = set()
            db_ts_runs[ts_run.machine.name].add((ts_run.testsuite.name, ts_run.run_id))

        # Add the missing machines, tests and statuses
        db_machines = self.__add_missing__(Machine, "machine(s)", self.machines,
                                           'name', {"public": new_machines_public,
                                                    "vetted_on": now if new_machines_vetted else None})
        db_tests = dict()
        for testsuite in self.tests:
            db_tests[testsuite.name] = self.__add_missing__(Test,
                                                            "test(s) ({})".format(testsuite),
                                                            self.tests[testsuite], 'name',
                                                            {"public": new_tests_public,
                                                             "testsuite": testsuite,
                                                             "vetted_on": now if new_tests_vetted else None},
                                                            filter={"testsuite": testsuite})

        # If the runconfig is non-temporary, mark it as the first runconfig of all its new tests
        if not self._temporary:
            cur_tests_ids = list()
            for ts in db_tests.values():
                cur_tests_ids.extend([t.id for t in ts.values()])
            Test.objects.filter(pk__in=cur_tests_ids, first_runconfig=None).update(first_runconfig=runconfig)

        db_statuses = dict()
        for testsuite in self.text_statuses:
            statuses = self.__add_missing__(TextStatus,
                                            "status(es) ({})".format(testsuite),
                                            self.text_statuses[testsuite],
                                            'name', {"testsuite": testsuite,
                                                     "vetted_on": now if new_statuses_vetted else None},
                                            filter={"testsuite": testsuite})
            db_statuses[testsuite.name] = statuses


        # Create all the ts_runs
        new_ts_runs = []
        to_add = []
        for run in self._run_results:
            # Ignore the runs we already have
            if run.machine_name in db_ts_runs and (run.testsuite.name, run.run_id) in db_ts_runs[run.machine_name]:
                continue

            # Create the ts_run object
            s = TestsuiteRun(testsuite=run.testsuite.db_object,
                             runconfig=runconfig, machine=db_machines[run.machine_name],
                             run_id=run.run_id, start=run.start_time, duration=run.duration)
            to_add.append(s)
            new_ts_runs.append(run)
        if len(to_add) > 0:
            print("adding {} testsuite runs".format(len(to_add)))
            TestsuiteRun.objects.bulk_create(to_add)
        ts_runs_db = self.__ts_runs_to_dict__(runconfig)

        # Create all the TestResults and find the failures
        new_results = []
        failures = []
        for run in new_ts_runs:
            db_ts_run = ts_runs_db[(run.testsuite.name, run.machine_name, run.run_id)]
            for result in run.test_results:
                ts = run.testsuite.db_object
                t = TestResult(test=db_tests[ts.name][result.name],
                               ts_run=db_ts_run,
                               status=db_statuses[ts.name][result.status],
                               start=result.start_time, duration=result.duration,
                               command=result.command,
                               stdout=result.stdout,
                               stderr=result.stderr,
                               dmesg=result.dmesg,
                               url=result.url)
                new_results.append(t)

                if ts.is_failure(t.status):
                    failures.append(t)
        if len(new_results) > 0:
            print("adding {} test results".format(len(new_results)))

            # We will need the primary keys later, so use bulk_create() only on
            # PostgreSQL (the other backends do not return primary keys on bulk inserts)
            if connection.vendor == "postgresql":
                TestResult.objects.bulk_create(new_results, batch_size=5000)
            else:
                for result in new_results:
                    result.save()


        # Fetch all the associated IssueFilters and their related issues
        db_filters = dict()
        active_ifas = IssueFilterAssociated.objects_ready_for_matching.filter(Q(deleted_on=None)).prefetch_related(
            "filter", "filter__tests__testsuite", "filter__statuses", "filter__statuses__testsuite"
        )
        for e in active_ifas:
            if e.filter not in db_filters:
                db_filters[e.filter] = list()
            db_filters[e.filter].append(e)

        # Map the failures
        start = time.time()
        known_failures = []
        unknown_failures = []

        # Get the filters' statistics already existing on this runconfig and lock them for update
        stats_changes = dict()  # stores results as a tuple (current, new)
        if not self.temporary:
            for stat in RunFilterStatistic.objects.select_for_update().filter(runconfig=runconfig):
                stats_changes[stat.filter] = (stat, None)


        for result in new_results:
            found = False
            for issuefilter in db_filters.keys():
                # Start matching the result to the current filter
                if issuefilter.covers(result):
                    # Get or create a statistics object for the current filter and runconfig
                    fs = stats_changes.get(issuefilter, (None, None))
                    if fs[1] is None:
                        if fs[0] is not None:
                            stats_changes[issuefilter] = fs = (fs[0], copy.copy(fs[0]))
                        else:
                            stats_changes[issuefilter] = fs = (None, RunFilterStatistic(filter=issuefilter,
                                                                                        runconfig=runconfig,
                                                                                        matched_count=0,
                                                                                        covered_count=0))

                    fs[1].covered_count += 1
                    if result not in failures:
                        continue

                    if issuefilter.matches(result, skip_cover_test=True):
                        fs[1].matched_count += 1
                        for ifa in db_filters[issuefilter]:
                            known_failures.append(KnownFailure(result=result,
                                                               matched_ifa=ifa))
                        found = True
            if result in failures and not found:
                unknown_failures.append(UnknownFailure(result=result))

        msg = "Found {} test failures ({} filters matched, {} failures left unmatched) in {:.2f} ms"
        print(msg.format(len(failures), len(known_failures), len(unknown_failures), (time.time() - start) * 1000))
        KnownFailure.objects.bulk_create(known_failures)
        UnknownFailure.objects.bulk_create(unknown_failures)


        # Create the statistics objects, but only if the run was not temporary
        if not self.temporary:
            # Create a new transaction, in case the creation of stats fails
            try:
                with transaction.atomic():
                    # Save all the stats (WARNING: some might already exist in the DB, hence the call to .save())
                    new_stats = [fs[1] for fs in stats_changes.values() if fs[1] is not None]
                    print("Updating the statistics of {} filters".format(len(new_stats)))
                    start = time.time()
                    for stat in new_stats:
                        try:
                            stat.save()
                        except Exception:
                            traceback.print_exc()
                    print("Filter statistics updated in {:.2f} ms".format((time.time() - start) * 1000))

                    # Get the list of issues that need to be updated because the filters'
                    # statistics got updated
                    issues = Issue.objects.select_for_update().filter(filters__in=stats_changes.keys())
                    print("Updating the statistics of {} issues".format(len(issues)))
                    start = time.time()
                    for issue in issues.prefetch_related('filters'):
                        issue_simple_stats_recomputing(issue, runconfig, stats_changes)
                    print("Issue statistics updated in {:.2f} ms".format((time.time() - start) * 1000))
            except Exception:
                traceback.print_exc()


        # Go through all the unknown failures
        if len(unknown_failures) > 0:
            # Fetch all archived IFAs that are less than 6 months old
            db_archived_filters = defaultdict(list)
            archived_threshold = now - datetime.timedelta(days=180)
            a_ifas = IssueFilterAssociated.objects.exclude(deleted_on=None).exclude(deleted_on__lt=archived_threshold)
            for e in a_ifas:
                db_archived_filters[e.filter].append(e)

            # Try to match the unknown failures with archived IFAs
            start = time.time()
            filters_matching = set()
            for failure in unknown_failures:
                for issuefilter in db_archived_filters.keys():
                    if issuefilter.matches(failure.result):
                        failure.matched_archived_ifas.add(*db_archived_filters[issuefilter])
                        filters_matching.add(issuefilter)

            msg = "Found {}/{} recently-archived filters matching some unknown failures in {:.2f} ms"
            print(msg.format(len(filters_matching), len(a_ifas), (time.time() - start) * 1000))
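

# Typical entry point (illustrative; the path is hypothetical):
#   RunConfigResults(runconfig_dir="/ci/results/some-runconfig").commit_to_db()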