diff --git a/.github/workflows/ci.yaml b/.github/workflows/ci.yaml new file mode 100644 index 0000000000000000000000000000000000000000..bcadf4296fac774070bea002623b0b13e6ce43e7 --- /dev/null +++ b/.github/workflows/ci.yaml @@ -0,0 +1,14 @@ +--- +name: CI +# yamllint disable-line rule:truthy +on: [push] +jobs: + check: + runs-on: ubuntu-latest + steps: + - name: Checkout + uses: actions/checkout@v2 + - name: Install dependencies and check + run: | + sudo apt install flake8 + make check diff --git a/EBBR.yaml b/EBBR.yaml index b9d64e83031608da2e9144ec42895555d300c72c..ec55406e4d26bf2aa7aae2a0f2cbfc7c7aa1577f 100644 --- a/EBBR.yaml +++ b/EBBR.yaml @@ -1553,12 +1553,88 @@ update: result: IGNORED +- rule: Force false-positive time services set time failure as ignored. + SetTime does not retain year properly with U-Boot RTC emulation. + SetTime is required only when an RTC is present (EBBR Table 2.6 + EFI_RUNTIME_SERVICES Implementation Requirements). + criteria: + descr: Time Services Test + device path: No device path + group: RuntimeServicesTest + guid: 3B96A20C-2B1F-44EA-BAA9-F96FEE131D05 + log: Status - Success, TPL - 4 + name: RT.SetTime - Verify year after change + result: FAILURE + revision: '0x00010000' + set guid: 603B46BE-7E14-408A-93D7-DD9DEC732968 + sub set: SetTime_Func + test set: TimeServicesTest + update: + result: IGNORED + +- rule: Force false-positive time services set time failure (2) as ignored. + SetTime does not retain year properly with U-Boot RTC emulation. + SetTime is required only when an RTC is present (EBBR Table 2.6 + EFI_RUNTIME_SERVICES Implementation Requirements). + criteria: + descr: Time Services Test + device path: No device path + group: RuntimeServicesTest + guid: 2D5CDBE5-1055-4EF6-8E90-0C993F93F698 + log: Status - Success, TPL - 4 + name: RT.SetTime - Verify month after change + result: FAILURE + revision: '0x00010000' + set guid: 603B46BE-7E14-408A-93D7-DD9DEC732968 + sub set: SetTime_Func + test set: TimeServicesTest + update: + result: IGNORED + +- rule: Force false-positive time services set time failure (3) as ignored. + SetTime does not retain year properly with U-Boot RTC emulation. + SetTime is required only when an RTC is present (EBBR Table 2.6 + EFI_RUNTIME_SERVICES Implementation Requirements). + criteria: + descr: Time Services Test + device path: No device path + group: RuntimeServicesTest + guid: E664E1D7-B733-410D-BC53-D4CFF2464355 + log: Status - Success, TPL - 8 + name: RT.SetTime - Verify year after change + result: FAILURE + revision: '0x00010000' + set guid: 603B46BE-7E14-408A-93D7-DD9DEC732968 + sub set: SetTime_Func + test set: TimeServicesTest + update: + result: IGNORED + +- rule: Force false-positive time services set time failure (4) as ignored. + SetTime does not retain year properly with U-Boot RTC emulation. + SetTime is required only when an RTC is present (EBBR Table 2.6 + EFI_RUNTIME_SERVICES Implementation Requirements). 
+ criteria: + descr: Time Services Test + device path: No device path + group: RuntimeServicesTest + guid: DA4B19E7-F605-4FB9-A181-CCD335290BFE + log: Status - Success, TPL - 8 + name: RT.SetTime - Verify month after change + result: FAILURE + revision: '0x00010000' + set guid: 603B46BE-7E14-408A-93D7-DD9DEC732968 + sub set: SetTime_Func + test set: TimeServicesTest + update: + result: IGNORED + ############################################################################### # Known U-Boot limitations # ############################################################################### # We force the following tests result as `KNOWN U-BOOT LIMITATION'. They are -# genuine bugs, which much ultimately be fixed. We know about them; they are due +# genuine bugs, which must ultimately be fixed. We know about them; they are due # to U-Boot FAT filesystem implementation limitations and they do not prevent an # OS to boot. @@ -1864,6 +1940,109 @@ update: result: KNOWN U-BOOT LIMITATION +- rule: Force misc runtime services update capsule conformance failure as known + U-Boot limitation + UpdateCapsule implementation in U-Boot up to (at least) v2021.07 does not + check properly for all invalid inputs conditions + criteria: + descr: Misc Runtime Services Test + device path: No device path + group: RuntimeServicesTest + guid: 304F6960-79D0-4F17-8811-620FC6BDB0D4 + log: Status - Unsupported + name: RT.UpdateCapsule - invoke UpdateCapsule with invalid ScatterGatherList + result: FAILURE + revision: '0x00010000' + set guid: 7227CFAC-CA96-4680-9314-E3FBC60A2A61 + sub set: UpdateCapsule_Conf + test set: MiscRuntimeServicesTest + update: + result: KNOWN U-BOOT LIMITATION + +- rule: Force misc runtime services update capsule conformance failure (2) as + known U-Boot limitation + UpdateCapsule implementation in U-Boot up to (at least) v2021.07 does not + check properly for all invalid inputs conditions + criteria: + descr: Misc Runtime Services Test + device path: No device path + group: RuntimeServicesTest + guid: 18F86BF8-76CF-4225-8E3E-1B1F63432600 + log: Status - Unsupported + name: RT.UpdateCapsule - invoke UpdateCapsule with invalid Flags + result: FAILURE + revision: '0x00010000' + set guid: 7227CFAC-CA96-4680-9314-E3FBC60A2A61 + sub set: UpdateCapsule_Conf + test set: MiscRuntimeServicesTest + update: + result: KNOWN U-BOOT LIMITATION + +- rule: Force misc runtime services update capsule conformance failure (3) as + known U-Boot limitation + UpdateCapsule implementation in U-Boot up to (at least) v2021.07 does not + check properly for all invalid inputs conditions + criteria: + descr: Misc Runtime Services Test + device path: No device path + group: RuntimeServicesTest + guid: 145E4790-3342-4C8C-99F2-7F6500FD26E5 + log: Status - Unsupported + name: RT.UpdateCapsule - invoke UpdateCapsule with invalid Flags + result: FAILURE + revision: '0x00010000' + set guid: 7227CFAC-CA96-4680-9314-E3FBC60A2A61 + sub set: UpdateCapsule_Conf + test set: MiscRuntimeServicesTest + update: + result: KNOWN U-BOOT LIMITATION + +############################################################################### +# Known ACS limitations # +############################################################################### + +# We force the following tests result as `KNOWN ACS LIMITATION'. They are +# genuine bugs, which are fixed in a more recent version of the ACS or which +# must ultimately be fixed and which we know about. 
+ +- rule: Force boot services memory map test failure with ACS-IR + v21.05_0.8_BETA-0 as known ACS limitation + criteria: + descr: SbbrBootServices Tests + device path: No device path + group: BootServicesTest + guid: 58A44F17-760E-478B-BAEF-E20BFBCD7457 + log: MemoryMap Not Found + name: MemoryMap + result: FAILURE + revision: '0x00010000' + set guid: 8540C12D-5413-4A0B-A545-89A8E3C7DA4B + sub set: MemoryMap + test set: SbbrBootServices + update: + result: KNOWN ACS LIMITATION + +- rule: Force event services create event test failures with ACS-IR + v21.05_0.8_BETA-0 as known ACS limitation. + The ACS BETA 0 has an old version of the SCT, which erroneously creates + events with Tpl level high. + This is fixed in ACS BETA 1 and up. + criteria: + descr: Event, Timer, and Task Priority Services Test + device path: No device path + group: BootServicesTest + guid: EF317ADE-8668-456F-BED9-766056672DFF + log: NotifyTpl - 31 + name: BS.CreateEvent - Create event with all valid event type and supported + TPL. + result: FAILURE + revision: '0x00010000' + set guid: 83CF7F0B-C274-4918-AEA6-413DA9CF68CF + sub set: CreateEvent_Func + test set: EventTimerandPriorityServicesTest + update: + result: KNOWN ACS LIMITATION + ############################################################################### # Comments # ############################################################################### @@ -1976,23 +2155,6 @@ # We add comments to the following tests failures for which we know a likely # cause, to help triaging/debugging. -- rule: Add comments to boot services memory map test failure with ACS-IR - v21.05_0.8_BETA-0 - criteria: - descr: SbbrBootServices Tests - device path: No device path - group: BootServicesTest - guid: 58A44F17-760E-478B-BAEF-E20BFBCD7457 - log: MemoryMap Not Found - name: MemoryMap - result: FAILURE - revision: '0x00010000' - set guid: 8540C12D-5413-4A0B-A545-89A8E3C7DA4B - sub set: MemoryMap - test set: SbbrBootServices - update: - comments: This is a known issue with ACS-IR v21.05_0.8_BETA-0 - - rule: Add comments to failure due to missing ESP criteria: descr: Image Services Test diff --git a/Makefile b/Makefile index 5570e8d44724233cc6851fe55c327c0dc3340b24..128e1ed54a14e781f8ef475be7ef8ef71fe59136 100644 --- a/Makefile +++ b/Makefile @@ -6,7 +6,8 @@ all: doc help: @echo 'Targets:' @echo ' all' - @echo ' check Perform sanity checks (currently yamllint)' + @echo ' check Perform sanity checks' + @echo ' (currently yamllint, shellcheck and flake8)' @echo ' clean' @echo ' doc Generate README.pdf' @echo ' help Print this help.' @@ -18,6 +19,8 @@ doc: README.pdf check: yamllint . + shellcheck $$(find -name '*.sh') + flake8 clean: -rm -f README.pdf diff --git a/README.md b/README.md index 717e092fe42a2fb084bef57fb547f042d0f106fe..847c8f59c353c52ba9bb3101af06b0a7060dc4d6 100644 --- a/README.md +++ b/README.md @@ -23,12 +23,27 @@ INFO main: 0 dropped(s), 1 failure(s), 93 ignored(s), 106 known u-boot limitatio ``` ## Usage -Usage to generate a "result md" is such. `python3 parser.py ` -If you do no provided any command line arguments it will use `sample.ekl` and `sample.seq`. -The output filename can be specified with `--md `. + +Usage to generate a `result.md` is such: + +``` {.sh} +$ python3 parser.py +``` + +The output filename can be specified with the `--md` option: + +``` {.sh} +$ ./parser.py --md out.md ... +``` An online help is available with the `-h` option. 
+The generated `result md` can be easily converted to HTML using `pandoc` with: + +``` {.sh} +$ pandoc -oresult.html result.md +``` + ### Custom search For a custom Key:value search, the next two arguments *MUST be included together.* The program will search and display files that met that constraint, without the crosscheck, and display the names, guid, and key:value to the command line. `python3 parser.py ` @@ -131,6 +146,23 @@ $ ./parser.py \ --fields 'count,result,name' --uniq --print ... ``` +### Re-reading markdown results + +It is possible to re-read a previously generated markdown results file with the +`--input-md` option. This can be useful to perform further processing on the +tests. + +Example command to read a previously generated markdown: + +``` {.sh} +$ ./parser.py --input-md 'result.md' ... +``` + +* By default an output markdown is still generated, except in the case where the + input and output markdown have the same filename. +* The generated markdown results do not contain the "passed" tests. They can + therefore not be re-read. + ## Configuration file It is possible to use a configuration file with command line option `--config @@ -190,13 +222,13 @@ tests. ### Sample -A `sample.yaml` configuration file is provided as example, to use with the -`sample.ekl` and `sample.seq` files. +In the folder `sample`, a `sample.yaml` configuration file is provided as +example, to use with the `sample.ekl` and `sample.seq` files. Try it with: ``` {.sh} -$ ./parser.py --config sample.yaml ... +$ ./parser.py --config sample/sample.yaml sample/sample.ekl sample/sample.seq ``` ### Generating a configuration template @@ -232,6 +264,10 @@ override the result of some tests with the following ones: We know about them; they are due to U-Boot FAT filesystem implementation limitations and they do not prevent an OS to boot. + + `KNOWN ACS LIMITATION` Genuine bugs, which are fixed in a more recent version + of the ACS or which must ultimately be fixed and which + we know about. ------------------------------------------------------------------------------- Some of the rules just add a `comments` field with some help text. @@ -250,8 +286,18 @@ It is possible to convert this `README.md` into `README.pdf` with pandoc using ### Sanity checks -To perform sanity checks, run `make check`. For the moment this runs `yamllint`, -which will inspect all [YAML] files and report errors. See `make help`. +To perform sanity checks, run `make check`. It runs a number of checkers and +reports errors: + +------------------------------- + Checker Target +------------- ---------------- + `flake8` Python scripts. + `yamllint` [YAML] files. + `shellcheck` Shell scripts. +------------------------------- + +See `make help`. ### db structure: @@ -326,3 +372,19 @@ their test run according to the log file. We create artificial tests entries for those dropped tests sets, with the "result" fields set to "SKIPPED". + +## Contributed files + +A few contributed files are stored in sub-folders under `contrib` for +convenience: + +------------------------------------------------------------------------------- + Sub-folder Contents +--------------------- -------------------------------------------------------- + `v21.05_0.8_BETA-0/` EBBR sequence file from [ACS-IR v21.05_0.8_BETA-0]. + + `v21.07_0.9_BETA/` EBBR sequence files from [ACS-IR v21.07_0.9_BETA]. 
+------------------------------------------------------------------------------- + +[ACS-IR v21.05_0.8_BETA-0]: https://github.com/ARM-software/arm-systemready/tree/main/IR/prebuilt_images/v21.05_0.8_BETA-0 +[ACS-IR v21.07_0.9_BETA]: https://github.com/ARM-software/arm-systemready/tree/main/IR/prebuilt_images/v21.07_0.9_BETA diff --git a/contrib/v21.05_0.8_BETA-0/EBBR.seq b/contrib/v21.05_0.8_BETA-0/EBBR.seq new file mode 100644 index 0000000000000000000000000000000000000000..1f0cb8ac84aa9a5ec595a3778bb8930536c78bbf Binary files /dev/null and b/contrib/v21.05_0.8_BETA-0/EBBR.seq differ diff --git a/contrib/v21.07_0.9_BETA/EBBR.seq b/contrib/v21.07_0.9_BETA/EBBR.seq new file mode 100644 index 0000000000000000000000000000000000000000..63ee1b7a28cbe05585fe80b5960f30432b083711 Binary files /dev/null and b/contrib/v21.07_0.9_BETA/EBBR.seq differ diff --git a/contrib/v21.07_0.9_BETA/EBBR_manual.seq b/contrib/v21.07_0.9_BETA/EBBR_manual.seq new file mode 100644 index 0000000000000000000000000000000000000000..243314f8cfc0183f6d1cdec4e6ae232f0b9a1518 Binary files /dev/null and b/contrib/v21.07_0.9_BETA/EBBR_manual.seq differ diff --git a/parser.py b/parser.py index 7cd9789945c5e1c643036c4e28871bf687a9d1ce..dc2da208ca57f36c6b14e7381d7d516dbf843744 100755 --- a/parser.py +++ b/parser.py @@ -1,5 +1,5 @@ #!/usr/bin/env python3 -#SCT log parser +# SCT log parser import sys @@ -33,7 +33,7 @@ else: yaml_load_args = {} -#based loosley on https://stackoverflow.com/a/4391978 +# based loosley on https://stackoverflow.com/a/4391978 # returns a filtered dict of dicts that meet some Key-value pair. # I.E. key="result" value="FAILURE" def key_value_find(list_1, key, value): @@ -44,21 +44,25 @@ def key_value_find(list_1, key, value): return found -#Were we intrept test logs into test dicts +# Were we intrept test logs into test dicts def test_parser(string, current): test_list = { - "name": string[2], #FIXME:Sometimes, SCT has name and Description, - "result": string[1], - **current, - "guid": string[0], #FIXME:GUID's overlap - #"comment": string[-1], #FIXME:need to hash this out, sometime there is no comments - "log": ' '.join(string[3:]) + "name": string[2], + # FIXME:Sometimes, SCT has name and Description, + "result": string[1], + **current, + "guid": string[0], + # FIXME:GUID's overlap + # "comment": string[-1], # FIXME:need to hash this out, + # sometime there is no comments + "log": ' '.join(string[3:]) } return test_list -#Parse the ekl file, and create a map of the tests -def ekl_parser (file): - #create our "database" dict + +# Parse the ekl file, and create a map of the tests +def ekl_parser(file): + # create our "database" dict temp_list = list() # All tests are grouped by the "HEAD" line, which precedes them. current = {} @@ -122,14 +126,17 @@ def ekl_parser (file): 'device path': '|'.join(split_line[13:]), } - #FIXME:? EKL file has an inconsistent line structure, + # FIXME:? 
EKL file has an inconsistent line structure, # sometime we see a line that consits ' dump of GOP->I\n' - #easiest way to skip is check for blank space in the first char + # easiest way to skip is check for blank space in the first char elif split_line[0] != '' and split_line[0][0] != " ": try: - #deliminiate on ':' for tests - split_test = [new_string for old_string in split_line for new_string in old_string.split(':')] - #put the test into a dict, and then place that dict in another dict with GUID as key + # deliminiate on ':' for tests + split_test = [new_string for old_string in + split_line for new_string in + old_string.split(':')] + # put the test into a dict, and then place that dict in another + # dict with GUID as key tmp_dict = test_parser(split_test, current) temp_list.append(tmp_dict) n += 1 @@ -144,74 +151,85 @@ def ekl_parser (file): return temp_list -#Parse Seq file, used to tell which tests should run. + +# Parse Seq file, used to tell which tests should run. def seq_parser(file): temp_dict = list() - lines=file.readlines() - magic=7 #a test in a seq file is 7 lines, if not mod7, something wrong.. - if len(lines)%magic != 0: + lines = file.readlines() + magic = 7 + # a test in a seq file is 7 lines, if not mod7, something wrong.. + if len(lines) % magic != 0: sys.exit("seqfile cut short, should be mod7") - #the utf-16 char makes this looping a bit harder, so we use x+(i) where i is next 0-6th - for x in range(0,len(lines),magic): #loop ever "7 lines" - #(x+0)[Test Case] - #(x+1)Revision=0x10000 - #(x+2)Guid=XXXXXXXX-XXXX-XXXX-XXXX-XXXXXXXXXXXX - #(x+3)Name=InstallAcpiTableFunction - #(x+4)Order=0xFFFFFFFF - #(x+5)Iterations=0xFFFFFFFF - #(x+6)(utf-16 char) - #currently only add tests that are supposed to run, should add all? - #0xFFFFFFFF in "Iterations" means the test is NOT supposed to run - if not "0xFFFFFFFF" in lines[x+5]: + # the utf-16 char makes this looping a bit harder, so we use x+(i) where i + # is next 0-6th + # loop ever "7 lines" + for x in range(0, len(lines), magic): + # (x+0)[Test Case] + # (x+1)Revision=0x10000 + # (x+2)Guid=XXXXXXXX-XXXX-XXXX-XXXX-XXXXXXXXXXXX + # (x+3)Name=InstallAcpiTableFunction + # (x+4)Order=0xFFFFFFFF + # (x+5)Iterations=0xFFFFFFFF + # (x+6)(utf-16 char) + # currently only add tests that are supposed to run, should add all? + # 0xFFFFFFFF in "Iterations" means the test is NOT supposed to run + if "0xFFFFFFFF" not in lines[x + 5]: seq_dict = { - "name": lines[x+3][5:-1],#from after "Name=" to end (5char long) - "guid": lines[x+2][5:-1],#from after"Guid=" to the end, (5char long) - "Iteration": lines[x+5][11:-1],#from after "Iterations=" (11char long) - "rev": lines[x+1][9:-1],#from after "Revision=" (9char long) - "Order": lines[x+4][6:-1]#from after "Order=" (6char long) + # from after "Name=" to end (5char long) + "name": lines[x + 3][5:-1], + # from after"Guid=" to the end, (5char long) + "guid": lines[x + 2][5:-1], + # from after "Iterations=" (11char long) + "Iteration": lines[x + 5][11:-1], + # from after "Revision=" (9char long) + "rev": lines[x + 1][9:-1], + # from after "Order=" (6char long) + "Order": lines[x + 4][6:-1] } - temp_dict.append(seq_dict) #put in a dict based on guid + # put in a dict based on guid + temp_dict.append(seq_dict) return temp_dict -#group items by key, and print by key -#we slowly iterate through the list, group and print groups -def key_tree_2_md(input_list,file,key): - #make a copy so we don't destroy the first list. 
- temp_list = input_list.copy() - while temp_list: - test_dict = temp_list.pop() - found, not_found = [test_dict],[] - #go through whole list looking for key match - while temp_list: - next_dict = temp_list.pop() - if next_dict[key] == test_dict[key]: #if match add to found - found.append(next_dict) - else: #else not found - not_found.append(next_dict) - temp_list = not_found #start over with found items removed - file.write("### " + test_dict[key]) - dict_2_md(found,file) - - - -#generic writer, takes a list of dicts and turns the dicts into an MD table. -def dict_2_md(input_list,file): + +# Print items by "group" +def key_tree_2_md(input_list, file): + h = {} + + # Bin by group + for t in input_list: + g = t['group'] + + if g not in h: + h[g] = [] + + h[g].append(t) + + # Print each group + for g in sorted(h.keys()): + file.write("### " + g) + dict_2_md(h[g], file) + + +# generic writer, takes a list of dicts and turns the dicts into an MD table. +def dict_2_md(input_list, file): if len(input_list) > 0: file.write("\n\n") - #create header for MD table using dict keys + k = input_list[0].keys() + # create header for MD table using dict keys temp_string1, temp_string2 = "|", "|" - for x in (input_list[0].keys()): + for x in k: temp_string1 += (x + "|") temp_string2 += ("---|") - file.write(temp_string1+"\n"+temp_string2+"\n") - #print each item from the dict into the table + file.write(temp_string1 + "\n" + temp_string2 + "\n") + # print each item from the dict into the table for x in input_list: test_string = "|" - for y in x.keys(): - test_string += (x[y] + "|") - file.write(test_string+'\n') - #seprate table from other items in MD + for y in k: + v = x[y] if y in x else '' + test_string += v + "|" + file.write(test_string + '\n') + # seprate table from other items in MD file.write("\n\n") @@ -482,6 +500,7 @@ def gen_template(cross_check, filename): # Print to stdout # The fields to write are supplied as a list +# We handle the case where not all fields are present for all records def do_print(cross_check, fields): logging.debug(f'Print (fields: {fields})') @@ -494,7 +513,7 @@ def do_print(cross_check, fields): for x in cross_check: for f in fm1: - w[f] = max(w[f], len(str(x[f]))) + w[f] = max(w[f], len(str(x[f]) if f in x else '')) # Second pass where we print lf = fields[len(fields) - 1] @@ -505,7 +524,7 @@ def do_print(cross_check, fields): for x in cross_check: print(' '.join([ - *map(lambda f: f"{x[f]:{w[f] if f in x else ''}}", fm1), + *map(lambda f: f"{x[f] if f in x else '':{w[f]}}", fm1), x[lf] if lf in x else ''])) @@ -578,6 +597,122 @@ def combine_dbs(db1, db2): return cross_check +# Read the .ekl log file and the .seq file and combine them into a single +# database, which we return. +def read_log_and_seq(log_file, seq_file): + # ekl file to open + # "database 1" all tests. + db1 = list() + logging.debug(f'Read {log_file}') + + # files are encoded in utf-16 + with open(log_file, "r", encoding="utf-16") as f: + db1 = ekl_parser(f.readlines()) + + logging.debug('{} test(s)'.format(len(db1))) + + # seq file to open + # "database 2" all test sets that should run + db2 = dict() + logging.debug(f'Read {seq_file}') + + # files are encoded in utf-16 + with open(seq_file, "r", encoding="utf-16") as f: + db2 = seq_parser(f) + + logging.debug('{} test set(s)'.format(len(db2))) + + # Produce a single cross_check database from our two db1 and db2 databases. 
+ return combine_dbs(db1, db2) + + +# generate MD summary +def gen_md(md, res_keys, bins): + logging.debug(f'Generate {md}') + + with open(md, 'w') as resultfile: + resultfile.write("# SCT Summary \n\n") + resultfile.write("| | |\n") + resultfile.write("|--|--|\n") + + # Loop on all the result values we found for the summary + for k in sorted(res_keys): + resultfile.write( + "|{}:|{}|\n".format(k.title(), len(bins[k]))) + + resultfile.write("\n\n") + + # Loop on all the result values we found (except PASS) for the sections + # listing the tests by group + n = 1 + res_keys_np = set(res_keys) + res_keys_np.remove('PASS') + + for k in sorted(res_keys_np): + resultfile.write("## {}. {} by group\n\n".format(n, k.title())) + key_tree_2_md(bins[k], resultfile) + n += 1 + + +# Read back results from a previously generated summary markdown file. +# From this, we re-create a database the best we can and we return it. +def read_md(input_md): + logging.debug(f'Read {input_md}') + tables = [] + + with open(input_md, 'r') as f: + t = None + + for i, line in enumerate(f): + line = line.rstrip() + + if re.match(r'^\|', line): + # Split the line. We need to take care of preserving special + # cases such as "Pci(0|0)" for example + line = re.sub(r'\((\w+)\|(\w+)\)', r'(\1%\2)', line) + x = line.split('|') + x = x[1:len(x) - 1] + x = [re.sub(r'%', '|', e) for e in x] + + if t is None: + t = [] + logging.debug(f'Table line {i + 1}, keys: {x}') + + t.append(x) + + elif t is not None: + tables.append(t) + t = None + + assert(t is None) + + # Remove summary table + assert(len(tables[0][0]) == 2) + del tables[0] + + # Transform tables lines to dicts and merge everything + cross_check = [] + + for t in tables: + # Save keys + keys = t.pop(0) + n = len(keys) + # Drop underlines + t.pop(0) + + # Convert lines + for i, x in enumerate(t): + assert(len(x) == n) + y = {} + + for j, k in enumerate(keys): + y[k] = x[j] + + cross_check.append(y) + + return cross_check + + if __name__ == '__main__': parser = argparse.ArgumentParser( description='Process SCT results.' @@ -604,6 +739,7 @@ if __name__ == '__main__': '--uniq', action='store_true', help='Collapse duplicates') parser.add_argument( '--print', action='store_true', help='Print results to stdout') + parser.add_argument('--input-md', help='Input .md filename') parser.add_argument('log_file', help='Input .ekl filename') parser.add_argument('seq_file', help='Input .seq filename') parser.add_argument('find_key', nargs='?', help='Search key') @@ -624,26 +760,15 @@ if __name__ == '__main__': format='%(levelname)s %(funcName)s: %(message)s', level=logging.DEBUG if args.debug else logging.INFO) - #Command line argument 1, ekl file to open - db1 = list() #"database 1" all tests. - logging.debug(f'Read {args.log_file}') - - with open(args.log_file,"r",encoding="utf-16") as f: #files are encoded in utf-16 - db1 = ekl_parser(f.readlines()) - - logging.debug('{} test(s)'.format(len(db1))) - - #Command line argument 2, seq file to open - db2 = dict() #"database 2" all test sets that should run - logging.debug(f'Read {args.seq_file}') - - with open(args.seq_file,"r",encoding="utf-16") as f: #files are encoded in utf-16 - db2 = seq_parser(f) - - logging.debug('{} test set(s)'.format(len(db2))) + if args.input_md is not None: + cross_check = read_md(args.input_md) + else: + # Command line argument 1 is the ekl file to open. + # Command line argument 2 is the seq file to open. + # Read both and combine them into a single cross_check database. 
+ cross_check = read_log_and_seq(args.log_file, args.seq_file) - # Produce a single cross_check database from our two db1 and db2 databases. - cross_check = combine_dbs(db1, db2) + logging.debug('{} combined test(s)'.format(len(cross_check))) # Take configuration file into account. This can perform transformations on # the tests results. @@ -660,7 +785,8 @@ if __name__ == '__main__': # search for failures, warnings, passes & others # We detect all present keys in additions to the expected ones. This is - # handy with config rules overriding the result field with arbitrary values. + # handy with config rules overriding the result field + # with arbitrary values. res_keys = set(['DROPPED', 'FAILURE', 'WARNING', 'PASS']) for x in cross_check: @@ -680,30 +806,10 @@ if __name__ == '__main__': logging.info(', '.join(s)) # generate MD summary - logging.debug(f'Generate {args.md}') - - with open(args.md, 'w') as resultfile: - resultfile.write("# SCT Summary \n\n") - resultfile.write("| | |\n") - resultfile.write("|--|--|\n") - - # Loop on all the result values we found for the summary - for k in sorted(res_keys): - resultfile.write( - "|{}:|{}|\n".format(k.title(), len(bins[k]))) - - resultfile.write("\n\n") - - # Loop on all the result values we found (except PASS) for the sections - # listing the tests by group - n = 1 - res_keys_np = set(res_keys) - res_keys_np.remove('PASS') - - for k in sorted(res_keys_np): - resultfile.write("## {}. {} by group\n\n".format(n, k.title())) - key_tree_2_md(bins[k], resultfile, "group") - n += 1 + # As a special case, we skip generation when we are reading from a markdown + # summary, which has the same name as the output. + if args.input_md is None or args.input_md != args.md: + gen_md(args.md, res_keys, bins) # Generate yaml config template if requested if 'template' in args and args.template is not None: @@ -740,9 +846,9 @@ if __name__ == '__main__': # command line argument 3&4, key are to support a key & value search. # these will be displayed in CLI if args.find_key is not None and args.find_value is not None: - found = key_value_find(db1, args.find_key, args.find_value) - #print the dict - print("found:",len(found),"items with search constraints") + found = key_value_find(cross_check, args.find_key, args.find_value) + # print the dict + print("found:", len(found), "items with search constraints") for x in found: print( x["guid"], ":", x["name"], "with", args.find_key, ":", diff --git a/sample.ekl b/sample/sample.ekl similarity index 100% rename from sample.ekl rename to sample/sample.ekl diff --git a/sample.seq b/sample/sample.seq similarity index 100% rename from sample.seq rename to sample/sample.seq diff --git a/sample.yaml b/sample/sample.yaml similarity index 100% rename from sample.yaml rename to sample/sample.yaml
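As a rough end-to-end sketch of the new `--input-md` flow introduced above (an
illustration only: the output filename is arbitrary, and the positional
`.ekl`/`.seq` arguments are still required by the command line parser even
though they are ignored when `--input-md` is given):

``` {.sh}
$ ./parser.py --md result.md sample/sample.ekl sample/sample.seq
$ ./parser.py --input-md result.md --print --fields 'result,group,name' \
      sample/sample.ekl sample/sample.seq
```

Whether the second command also rewrites `result.md` depends on the `--md`
setting; generation is skipped only when the input and output markdown
filenames match.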