From bdfc9dac71d87c0454e716a7ac04c4ad1c4ad0e3 Mon Sep 17 00:00:00 2001
From: Jordan Carlin
Date: Wed, 11 Dec 2024 23:20:10 -0800
Subject: [PATCH] Lots of f-strings

---
 bin/CacheSim.py      | 12 +++++-----
 bin/iterelf          | 11 ++++-----
 bin/regression-wally | 56 ++++++++++++++++++++++----------------------
 3 files changed, 39 insertions(+), 40 deletions(-)

diff --git a/bin/CacheSim.py b/bin/CacheSim.py
index 1da42c7ce..cb441b529 100755
--- a/bin/CacheSim.py
+++ b/bin/CacheSim.py
@@ -53,8 +53,8 @@ class CacheLine:
         self.dirty = False
 
     def __str__(self):
-        string = "(V: " + str(self.valid) + ", D: " + str(self.dirty)
-        string += ", Tag: " + str(hex(self.tag)) + ")"
+        string = f"(V: {self.valid}, D: {self.dirty}"
+        string += f", Tag: {hex(self.tag)})"
         return string
 
     def __repr__(self):
@@ -193,9 +193,9 @@ class Cache:
     def __str__(self):
         string = ""
         for i in range(self.numways):
-            string += "Way " + str(i) + ": "
+            string += f"Way {i}: "
             for line in self.ways[i]:
-                string += str(line) + ", "
+                string += f"{line}, "
             string += "\n\n"
         return string
 
@@ -285,13 +285,13 @@ def main(args):
                     atoms += 1
 
                 if not result == lninfo[2]:
-                    print("Result mismatch at address", lninfo[0]+ ". Wally:", lninfo[2]+", Sim:", result)
+                    print(f"Result mismatch at address {lninfo[0]}. Wally: {lninfo[2]}, Sim: {result}")
                     mismatches += 1
         if args.dist:
             percent_loads = str(round(100*loads/totalops))
             percent_stores = str(round(100*stores/totalops))
             percent_atoms = str(round(100*atoms/totalops))
-            print("This log had", percent_loads+"% loads,", percent_stores+"% stores, and", percent_atoms+"% atomic operations.")
+            print(f"This log had {percent_loads}% loads, {percent_stores}% stores, and {percent_atoms}% atomic operations.")
 
         if args.perf:
             ratio = round(hits/misses,3)
diff --git a/bin/iterelf b/bin/iterelf
index b24c9bff7..7be21d68a 100755
--- a/bin/iterelf
+++ b/bin/iterelf
@@ -30,7 +30,6 @@ def search_log_for_mismatches(logfile):
     greperr = "grep -H Error: " + logfile
     os.system(greperr)
     grepcmd = f"grep -a -e 'Mismatches : 0' '{logfile}' > /dev/null"
-#    print(" search_log_for_text invoking %s" % grepcmd)
     return os.system(grepcmd) == 0
 
 def run_test_case(elf):
@@ -47,14 +46,14 @@ def run_test_case(elf):
     # print("cmd = " + cmd)
     os.system(cmd)
     if search_log_for_mismatches(logfile):
-        print(f"{bcolors.OKGREEN}%s: Success{bcolors.ENDC}" % (cmd))
+        print(f"{bcolors.OKGREEN}{cmd}: Success{bcolors.ENDC}")
        return 0
     elif "WALLY-cbom-01" in elf:
         # Remove this when CBO instructions are modeled in ImperasDV
-        print(f"{bcolors.OKCYAN}%s: Expected mismatch because ImperasDV does not yet model cache for CBO instructions {bcolors.ENDC}" % (cmd))
+        print(f"{bcolors.OKCYAN}{cmd}: Expected mismatch because ImperasDV does not yet model cache for CBO instructions {bcolors.ENDC}")
         return 0
     else:
-        print(f"{bcolors.FAIL}%s: Failures detected in output{bcolors.ENDC}" % (cmd))
+        print(f"{bcolors.FAIL}{cmd}: Failures detected in output{bcolors.ENDC}")
         print(f" Check {logfile}")
         return 1
 
@@ -99,9 +98,9 @@ with Pool(processes=min(len(ElfList),multiprocessing.cpu_count(), ImperasDVLicen
             num_fail+=result.get(timeout=TIMEOUT_DUR)
         except MPTimeoutError:
             num_fail+=1
-            print(f"{bcolors.FAIL}%s: Timeout - runtime exceeded %d seconds{bcolors.ENDC}" % (elf, TIMEOUT_DUR))
+            print(f"{bcolors.FAIL}{elf}: Timeout - runtime exceeded {TIMEOUT_DUR} seconds{bcolors.ENDC}")
 
 if num_fail == 0:
     print(f"{bcolors.OKGREEN}SUCCESS! All tests ran without failures{bcolors.ENDC}")
 else:
-    print(f"{bcolors.FAIL}Completed %d tests with %d failures{bcolors.ENDC}" % (len(ElfList), num_fail))
+    print(f"{bcolors.FAIL}Completed {len(ElfList)} tests with {num_fail} failures{bcolors.ENDC}")
diff --git a/bin/regression-wally b/bin/regression-wally
index c77c278eb..60a40de48 100755
--- a/bin/regression-wally
+++ b/bin/regression-wally
@@ -264,21 +264,21 @@ class bcolors:
     UNDERLINE = '\033[4m'
 
 def addTests(tests, sim):
-    sim_logdir = WALLY+ "/sim/" + sim + "/logs/"
+    sim_logdir = f"{WALLY}/sim/{sim}/logs/"
     for test in tests:
         config = test[0]
         suites = test[1]
         if len(test) >= 3:
-            args = " --args " + " ".join(test[2])
+            args = f" --args {' '.join(test[2])}"
         else:
             args = ""
         if len(test) >= 4:
             gs = test[3]
         else:
             gs = "All tests ran without failures"
-        cmdPrefix="wsim --sim " + sim + " " + coverStr + " " + config
+        cmdPrefix=f"wsim --sim {sim} {coverStr} {config}"
         for t in suites:
-            sim_log = sim_logdir + config + "_" + t + ".log"
+            sim_log = f"{sim_logdir}{config}_{t}.log"
             if len(test) >= 5:
                 grepfile = sim_logdir + test[4]
             else:
@@ -286,7 +286,7 @@ def addTests(tests, sim):
             tc = TestCase(
                 name=t,
                 variant=config,
-                cmd=cmdPrefix + " " + t + args + " > " + sim_log,
+                cmd=f"{cmdPrefix} {t}{args} > {sim_log}",
                 grepstr=gs,
                 grepfile = grepfile)
             configs.append(tc)
@@ -294,27 +294,27 @@
 
 def addTestsByDir(testDir, config, sim, lockstepMode=0):
     if os.path.isdir(testDir):
-        sim_logdir = WALLY+ "/sim/" + sim + "/logs/"
+        sim_logdir = f"{WALLY}/sim/{sim}/logs/"
         if coverStr == "--fcov": # use --fcov in place of --lockstep
-            cmdPrefix="wsim --sim " + sim + " " + coverStr + " " + config
+            cmdPrefix=f"wsim --sim {sim} {coverStr} {config}"
             gs="Mismatches : 0"
             if ("cvw-arch-verif/tests" in testDir and not "priv" in testDir):
                 fileEnd = "ALL.elf"
             else:
                 fileEnd = ".elf"
         elif coverStr == "--ccov":
-            cmdPrefix="wsim --sim " + sim + " " + coverStr + " " + config
+            cmdPrefix=f"wsim --sim {sim} {coverStr} {config}"
             gs="Single Elf file tests are not signatured verified."
             if ("cvw-arch-verif/tests" in testDir and not "priv" in testDir):
                 fileEnd = "ALL.elf"
             else:
                 fileEnd = ".elf"
         elif lockstepMode:
-            cmdPrefix="wsim --lockstep --sim " + sim + " " + config
+            cmdPrefix=f"wsim --lockstep --sim {sim} {config}"
             gs="Mismatches : 0"
             fileEnd = ".elf"
         else:
-            cmdPrefix="wsim --sim " + sim + " " + config
+            cmdPrefix=f"wsim --sim {sim} {config}"
             gs="Single Elf file tests are not signatured verified."
             fileEnd = ".elf"
         for dirpath, _, filenames in os.walk(os.path.abspath(testDir)):
@@ -324,29 +324,29 @@
                     fullfile = os.path.join(dirpath, file)
                     fields = fullfile.rsplit('/', 3)
                     if fields[2] == "ref":
-                        shortelf = fields[1] + "_" + fields[3]
+                        shortelf = f"{fields[1]}_{fields[3]}"
                     else:
-                        shortelf = fields[2] + "_" + fields[3]
+                        shortelf = f"{fields[2]}_{fields[3]}"
                     if shortelf in lockstepwaivers: # skip tests that itch bugs in ImperasDV
                         print(f"{bcolors.WARNING}Skipping waived test {shortelf}{bcolors.ENDC}")
                         continue
-                    sim_log = sim_logdir + config + "_" + shortelf + ".log"
+                    sim_log = f"{sim_logdir}{config}_{shortelf}.log"
                     tc = TestCase(
                         name=file,
                         variant=config,
-                        cmd=cmdPrefix + " " + fullfile + " > " + sim_log,
+                        cmd=f"{cmdPrefix} {fullfile} > {sim_log}",
                         grepstr=gs,
                         grepfile = sim_log)
                     configs.append(tc)
     else:
-        print("Error: Directory not found: " + testDir)
+        print(f"Error: Directory not found: {testDir}")
         sys.exit(1)
 
 def search_log_for_text(text, grepfile):
     """Search through the given log file for text, returning True if it is found or False if it is not"""
-    grepwarn = "grep -i -H Warning: " + grepfile
+    grepwarn = f"grep -i -H Warning: {grepfile}"
     os.system(grepwarn)
-    greperr = "grep -i -H Error: " + grepfile
+    greperr = f"grep -i -H Error: {grepfile}"
     os.system(greperr)
     grepcmd = f"grep -a -e '{text}' '{grepfile}' > /dev/null"
     return os.system(grepcmd) == 0
@@ -367,10 +367,10 @@ def run_test_case(config, dryrun: bool = False):
         os.system(cmd)
         if search_log_for_text(config.grepstr, grepfile):
             # Flush is needed to flush output to stdout when running in multiprocessing Pool
-            print(f"{bcolors.OKGREEN}%s: Success{bcolors.ENDC}" % (config.cmd), flush=True)
+            print(f"{bcolors.OKGREEN}{config.cmd}: Success{bcolors.ENDC}", flush=True)
             return 0
         else:
-            print(f"{bcolors.FAIL}%s: Failures detected in output{bcolors.ENDC}" % (config.cmd), flush=True)
+            print(f"{bcolors.FAIL}{config.cmd}: Failures detected in output{bcolors.ENDC}", flush=True)
             print(f" Check {grepfile}", flush=True)
             return 1
 
@@ -418,9 +418,9 @@
 configs = [
     TestCase(
         name="lints",
         variant="all",
-        cmd="lint-wally " + nightMode + " | tee " + WALLY + "/sim/verilator/logs/all_lints.log",
+        cmd=f"lint-wally {nightMode} | tee {WALLY}/sim/verilator/logs/all_lints.log",
         grepstr="lints run with no errors or warnings",
-        grepfile = WALLY + "/sim/verilator/logs/all_lints.log")
+        grepfile = f"{WALLY}/sim/verilator/logs/all_lints.log")
     ]
 
@@ -467,11 +467,11 @@ if (args.testfloat or args.nightly): # for nightly, run testfloat along with oth
         if "f_" in config:
             tests.remove("cvtfp")
         for test in tests:
-            sim_log = WALLY + "/sim/" + testfloatsim + "/logs/"+config+"_"+test+".log"
+            sim_log = f"{WALLY}/sim/{testfloatsim}/logs/{config}_{test}.log"
             tc = TestCase(
                 name=test,
                 variant=config,
-                cmd="wsim --tb testbench_fp --sim " + testfloatsim + " " + config + " " + test + " > " + sim_log,
+                cmd=f"wsim --tb testbench_fp --sim {testfloatsim} {config} {test} > {sim_log}",
                 grepstr="All Tests completed with 0 errors",
                 grepfile = sim_log)
             configs.append(tc)
@@ -509,13 +509,13 @@ if (args.testfloat or args.nightly): # for nightly, run testfloat along with oth
         if "f_" in config:
             tests.remove("cvtfp")
         for test in tests:
-            sim_log = WALLY + "/sim/questa/logs/"+config+"_"+test+".log"
+            sim_log = f"{WALLY}/sim/{testfloatsim}/logs/{config}_{test}.log"
             tc = TestCase(
                 name=test,
                 variant=config,
-                cmd="wsim --tb testbench_fp " + config + " " + test + " > " + sim_log,
+                cmd=f"wsim --tb testbench_fp --sim {testfloatsim} {config} {test} > {sim_log}",
                 grepstr="All Tests completed with 0 errors",
-                grepfile = WALLY + "/sim/questa/logs/"+config+"_"+test+".log")
+                grepfile = f"{WALLY}/sim/{testfloatsim}/logs/{config}_{test}.log")
             configs.append(tc)
 
 
@@ -560,7 +560,7 @@ def main():
             except MPTimeoutError:
                 pool.terminate()
                 num_fail+=1
-                print(f"{bcolors.FAIL}%s: Timeout - runtime exceeded %d seconds{bcolors.ENDC}" % (config.cmd, TIMEOUT_DUR))
+                print(f"{bcolors.FAIL}{config.cmd}: Timeout - runtime exceeded {TIMEOUT_DUR} seconds{bcolors.ENDC}")
 
     # Coverage report
     if args.ccov:
@@ -569,7 +569,7 @@ def main():
         os.system('make -C '+WALLY+'/addins/cvw-arch-verif merge')
     # Count the number of failures
     if num_fail:
-        print(f"{bcolors.FAIL}Regression failed with %s failed configurations{bcolors.ENDC}" % num_fail)
+        print(f"{bcolors.FAIL}Regression failed with {num_fail} failed configurations{bcolors.ENDC}")
     else:
         print(f"{bcolors.OKGREEN}SUCCESS! All tests ran without failures{bcolors.ENDC}")
     return num_fail
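
A note on the addTests() hunk above (reviewer comment, not part of the patch):
an f-string interpolates the repr() of a value, so converting
" --args " + " ".join(test[2]) to f" --args {test[2]}" would embed the list's
repr rather than its joined contents; the ' '.join(...) has to move inside the
braces. A minimal standalone sketch, with illustrative variable names that are
not from the repo:

    test_args = ["+define+FOO", "+define+BAR"]

    # Embeds the list repr: " --args ['+define+FOO', '+define+BAR']" - wrong
    print(f" --args {test_args}")

    # Joins the elements first: " --args +define+FOO +define+BAR" - intended
    print(f" --args {' '.join(test_args)}")

This is why the converted line reads args = f" --args {' '.join(test[2])}"
rather than interpolating test[2] directly.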