From 3168859268dfa125d79f35cabb9c1b86feaa363f Mon Sep 17 00:00:00 2001 From: Thomas Kidd Date: Thu, 4 Jan 2024 22:00:43 -0500 Subject: [PATCH 01/20] updated install tool chain file to use verilator v5.016 --- bin/wally-tool-chain-install.sh | 13 +++++++------ tests/riscof/Makefile | 5 +++-- 2 files changed, 10 insertions(+), 8 deletions(-) diff --git a/bin/wally-tool-chain-install.sh b/bin/wally-tool-chain-install.sh index ef2f9bcc7..5123836b0 100755 --- a/bin/wally-tool-chain-install.sh +++ b/bin/wally-tool-chain-install.sh @@ -39,7 +39,7 @@ set -e # break on error NUM_THREADS=8 # for >= 32GiB #NUM_THREADS=16 # for >= 64GiB -sudo mkdir -p $RISCV +#sudo mkdir -p $RISCV # *** need to update permissions to local user # Update and Upgrade tools (see https://itsfoss.com/apt-update-vs-upgrade/) @@ -47,13 +47,13 @@ sudo apt update -y sudo apt upgrade -y sudo apt install -y git gawk make texinfo bison flex build-essential python3 libz-dev libexpat-dev autoconf device-tree-compiler ninja-build libpixman-1-dev ncurses-base ncurses-bin libncurses5-dev dialog curl wget ftp libgmp-dev libglib2.0-dev python3-pip pkg-config opam z3 zlib1g-dev automake autotools-dev libmpc-dev libmpfr-dev gperf libtool patchutils bc # Other python libraries used through the book. -sudo pip3 install sphinx sphinx_rtd_theme matplotlib scipy scikit-learn adjustText lief +# sudo pip3 install sphinx sphinx_rtd_theme matplotlib scipy scikit-learn adjustText lief # needed for Ubuntu 22.04, gcc cross compiler expects python not python2 or python3. if ! command -v python &> /dev/null then echo "WARNING: python3 was installed as python3 rather than python. Creating symlink." - sudo ln -sf /usr/bin/python3 /usr/bin/python + sudo ln -sf /bin/python3 /usr/bin/python fi # gcc cross-compiler (https://github.com/riscv-collab/riscv-gnu-toolchain) @@ -71,8 +71,8 @@ cd riscv-gnu-toolchain # Temporarily use the following commands until gcc-13 is part of riscv-gnu-toolchain (issue #1249) #git clone https://github.com/gcc-mirror/gcc -b releases/gcc-13 gcc-13 #./configure --prefix=/opt/riscv --with-multilib-generator="rv32e-ilp32e--;rv32i-ilp32--;rv32im-ilp32--;rv32iac-ilp32--;rv32imac-ilp32--;rv32imafc-ilp32f--;rv32imafdc-ilp32d--;rv64i-lp64--;rv64ic-lp64--;rv64iac-lp64--;rv64imac-lp64--;rv64imafdc-lp64d--;rv64im-lp64--;" --with-gcc-src=`pwd`/gcc-13 -./configure --prefix=${RISCV} --with-multilib-generator="rv32e-ilp32e--;rv32i-ilp32--;rv32im-ilp32--;rv32iac-ilp32--;rv32imac-ilp32--;rv32imafc-ilp32f--;rv32imafdc-ilp32d--;rv64i-lp64--;rv64ic-lp64--;rv64iac-lp64--;rv64imac-lp64--;rv64imafdc-lp64d--;rv64im-lp64--;" -make -j ${NUM_THREADS} +#./configure --prefix=${RISCV} --with-multilib-generator="rv32e-ilp32e--;rv32i-ilp32--;rv32im-ilp32--;rv32iac-ilp32--;rv32imac-ilp32--;rv32imafc-ilp32f--;rv32imafdc-ilp32d--;rv64i-lp64--;rv64ic-lp64--;rv64iac-lp64--;rv64imac-lp64--;rv64imafdc-lp64d--;rv64im-lp64--;" +#make -j ${NUM_THREADS} # elf2hex (https://github.com/sifive/elf2hex) #The elf2hex utility to converts executable files into hexadecimal files for Verilog simulation. @@ -124,7 +124,8 @@ git clone https://github.com/verilator/verilator # Only first time unset VERILATOR_ROOT # For bash cd verilator git pull # Make sure git repository is up-to-date -git checkout master # Use development branch (e.g. recent bug fixes) +git checkout v5.016 # Use development branch (e.g. recent bug fixes) +#git checkout master # Use development branch (e.g. 
recent bug fixes) autoconf # Create ./configure script ./configure # Configure and create Makefile make -j ${NUM_THREADS} # Build Verilator itself (if error, try just 'make') diff --git a/tests/riscof/Makefile b/tests/riscof/Makefile index a9855d41f..1a55f953f 100644 --- a/tests/riscof/Makefile +++ b/tests/riscof/Makefile @@ -8,8 +8,9 @@ wally_workdir = $(work)/wally-riscv-arch-test current_dir = $(shell pwd) #XLEN ?= 64 -all: root arch32 wally32 arch32e arch64 wally64 -wally-riscv-arch-test: root wally32 wally64 +#all: root arch32 wally32 arch32e arch64 wally64 +#wally-riscv-arch-test: root wally32 wally64 +all: root wally32 wally64 root: mkdir -p $(work_dir) From 981c3ccf6b4775505c6356ff3e9bd60f7f0ba5f4 Mon Sep 17 00:00:00 2001 From: Thomas Kidd Date: Thu, 4 Jan 2024 22:01:52 -0500 Subject: [PATCH 02/20] updated gitignore file --- .gitignore | 8 ++++++++ 1 file changed, 8 insertions(+) diff --git a/.gitignore b/.gitignore index 1664b939f..ae362d3ca 100644 --- a/.gitignore +++ b/.gitignore @@ -1,6 +1,14 @@ **/work* **/wally_*.log +# files for my own repo +setup_host.sh +tests/riscof/Makefile +bin/wally-tool-chain-install.sh +sim/coverage_output.txt +sim/regression_output.txt + + .nfs* __pycache__/ From 8d9f52061c5cee3f3e561e2479db442e185c67e3 Mon Sep 17 00:00:00 2001 From: Thomas Kidd Date: Wed, 31 Jan 2024 17:43:46 -0600 Subject: [PATCH 03/20] adding nightly tests --- bin/nightly_build/nightly_build_v13_beta.sh | 85 ++++++++++++++++ bin/nightly_build/src/error_detector.py | 37 +++++++ bin/nightly_build/src/parse_coverage.py | 95 ++++++++++++++++++ bin/nightly_build/src/parse_regression.py | 102 ++++++++++++++++++++ bin/nightly_build/src/send_mail_html.sh | 79 +++++++++++++++ 5 files changed, 398 insertions(+) create mode 100755 bin/nightly_build/nightly_build_v13_beta.sh create mode 100644 bin/nightly_build/src/error_detector.py create mode 100644 bin/nightly_build/src/parse_coverage.py create mode 100644 bin/nightly_build/src/parse_regression.py create mode 100755 bin/nightly_build/src/send_mail_html.sh diff --git a/bin/nightly_build/nightly_build_v13_beta.sh b/bin/nightly_build/nightly_build_v13_beta.sh new file mode 100755 index 000000000..2e0ca42d4 --- /dev/null +++ b/bin/nightly_build/nightly_build_v13_beta.sh @@ -0,0 +1,85 @@ +#!/bin/bash + +# set WALLY path +WALLY=$(dirname ${BASH_SOURCE[0]:-$0}) +export WALLY=$(cd "$WALLY/../../" && pwd) +echo "WALLY is set to: $WALLY" + +# Going to nightly runs +cd $WALLY/../ + +# check if directories exist +if [ ! -d "build-results" ]; then + echo "Directory does not exist, creating it..." + mkdir -p "build-results" + if [ $? -eq 0 ]; then + echo "Directory created successfully." + else + echo "Failed to create directory." + exit 1 + fi +else + echo "Directory already exists." +fi + +if [ ! -d "logs" ]; then + echo "Directory does not exist, creating it..." + mkdir -p "logs" + if [ $? -eq 0 ]; then + echo "Directory created successfully." + else + echo "Failed to create directory." + exit 1 + fi +else + echo "Directory already exists." +fi + +# setup source okstate file +echo "Sourcing setup files" +source $WALLY/setup_host.sh +source $WALLY/../setup-files/setup_tools.sh + +# Navigate to the gir repo +cd $WALLY + +# pull the repository +echo "Pulling submodules" +#git pull --recurse-submodules origin main + +# build the regression tests +echo "Building the regression tests" +cd sim +#if make wally-riscv-arch-test; then +#if make all; then +# echo "Make successfull" +#else +# echo "Make failed" +# cd $WALLY/.. 
+ # add the the regression result and the coverage result that there was an error in making the tests +# python $WALLY/bin/nightly_build/src/error_detector.py --tag make -o $WALLY/../build-results/regression_results.md +# python $WALLY/bin/nightly_build/src/error_detector.py --tag make -o $WALLY/../build-results/coverage_results.md + + # exit the program + #exit 1 +#fi + +# execute the simulation / regression tests and save output to a file +echo "running the regression test" +#./regression-wally > $WALLY/../logs/regression_output.log 2>&1 + +echo "running coverage tests" +#./coverage > $WALLY/../logs/coverage_output.log 2>&1 + + +# run the Python script to parse the output and generate the log file +echo "Parsing output data from the regression test" +cd $WALLY/../ + +python $WALLY/bin/nightly_build/src/parse_regression.py -i $WALLY/../logs/regression_output.log -o $WALLY/../build-results/regression_results.md + +python $WALLY/bin/nightly_build/src/parse_coverage.py -i $WALLY/../logs/coverage_output.log -o $WALLY/../build-results/coverage_results.md + +# email update +cd $WALLY/bin/nightly_build/src/ +./send_mail_html.sh diff --git a/bin/nightly_build/src/error_detector.py b/bin/nightly_build/src/error_detector.py new file mode 100644 index 000000000..c239032e9 --- /dev/null +++ b/bin/nightly_build/src/error_detector.py @@ -0,0 +1,37 @@ +import argparse +import datetime + +def add_failure_to_markdown(tag, output_file, input_file = None): + # Get the current date and time + current_datetime = datetime.datetime.now() + formatted_datetime = current_datetime.strftime("%Y-%m-%d %H:%M:%S") + + # Create the failure message based on the provided tag and input content + if tag == "make": + failure_message = f"# {tag.capitalize()} riscof comilation failure - {formatted_datetime}\n\n" + failure_message += f"The error was due to a problem in compiling the the riscof tests:\n\n" + if input_file != None: + failure_message += f"The particular error: {input_file}\n\n" + else: + failure_message = f"# {tag.capitalize()} Failure - {formatted_datetime}\n\n" + failure_message += f":\n\n" + + # Append the failure message to the specified output file + with open(output_file, "a") as file: + file.write(failure_message) + + print(f"Failure information added to {output_file}.") + +if __name__ == "__main__": + # Set up argparse + parser = argparse.ArgumentParser(description="Add failure information to Markdown file.") + parser.add_argument("--tag", required=True, help="Specify the tag for the failure type (e.g., 'make', 'custom').") + parser.add_argument("-i", required=False, help="Specify the input file containing failure details.") + parser.add_argument("-o", required=True, help="Specify the output file to write the failure information.") + + # Parse command-line arguments + args = parser.parse_args() + + # Call the function with the specified tag, input file, and output file + add_failure_to_markdown(args.tag, args.o) + diff --git a/bin/nightly_build/src/parse_coverage.py b/bin/nightly_build/src/parse_coverage.py new file mode 100644 index 000000000..33811b0a1 --- /dev/null +++ b/bin/nightly_build/src/parse_coverage.py @@ -0,0 +1,95 @@ +import os +import argparse +from datetime import datetime +import re +from colorama import init, Fore, Style + +def parse_regression_output(output): + passed_configs = [] + failed_configs = [] + + lines = output.split('\n') + index = 0 + + while index < len(lines): + # Remove ANSI escape codes + line = re.sub(r'\x1b\[[0-9;]*[mGK]', '', lines[index]) + if "Success" in line: + 
passed_configs.append(line.split(':')[0].strip()) + elif "Failures detected in output" in line: + try: + config_name = line.split(':')[0].strip() + log_file = os.path.abspath(config_name+".log") + failed_configs.append((config_name, log_file)) + except: + failed_configs.append((config_name, "Log file not found")) + + index += 1 + + # alphabetically sort the configurations + passed_configs.sort() + failed_configs.sort() + return passed_configs, failed_configs + +def write_to_markdown(passed_configs, failed_configs, output_file): + timestamp = datetime.now().strftime("%Y-%m-%d %H:%M:%S") + + with open(output_file, 'a') as md_file: + md_file.write(f"\n\n# Coverage Test Results - {timestamp}\n\n") + + md_file.write("\n## Failed Configurations\n") + for config, log_file in failed_configs: + md_file.write(f"- {config} ({log_file})\n") + if len(failed_configs) == 0: + md_file.write(" - no failures\n") + + md_file.write("\n## Passed Configurations\n") + for config in passed_configs: + md_file.write(f"- {config}\n") + + +def write_new_markdown(passed_configs, failed_configs): + timestamp = datetime.now().strftime("%Y-%m-%d") + output_file = f"/home/thkidd/nightly_runs/build-results/builds/coverage/wally_coverage_{timestamp}.md" + + with open(output_file, 'w') as md_file: + # Title + md_file.write(f"\n\n# Coverage Test Results - {timestamp}\n\n") + + ## File path + md_file.write(f"\n**File:** {output_file}\n") + + md_file.write("\n## Failed Configurations\n") + # add in if there were no failures + if len(failed_configs) == 0: + md_file.write(f"No Failures\n") + + for config, log_file in failed_configs: + md_file.write(f"- {config} ({log_file})\n") + + md_file.write("\n## Passed Configurations\n") + for config in passed_configs: + md_file.write(f"- {config}\n") + + + + +if __name__ == "__main__": + init(autoreset=True) # Initialize colorama + parser = argparse.ArgumentParser(description='Parse regression test output and append to a markdown file.') + parser.add_argument('-i', '--input', help='Input file containing regression test output', required=True) + parser.add_argument('-o', '--output', help='Output markdown file', default='regression_results.md') + args = parser.parse_args() + + with open(args.input, 'r') as input_file: + regression_output = input_file.read() + + passed_configs, failed_configs = parse_regression_output(regression_output) + write_to_markdown(passed_configs, failed_configs, args.output) + + print(f"Markdown file updated: {args.output}") + + write_new_markdown(passed_configs, failed_configs) + + print("New markdown file created") + diff --git a/bin/nightly_build/src/parse_regression.py b/bin/nightly_build/src/parse_regression.py new file mode 100644 index 000000000..0d8c9467d --- /dev/null +++ b/bin/nightly_build/src/parse_regression.py @@ -0,0 +1,102 @@ +import argparse +import os +from datetime import datetime +import re +from colorama import init, Fore, Style + +def parse_regression_output(output): + passed_configs = [] + failed_configs = [] + + lines = output.split('\n') + index = 0 + + while index < len(lines): + # Remove ANSI escape codes + line = re.sub(r'\x1b\[[0-9;]*[mGK]', '', lines[index]) + #print("The cleaned line: ", line) + if "Success" in line: + passed_configs.append(line.split(':')[0].strip()) + elif "Failures detected in output" in line: + try: + config_name = line.split(':')[0].strip() + log_file = os.path.abspath(config_name+".log") + failed_configs.append((config_name, log_file)) + except: + failed_configs.append((config_name, "Log file not found")) + elif 
"Timeout" in line: + try: + config_name = line.split(':')[0].strip() + log_file = os.path.abspath(config_name+".log") + failed_configs.append((config_name, log_file)) + except: + failed_configs.append((config_name, "Log file not found")) + index += 1 + + # alphabetically sort the configurations + passed_configs.sort() + failed_configs.sort() + return passed_configs, failed_configs + +def write_to_markdown(passed_configs, failed_configs, output_file): + timestamp = datetime.now().strftime("%Y-%m-%d %H:%M:%S") + with open(output_file, 'a') as md_file: + md_file.write(f"\n\n
\n# Regression Test Results - {timestamp}\n
\n\n") + #md_file.write(f"\n\n# Regression Test Results - {timestamp}\n\n") + + if failed_configs: + md_file.write("## Failed Configurations\n") + for config, log_file in failed_configs: + md_file.write(f"- {config} ({log_file})\n") + md_file.write("\n") + else: + md_file.write("## No Failed Configurations\n") + + md_file.write("\n## Passed Configurations\n") + for config in passed_configs: + md_file.write(f"- {config}\n") + +def write_new_markdown(passed_configs, failed_configs): + timestamp = datetime.now().strftime("%Y-%m-%d") + output_file = f"/home/thkidd/nightly_runs/build-results/builds/regression/wally_regression_{timestamp}.md" + with open(output_file, 'w') as md_file: + + # Title + md_file.write(f"\n\n# Regression Test Results - {timestamp}\n\n") + #md_file.write(f"\n\n
\n# Regression Test Results - {timestamp}\n
\n\n") + + # File Path + md_file.write(f"\n**File:** {output_file}\n\n") + + if failed_configs: + md_file.write("## Failed Configurations\n\n") + for config, log_file in failed_configs: + md_file.write(f"- {config} ({log_file})\n") + md_file.write("\n") + else: + md_file.write("## Failed Configurations\n") + md_file.write(f"No Failures\n") + + md_file.write("\n## Passed Configurations\n") + for config in passed_configs: + md_file.write(f"- {config}\n") + +if __name__ == "__main__": + init(autoreset=True) # Initialize colorama + parser = argparse.ArgumentParser(description='Parse regression test output and append to a markdown file.') + parser.add_argument('-i', '--input', help='Input file containing regression test output', required=True) + parser.add_argument('-o', '--output', help='Output markdown file containing formatted file', default='regression_results.md') + args = parser.parse_args() + + with open(args.input, 'r') as input_file: + regression_output = input_file.read() + + passed_configs, failed_configs = parse_regression_output(regression_output) + write_to_markdown(passed_configs, failed_configs, args.output) + + print(f"Markdown file updated: {args.output}") + + write_new_markdown(passed_configs, failed_configs) + + print("New markdown file created") + diff --git a/bin/nightly_build/src/send_mail_html.sh b/bin/nightly_build/src/send_mail_html.sh new file mode 100755 index 000000000..5c3735a3a --- /dev/null +++ b/bin/nightly_build/src/send_mail_html.sh @@ -0,0 +1,79 @@ +current_date=$(date "+%Y-%m-%d") +email_address="thomas.kidd@okstate.edu" +#email_address="WALLY-REGRESSION@LISTSERV.OKSTATE.EDU" +subject="WALLY regression and coverage test report" +attachments="" +html_body="" +host_name=$(hostname) +os_info=$(lsb_release -a 2>/dev/null) +script_location=$WALLY/bin/nightly_build/ + +html_body="
+<h2>System Information</h2>
+<p>Server Name: $host_name@okstate.edu</p>
+<p>Operating System: $os_info</p>
+<p>Script Origin: $script_location</p>
+<hr>
+
+<h2>Testing sending HTML content through mutt</h2>"
+
+# Iterate through the files and concatenate their content to the body
+for file in $WALLY/../build-results/builds/*/wally_*_"$current_date"*.md; do
+    attachments+=" -a $file"
+
+    # Convert Markdown to HTML using pandoc
+    html_content=$(pandoc "$file")
+
+    # add the file full path
+    # html_body+="<p>File: $file</p>"
+    # Append the HTML content to the body
+    html_body+="$html_content"
+done
+echo "Sending email"
+
+# Get server hostname and OS information
+host_name=$(hostname)
+os_info=$(uname -a)
+
+# Define HTML body content
+
+# Use mutt to send the email with HTML body
+#mutt -e "my_hdr From:James Stine <james.stine@okstate.edu>" -s "$subject" $attachments \
+mutt -e "my_hdr From:Thomas Kidd <thomas.kidd@okstate.edu>" -s "$subject" \
+    -e "set content_type=text/html" -- $email_address <<EOF
+<!DOCTYPE html>
+<html>
+<head>
+<title>Nightly Build Results - $current_date</title>
+</head>
+<body>
+<h1>Test Results - $current_date</h1>
+ + $html_body + + +EOF + From 942a2804c33a28cf31171f4a203144a91f50fb15 Mon Sep 17 00:00:00 2001 From: Thomas Kidd Date: Wed, 31 Jan 2024 17:48:03 -0600 Subject: [PATCH 04/20] reverted the verilator checkout to checkout master --- bin/wally-tool-chain-install.sh | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/bin/wally-tool-chain-install.sh b/bin/wally-tool-chain-install.sh index 473bc074b..79a556571 100755 --- a/bin/wally-tool-chain-install.sh +++ b/bin/wally-tool-chain-install.sh @@ -125,8 +125,7 @@ git clone https://github.com/verilator/verilator # Only first time unset VERILATOR_ROOT # For bash cd verilator git pull # Make sure git repository is up-to-date -git checkout v5.016 # Use development branch (e.g. recent bug fixes) -#git checkout master # Use development branch (e.g. recent bug fixes) +git checkout master # Use development branch (e.g. recent bug fixes) autoconf # Create ./configure script ./configure # Configure and create Makefile make -j ${NUM_THREADS} # Build Verilator itself (if error, try just 'make') From 85f214a02501b11aa5e69509b27e4684665b2e1d Mon Sep 17 00:00:00 2001 From: Thomas Kidd Date: Thu, 1 Feb 2024 09:07:14 -0600 Subject: [PATCH 05/20] Revert "reverted the verilator checkout to checkout master" This reverts commit 942a2804c33a28cf31171f4a203144a91f50fb15. --- bin/wally-tool-chain-install.sh | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/bin/wally-tool-chain-install.sh b/bin/wally-tool-chain-install.sh index 79a556571..473bc074b 100755 --- a/bin/wally-tool-chain-install.sh +++ b/bin/wally-tool-chain-install.sh @@ -125,7 +125,8 @@ git clone https://github.com/verilator/verilator # Only first time unset VERILATOR_ROOT # For bash cd verilator git pull # Make sure git repository is up-to-date -git checkout master # Use development branch (e.g. recent bug fixes) +git checkout v5.016 # Use development branch (e.g. recent bug fixes) +#git checkout master # Use development branch (e.g. recent bug fixes) autoconf # Create ./configure script ./configure # Configure and create Makefile make -j ${NUM_THREADS} # Build Verilator itself (if error, try just 'make') From 590f53f5a55c97aba93bae9e649382ed005e68c4 Mon Sep 17 00:00:00 2001 From: Thomas Kidd Date: Thu, 1 Feb 2024 09:09:12 -0600 Subject: [PATCH 06/20] Revert "updated gitignore file" This reverts commit 981c3ccf6b4775505c6356ff3e9bd60f7f0ba5f4. --- .gitignore | 8 -------- 1 file changed, 8 deletions(-) diff --git a/.gitignore b/.gitignore index 66da1691c..a01f1c07d 100644 --- a/.gitignore +++ b/.gitignore @@ -1,14 +1,6 @@ **/work* **/wally_*.log -# files for my own repo -setup_host.sh -tests/riscof/Makefile -bin/wally-tool-chain-install.sh -sim/coverage_output.txt -sim/regression_output.txt - - .nfs* __pycache__/ From d2e606b41bd35d037cfa840ab271db0d6ce3f33e Mon Sep 17 00:00:00 2001 From: Thomas Kidd Date: Thu, 1 Feb 2024 09:23:15 -0600 Subject: [PATCH 07/20] Revert "updated install tool chain file to use verilator v5.016" This reverts commit 3168859268dfa125d79f35cabb9c1b86feaa363f. 
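For reference, a revert commit like the ones in this series is produced and exported with plain git (hash taken from the message above; exact flags may vary):

    git revert 3168859268dfa125d79f35cabb9c1b86feaa363f   # creates the "Revert ..." commit
    git format-patch -1 HEAD                               # writes it out as a mail-formatted patch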
--- bin/wally-tool-chain-install.sh | 13 ++++++------- tests/riscof/Makefile | 5 ++--- 2 files changed, 8 insertions(+), 10 deletions(-) diff --git a/bin/wally-tool-chain-install.sh b/bin/wally-tool-chain-install.sh index 473bc074b..ca8a28c05 100755 --- a/bin/wally-tool-chain-install.sh +++ b/bin/wally-tool-chain-install.sh @@ -40,7 +40,7 @@ set -e # break on error NUM_THREADS=8 # for >= 32GiB #NUM_THREADS=16 # for >= 64GiB -#sudo mkdir -p $RISCV +sudo mkdir -p $RISCV # *** need to update permissions to local user # Update and Upgrade tools (see https://itsfoss.com/apt-update-vs-upgrade/) @@ -48,13 +48,13 @@ sudo apt update -y sudo apt upgrade -y sudo apt install -y git gawk make texinfo bison flex build-essential python3 libz-dev libexpat-dev autoconf device-tree-compiler ninja-build libpixman-1-dev ncurses-base ncurses-bin libncurses5-dev dialog curl wget ftp libgmp-dev libglib2.0-dev python3-pip pkg-config opam z3 zlib1g-dev automake autotools-dev libmpc-dev libmpfr-dev gperf libtool patchutils bc # Other python libraries used through the book. -# sudo pip3 install sphinx sphinx_rtd_theme matplotlib scipy scikit-learn adjustText lief +sudo pip3 install sphinx sphinx_rtd_theme matplotlib scipy scikit-learn adjustText lief # needed for Ubuntu 22.04, gcc cross compiler expects python not python2 or python3. if ! command -v python &> /dev/null then echo "WARNING: python3 was installed as python3 rather than python. Creating symlink." - sudo ln -sf /bin/python3 /usr/bin/python + sudo ln -sf /usr/bin/python3 /usr/bin/python fi # gcc cross-compiler (https://github.com/riscv-collab/riscv-gnu-toolchain) @@ -72,8 +72,8 @@ cd riscv-gnu-toolchain # Temporarily use the following commands until gcc-13 is part of riscv-gnu-toolchain (issue #1249) #git clone https://github.com/gcc-mirror/gcc -b releases/gcc-13 gcc-13 #./configure --prefix=/opt/riscv --with-multilib-generator="rv32e-ilp32e--;rv32i-ilp32--;rv32im-ilp32--;rv32iac-ilp32--;rv32imac-ilp32--;rv32imafc-ilp32f--;rv32imafdc-ilp32d--;rv64i-lp64--;rv64ic-lp64--;rv64iac-lp64--;rv64imac-lp64--;rv64imafdc-lp64d--;rv64im-lp64--;" --with-gcc-src=`pwd`/gcc-13 -#./configure --prefix=${RISCV} --with-multilib-generator="rv32e-ilp32e--;rv32i-ilp32--;rv32im-ilp32--;rv32iac-ilp32--;rv32imac-ilp32--;rv32imafc-ilp32f--;rv32imafdc-ilp32d--;rv64i-lp64--;rv64ic-lp64--;rv64iac-lp64--;rv64imac-lp64--;rv64imafdc-lp64d--;rv64im-lp64--;" -#make -j ${NUM_THREADS} +./configure --prefix=${RISCV} --with-multilib-generator="rv32e-ilp32e--;rv32i-ilp32--;rv32im-ilp32--;rv32iac-ilp32--;rv32imac-ilp32--;rv32imafc-ilp32f--;rv32imafdc-ilp32d--;rv64i-lp64--;rv64ic-lp64--;rv64iac-lp64--;rv64imac-lp64--;rv64imafdc-lp64d--;rv64im-lp64--;" +make -j ${NUM_THREADS} # elf2hex (https://github.com/sifive/elf2hex) #The elf2hex utility to converts executable files into hexadecimal files for Verilog simulation. @@ -125,8 +125,7 @@ git clone https://github.com/verilator/verilator # Only first time unset VERILATOR_ROOT # For bash cd verilator git pull # Make sure git repository is up-to-date -git checkout v5.016 # Use development branch (e.g. recent bug fixes) -#git checkout master # Use development branch (e.g. recent bug fixes) +git checkout master # Use development branch (e.g. 
recent bug fixes) autoconf # Create ./configure script ./configure # Configure and create Makefile make -j ${NUM_THREADS} # Build Verilator itself (if error, try just 'make') diff --git a/tests/riscof/Makefile b/tests/riscof/Makefile index 1a55f953f..a9855d41f 100644 --- a/tests/riscof/Makefile +++ b/tests/riscof/Makefile @@ -8,9 +8,8 @@ wally_workdir = $(work)/wally-riscv-arch-test current_dir = $(shell pwd) #XLEN ?= 64 -#all: root arch32 wally32 arch32e arch64 wally64 -#wally-riscv-arch-test: root wally32 wally64 -all: root wally32 wally64 +all: root arch32 wally32 arch32e arch64 wally64 +wally-riscv-arch-test: root wally32 wally64 root: mkdir -p $(work_dir) From 78db8c584a334cad9b84f862f436d3867c1e7b68 Mon Sep 17 00:00:00 2001 From: Thomas Kidd Date: Thu, 1 Feb 2024 09:31:41 -0600 Subject: [PATCH 08/20] Revert "updated gitignore file" This reverts commit 981c3ccf6b4775505c6356ff3e9bd60f7f0ba5f4. --- bin/wally-tool-chain-install.sh | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/bin/wally-tool-chain-install.sh b/bin/wally-tool-chain-install.sh index ca8a28c05..6e7e4c8e9 100755 --- a/bin/wally-tool-chain-install.sh +++ b/bin/wally-tool-chain-install.sh @@ -125,7 +125,7 @@ git clone https://github.com/verilator/verilator # Only first time unset VERILATOR_ROOT # For bash cd verilator git pull # Make sure git repository is up-to-date -git checkout master # Use development branch (e.g. recent bug fixes) +git checkout master autoconf # Create ./configure script ./configure # Configure and create Makefile make -j ${NUM_THREADS} # Build Verilator itself (if error, try just 'make') From 190ef91751e3e2e9aa2eb2db2675c2ed9321b217 Mon Sep 17 00:00:00 2001 From: Thomas Kidd Date: Mon, 12 Feb 2024 18:21:04 -0600 Subject: [PATCH 09/20] Replacing nightly_build folder with nightly_build.py and bash wrapper script --- bin/nightly_build.py | 693 ++++++++++++++++++++ bin/nightly_build/nightly_build_v13_beta.sh | 85 --- bin/nightly_build/src/error_detector.py | 37 -- bin/nightly_build/src/parse_coverage.py | 95 --- bin/nightly_build/src/parse_regression.py | 102 --- bin/nightly_build/src/send_mail_html.sh | 79 --- bin/wrapper_nightly_runs.sh | 28 + 7 files changed, 721 insertions(+), 398 deletions(-) create mode 100755 bin/nightly_build.py delete mode 100755 bin/nightly_build/nightly_build_v13_beta.sh delete mode 100644 bin/nightly_build/src/error_detector.py delete mode 100644 bin/nightly_build/src/parse_coverage.py delete mode 100644 bin/nightly_build/src/parse_regression.py delete mode 100755 bin/nightly_build/src/send_mail_html.sh create mode 100755 bin/wrapper_nightly_runs.sh diff --git a/bin/nightly_build.py b/bin/nightly_build.py new file mode 100755 index 000000000..21d0ce025 --- /dev/null +++ b/bin/nightly_build.py @@ -0,0 +1,693 @@ +#!/usr/bin/python3 +""" +Python Regression Build Automation Script + +This Python script serves the purpose of automating nightly regression builds for a software project. +The script is designed to handle the setup, execution, and reporting aspects of the regression testing process. + +Features: + + 1. Nightly Regression Builds: The script is scheduled to run on a nightly basis, making and executing the regression builds. + + 2. Markdown Report Generation: Upon completion of the regression tests, the script generates detailed reports in Markdown format. + These reports provide comprehensive insights into the test results, including test cases executed, pass/fail status, and any encountered issues. + + 3. 
Email Notification: The script is configured to send out email notifications summarizing the regression test results. + These emails serve as communication channels for stakeholders, providing them with timely updates on the software's regression status. + +Usage: + +- The script is designed to be scheduled and executed automatically on a nightly basis using task scheduling tools such as Cronjobs. To create a cronjob do the following: + 1) Open Terminal: + + Open your terminal application. This is where you'll enter the commands to create and manage cron jobs. + + 2) Access the Cron Table: + + Type the following command and press Enter: + + crontab -e + + This command opens the crontab file in your default text editor. If it's your first time, you might be prompted to choose a text editor. + + 3) Edit the Cron Table: + The crontab file will open in your text editor. Each line in this file represents a cron job. You can now add your new cron job. + + 4) Syntax: + + Our cron job has the following syntax: + 0 3 * * * BASH_ENV=~/.bashrc bash -l -c "*WHERE YOUR CVW IS MUST PUT FULL PATH*/cvw/bin/wrapper_nightly_runs.sh > *WHERE YOU WANT TO STORE LOG FILES/cron.log 2>&1" + + This cronjob sources the .bashrc file and executes the wrapper script as a user. + + 5) Double check: + + Execute the following command to see your cronjobs: + + crontab -l + +Dependencies: + Python: + - os + - shutil + - datetime from datetime + - re + - markdown + - subprocess + + Bash: + - mutt (email sender) + +Conclusion: + +In summary, this Python script facilitates the automation of nightly regression builds, providing comprehensive reporting and email notification capabilities to ensure effective communication and monitoring of regression test results. +""" + +import os +import shutil +from datetime import datetime +import re +import markdown +import subprocess + + + +class FolderManager: + """A class for managing folders and repository cloning.""" + + def __init__(self): + """ + Initialize the FolderManager instance. + + Args: + base_dir (str): The base directory where folders will be managed and repository will be cloned. + """ + env_extract_var = 'WALLY' + print(f"The environemntal variable is {env_extract_var}") + self.base_dir = os.environ.get(env_extract_var) + print(f"The base directory is: {self.base_dir}") + self.base_parent_dir = os.path.dirname(self.base_dir) + + # print(f"The new WALLY vairable is: {os.environ.get('WALLY')}") + # print(f"The Base Directory is now : {self.base_dir}") + # print(f"The Base Parent Directory is now : {self.base_parent_dir}") + + def create_preliminary_folders(self, folders): + """ + Create preliminary folders if they do not exist. + These folders are: + nightly_runs/repos/ + nightly_runs/results/ + + Args: + folders (list): A list of folder names to be created. + + Returns: + None + """ + + for folder in folders: + folder_path = os.path.join(self.base_parent_dir, folder) + if not os.path.exists(folder_path): + os.makedirs(folder_path) + + def create_new_folder(self, folders): + """ + Create a new folder based on the current date if it does not already exist. + + Args: + folder_name (str): The base name for the new folder. + + Returns: + str: The path of the newly created folder if created, None otherwise. 
+ """ + + todays_date = datetime.now().strftime("%Y-%m-%d") + return_folder_path = [] + for folder in folders: + folder_path = os.path.join(self.base_parent_dir, folder, todays_date) + if not os.path.exists(folder_path): + os.makedirs(folder_path) + return_folder_path.append(folder_path) + else: + return_folder_path.append(None) # Folder already exists + + return return_folder_path + + def clone_repository(self, folder, repo_url): + """ + Clone a repository into the 'cvw' folder if it does not already exist. + + Args: + repo_url (str): The URL of the repository to be cloned. + + Returns: + None + """ + todays_date = datetime.now().strftime("%Y-%m-%d") + repo_folder = os.path.join(self.base_parent_dir, folder, todays_date, 'cvw') + tmp_folder = os.path.join(repo_folder, "tmp") # temprorary files will be stored in here + + if not os.path.exists(repo_folder): + os.makedirs(repo_folder) + os.system(f"git clone --recurse-submodules {repo_url} {repo_folder}") + os.makedirs(tmp_folder) + + +class TestRunner: + """A class for making, running, and formatting test results.""" + + def __init__(self): + self.base_dir = os.environ.get('WALLY') + self.base_parent_dir = os.path.dirname(self.base_dir) + self.current_datetime = datetime.now() + #self.temp_dir = self.base_parent_dir + #print(f"Base Directory: {self.base_parent_dir}") + + def copy_setup_script(self, folder): + """ + Copy the setup script to the destination folder. + + The setup script will be copied from the base directory to a specific folder structure inside the base directory. + + Args: + folder: the "nightly_runs/repos/" + + Returns: + bool: True if the script is copied successfully, False otherwise. + """ + # Get today's date in YYYY-MM-DD format + todays_date = datetime.now().strftime("%Y-%m-%d") + + # Define the source and destination paths + source_script = os.path.join(self.base_dir, "setup_host.sh") + destination_folder = os.path.join(self.base_parent_dir, folder, todays_date, 'cvw') + + # Check if the source script exists + if not os.path.exists(source_script): + print(f"Error: Source script '{source_script}' not found.") + return False + + + # Check if the destination folder exists, create it if necessary + if not os.path.exists(destination_folder): + print(f"Error: Destination folder '{destination_folder}' not found.") + return False + + # Copy the script to the destination folder + try: + shutil.copy(source_script, destination_folder) + #print(f"Setup script copied to: {destination_folder}") + return True + except Exception as e: + print(f"Error copying setup script: {e}") + return False + + + def set_env_var(self, folder): + """ + Source a shell script. + + Args: + script_path (str): Path to the script to be sourced. + + Returns: + None + """ + # find the new repository made + todays_date = datetime.now().strftime("%Y-%m-%d") + wally_path = os.path.join(self.base_parent_dir, folder, todays_date, 'cvw') + + # set the WALLY environmental variable to the new repository + os.environ["WALLY"] = wally_path + + self.base_dir = os.environ.get('WALLY') + self.base_parent_dir = os.path.dirname(self.base_dir) + self.temp_dir = self.base_parent_dir + + # print(f"The new WALLY vairable is: {os.environ.get('WALLY')}") + # print(f"The Base Directory is now : {self.base_dir}") + # print(f"The Base Parent Directory is now : {self.base_parent_dir}") + + def execute_makefile(self, target=None): + """ + Execute a Makefile with optional target. + + Args: + makefile_path (str): Path to the Makefile. 
+ target (str, optional): Target to execute in the Makefile. + + Returns: + True if the tests were made + False if the tests didnt pass + """ + # Prepare the command to execute the Makefile + make_file_path = os.path.join(self.base_dir, "sim") + os.chdir(make_file_path) + + output_file = os.path.join(self.base_dir, "tmp", "make_output.log") + + command = ["make"] + + # Add target to the command if specified + if target: + command.append(target) + #print(f"The command is: {command}") + + # Execute the command using subprocess and save the output into a file + with open(output_file, "w") as f: + formatted_datetime = self.current_datetime.strftime("%Y-%m-%d %H:%M:%S") + f.write(formatted_datetime) + f.write("\n\n") + result = subprocess.run(command, stdout=f, stderr=subprocess.STDOUT, text=True) + + # Execute the command using a subprocess and not save the output + #result = subprocess.run(command, text=True) + + # Check the result + if result.returncode == 0: + #print(f"Makefile executed successfully{' with target ' + target if target else ''}.") + return True + else: + #print("Error executing Makefile.") + return False + + def run_tests(self, test_type=None, test_name=None, test_exctention=None): + """ + Run a script through the terminal and save the output to a file. + + Args: + test_name (str): The test name will allow the function to know what test to execute in the sim directory + test_type (str): The type such as python, bash, etc + Returns: + True and the output file location + """ + + # Prepare the function to execute the simulation + test_file_path = os.path.join(self.base_dir, "sim") + + output_file = os.path.join(self.base_dir, "tmp", f"{test_name}-output.log") + os.chdir(test_file_path) + + if test_exctention: + command = [test_type, test_name, test_exctention] + else: + command = [test_type, test_name] + + # Execute the command using subprocess and save the output into a file + with open(output_file, "w") as f: + formatted_datetime = self.current_datetime.strftime("%Y-%m-%d %H:%M:%S") + f.write(formatted_datetime) + f.write("\n\n") + result = subprocess.run(command, stdout=f, stderr=subprocess.STDOUT, text=True) + + # Check if the command executed successfully + if result.returncode or result.returncode == 0: + return True, output_file + else: + print("Error:", result.returncode) + return False, output_file + + + def clean_format_output(self, input_file, output_file=None): + """ + Clean and format the output from tests. + + Args: + input_file (str): Path to the input file with raw test results. + output_file (str): Path to the file where cleaned and formatted output will be saved. 
+ + Returns: + None + """ + # Implement cleaning and formatting logic here + + # Open up the file with only read permissions + with open(input_file, 'r') as input_file: + unlceaned_output = input_file.read() + + # use something like this function to detect pass and fail + passed_configs = [] + failed_configs = [] + + lines = unlceaned_output.split('\n') + index = 0 + + while index < len(lines): + # Remove ANSI escape codes + line = re.sub(r'\x1b\[[0-9;]*[mGK]', '', lines[index]) + #print(line) + if "Success" in line: + passed_configs.append(line.split(':')[0].strip()) + elif "passed lint" in line: + #print(line) + passed_configs.append(line.split(' ')[0].strip()) + #passed_configs.append(line) # potentially use a space + elif "failed lint" in line: + failed_configs.append(line.split(' ')[0].strip(), "no log file") + #failed_configs.append(line) + + elif "Failures detected in output" in line: + try: + config_name = line.split(':')[0].strip() + log_file = os.path.abspath("logs/"+config_name+".log") + #print(f"The log file saving to: {log_file} in the current working directory: {os.getcwd()}") + failed_configs.append((config_name, log_file)) + except: + failed_configs.append((config_name, "Log file not found")) + + + index += 1 + + # alphabetically sort the configurations + if len(passed_configs) != 0: + passed_configs.sort() + + if len(failed_configs) != 0: + failed_configs.sort() + #print(f"The passed configs are: {passed_configs}") + #print(f"The failed configs are {failed_configs}") + return passed_configs, failed_configs + + def rewrite_to_markdown(self, test_name, passed_configs, failed_configs): + """ + Rewrite test results to markdown format. + + Args: + input_file (str): Path to the input file with cleaned and formatted output. + markdown_file (str): Path to the markdown file where test results will be saved. + + Returns: + None + """ + # Implement markdown rewriting logic here + timestamp = datetime.now().strftime("%Y-%m-%d") + + output_directory = os.path.join(self.base_parent_dir, "../../results", timestamp) + os.chdir(output_directory) + current_directory = os.getcwd() + output_file = os.path.join(current_directory, f"{test_name}.md") + #print("Current directory:", current_directory) + #print("Output File:", output_file) + + with open(output_file, 'w') as md_file: + + # Title + md_file.write(f"\n\n# Regression Test Results - {timestamp}\n\n") + #md_file.write(f"\n\n
\n# Regression Test Results - {timestamp}\n
\n\n") + + # File Path + md_file.write(f"\n**File:** {output_file}\n\n") + + if failed_configs: + md_file.write("## Failed Configurations\n\n") + for config, log_file in failed_configs: + md_file.write(f"- {config} ({log_file})\n") + md_file.write("\n") + else: + md_file.write("## Failed Configurations\n") + md_file.write(f"No Failures\n") + + md_file.write("\n## Passed Configurations\n") + for config in passed_configs: + md_file.write(f"- {config}\n") + + def combine_markdown_files(self, passed_tests, failed_tests, test_list, total_number_failures, total_number_success, test_type="default", markdown_file=None): + """ + First we want to display the server properties like: + - Server full name + - Operating System + + Combine the markdown files and format them to display all of the failures at the top categorized by what kind of test it was + Then display all of the successes. + + Args: + passed_tests (list): a list of successful tests + failed_tests (list): a list of failed tests + test_list (list): a list of the test names. + markdown_file (str): Path to the markdown file where test results will be saved. + + Returns: + None + """ + timestamp = datetime.now().strftime("%Y-%m-%d") + + output_directory = os.path.join(self.base_parent_dir, "../../results", timestamp) + os.chdir(output_directory) + current_directory = os.getcwd() + output_file = os.path.join(current_directory, "results.md") + + + with open(output_file, 'w') as md_file: + # Title + md_file.write(f"\n\n# Nightly Test Results - {timestamp}\n\n") + # Host information + try: + # Run hostname command + hostname = subprocess.check_output(['hostname', '-A']).strip().decode('utf-8') + md_file.write(f"**Host name:** {hostname}") + md_file.write("\n") + # Run uname command to get OS information + os_info = subprocess.check_output(['uname', '-a']).strip().decode('utf-8') + md_file.write(f"\n**Operating System Information:** {os_info}") + md_file.write("\n") + except subprocess.CalledProcessError as e: + # Handle if the command fails + md_file.write(f"Failed to identify host and Operating System information: {str(e)}") + + # Which tests did we run + md_file.write(f"\n**Tests made:** `make {test_type}`\n") + + # File Path + md_file.write(f"\n**File:** {output_file}\n\n") # *** needs to be changed + md_file.write(f"**Total Successes: {total_number_success}**\n") + md_file.write(f"**Total Failures: {total_number_failures}**\n") + + # Failed Tests + md_file.write(f"\n\n## Failed Tests") + md_file.write(f"\nTotal failed tests: {total_number_failures}") + for (test_item, item) in zip(test_list, failed_tests): + md_file.write(f"\n\n### {test_item[1]} test") + md_file.write(f"\n**General Information**\n") + md_file.write(f"\n* Test type: {test_item[0]}\n") + md_file.write(f"\n* Test name: {test_item[1]}\n") + md_file.write(f"\n* Test extension: {test_item[2]}\n\n") + md_file.write(f"**Failed Tests:**\n") + + + + if len(item) == 0: + md_file.write("\n") + md_file.write(f"* No failures\n") + md_file.write("\n") + else: + for failed_test in item: + config = failed_test[0] + log_file = failed_test[1] + + md_file.write("\n") + md_file.write(f"* {config} ({log_file})\n") + md_file.write("\n") + # Successfull Tests + + md_file.write(f"\n\n## Successfull Tests") + md_file.write(f"\n**Total successfull tests: {total_number_success}**") + for (test_item, item) in zip(test_list, passed_tests): + md_file.write(f"\n\n### {test_item[1]} test") + md_file.write(f"\n**General Information**\n") + md_file.write(f"\n* Test type: {test_item[0]}") + 
md_file.write(f"\n* Test name: {test_item[1]}") + md_file.write(f"\n* Test extension: {test_item[2]}\n\n") + md_file.write(f"\n**Successfull Tests:**\n") + + + + if len(item) == 0: + md_file.write("\n") + md_file.write(f"* No successes\n") + md_file.write("\n") + else: + for passed_tests in item: + config = passed_tests + + md_file.write("\n") + md_file.write(f"* {config}\n") + md_file.write("\n") + + + + def convert_to_html(self, markdown_file="results.md", html_file="results.html"): + """ + Convert markdown file to HTML. + + Args: + markdown_file (str): Path to the markdown file. + html_file (str): Path to the HTML file where converted output will be saved. + + Returns: + None + """ + # Implement markdown to HTML conversion logic here + todays_date = self.current_datetime.strftime("%Y-%m-%d") + markdown_file_path = os.path.join(self.base_parent_dir, "../../results", todays_date) + os.chdir(markdown_file_path) + + with open(markdown_file, 'r') as md_file: + md_content = md_file.read() + html_content = markdown.markdown(md_content) + + with open(html_file, 'w') as html_file: + html_file.write(html_content) + + + + def send_email(self, sender_email=None, receiver_emails=None, subject="Nightly Regression Test"): + """ + Send email with HTML content. + + Args: + self: The instance of the class. + sender_email (str): The sender's email address. Defaults to None. + receiver_emails (list[str]): List of receiver email addresses. Defaults to None. + subject (str, optional): Subject of the email. Defaults to "Nightly Regression Test". + + Returns: + None + """ + + # check if there are any emails + if not receiver_emails: + print("No receiver emails provided.") + return + # grab thge html file + todays_date = self.current_datetime.strftime("%Y-%m-%d") + html_file_path = os.path.join(self.base_parent_dir, "../../results", todays_date) + os.chdir(html_file_path) + html_file = "results.html" + + with open(html_file, 'r') as html_file: + body = html_file.read() + + + + + for receiver_email in receiver_emails: + # Compose the mutt command for each receiver email + command = [ + 'mutt', + '-s', subject, + '-e', 'set content_type=text/html', + '-e', 'my_hdr From: James Stine ', + '--', receiver_email + ] + + # Open a subprocess to run the mutt command + process = subprocess.Popen(command, stdin=subprocess.PIPE) + + # Write the email body to the subprocess + process.communicate(body.encode('utf-8')) + + +############################################# +# SETUP # +############################################# +folder_manager = FolderManager() # creates the object + +# setting the path on where to clone new repositories of cvw +path = folder_manager.create_preliminary_folders(["nightly_runs/repos/", "nightly_runs/results/"]) +new_folder = folder_manager.create_new_folder(["nightly_runs/repos/", "nightly_runs/results/"]) + +# clone the cvw repo +folder_manager.clone_repository("nightly_runs/repos/", "https://github.com/openhwgroup/cvw.git") + + + +############################################# +# SETUP # +############################################# + +test_runner = TestRunner() # creates the object +test_runner.set_env_var("nightly_runs/repos/") # ensures that the new WALLY environmental variable is set correctly + + +############################################# +# MAKE TESTS # +############################################# + + +# target = "wally-riscv-arch-test" +target = "all" +if test_runner.execute_makefile(target = target): + print(f"The {target} tests were made successfully") + 
+############################################# +# RUN TESTS # +############################################# + + +test_list = [["python", "regression-wally", "-nightly"], ["bash", "lint-wally", "-nightly"], ["bash", "coverage", "--search"]] +output_log_list = [] # a list where the output markdown file lcoations will be saved to +total_number_failures = 0 # an integer where the total number failures from all of the tests will be collected +total_number_success = 0 # an integer where the total number of sucess will be collected + +total_failures = [] +total_success = [] + +for test_type, test_name, test_exctention in test_list: + print("--------------------------------------------------------------") + print(f"Test type: {test_type}") + print(f"Test name: {test_name}") + print(f"Test extenction: {test_exctention}") + + check, output_location = test_runner.run_tests(test_type=test_type, test_name=test_name, test_exctention=test_exctention) + print(check) + print(output_location) + if check: # this checks if the test actually ran successfully + output_log_list.append(output_location) + + # format tests to markdown + try: + passed, failed = test_runner.clean_format_output(input_file = output_location) + except: + print("There was an error cleaning the data") + + print(f"The # of failures are for {test_name}: {len(failed)}") + total_number_failures+= len(failed) + total_failures.append(failed) + + print(f"The # of sucesses are for {test_name}: {len(passed)}") + total_number_success += len(passed) + total_success.append(passed) + test_runner.rewrite_to_markdown(test_name, passed, failed) + +print(f"The total sucesses are: {total_number_success}") +print(f"The total failures are: {total_number_failures}") + + + + + + +############################################# +# FORMAT TESTS # +############################################# + +# Combine multiple markdown files into one file + +test_runner.combine_markdown_files(passed_tests = total_success, failed_tests = total_failures, test_list = test_list, total_number_failures = total_number_failures, total_number_success = total_number_success, test_type=target, markdown_file=None) + + +############################################# +# WRITE MD TESTS # +############################################# +test_runner.convert_to_html() + + + +############################################# +# SEND EMAIL # +############################################# + +sender_email = 'james.stine@okstate.edu' +receiver_emails = ['thomas.kidd@okstate.edu', 'james.stine@okstate.edu', 'harris@g.hmc.edu', 'rose.thompson10@okstate.edu'] +test_runner.send_email(sender_email=sender_email, receiver_emails=receiver_emails) \ No newline at end of file diff --git a/bin/nightly_build/nightly_build_v13_beta.sh b/bin/nightly_build/nightly_build_v13_beta.sh deleted file mode 100755 index 2e0ca42d4..000000000 --- a/bin/nightly_build/nightly_build_v13_beta.sh +++ /dev/null @@ -1,85 +0,0 @@ -#!/bin/bash - -# set WALLY path -WALLY=$(dirname ${BASH_SOURCE[0]:-$0}) -export WALLY=$(cd "$WALLY/../../" && pwd) -echo "WALLY is set to: $WALLY" - -# Going to nightly runs -cd $WALLY/../ - -# check if directories exist -if [ ! -d "build-results" ]; then - echo "Directory does not exist, creating it..." - mkdir -p "build-results" - if [ $? -eq 0 ]; then - echo "Directory created successfully." - else - echo "Failed to create directory." - exit 1 - fi -else - echo "Directory already exists." -fi - -if [ ! -d "logs" ]; then - echo "Directory does not exist, creating it..." - mkdir -p "logs" - if [ $? 
-eq 0 ]; then - echo "Directory created successfully." - else - echo "Failed to create directory." - exit 1 - fi -else - echo "Directory already exists." -fi - -# setup source okstate file -echo "Sourcing setup files" -source $WALLY/setup_host.sh -source $WALLY/../setup-files/setup_tools.sh - -# Navigate to the gir repo -cd $WALLY - -# pull the repository -echo "Pulling submodules" -#git pull --recurse-submodules origin main - -# build the regression tests -echo "Building the regression tests" -cd sim -#if make wally-riscv-arch-test; then -#if make all; then -# echo "Make successfull" -#else -# echo "Make failed" -# cd $WALLY/.. - # add the the regression result and the coverage result that there was an error in making the tests -# python $WALLY/bin/nightly_build/src/error_detector.py --tag make -o $WALLY/../build-results/regression_results.md -# python $WALLY/bin/nightly_build/src/error_detector.py --tag make -o $WALLY/../build-results/coverage_results.md - - # exit the program - #exit 1 -#fi - -# execute the simulation / regression tests and save output to a file -echo "running the regression test" -#./regression-wally > $WALLY/../logs/regression_output.log 2>&1 - -echo "running coverage tests" -#./coverage > $WALLY/../logs/coverage_output.log 2>&1 - - -# run the Python script to parse the output and generate the log file -echo "Parsing output data from the regression test" -cd $WALLY/../ - -python $WALLY/bin/nightly_build/src/parse_regression.py -i $WALLY/../logs/regression_output.log -o $WALLY/../build-results/regression_results.md - -python $WALLY/bin/nightly_build/src/parse_coverage.py -i $WALLY/../logs/coverage_output.log -o $WALLY/../build-results/coverage_results.md - -# email update -cd $WALLY/bin/nightly_build/src/ -./send_mail_html.sh diff --git a/bin/nightly_build/src/error_detector.py b/bin/nightly_build/src/error_detector.py deleted file mode 100644 index c239032e9..000000000 --- a/bin/nightly_build/src/error_detector.py +++ /dev/null @@ -1,37 +0,0 @@ -import argparse -import datetime - -def add_failure_to_markdown(tag, output_file, input_file = None): - # Get the current date and time - current_datetime = datetime.datetime.now() - formatted_datetime = current_datetime.strftime("%Y-%m-%d %H:%M:%S") - - # Create the failure message based on the provided tag and input content - if tag == "make": - failure_message = f"# {tag.capitalize()} riscof comilation failure - {formatted_datetime}\n\n" - failure_message += f"The error was due to a problem in compiling the the riscof tests:\n\n" - if input_file != None: - failure_message += f"The particular error: {input_file}\n\n" - else: - failure_message = f"# {tag.capitalize()} Failure - {formatted_datetime}\n\n" - failure_message += f":\n\n" - - # Append the failure message to the specified output file - with open(output_file, "a") as file: - file.write(failure_message) - - print(f"Failure information added to {output_file}.") - -if __name__ == "__main__": - # Set up argparse - parser = argparse.ArgumentParser(description="Add failure information to Markdown file.") - parser.add_argument("--tag", required=True, help="Specify the tag for the failure type (e.g., 'make', 'custom').") - parser.add_argument("-i", required=False, help="Specify the input file containing failure details.") - parser.add_argument("-o", required=True, help="Specify the output file to write the failure information.") - - # Parse command-line arguments - args = parser.parse_args() - - # Call the function with the specified tag, input file, and output file - 
add_failure_to_markdown(args.tag, args.o) - diff --git a/bin/nightly_build/src/parse_coverage.py b/bin/nightly_build/src/parse_coverage.py deleted file mode 100644 index 33811b0a1..000000000 --- a/bin/nightly_build/src/parse_coverage.py +++ /dev/null @@ -1,95 +0,0 @@ -import os -import argparse -from datetime import datetime -import re -from colorama import init, Fore, Style - -def parse_regression_output(output): - passed_configs = [] - failed_configs = [] - - lines = output.split('\n') - index = 0 - - while index < len(lines): - # Remove ANSI escape codes - line = re.sub(r'\x1b\[[0-9;]*[mGK]', '', lines[index]) - if "Success" in line: - passed_configs.append(line.split(':')[0].strip()) - elif "Failures detected in output" in line: - try: - config_name = line.split(':')[0].strip() - log_file = os.path.abspath(config_name+".log") - failed_configs.append((config_name, log_file)) - except: - failed_configs.append((config_name, "Log file not found")) - - index += 1 - - # alphabetically sort the configurations - passed_configs.sort() - failed_configs.sort() - return passed_configs, failed_configs - -def write_to_markdown(passed_configs, failed_configs, output_file): - timestamp = datetime.now().strftime("%Y-%m-%d %H:%M:%S") - - with open(output_file, 'a') as md_file: - md_file.write(f"\n\n# Coverage Test Results - {timestamp}\n\n") - - md_file.write("\n## Failed Configurations\n") - for config, log_file in failed_configs: - md_file.write(f"- {config} ({log_file})\n") - if len(failed_configs) == 0: - md_file.write(" - no failures\n") - - md_file.write("\n## Passed Configurations\n") - for config in passed_configs: - md_file.write(f"- {config}\n") - - -def write_new_markdown(passed_configs, failed_configs): - timestamp = datetime.now().strftime("%Y-%m-%d") - output_file = f"/home/thkidd/nightly_runs/build-results/builds/coverage/wally_coverage_{timestamp}.md" - - with open(output_file, 'w') as md_file: - # Title - md_file.write(f"\n\n# Coverage Test Results - {timestamp}\n\n") - - ## File path - md_file.write(f"\n**File:** {output_file}\n") - - md_file.write("\n## Failed Configurations\n") - # add in if there were no failures - if len(failed_configs) == 0: - md_file.write(f"No Failures\n") - - for config, log_file in failed_configs: - md_file.write(f"- {config} ({log_file})\n") - - md_file.write("\n## Passed Configurations\n") - for config in passed_configs: - md_file.write(f"- {config}\n") - - - - -if __name__ == "__main__": - init(autoreset=True) # Initialize colorama - parser = argparse.ArgumentParser(description='Parse regression test output and append to a markdown file.') - parser.add_argument('-i', '--input', help='Input file containing regression test output', required=True) - parser.add_argument('-o', '--output', help='Output markdown file', default='regression_results.md') - args = parser.parse_args() - - with open(args.input, 'r') as input_file: - regression_output = input_file.read() - - passed_configs, failed_configs = parse_regression_output(regression_output) - write_to_markdown(passed_configs, failed_configs, args.output) - - print(f"Markdown file updated: {args.output}") - - write_new_markdown(passed_configs, failed_configs) - - print("New markdown file created") - diff --git a/bin/nightly_build/src/parse_regression.py b/bin/nightly_build/src/parse_regression.py deleted file mode 100644 index 0d8c9467d..000000000 --- a/bin/nightly_build/src/parse_regression.py +++ /dev/null @@ -1,102 +0,0 @@ -import argparse -import os -from datetime import datetime -import re -from colorama 
import init, Fore, Style - -def parse_regression_output(output): - passed_configs = [] - failed_configs = [] - - lines = output.split('\n') - index = 0 - - while index < len(lines): - # Remove ANSI escape codes - line = re.sub(r'\x1b\[[0-9;]*[mGK]', '', lines[index]) - #print("The cleaned line: ", line) - if "Success" in line: - passed_configs.append(line.split(':')[0].strip()) - elif "Failures detected in output" in line: - try: - config_name = line.split(':')[0].strip() - log_file = os.path.abspath(config_name+".log") - failed_configs.append((config_name, log_file)) - except: - failed_configs.append((config_name, "Log file not found")) - elif "Timeout" in line: - try: - config_name = line.split(':')[0].strip() - log_file = os.path.abspath(config_name+".log") - failed_configs.append((config_name, log_file)) - except: - failed_configs.append((config_name, "Log file not found")) - index += 1 - - # alphabetically sort the configurations - passed_configs.sort() - failed_configs.sort() - return passed_configs, failed_configs - -def write_to_markdown(passed_configs, failed_configs, output_file): - timestamp = datetime.now().strftime("%Y-%m-%d %H:%M:%S") - with open(output_file, 'a') as md_file: - md_file.write(f"\n\n
\n# Regression Test Results - {timestamp}\n
\n\n") - #md_file.write(f"\n\n# Regression Test Results - {timestamp}\n\n") - - if failed_configs: - md_file.write("## Failed Configurations\n") - for config, log_file in failed_configs: - md_file.write(f"- {config} ({log_file})\n") - md_file.write("\n") - else: - md_file.write("## No Failed Configurations\n") - - md_file.write("\n## Passed Configurations\n") - for config in passed_configs: - md_file.write(f"- {config}\n") - -def write_new_markdown(passed_configs, failed_configs): - timestamp = datetime.now().strftime("%Y-%m-%d") - output_file = f"/home/thkidd/nightly_runs/build-results/builds/regression/wally_regression_{timestamp}.md" - with open(output_file, 'w') as md_file: - - # Title - md_file.write(f"\n\n# Regression Test Results - {timestamp}\n\n") - #md_file.write(f"\n\n
\n# Regression Test Results - {timestamp}\n
\n\n") - - # File Path - md_file.write(f"\n**File:** {output_file}\n\n") - - if failed_configs: - md_file.write("## Failed Configurations\n\n") - for config, log_file in failed_configs: - md_file.write(f"- {config} ({log_file})\n") - md_file.write("\n") - else: - md_file.write("## Failed Configurations\n") - md_file.write(f"No Failures\n") - - md_file.write("\n## Passed Configurations\n") - for config in passed_configs: - md_file.write(f"- {config}\n") - -if __name__ == "__main__": - init(autoreset=True) # Initialize colorama - parser = argparse.ArgumentParser(description='Parse regression test output and append to a markdown file.') - parser.add_argument('-i', '--input', help='Input file containing regression test output', required=True) - parser.add_argument('-o', '--output', help='Output markdown file containing formatted file', default='regression_results.md') - args = parser.parse_args() - - with open(args.input, 'r') as input_file: - regression_output = input_file.read() - - passed_configs, failed_configs = parse_regression_output(regression_output) - write_to_markdown(passed_configs, failed_configs, args.output) - - print(f"Markdown file updated: {args.output}") - - write_new_markdown(passed_configs, failed_configs) - - print("New markdown file created") - diff --git a/bin/nightly_build/src/send_mail_html.sh b/bin/nightly_build/src/send_mail_html.sh deleted file mode 100755 index 5c3735a3a..000000000 --- a/bin/nightly_build/src/send_mail_html.sh +++ /dev/null @@ -1,79 +0,0 @@ -current_date=$(date "+%Y-%m-%d") -email_address="thomas.kidd@okstate.edu" -#email_address="WALLY-REGRESSION@LISTSERV.OKSTATE.EDU" -subject="WALLY regression and coverage test report" -attachments="" -html_body="" -host_name=$(hostname) -os_info=$(lsb_release -a 2>/dev/null) -script_location=$WALLY/bin/nightly_build/ - -html_body="
-System Information
-Server Name: $host_name@okstate.edu
-Operating System: $os_info
-Script Origin: $script_location
-
-Testing sending HTML content through mutt
" - -# Iterate through the files and concatenate their content to the body -for file in $WALLY/../build-results/builds/*/wally_*_"$current_date"*.md; do - attachments+=" -a $file" - - # Convert Markdown to HTML using pandoc - html_content=$(pandoc "$file") - - # add the file full path - # html_body+="
File: $file
" - # Append the HTML content to the body - html_body+="$html_content" -done -echo "Sending email" - -# Get server hostname and OS information -host_name=$(hostname) -os_info=$(uname -a) - -# Define HTML body content - -# Use mutt to send the email with HTML body -#mutt -e "my_hdr From:James Stine " -s "$subject" $attachments \ -mutt -e "my_hdr From:Thomas Kidd " -s "$subject" \ - -e "set content_type=text/html" -- $email_address < - - - - Nightly Build Results - $current_date - - - -
Test Results - $current_date
- - $html_body - - -EOF - diff --git a/bin/wrapper_nightly_runs.sh b/bin/wrapper_nightly_runs.sh new file mode 100755 index 000000000..219e765df --- /dev/null +++ b/bin/wrapper_nightly_runs.sh @@ -0,0 +1,28 @@ +#!/bin/bash +date + + +# Variables +LOG=$HOME/nightly_runs/logs/from_wrapper.log # you can store your log file where you would like +PYTHON_SCRIPT=$HOME/nightly_runs/cvw/bin/ # cvw can be anywhere you would like it. Make sure to point your variable there +SETUP_SCRIPT=$HOME/nightly_runs/cvw/ # cvw can be anywhere you would like it. Make sure to point your variable there + + + +date > $LOG 2>&1 + +echo "Current directory" +pwd + +cd $SETUP_SCRIPT +echo "Current directory" +pwd + +echo "Sourcing setup_host" +source ./setup_host.sh >> $LOG 2>&1 +echo "Sourcing setup_tools" + +cd $PYTHON_SCRIPT +pwd +echo "Running python file" +python nightly_build.py >> $LOG 2>&1 From 4c84b9d819aa2637c8f19d5e7d68e167a5a14c50 Mon Sep 17 00:00:00 2001 From: Thomas Kidd Date: Fri, 23 Feb 2024 14:54:03 -0600 Subject: [PATCH 10/20] updated nightly build, but ran into buildroot errors --- bin/nightly_build.py | 13 ++++++++----- bin/wrapper_nightly_runs.sh | 1 - 2 files changed, 8 insertions(+), 6 deletions(-) diff --git a/bin/nightly_build.py b/bin/nightly_build.py index 21d0ce025..7848198d2 100755 --- a/bin/nightly_build.py +++ b/bin/nightly_build.py @@ -615,8 +615,8 @@ test_runner.set_env_var("nightly_runs/repos/") # ensures that the new WALLY envi ############################################# -# target = "wally-riscv-arch-test" -target = "all" +target = "wally-riscv-arch-test" +# target = "all" if test_runner.execute_makefile(target = target): print(f"The {target} tests were made successfully") @@ -640,8 +640,8 @@ for test_type, test_name, test_exctention in test_list: print(f"Test extenction: {test_exctention}") check, output_location = test_runner.run_tests(test_type=test_type, test_name=test_name, test_exctention=test_exctention) - print(check) - print(output_location) + print(f"Did the tests run?: {check}") + print(f"The tests log files are saved to: {output_location}") if check: # this checks if the test actually ran successfully output_log_list.append(output_location) @@ -689,5 +689,8 @@ test_runner.convert_to_html() ############################################# sender_email = 'james.stine@okstate.edu' -receiver_emails = ['thomas.kidd@okstate.edu', 'james.stine@okstate.edu', 'harris@g.hmc.edu', 'rose.thompson10@okstate.edu'] +# sender_email = 'thomas.kidd@okstate.edu' + +# receiver_emails = ['thomas.kidd@okstate.edu', 'james.stine@okstate.edu', 'harris@g.hmc.edu', 'rose.thompson10@okstate.edu'] +receiver_emails = ['thomas.kidd@okstate.edu'] test_runner.send_email(sender_email=sender_email, receiver_emails=receiver_emails) \ No newline at end of file diff --git a/bin/wrapper_nightly_runs.sh b/bin/wrapper_nightly_runs.sh index 219e765df..55d2da04f 100755 --- a/bin/wrapper_nightly_runs.sh +++ b/bin/wrapper_nightly_runs.sh @@ -20,7 +20,6 @@ pwd echo "Sourcing setup_host" source ./setup_host.sh >> $LOG 2>&1 -echo "Sourcing setup_tools" cd $PYTHON_SCRIPT pwd From 9ccc93ff0e0c0e973beb875ea7c27a4af26ced57 Mon Sep 17 00:00:00 2001 From: Thomas Kidd Date: Mon, 4 Mar 2024 18:21:03 -0600 Subject: [PATCH 11/20] over rides TIMEOUT on -nightly tag for regression since buildroot is not working --- bin/nightly_build.py | 45 ++++++++++++++++++++++++++++++++++++++------ 1 file changed, 39 insertions(+), 6 deletions(-) diff --git a/bin/nightly_build.py b/bin/nightly_build.py index 7848198d2..166b8b9f9 100755 --- 
a/bin/nightly_build.py +++ b/bin/nightly_build.py @@ -228,7 +228,29 @@ class TestRunner: # print(f"The new WALLY vairable is: {os.environ.get('WALLY')}") # print(f"The Base Directory is now : {self.base_dir}") # print(f"The Base Parent Directory is now : {self.base_parent_dir}") - + + def change_time_dur(self, time_duriation=1): + + # Prepare the command to execute the Makefile + make_file_path = os.path.join(self.base_dir, "sim") + os.chdir(make_file_path) + file_path = "regression-wally" + line_number = 450 # TIMEOUT_DUR = 1 day at this line in regression-wally + new_line = f" TIMEOUT_DUR = {60*time_duriation}" + + with open(file_path, 'r') as file: + lines = file.readlines() + + if line_number < 1 or line_number > len(lines): + print("Error: Line number out of range.") + return False + + lines[line_number - 1] = new_line + '\n' + + with open(file_path, 'w') as file: + file.writelines(lines) + return True + def execute_makefile(self, target=None): """ Execute a Makefile with optional target. @@ -610,13 +632,24 @@ test_runner = TestRunner() # creates the object test_runner.set_env_var("nightly_runs/repos/") # ensures that the new WALLY environmental variable is set correctly +############################################# +# TMP SETUP # +############################################# + +""" +The goal of this section is to replace the TIMEOUT_DUR for regression tests. + +""" +if test_runner.change_time_dur(): + print("The regression-wally file was successfully changed") + ############################################# # MAKE TESTS # ############################################# -target = "wally-riscv-arch-test" -# target = "all" +# target = "wally-riscv-arch-test" +target = "all" if test_runner.execute_makefile(target = target): print(f"The {target} tests were made successfully") @@ -691,6 +724,6 @@ test_runner.convert_to_html() sender_email = 'james.stine@okstate.edu' # sender_email = 'thomas.kidd@okstate.edu' -# receiver_emails = ['thomas.kidd@okstate.edu', 'james.stine@okstate.edu', 'harris@g.hmc.edu', 'rose.thompson10@okstate.edu'] -receiver_emails = ['thomas.kidd@okstate.edu'] -test_runner.send_email(sender_email=sender_email, receiver_emails=receiver_emails) \ No newline at end of file +receiver_emails = ['thomas.kidd@okstate.edu', 'james.stine@okstate.edu', 'harris@g.hmc.edu', 'rose.thompson10@okstate.edu', "sarah.harris@unlv.edu"] +# receiver_emails = ['thomas.kidd@okstate.edu'] +test_runner.send_email(sender_email=sender_email, receiver_emails=receiver_emails) From 22947e5b5eefece7b0308b4282a7444d9f841d8a Mon Sep 17 00:00:00 2001 From: Thomas Kidd Date: Tue, 5 Mar 2024 14:26:35 -0600 Subject: [PATCH 12/20] udpated readme by adding how to add crontab section --- README.md | 8 +++++++ setup_host.sh | 58 +++++++++++++++++++++++++++++++++++++++++++++++++++ 2 files changed, 66 insertions(+) create mode 100644 setup_host.sh diff --git a/README.md b/README.md index f0b1a9f4b..d75e8fb98 100644 --- a/README.md +++ b/README.md @@ -128,4 +128,12 @@ If you want to implement your own version of the chip, your tool and license com Startups can expect to spend more than $1 million on CAD tools to get a chip to market. Commercial CAD tools are not realistically available to individuals without a university or company connection. 
+## Adding Cron Job for nightly builds + +If you want to add a cronjob you can do the following: +1) `crontab -e` +2) add this code: +``` +0 3 * * * BASH_ENV=~/.bashrc bash -l -c "PATH_TO_CVW/cvw/bin/wrapper_nightly_runs.sh > PATH_TO_LOG_FOLDER/cron.log" +``` diff --git a/setup_host.sh b/setup_host.sh new file mode 100644 index 000000000..55854a71c --- /dev/null +++ b/setup_host.sh @@ -0,0 +1,58 @@ +#!/bin/bash + +# setup.sh +# David_Harris@hmc.edu and kekim@hmc.edu 1 December 2021 +# Set up tools for riscv-wally + +echo "Executing Wally setup.sh" + +# Path to Wally repository +WALLY=$(dirname ${BASH_SOURCE[0]:-$0}) +export WALLY=$(cd "$WALLY" && pwd) +echo \$WALLY set to ${WALLY} + +# Path to RISC-V Tools +export RISCV=/opt/riscv # change this if you installed the tools in a different location + +# Tools +# GCC +#export LD_LIBRARY_PATH=$LD_LIBRARY_PATH:$RISCV/riscv-gnu-toolchain/lib:$RISCV/riscv-gnu-toolchain/riscv64-unknown-elf/lib +#export PATH=$PATH:$RISCV/riscv-gnu-toolchain/bin:$RISCV/riscv-gnu-toolchain/riscv64-unknown-elf/bin # GCC tools +# Spike +#export LD_LIBRARY_PATH=$RISCV/lib:$LD_LIBRARY_PATH +export PATH=$PATH:$RISCV/bin +# utility functions in Wally repository +export PATH=$WALLY/bin:$PATH +# Verilator +export PATH=$RICKV/verilator:$PATH # Change this for your path to Verilator +# ModelSim/Questa (vsim) +# Note: 2022.1 complains on cache/sram1p1r1w about StoredData cannot be driven by multiple always_ff blocks. Ues 2021.2 for now +#export PATH=/cad/mentor/questa_sim-2022.1_1/questasim/bin:$PATH # Change this for your path to Modelsim +#export PATH=/cad/mentor/questa_sim-2021.2_1/questasim/bin:$PATH # Change this for your path to Modelsim, or delete +#export MGLS_LICENSE_FILE=1717@solidworks.eng.hmc.edu # Change this to your Siemens license server +#export PATH=/cad/synopsys/SYN/bin:$PATH # Change this for your path to Design Compiler +#export SNPSLMD_LICENSE_FILE=27020@134.173.38.184 # Change this to your license manager file + +# Imperas; put this in if you are using it +#export PATH=$RISCV/imperas-riscv-tests/riscv-ovpsim-plus/bin/Linux64:$PATH +#export LD_LIBRARY_PATH=$RISCV/imperas_riscv_tests/riscv-ovpsim-plus/bin/Linux64:$LD_LIBRARY_PATH # remove if no imperas + +export MODSIM=/opt/ModelSim/questasim +export PATH=$PATH:$MODSIM/bin +export LD_LIBRARY_PATH=/usr/lib:/lib +export MGC_DOC_PATH=$MODSIM/docs +export MGC_PDF_READER=evince +export MGC_HTML_BROWSER=firefox +export MGLS_LICENSE_FILE=1717@trelaina.ecen.okstate.edu +export IMPERASD_LICENSE_FILE=2700@trelaina.ecen.okstate.edu + +export IDV=$RISCV/ImperasDV-OpenHW +if [ -e "$IDV" ]; then +# echo "Imperas exists" + export IMPERAS_HOME=$IDV/Imperas + export IMPERAS_PERSONALITY=CPUMAN_DV_ASYNC + export ROOTDIR=${WALLY}/.. 
+ source ${IMPERAS_HOME}/bin/setup.sh + setupImperas ${IMPERAS_HOME} + export PATH=$IDV/scripts/cvw:$PATH +fi From 4addee0fc0b450ac9102dc9e6b41ff08d385c380 Mon Sep 17 00:00:00 2001 From: Thomas Kidd Date: Sat, 9 Mar 2024 14:21:04 -0600 Subject: [PATCH 13/20] updated nightly runs with try statement in email sending --- bin/nightly_build.py | 69 +++++++++++++++++++------------------ bin/wrapper_nightly_runs.sh | 7 ++-- 2 files changed, 39 insertions(+), 37 deletions(-) diff --git a/bin/nightly_build.py b/bin/nightly_build.py index 166b8b9f9..ee4988736 100755 --- a/bin/nightly_build.py +++ b/bin/nightly_build.py @@ -36,7 +36,7 @@ Usage: 4) Syntax: Our cron job has the following syntax: - 0 3 * * * BASH_ENV=~/.bashrc bash -l -c "*WHERE YOUR CVW IS MUST PUT FULL PATH*/cvw/bin/wrapper_nightly_runs.sh > *WHERE YOU WANT TO STORE LOG FILES/cron.log 2>&1" + 0 3 * * * BASH_ENV=~/.bashrc bash -l -c "*WHERE YOUR CVW IS MUST PUT FULL PATH*/cvw/bin/wrapper_nightly-runs.sh > *WHERE YOU WANT TO STORE LOG FILES/cron.log 2>&1" This cronjob sources the .bashrc file and executes the wrapper script as a user. @@ -96,8 +96,8 @@ class FolderManager: """ Create preliminary folders if they do not exist. These folders are: - nightly_runs/repos/ - nightly_runs/results/ + nightly-runs/repos/ + nightly-runs/results/ Args: folders (list): A list of folder names to be created. @@ -171,7 +171,7 @@ class TestRunner: The setup script will be copied from the base directory to a specific folder structure inside the base directory. Args: - folder: the "nightly_runs/repos/" + folder: the "nightly-runs/repos/" Returns: bool: True if the script is copied successfully, False otherwise. @@ -592,35 +592,36 @@ class TestRunner: - - for receiver_email in receiver_emails: - # Compose the mutt command for each receiver email - command = [ - 'mutt', - '-s', subject, - '-e', 'set content_type=text/html', - '-e', 'my_hdr From: James Stine ', - '--', receiver_email - ] - - # Open a subprocess to run the mutt command - process = subprocess.Popen(command, stdin=subprocess.PIPE) - - # Write the email body to the subprocess - process.communicate(body.encode('utf-8')) - - + try: + for receiver_email in receiver_emails: + # Compose the mutt command for each receiver email + command = [ + '/usr/bin/mutt', + '-s', subject, + '-e', 'set content_type=text/html', + '-e', 'my_hdr From: James Stine ', + '--', receiver_email + ] + try: + # Open a subprocess to run the mutt command + process = subprocess.Popen(command, stdin=subprocess.PIPE) + # Write the email body to the subprocess + process.communicate(body.encode('utf-8')) + except expression as identifier: + print(f"Error sending email: {identifier}") + except expression as identifier: + print(f"Error sending email: {identifier}") ############################################# # SETUP # ############################################# folder_manager = FolderManager() # creates the object # setting the path on where to clone new repositories of cvw -path = folder_manager.create_preliminary_folders(["nightly_runs/repos/", "nightly_runs/results/"]) -new_folder = folder_manager.create_new_folder(["nightly_runs/repos/", "nightly_runs/results/"]) +path = folder_manager.create_preliminary_folders(["nightly-runs/repos/", "nightly-runs/results/"]) +new_folder = folder_manager.create_new_folder(["nightly-runs/repos/", "nightly-runs/results/"]) # clone the cvw repo -folder_manager.clone_repository("nightly_runs/repos/", "https://github.com/openhwgroup/cvw.git") 
+folder_manager.clone_repository("nightly-runs/repos/", "https://github.com/openhwgroup/cvw.git") @@ -629,7 +630,7 @@ folder_manager.clone_repository("nightly_runs/repos/", "https://github.com/openh ############################################# test_runner = TestRunner() # creates the object -test_runner.set_env_var("nightly_runs/repos/") # ensures that the new WALLY environmental variable is set correctly +test_runner.set_env_var("nightly-runs/repos/") # ensures that the new WALLY environmental variable is set correctly ############################################# @@ -650,8 +651,8 @@ if test_runner.change_time_dur(): # target = "wally-riscv-arch-test" target = "all" -if test_runner.execute_makefile(target = target): - print(f"The {target} tests were made successfully") +# if test_runner.execute_makefile(target = target): +# print(f"The {target} tests were made successfully") ############################################# # RUN TESTS # @@ -671,7 +672,7 @@ for test_type, test_name, test_exctention in test_list: print(f"Test type: {test_type}") print(f"Test name: {test_name}") print(f"Test extenction: {test_exctention}") - + check, output_location = test_runner.run_tests(test_type=test_type, test_name=test_name, test_exctention=test_exctention) print(f"Did the tests run?: {check}") print(f"The tests log files are saved to: {output_location}") @@ -683,16 +684,16 @@ for test_type, test_name, test_exctention in test_list: passed, failed = test_runner.clean_format_output(input_file = output_location) except: print("There was an error cleaning the data") - + print(f"The # of failures are for {test_name}: {len(failed)}") total_number_failures+= len(failed) total_failures.append(failed) - + print(f"The # of sucesses are for {test_name}: {len(passed)}") total_number_success += len(passed) total_success.append(passed) test_runner.rewrite_to_markdown(test_name, passed, failed) - + print(f"The total sucesses are: {total_number_success}") print(f"The total failures are: {total_number_failures}") @@ -724,6 +725,6 @@ test_runner.convert_to_html() sender_email = 'james.stine@okstate.edu' # sender_email = 'thomas.kidd@okstate.edu' -receiver_emails = ['thomas.kidd@okstate.edu', 'james.stine@okstate.edu', 'harris@g.hmc.edu', 'rose.thompson10@okstate.edu', "sarah.harris@unlv.edu"] -# receiver_emails = ['thomas.kidd@okstate.edu'] +# receiver_emails = ['thomas.kidd@okstate.edu', 'james.stine@okstate.edu', 'harris@g.hmc.edu', 'rose.thompson10@okstate.edu', 'sarah.harris@unlv.edu', 'nlucio@hmc.edu'] +receiver_emails = ['thomas.kidd@okstate.edu'] test_runner.send_email(sender_email=sender_email, receiver_emails=receiver_emails) diff --git a/bin/wrapper_nightly_runs.sh b/bin/wrapper_nightly_runs.sh index 55d2da04f..e6d198fcd 100755 --- a/bin/wrapper_nightly_runs.sh +++ b/bin/wrapper_nightly_runs.sh @@ -3,9 +3,9 @@ date # Variables -LOG=$HOME/nightly_runs/logs/from_wrapper.log # you can store your log file where you would like -PYTHON_SCRIPT=$HOME/nightly_runs/cvw/bin/ # cvw can be anywhere you would like it. Make sure to point your variable there -SETUP_SCRIPT=$HOME/nightly_runs/cvw/ # cvw can be anywhere you would like it. Make sure to point your variable there +LOG=$HOME/nightly-runs/logs/from_wrapper.log # you can store your log file where you would like +PYTHON_SCRIPT=$HOME/nightly-runs/cvw/bin/ # cvw can be anywhere you would like it. Make sure to point your variable there +SETUP_SCRIPT=$HOME/nightly-runs/cvw/ # cvw can be anywhere you would like it. 
Make sure to point your variable there @@ -25,3 +25,4 @@ cd $PYTHON_SCRIPT pwd echo "Running python file" python nightly_build.py >> $LOG 2>&1 +echo "Finished" From 0e3341c1dc692be85042dabe5564594adc88bdbe Mon Sep 17 00:00:00 2001 From: Thomas Kidd Date: Sat, 9 Mar 2024 16:27:35 -0600 Subject: [PATCH 14/20] added argstrings to make it a better CLI tool --- bin/nightly_build.py | 226 ++++++++++++++++++++---------------- bin/wrapper_nightly_runs.sh | 2 +- 2 files changed, 128 insertions(+), 100 deletions(-) diff --git a/bin/nightly_build.py b/bin/nightly_build.py index ee4988736..ff92704d0 100755 --- a/bin/nightly_build.py +++ b/bin/nightly_build.py @@ -54,6 +54,7 @@ Dependencies: - re - markdown - subprocess + - argparse Bash: - mutt (email sender) @@ -69,6 +70,8 @@ from datetime import datetime import re import markdown import subprocess +import argparse + @@ -83,7 +86,7 @@ class FolderManager: base_dir (str): The base directory where folders will be managed and repository will be cloned. """ env_extract_var = 'WALLY' - print(f"The environemntal variable is {env_extract_var}") + # print(f"The environemntal variable is {env_extract_var}") self.base_dir = os.environ.get(env_extract_var) print(f"The base directory is: {self.base_dir}") self.base_parent_dir = os.path.dirname(self.base_dir) @@ -611,120 +614,145 @@ class TestRunner: print(f"Error sending email: {identifier}") except expression as identifier: print(f"Error sending email: {identifier}") -############################################# -# SETUP # -############################################# -folder_manager = FolderManager() # creates the object - -# setting the path on where to clone new repositories of cvw -path = folder_manager.create_preliminary_folders(["nightly-runs/repos/", "nightly-runs/results/"]) -new_folder = folder_manager.create_new_folder(["nightly-runs/repos/", "nightly-runs/results/"]) - -# clone the cvw repo -folder_manager.clone_repository("nightly-runs/repos/", "https://github.com/openhwgroup/cvw.git") - -############################################# -# SETUP # -############################################# -test_runner = TestRunner() # creates the object -test_runner.set_env_var("nightly-runs/repos/") # ensures that the new WALLY environmental variable is set correctly +def main(): + ############################################# + # ARG PARSER # + ############################################# + + parser = argparse.ArgumentParser(description='Nightly Verification Testing for WALLY.') + + parser.add_argument('--path', help='specify the path for where the nightly repositories will be cloned ex: "nightly-runs') + parser.add_argument('--repository', help='specify which github repository you want to clone') + parser.add_argument('--target', help='types of tests you can make are: all, wally-riscv-arch-test') + parser.add_argument('--send_email', help='do you want to send emails: "yes" or "y"') + + args = parser.parse_args() + + # file paths for where the results and repos will be saved: repos and results can be changed to whatever + repos_path = f"{args.path}/repos/" + results_path = f"{args.path}/results/" + ############################################# + # SETUP # + ############################################# + folder_manager = FolderManager() # creates the object + + # setting the path on where to clone new repositories of cvw + folder_manager.create_preliminary_folders([repos_path, results_path]) + new_folder = folder_manager.create_new_folder([repos_path, results_path]) + + # clone the cvw repo + 
folder_manager.clone_repository(repos_path, args.repository) + + ############################################# + # SETUP # + ############################################# + + test_runner = TestRunner() # creates the object + test_runner.set_env_var(repos_path) # ensures that the new WALLY environmental variable is set correctly -############################################# -# TMP SETUP # -############################################# + ############################################# + # TMP SETUP # + ############################################# -""" -The goal of this section is to replace the TIMEOUT_DUR for regression tests. - -""" -if test_runner.change_time_dur(): - print("The regression-wally file was successfully changed") - -############################################# -# MAKE TESTS # -############################################# - - -# target = "wally-riscv-arch-test" -target = "all" -# if test_runner.execute_makefile(target = target): -# print(f"The {target} tests were made successfully") - -############################################# -# RUN TESTS # -############################################# - - -test_list = [["python", "regression-wally", "-nightly"], ["bash", "lint-wally", "-nightly"], ["bash", "coverage", "--search"]] -output_log_list = [] # a list where the output markdown file lcoations will be saved to -total_number_failures = 0 # an integer where the total number failures from all of the tests will be collected -total_number_success = 0 # an integer where the total number of sucess will be collected - -total_failures = [] -total_success = [] - -for test_type, test_name, test_exctention in test_list: - print("--------------------------------------------------------------") - print(f"Test type: {test_type}") - print(f"Test name: {test_name}") - print(f"Test extenction: {test_exctention}") - - check, output_location = test_runner.run_tests(test_type=test_type, test_name=test_name, test_exctention=test_exctention) - print(f"Did the tests run?: {check}") - print(f"The tests log files are saved to: {output_location}") - if check: # this checks if the test actually ran successfully - output_log_list.append(output_location) - - # format tests to markdown - try: - passed, failed = test_runner.clean_format_output(input_file = output_location) - except: - print("There was an error cleaning the data") - - print(f"The # of failures are for {test_name}: {len(failed)}") - total_number_failures+= len(failed) - total_failures.append(failed) - - print(f"The # of sucesses are for {test_name}: {len(passed)}") - total_number_success += len(passed) - total_success.append(passed) - test_runner.rewrite_to_markdown(test_name, passed, failed) - -print(f"The total sucesses are: {total_number_success}") -print(f"The total failures are: {total_number_failures}") + """ + The goal of this section is to replace the TIMEOUT_DUR for regression tests. 
+ """ + if test_runner.change_time_dur(time_duriation=2): + pass + else: + print("Error occured changing the TIMEOUT duration in './regression-wally'") + ############################################# + # MAKE TESTS # + ############################################# + # if test_runner.execute_makefile(target = args.target): + # print(f"The {args.target} tests were made successfully") -############################################# -# FORMAT TESTS # -############################################# - -# Combine multiple markdown files into one file - -test_runner.combine_markdown_files(passed_tests = total_success, failed_tests = total_failures, test_list = test_list, total_number_failures = total_number_failures, total_number_success = total_number_success, test_type=target, markdown_file=None) + ############################################# + # RUN TESTS # + ############################################# -############################################# -# WRITE MD TESTS # -############################################# -test_runner.convert_to_html() + test_list = [["python", "regression-wally", "-nightly"], ["bash", "lint-wally", "-nightly"], ["bash", "coverage", "--search"]] + output_log_list = [] # a list where the output markdown file lcoations will be saved to + total_number_failures = 0 # an integer where the total number failures from all of the tests will be collected + total_number_success = 0 # an integer where the total number of sucess will be collected + + total_failures = [] + total_success = [] + + for test_type, test_name, test_exctention in test_list: + print("--------------------------------------------------------------") + print(f"Test type: {test_type}") + print(f"Test name: {test_name}") + print(f"Test extention: {test_exctention}") + + check, output_location = test_runner.run_tests(test_type=test_type, test_name=test_name, test_exctention=test_exctention) + print(f"Did the tests run?: {check}") + print(f"The tests log files are saved to: {output_location}") + if check: # this checks if the test actually ran successfully + output_log_list.append(output_location) + + # format tests to markdown + try: + passed, failed = test_runner.clean_format_output(input_file = output_location) + except: + print("There was an error cleaning the data") + + print(f"The # of failures are for {test_name}: {len(failed)}") + total_number_failures+= len(failed) + total_failures.append(failed) + + print(f"The # of sucesses are for {test_name}: {len(passed)}") + total_number_success += len(passed) + total_success.append(passed) + test_runner.rewrite_to_markdown(test_name, passed, failed) + + print(f"The total sucesses are: {total_number_success}") + print(f"The total failures are: {total_number_failures}") -############################################# -# SEND EMAIL # -############################################# -sender_email = 'james.stine@okstate.edu' -# sender_email = 'thomas.kidd@okstate.edu' + -# receiver_emails = ['thomas.kidd@okstate.edu', 'james.stine@okstate.edu', 'harris@g.hmc.edu', 'rose.thompson10@okstate.edu', 'sarah.harris@unlv.edu', 'nlucio@hmc.edu'] -receiver_emails = ['thomas.kidd@okstate.edu'] -test_runner.send_email(sender_email=sender_email, receiver_emails=receiver_emails) + ############################################# + # FORMAT TESTS # + ############################################# + + # Combine multiple markdown files into one file + + test_runner.combine_markdown_files(passed_tests = total_success, failed_tests = total_failures, test_list = test_list, total_number_failures 
= total_number_failures, total_number_success = total_number_success, test_type=args.target, markdown_file=None) + + + ############################################# + # WRITE MD TESTS # + ############################################# + test_runner.convert_to_html() + + + + ############################################# + # SEND EMAIL # + ############################################# + + sender_email = 'james.stine@okstate.edu' + + receiver_emails = ['thomas.kidd@okstate.edu', 'james.stine@okstate.edu', 'harris@g.hmc.edu', 'rose.thompson10@okstate.edu', 'sarah.harris@unlv.edu', 'nlucio@hmc.edu'] + testing_emails = ['thomas.kidd@okstate.edu'] + + if (args.send_email == "yes" or args.send_email == "y"): + test_runner.send_email(sender_email=sender_email, receiver_emails=receiver_emails) + if (args.send_email == "test"): + test_runner.send_email(sender_email=sender_email, receiver_emails=testing_emails) + +if __name__ == "__main__": + main() \ No newline at end of file diff --git a/bin/wrapper_nightly_runs.sh b/bin/wrapper_nightly_runs.sh index e6d198fcd..91e57d091 100755 --- a/bin/wrapper_nightly_runs.sh +++ b/bin/wrapper_nightly_runs.sh @@ -24,5 +24,5 @@ source ./setup_host.sh >> $LOG 2>&1 cd $PYTHON_SCRIPT pwd echo "Running python file" -python nightly_build.py >> $LOG 2>&1 +python nightly_build.py --path "nightly-runs" --repository "https://github.com/openhwgroup/cvw" --target "all" --send_email "yes" >> $LOG 2>&1 echo "Finished" From 0caed8f8c6c335a0d61564c5fa9225e6f2264879 Mon Sep 17 00:00:00 2001 From: Thomas Kidd Date: Sat, 9 Mar 2024 19:23:24 -0600 Subject: [PATCH 15/20] undo changing TIMEOUT duration for regression-wally --- bin/nightly_build.py | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/bin/nightly_build.py b/bin/nightly_build.py index ff92704d0..fdb04e9d5 100755 --- a/bin/nightly_build.py +++ b/bin/nightly_build.py @@ -662,10 +662,10 @@ def main(): The goal of this section is to replace the TIMEOUT_DUR for regression tests. """ - if test_runner.change_time_dur(time_duriation=2): - pass - else: - print("Error occured changing the TIMEOUT duration in './regression-wally'") + # if test_runner.change_time_dur(time_duriation=2): + # pass + # else: + # print("Error occured changing the TIMEOUT duration in './regression-wally'") ############################################# # MAKE TESTS # From c8df291d488a9f08bb22c6fbf2d2b947d9483927 Mon Sep 17 00:00:00 2001 From: Thomas Kidd Date: Sat, 9 Mar 2024 14:21:04 -0600 Subject: [PATCH 16/20] updated nightly runs with try statement in email sending --- bin/nightly_build.py | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/bin/nightly_build.py b/bin/nightly_build.py index fdb04e9d5..63c7a9808 100755 --- a/bin/nightly_build.py +++ b/bin/nightly_build.py @@ -37,6 +37,7 @@ Usage: Our cron job has the following syntax: 0 3 * * * BASH_ENV=~/.bashrc bash -l -c "*WHERE YOUR CVW IS MUST PUT FULL PATH*/cvw/bin/wrapper_nightly-runs.sh > *WHERE YOU WANT TO STORE LOG FILES/cron.log 2>&1" + 0 3 * * * BASH_ENV=~/.bashrc bash -l -c "*WHERE YOUR CVW IS MUST PUT FULL PATH*/cvw/bin/wrapper_nightly-runs.sh > *WHERE YOU WANT TO STORE LOG FILES/cron.log 2>&1" This cronjob sources the .bashrc file and executes the wrapper script as a user. @@ -101,6 +102,8 @@ class FolderManager: These folders are: nightly-runs/repos/ nightly-runs/results/ + nightly-runs/repos/ + nightly-runs/results/ Args: folders (list): A list of folder names to be created. 
@@ -175,6 +178,7 @@ class TestRunner: Args: folder: the "nightly-runs/repos/" + folder: the "nightly-runs/repos/" Returns: bool: True if the script is copied successfully, False otherwise. From 7f65718297d6734200fb5aa613075f738f1db0e8 Mon Sep 17 00:00:00 2001 From: Thomas Kidd Date: Sat, 9 Mar 2024 20:18:55 -0600 Subject: [PATCH 17/20] fixed documentation typo --- bin/nightly_build.py | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/bin/nightly_build.py b/bin/nightly_build.py index 63c7a9808..38cfa74fc 100755 --- a/bin/nightly_build.py +++ b/bin/nightly_build.py @@ -36,8 +36,7 @@ Usage: 4) Syntax: Our cron job has the following syntax: - 0 3 * * * BASH_ENV=~/.bashrc bash -l -c "*WHERE YOUR CVW IS MUST PUT FULL PATH*/cvw/bin/wrapper_nightly-runs.sh > *WHERE YOU WANT TO STORE LOG FILES/cron.log 2>&1" - 0 3 * * * BASH_ENV=~/.bashrc bash -l -c "*WHERE YOUR CVW IS MUST PUT FULL PATH*/cvw/bin/wrapper_nightly-runs.sh > *WHERE YOU WANT TO STORE LOG FILES/cron.log 2>&1" + 0 3 * * * BASH_ENV=~/.bashrc bash -l -c "*WHERE YOUR CVW IS MUST PUT FULL PATH*/cvw/bin/wrapper_nightly_runs.sh > *WHERE YOU WANT TO STORE LOG FILES/cron.log 2>&1" This cronjob sources the .bashrc file and executes the wrapper script as a user. From e568f931878151740f29b8abaea83426accdaa4b Mon Sep 17 00:00:00 2001 From: Thomas Kidd Date: Wed, 13 Mar 2024 13:56:05 -0500 Subject: [PATCH 18/20] typo fixes --- bin/nightly_build.py | 173 ++++++++++++++++++++++++++----------------- 1 file changed, 105 insertions(+), 68 deletions(-) diff --git a/bin/nightly_build.py b/bin/nightly_build.py index 38cfa74fc..c4cbc0e77 100755 --- a/bin/nightly_build.py +++ b/bin/nightly_build.py @@ -55,6 +55,7 @@ Dependencies: - markdown - subprocess - argparse + - logging Bash: - mutt (email sender) @@ -71,7 +72,30 @@ import re import markdown import subprocess import argparse +import logging +# Logger + +# Set up the logger +logger = logging.getLogger(__name__) +logger.setLevel(logging.DEBUG) + +# Create a file handler +file_handler = logging.FileHandler('../../logs/nightly_build.log') +file_handler.setLevel(logging.DEBUG) + +# Create a console handler +console_handler = logging.StreamHandler() +console_handler.setLevel(logging.INFO) + +# Create a formatter and add it to the handlers +formatter = logging.Formatter('%(asctime)s - %(levelname)s - %(message)s') +file_handler.setFormatter(formatter) +console_handler.setFormatter(formatter) + +# Add the handlers to the logger +logger.addHandler(file_handler) +logger.addHandler(console_handler) @@ -86,14 +110,12 @@ class FolderManager: base_dir (str): The base directory where folders will be managed and repository will be cloned. 
""" env_extract_var = 'WALLY' - # print(f"The environemntal variable is {env_extract_var}") self.base_dir = os.environ.get(env_extract_var) - print(f"The base directory is: {self.base_dir}") self.base_parent_dir = os.path.dirname(self.base_dir) - # print(f"The new WALLY vairable is: {os.environ.get('WALLY')}") - # print(f"The Base Directory is now : {self.base_dir}") - # print(f"The Base Parent Directory is now : {self.base_parent_dir}") + logger.info(f"Base directory: {self.base_dir}") + logger.info(f"Parent Base directory: {self.base_parent_dir}") + def create_preliminary_folders(self, folders): """ @@ -115,6 +137,7 @@ class FolderManager: folder_path = os.path.join(self.base_parent_dir, folder) if not os.path.exists(folder_path): os.makedirs(folder_path) + logger.info(f"Preliminary folders created: {folders}") def create_new_folder(self, folders): """ @@ -136,7 +159,7 @@ class FolderManager: return_folder_path.append(folder_path) else: return_folder_path.append(None) # Folder already exists - + logger.info(f"New folder created. Path: {folder_path}") return return_folder_path def clone_repository(self, folder, repo_url): @@ -157,7 +180,8 @@ class FolderManager: os.makedirs(repo_folder) os.system(f"git clone --recurse-submodules {repo_url} {repo_folder}") os.makedirs(tmp_folder) - + + logger.info(f"Repository cloned: {repo_url}") class TestRunner: """A class for making, running, and formatting test results.""" @@ -166,8 +190,9 @@ class TestRunner: self.base_dir = os.environ.get('WALLY') self.base_parent_dir = os.path.dirname(self.base_dir) self.current_datetime = datetime.now() - #self.temp_dir = self.base_parent_dir - #print(f"Base Directory: {self.base_parent_dir}") + + logger.info("Test runner object is initialized") + def copy_setup_script(self, folder): """ @@ -180,7 +205,7 @@ class TestRunner: folder: the "nightly-runs/repos/" Returns: - bool: True if the script is copied successfully, False otherwise. + bool: True if the script is copied successfuly, False otherwise. 
""" # Get today's date in YYYY-MM-DD format todays_date = datetime.now().strftime("%Y-%m-%d") @@ -191,22 +216,22 @@ class TestRunner: # Check if the source script exists if not os.path.exists(source_script): - print(f"Error: Source script '{source_script}' not found.") + logger.error(f"Error: Source script '{source_script}' not found.") return False # Check if the destination folder exists, create it if necessary if not os.path.exists(destination_folder): - print(f"Error: Destination folder '{destination_folder}' not found.") + logger.error(f"Error: Destination folder '{destination_folder}' not found.") return False # Copy the script to the destination folder try: shutil.copy(source_script, destination_folder) - #print(f"Setup script copied to: {destination_folder}") + logger.info(f"Setup script copied to: {destination_folder}") return True except Exception as e: - print(f"Error copying setup script: {e}") + logger.error(f"Error copying setup script: {e}") return False @@ -231,15 +256,19 @@ class TestRunner: self.base_parent_dir = os.path.dirname(self.base_dir) self.temp_dir = self.base_parent_dir - # print(f"The new WALLY vairable is: {os.environ.get('WALLY')}") - # print(f"The Base Directory is now : {self.base_dir}") - # print(f"The Base Parent Directory is now : {self.base_parent_dir}") + logger.info(f"Tests are going to be ran from: {self.base_dir}") + logger.info(f"WALLY environmental variable is: {os.environ.get('WALLY')}") + def change_time_dur(self, time_duriation=1): # Prepare the command to execute the Makefile make_file_path = os.path.join(self.base_dir, "sim") - os.chdir(make_file_path) + logger.info(f"Make file path is set to: {make_file_path}") + try: + os.chdir(make_file_path) + except Exception as e: + logger.error(f"Error nagivating to the make file path. Error: {e}") file_path = "regression-wally" line_number = 450 # TIMEOUT_DUR = 1 day at this line in regression-wally new_line = f" TIMEOUT_DUR = {60*time_duriation}" @@ -248,13 +277,14 @@ class TestRunner: lines = file.readlines() if line_number < 1 or line_number > len(lines): - print("Error: Line number out of range.") + logger.error("Error: Line number out of range.") return False lines[line_number - 1] = new_line + '\n' with open(file_path, 'w') as file: file.writelines(lines) + logger.info(f"Timeduration in ./regression-wally has been changed to: {time_duriation*60} seconds") return True def execute_makefile(self, target=None): @@ -280,7 +310,9 @@ class TestRunner: # Add target to the command if specified if target: command.append(target) - #print(f"The command is: {command}") + logger.info(f"Command used: {command[0]} {command[1]}") + else: + logger.info(f"Command used: {command[0]}") # Execute the command using subprocess and save the output into a file with open(output_file, "w") as f: @@ -294,10 +326,10 @@ class TestRunner: # Check the result if result.returncode == 0: - #print(f"Makefile executed successfully{' with target ' + target if target else ''}.") + logger.info(f"Tests have been made with tag target: {target}") return True else: - #print("Error executing Makefile.") + logger.error(f"Error making the tests. 
Target: {target}") return False def run_tests(self, test_type=None, test_name=None, test_exctention=None): @@ -319,8 +351,11 @@ class TestRunner: if test_exctention: command = [test_type, test_name, test_exctention] + logger.info(f"Command used to run tests: {test_type} {test_name} -{test_exctention}") else: command = [test_type, test_name] + logger.info(f"Command used to run tests: {test_type} {test_name}") + # Execute the command using subprocess and save the output into a file with open(output_file, "w") as f: @@ -329,11 +364,12 @@ class TestRunner: f.write("\n\n") result = subprocess.run(command, stdout=f, stderr=subprocess.STDOUT, text=True) - # Check if the command executed successfully + # Check if the command executed successfuly if result.returncode or result.returncode == 0: + logger.info(f"Test ran successfuly. Test type: {test_type}, test name: {test_name}, test extention: {test_exctention}") return True, output_file else: - print("Error:", result.returncode) + logger.error(f"Error making test. Test type: {test_type}, test name: {test_name}, test extention: {test_exctention}") return False, output_file @@ -364,11 +400,10 @@ class TestRunner: while index < len(lines): # Remove ANSI escape codes line = re.sub(r'\x1b\[[0-9;]*[mGK]', '', lines[index]) - #print(line) + if "Success" in line: passed_configs.append(line.split(':')[0].strip()) elif "passed lint" in line: - #print(line) passed_configs.append(line.split(' ')[0].strip()) #passed_configs.append(line) # potentially use a space elif "failed lint" in line: @@ -379,7 +414,6 @@ class TestRunner: try: config_name = line.split(':')[0].strip() log_file = os.path.abspath("logs/"+config_name+".log") - #print(f"The log file saving to: {log_file} in the current working directory: {os.getcwd()}") failed_configs.append((config_name, log_file)) except: failed_configs.append((config_name, "Log file not found")) @@ -393,8 +427,7 @@ class TestRunner: if len(failed_configs) != 0: failed_configs.sort() - #print(f"The passed configs are: {passed_configs}") - #print(f"The failed configs are {failed_configs}") + logger.info(f"Cleaned test results. Passed configs {passed_configs}. 
Failed configs: {failed_configs}") return passed_configs, failed_configs def rewrite_to_markdown(self, test_name, passed_configs, failed_configs): @@ -415,8 +448,7 @@ class TestRunner: os.chdir(output_directory) current_directory = os.getcwd() output_file = os.path.join(current_directory, f"{test_name}.md") - #print("Current directory:", current_directory) - #print("Output File:", output_file) + with open(output_file, 'w') as md_file: @@ -440,7 +472,9 @@ class TestRunner: for config in passed_configs: md_file.write(f"- {config}\n") - def combine_markdown_files(self, passed_tests, failed_tests, test_list, total_number_failures, total_number_success, test_type="default", markdown_file=None): + logger.info("writing test outputs to markdown") + + def combine_markdown_files(self, passed_tests, failed_tests, test_list, total_number_failures, total_number_success, test_type="default", markdown_file=None, args=None): """ First we want to display the server properties like: - Server full name @@ -479,6 +513,9 @@ class TestRunner: os_info = subprocess.check_output(['uname', '-a']).strip().decode('utf-8') md_file.write(f"\n**Operating System Information:** {os_info}") md_file.write("\n") + + md_file.write(f"\n**Command used to execute test:** python nightly_build.py --path {args.path} --repository {args.repository} --target {args.target} --send_email {args.send_email}") + md_file.write("\n") except subprocess.CalledProcessError as e: # Handle if the command fails md_file.write(f"Failed to identify host and Operating System information: {str(e)}") @@ -516,17 +553,17 @@ class TestRunner: md_file.write("\n") md_file.write(f"* {config} ({log_file})\n") md_file.write("\n") - # Successfull Tests + # Successful Tests - md_file.write(f"\n\n## Successfull Tests") - md_file.write(f"\n**Total successfull tests: {total_number_success}**") + md_file.write(f"\n\n## Successful Tests") + md_file.write(f"\n**Total successful tests: {total_number_success}**") for (test_item, item) in zip(test_list, passed_tests): md_file.write(f"\n\n### {test_item[1]} test") md_file.write(f"\n**General Information**\n") md_file.write(f"\n* Test type: {test_item[0]}") md_file.write(f"\n* Test name: {test_item[1]}") md_file.write(f"\n* Test extension: {test_item[2]}\n\n") - md_file.write(f"\n**Successfull Tests:**\n") + md_file.write(f"\n**Successful Tests:**\n") @@ -542,7 +579,8 @@ class TestRunner: md_file.write(f"* {config}\n") md_file.write("\n") - + logger.info("Combining markdown files") + def convert_to_html(self, markdown_file="results.md", html_file="results.html"): """ @@ -567,7 +605,7 @@ class TestRunner: with open(html_file, 'w') as html_file: html_file.write(html_content) - + logger.info("Converting markdown file to html file.") def send_email(self, sender_email=None, receiver_emails=None, subject="Nightly Regression Test"): """ @@ -585,7 +623,7 @@ class TestRunner: # check if there are any emails if not receiver_emails: - print("No receiver emails provided.") + logger.ERROR("No receiver emails provided.") return # grab thge html file todays_date = self.current_datetime.strftime("%Y-%m-%d") @@ -613,10 +651,11 @@ class TestRunner: process = subprocess.Popen(command, stdin=subprocess.PIPE) # Write the email body to the subprocess process.communicate(body.encode('utf-8')) + logger.info("Sent email") except expression as identifier: - print(f"Error sending email: {identifier}") + logger.error(f"Error sending email with error: {identifier}") except expression as identifier: - print(f"Error sending email: {identifier}") + 
logger.error(f"Error sending email with error: {identifier}") @@ -634,6 +673,11 @@ def main(): args = parser.parse_args() + logger.info(f"path: {args.path}") + logger.info(f"repository: {args.repository}") + logger.info(f"target: {args.target}") + logger.info(f"send_email: {args.send_email}") + # file paths for where the results and repos will be saved: repos and results can be changed to whatever repos_path = f"{args.path}/repos/" results_path = f"{args.path}/results/" @@ -649,9 +693,7 @@ def main(): # clone the cvw repo folder_manager.clone_repository(repos_path, args.repository) - ############################################# - # SETUP # - ############################################# + test_runner = TestRunner() # creates the object test_runner.set_env_var(repos_path) # ensures that the new WALLY environmental variable is set correctly @@ -665,19 +707,18 @@ def main(): The goal of this section is to replace the TIMEOUT_DUR for regression tests. """ - # if test_runner.change_time_dur(time_duriation=2): - # pass - # else: - # print("Error occured changing the TIMEOUT duration in './regression-wally'") + if test_runner.change_time_dur(time_duriation=2): + pass + else: + logger.error("Error occured changing the TIMEOUT duration in './regression-wally'") ############################################# # MAKE TESTS # ############################################# - - - # if test_runner.execute_makefile(target = args.target): - # print(f"The {args.target} tests were made successfully") + if args.target != "no": + # test_runner.execute_makefile(target = "deriv") + test_runner.execute_makefile(target = args.target) ############################################# # RUN TESTS # @@ -693,34 +734,30 @@ def main(): total_success = [] for test_type, test_name, test_exctention in test_list: - print("--------------------------------------------------------------") - print(f"Test type: {test_type}") - print(f"Test name: {test_name}") - print(f"Test extention: {test_exctention}") - + check, output_location = test_runner.run_tests(test_type=test_type, test_name=test_name, test_exctention=test_exctention) - print(f"Did the tests run?: {check}") - print(f"The tests log files are saved to: {output_location}") - if check: # this checks if the test actually ran successfully + + if check: # this checks if the test actually ran successfuly output_log_list.append(output_location) - + logger.info(f"{test_name} ran successfuly. 
Output location: {output_location}") # format tests to markdown try: passed, failed = test_runner.clean_format_output(input_file = output_location) + logger.info(f"{test_name} has been formatted to markdown") except: - print("There was an error cleaning the data") + logger.ERROR(f"Error occured with formatting {test_name}") - print(f"The # of failures are for {test_name}: {len(failed)}") + logger.info(f"The # of failures are for {test_name}: {len(failed)}") total_number_failures+= len(failed) total_failures.append(failed) - print(f"The # of sucesses are for {test_name}: {len(passed)}") + logger.info(f"The # of sucesses are for {test_name}: {len(passed)}") total_number_success += len(passed) total_success.append(passed) test_runner.rewrite_to_markdown(test_name, passed, failed) - print(f"The total sucesses are: {total_number_success}") - print(f"The total failures are: {total_number_failures}") + logger.info(f"The total sucesses for all tests ran are: {total_number_success}") + logger.info(f"The total failures for all tests ran are: {total_number_failures}") @@ -733,7 +770,7 @@ def main(): # Combine multiple markdown files into one file - test_runner.combine_markdown_files(passed_tests = total_success, failed_tests = total_failures, test_list = test_list, total_number_failures = total_number_failures, total_number_success = total_number_success, test_type=args.target, markdown_file=None) + test_runner.combine_markdown_files(passed_tests = total_success, failed_tests = total_failures, test_list = test_list, total_number_failures = total_number_failures, total_number_success = total_number_success, test_type=args.target, markdown_file=None, args=args) ############################################# @@ -758,4 +795,4 @@ def main(): test_runner.send_email(sender_email=sender_email, receiver_emails=testing_emails) if __name__ == "__main__": - main() \ No newline at end of file + main() From 90026a5da15b7be23e0ca2f891eedd4eeca4347b Mon Sep 17 00:00:00 2001 From: Thomas Kidd Date: Wed, 13 Mar 2024 22:13:20 -0500 Subject: [PATCH 19/20] showing commands used for executing each specific test --- bin/nightly_build.py | 74 ++++++++++++++++++++++---------------------- setup_host.sh | 58 ---------------------------------- 2 files changed, 37 insertions(+), 95 deletions(-) delete mode 100644 setup_host.sh diff --git a/bin/nightly_build.py b/bin/nightly_build.py index c4cbc0e77..a82c1496f 100755 --- a/bin/nightly_build.py +++ b/bin/nightly_build.py @@ -358,12 +358,14 @@ class TestRunner: # Execute the command using subprocess and save the output into a file - with open(output_file, "w") as f: - formatted_datetime = self.current_datetime.strftime("%Y-%m-%d %H:%M:%S") - f.write(formatted_datetime) - f.write("\n\n") - result = subprocess.run(command, stdout=f, stderr=subprocess.STDOUT, text=True) - + try: + with open(output_file, "w") as f: + formatted_datetime = self.current_datetime.strftime("%Y-%m-%d %H:%M:%S") + f.write(formatted_datetime) + f.write("\n\n") + result = subprocess.run(command, stdout=f, stderr=subprocess.STDOUT, text=True) + except Exception as e: + logger.error("There was an error in running the tests in the run_tests function: {e}") # Check if the command executed successfuly if result.returncode or result.returncode == 0: logger.info(f"Test ran successfuly. 
Test type: {test_type}, test name: {test_name}, test extention: {test_exctention}") @@ -530,13 +532,10 @@ class TestRunner: # Failed Tests md_file.write(f"\n\n## Failed Tests") - md_file.write(f"\nTotal failed tests: {total_number_failures}") + md_file.write(f"\n**Total failed tests: {total_number_failures}**") for (test_item, item) in zip(test_list, failed_tests): md_file.write(f"\n\n### {test_item[1]} test") - md_file.write(f"\n**General Information**\n") - md_file.write(f"\n* Test type: {test_item[0]}\n") - md_file.write(f"\n* Test name: {test_item[1]}\n") - md_file.write(f"\n* Test extension: {test_item[2]}\n\n") + md_file.write(f"\n**Command used:** {test_item[0]} {test_item[1]} {test_item[2]}\n\n") md_file.write(f"**Failed Tests:**\n") @@ -559,10 +558,7 @@ class TestRunner: md_file.write(f"\n**Total successful tests: {total_number_success}**") for (test_item, item) in zip(test_list, passed_tests): md_file.write(f"\n\n### {test_item[1]} test") - md_file.write(f"\n**General Information**\n") - md_file.write(f"\n* Test type: {test_item[0]}") - md_file.write(f"\n* Test name: {test_item[1]}") - md_file.write(f"\n* Test extension: {test_item[2]}\n\n") + md_file.write(f"\n**Command used:** {test_item[0]} {test_item[1]} {test_item[2]}\n\n") md_file.write(f"\n**Successful Tests:**\n") @@ -707,7 +703,7 @@ def main(): The goal of this section is to replace the TIMEOUT_DUR for regression tests. """ - if test_runner.change_time_dur(time_duriation=2): + if test_runner.change_time_dur(time_duriation=1): pass else: logger.error("Error occured changing the TIMEOUT duration in './regression-wally'") @@ -736,26 +732,29 @@ def main(): for test_type, test_name, test_exctention in test_list: check, output_location = test_runner.run_tests(test_type=test_type, test_name=test_name, test_exctention=test_exctention) + try: + if check: # this checks if the test actually ran successfuly + output_log_list.append(output_location) + logger.info(f"{test_name} ran successfuly. Output location: {output_location}") + # format tests to markdown + try: + passed, failed = test_runner.clean_format_output(input_file = output_location) + logger.info(f"{test_name} has been formatted to markdown") + except: + logger.ERROR(f"Error occured with formatting {test_name}") - if check: # this checks if the test actually ran successfuly - output_log_list.append(output_location) - logger.info(f"{test_name} ran successfuly. 
Output location: {output_location}") - # format tests to markdown - try: - passed, failed = test_runner.clean_format_output(input_file = output_location) - logger.info(f"{test_name} has been formatted to markdown") - except: - logger.ERROR(f"Error occured with formatting {test_name}") - - logger.info(f"The # of failures are for {test_name}: {len(failed)}") - total_number_failures+= len(failed) - total_failures.append(failed) - - logger.info(f"The # of sucesses are for {test_name}: {len(passed)}") - total_number_success += len(passed) - total_success.append(passed) - test_runner.rewrite_to_markdown(test_name, passed, failed) + logger.info(f"The # of failures are for {test_name}: {len(failed)}") + total_number_failures+= len(failed) + total_failures.append(failed) + + logger.info(f"The # of sucesses are for {test_name}: {len(passed)}") + total_number_success += len(passed) + total_success.append(passed) + test_runner.rewrite_to_markdown(test_name, passed, failed) + except Exception as e: + logger.error("There was an error in running the tests: {e}") + logger.info(f"The total sucesses for all tests ran are: {total_number_success}") logger.info(f"The total failures for all tests ran are: {total_number_failures}") @@ -769,9 +768,10 @@ def main(): ############################################# # Combine multiple markdown files into one file - - test_runner.combine_markdown_files(passed_tests = total_success, failed_tests = total_failures, test_list = test_list, total_number_failures = total_number_failures, total_number_success = total_number_success, test_type=args.target, markdown_file=None, args=args) - + try: + test_runner.combine_markdown_files(passed_tests = total_success, failed_tests = total_failures, test_list = test_list, total_number_failures = total_number_failures, total_number_success = total_number_success, test_type=args.target, markdown_file=None, args=args) + except Exception as e: + logger.error(f"Error combining the markdown tests called from main: {e}") ############################################# # WRITE MD TESTS # diff --git a/setup_host.sh b/setup_host.sh deleted file mode 100644 index 55854a71c..000000000 --- a/setup_host.sh +++ /dev/null @@ -1,58 +0,0 @@ -#!/bin/bash - -# setup.sh -# David_Harris@hmc.edu and kekim@hmc.edu 1 December 2021 -# Set up tools for riscv-wally - -echo "Executing Wally setup.sh" - -# Path to Wally repository -WALLY=$(dirname ${BASH_SOURCE[0]:-$0}) -export WALLY=$(cd "$WALLY" && pwd) -echo \$WALLY set to ${WALLY} - -# Path to RISC-V Tools -export RISCV=/opt/riscv # change this if you installed the tools in a different location - -# Tools -# GCC -#export LD_LIBRARY_PATH=$LD_LIBRARY_PATH:$RISCV/riscv-gnu-toolchain/lib:$RISCV/riscv-gnu-toolchain/riscv64-unknown-elf/lib -#export PATH=$PATH:$RISCV/riscv-gnu-toolchain/bin:$RISCV/riscv-gnu-toolchain/riscv64-unknown-elf/bin # GCC tools -# Spike -#export LD_LIBRARY_PATH=$RISCV/lib:$LD_LIBRARY_PATH -export PATH=$PATH:$RISCV/bin -# utility functions in Wally repository -export PATH=$WALLY/bin:$PATH -# Verilator -export PATH=$RICKV/verilator:$PATH # Change this for your path to Verilator -# ModelSim/Questa (vsim) -# Note: 2022.1 complains on cache/sram1p1r1w about StoredData cannot be driven by multiple always_ff blocks. 
Ues 2021.2 for now -#export PATH=/cad/mentor/questa_sim-2022.1_1/questasim/bin:$PATH # Change this for your path to Modelsim -#export PATH=/cad/mentor/questa_sim-2021.2_1/questasim/bin:$PATH # Change this for your path to Modelsim, or delete -#export MGLS_LICENSE_FILE=1717@solidworks.eng.hmc.edu # Change this to your Siemens license server -#export PATH=/cad/synopsys/SYN/bin:$PATH # Change this for your path to Design Compiler -#export SNPSLMD_LICENSE_FILE=27020@134.173.38.184 # Change this to your license manager file - -# Imperas; put this in if you are using it -#export PATH=$RISCV/imperas-riscv-tests/riscv-ovpsim-plus/bin/Linux64:$PATH -#export LD_LIBRARY_PATH=$RISCV/imperas_riscv_tests/riscv-ovpsim-plus/bin/Linux64:$LD_LIBRARY_PATH # remove if no imperas - -export MODSIM=/opt/ModelSim/questasim -export PATH=$PATH:$MODSIM/bin -export LD_LIBRARY_PATH=/usr/lib:/lib -export MGC_DOC_PATH=$MODSIM/docs -export MGC_PDF_READER=evince -export MGC_HTML_BROWSER=firefox -export MGLS_LICENSE_FILE=1717@trelaina.ecen.okstate.edu -export IMPERASD_LICENSE_FILE=2700@trelaina.ecen.okstate.edu - -export IDV=$RISCV/ImperasDV-OpenHW -if [ -e "$IDV" ]; then -# echo "Imperas exists" - export IMPERAS_HOME=$IDV/Imperas - export IMPERAS_PERSONALITY=CPUMAN_DV_ASYNC - export ROOTDIR=${WALLY}/.. - source ${IMPERAS_HOME}/bin/setup.sh - setupImperas ${IMPERAS_HOME} - export PATH=$IDV/scripts/cvw:$PATH -fi From 7ed2d0c13d0b8d34387544cd90b8ff52b3c59c27 Mon Sep 17 00:00:00 2001 From: Thomas Kidd Date: Wed, 13 Mar 2024 22:40:00 -0500 Subject: [PATCH 20/20] typo fix for displaying commands in email --- bin/nightly_build.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/bin/nightly_build.py b/bin/nightly_build.py index a82c1496f..8f806d119 100755 --- a/bin/nightly_build.py +++ b/bin/nightly_build.py @@ -351,7 +351,7 @@ class TestRunner: if test_exctention: command = [test_type, test_name, test_exctention] - logger.info(f"Command used to run tests: {test_type} {test_name} -{test_exctention}") + logger.info(f"Command used to run tests: {test_type} {test_name} {test_exctention}") else: command = [test_type, test_name] logger.info(f"Command used to run tests: {test_type} {test_name}")