上传所有文件

This commit is contained in:
zice6688
2026-03-30 16:46:48 +08:00
parent 8c2008c738
commit 35c99bac58
110 changed files with 23243 additions and 0 deletions

0
utils/__init__.py Normal file
View File

Binary file not shown.

Binary file not shown.

Binary file not shown.

Binary file not shown.

54
utils/json_utils.py Normal file
View File

@@ -0,0 +1,54 @@
"""
Description : This file is used to handle json files
Author : Ruidi Qiu (ruidi.qiu@tum.de)
Time : 2023/11/19 21:49:11
LastEdited :
"""
import json
# Default input: the prompt description JSON for the design under test.
PROMPT_JSON = "preliminary_EXP/7420/prompt.json"
# Default output: the assembled plain-text LLM prompt.
OUTPUT_TXT = "generated_prompt.txt"
"""
prompt.json template:
{
"description" : "The 7400-series integrated circuits are a series of digital chips with a few gates each. The 7420 is a chip with two 4-input NAND gates. Create a module with the same functionality as the 7420 chip. It has 8 inputs and 2 outputs.",
"headmodule" : "module top_module (\ninput p1a, p1b, p1c, p1d,\noutput p1y,\ninput p2a, p2b, p2c, p2d,\noutput p2y );\nendmodule",
"tb_property" : {"composition" : "this module is composed of 2 4-input NAND gates", "test case 1" : "NAND gate will output 1 if all inputs are 0, otherwise it will output 0"},
"rules" : ["Attention! you should print a message after each test case. The message should contain 'the explanation of your test case' and 'error source'", "Attention! Your test cases should be as exhaustive as possible.", "your response should only contain the code"]
}
"""
def json_read(filename):
    """Parse *filename* as JSON and return the resulting object."""
    with open(filename, 'r') as handle:
        return json.load(handle)
def txt_write(filename, content):
    """Overwrite *filename* with *content* as plain text."""
    with open(filename, 'w') as handle:
        handle.write(content)
def prompt_gen_from_jsonprompt(json_data):
    """
    Assemble the LLM prompt text from a parsed prompt.json dict.

    Expects keys: "description", "headmodule", "rules" (list of str) and
    "tb_property" (dict containing at least "composition").
    """
    parts = []
    # fixed instruction header
    parts.append("You are the strongest AI agent I have ever met. You can perfect handle the job I give you. please generate a verilog testbench to test the verilog code of the design under test (DUT).\n")
    parts.append("The description for the DUT is: '%s'\n" % json_data["description"])
    parts.append("The input and output interface of this verilog code is: \n%s\n" % json_data["headmodule"])
    parts.append("The rules for this task are:\n")
    for rule in json_data["rules"]:
        parts.append(" %s\n" % rule)
    parts.append("to help you better generate the testbench for the DUT, we will give you some tips that you should consider when generating the testbench.\n")
    parts.append("The composition of the DUT is '%s'\n" % json_data["tb_property"]["composition"])
    # every tb_property entry except "composition" becomes a hint line
    for key, value in json_data["tb_property"].items():
        if key != "composition":
            parts.append(" %s: %s\n" % (key, value))
    return "".join(parts)
def main():
    """Read PROMPT_JSON, build the prompt text and dump it to OUTPUT_TXT."""
    prompt_text = prompt_gen_from_jsonprompt(json_read(PROMPT_JSON))
    txt_write(OUTPUT_TXT, prompt_text)


if __name__ == "__main__":
    main()

45
utils/subproc.py Normal file
View File

@@ -0,0 +1,45 @@
"""
Description : This file is related to auto subprocess running
Author : Ruidi Qiu (r.qiu@tum.de)
Time : 2023/12/11 14:06:27
LastEdited : 2024/4/28 13:26:18
"""
import subprocess as sp
def subproc_call(cmd, timeout=120):
    """
    Run *cmd* in a shell and capture its stdout/stderr.

    #### input:
    - cmd: str, the shell command to run
    - timeout: int, seconds (default 120)
    #### output:
    - {"out": out_reg, "err": err_reg, "haserror": error_exist}
        - out: str, stdout of cmd
        - err: str, stderr of cmd (or a timeout hint message)
        - haserror: int, 0 if the command exited with status 0, 1 otherwise
    If cmd runs longer than *timeout* seconds it is killed and
    {"out": "", "err": <timeout hint>, "haserror": 1} is returned.
    """
    timeouterror = "program is timeout (time > %ds). please check your code. Hints: there might be some infinite loop, please check all the loops in your programm. If it is a verilog code, please check if there is a $finish in the code." % (timeout)
    p = sp.Popen(cmd, shell=True, stdout=sp.PIPE, stderr=sp.PIPE)
    try:
        out, err = p.communicate(timeout=timeout)
    except sp.TimeoutExpired:
        p.kill()
        # reap the killed child so it does not linger as a zombie
        p.communicate()
        return {"out": "", "err": timeouterror, "haserror": 1}
    return {
        "out": out.decode("utf-8"),
        "err": err.decode("utf-8"),
        # normalize returncode (any int, negative on signals) to the
        # documented 0/1 flag
        "haserror": 0 if p.returncode == 0 else 1,
    }

269
utils/utils.py Normal file
View File

@@ -0,0 +1,269 @@
"""
Description : some utils for project
Author : Ruidi Qiu (ruidi.qiu@tum.de)
Time : 2022/11/08 13:00:00
LastEdited : 2024/9/3 17:33:26
"""
import time
import datetime
import collections
import os
import tiktoken
import threading
from functools import wraps
from itertools import repeat
from datetime import datetime, timedelta
def str_list(list, precision=4) -> str:
    """
    Convert a list of strings/numbers to its source-code-like string form.

    Strings are wrapped in single quotes; integers are printed verbatim;
    other numbers are formatted with *precision* decimal places.

    Example:
    ::

        str_list(['a', 2, '3']) -> "['a', 2, '3']"
    """
    # NOTE: the parameter name `list` shadows the builtin; kept for
    # backward compatibility with existing keyword callers.
    def _fmt(item):
        if isinstance(item, str):
            return "'%s'" % item
        if isinstance(item, int):
            return "%d" % item
        return "%.*f" % (precision, item)
    # join avoids the original quadratic += concatenation and handles the
    # empty list without a special case
    return '[' + ', '.join(_fmt(i) for i in list) + ']'
###################### decorators ######################
def print_time(en=True):
    """
    Decorator factory: print the wall-clock running time of a function.

    Args:
        en (bool): when False the function runs untouched (no timing).

    Example:
    ::

        @print_time()
        def forward(self, input):
            return self.top_level(input)
    """
    def decorator_nopara(func):
        @wraps(func)  # preserve __name__/__doc__ of the wrapped function
        def wrapper(*args, **kwargs):
            if not en:
                return func(*args, **kwargs)
            old_time = time.time()
            result = func(*args, **kwargs)
            run_time = time.time() - old_time
            # func.__name__ is robust, unlike parsing str(func)
            print('{} use time: {}s'.format(func.__name__, run_time))
            return result
        return wrapper
    return decorator_nopara
def raise_error(func):
    """
    Debug decorator: run *func*, then unconditionally raise an Exception.

    Useful to halt execution right after a function of interest while
    keeping its side effects.
    """
    @wraps(func)  # keep the wrapped function's metadata for debugging
    def wrapper(*args, **kwargs):
        func(*args, **kwargs)
        raise Exception('this error is raised by debug decorator "raise_error"')
    return wrapper
##########################################################
class Timer:
    """
    Context manager that measures the running time of a code block.

    Args:
        code_name (str): label for the block; default: None
        print_en (bool): print the elapsed time on exit; default: True

    Example 1 (print time on the console):
    ::

        with Timer('test') as t:
            loss.backward()  # some code
        # prints 'test: time cost = 0.1s'

    Example 2 (grab the elapsed time silently):
    ::

        with Timer(print_en=False) as t:
            loss.backward()  # some code
        time_cost = t.interval  # e.g. 0.1
    """
    def __init__(self, code_name=None, print_en=True):
        self.code_name = code_name
        self.print_en = print_en

    def __enter__(self):
        self.start = time.time()
        return self

    def __exit__(self, *args):
        self.end = time.time()
        self.interval_time = self.end - self.start
        label = f'time cost = {self.interval_time:.4f}s'
        if self.code_name is not None:
            label = f'{self.code_name}: {label}'
        if self.print_en:
            print(label)
        self.print_line = label

    @property
    def interval(self):
        # elapsed seconds of the measured block
        return self.interval_time

    @property
    def name(self):
        return self.code_name

    @property
    def info(self):
        return self.print_line

    @property
    def message(self):
        # alias of `info`
        return self.print_line
def get_time(compact=False):
    """
    Return the current local time as a string.

    Format is '%H:%M:%S %Y-%m-%d', or the compact '%Y%m%d_%H%M%S' form
    when *compact* is True.
    """
    if compact:
        return get_time_compact()
    return time.strftime('%H:%M:%S %Y-%m-%d', time.localtime(time.time()))
def get_time_compact():
    """Return the current time as a compact 'YYYYMMDD_HHMMSS' string."""
    return datetime.now().strftime("%Y%m%d_%H%M%S")
class run_in_dir:
    """
    Context manager: temporarily switch the working directory, restoring
    the original one when the block exits.

    Args:
        dir (str): target directory, relative to the current working directory.
    """
    def __init__(self, dir):
        self.new_dir_relative = dir

    def __enter__(self):
        self.old_dir = os.getcwd()
        self.new_dir = os.path.join(self.old_dir, self.new_dir_relative)
        os.chdir(self.new_dir)

    def __exit__(self, *args):
        # restore the original directory even when the block raised
        os.chdir(self.old_dir)
################# utils from pytorch ###############
def _ntuple(n, name="parse"):
def parse(x):
if isinstance(x, collections.abc.Iterable):
return tuple(x)
return tuple(repeat(x, n))
parse.__name__ = name
return parse
# Common arities for scalar-to-tuple conversion (pattern borrowed from
# torch.nn.modules.utils; see "utils from pytorch" section above).
_single = _ntuple(1, "_single")
_pair = _ntuple(2, "_pair")
_triple = _ntuple(3, "_triple")
_quadruple = _ntuple(4, "_quadruple")
################# some tools #################
def clean_wave_vcd(clean_dir, cnt_en=False):
    """
    Recursively delete every file named exactly "wave.vcd" under *clean_dir*.

    Args:
        clean_dir (str): root directory to sweep.
        cnt_en (bool): if True, print progress every 100 deleted files.
    """
    removed = 0
    for root, _dirs, files in os.walk(clean_dir):
        for filename in files:
            # the name must match exactly; similar names are kept
            if filename != "wave.vcd":
                continue
            os.remove(os.path.join(root, filename))
            if cnt_en:
                removed += 1
                if removed % 100 == 0:
                    print("%d files cleaned" % (removed))
def get_week_range(start_day:str|int="Monday", today=None)->str:
"""
- function:
- return the week range of the current week, the start day can be any day of the week
- for example, if today is 20240807, which is wednesday, if the start_day is "Monday", the output will be "0805~0811"; if the start day is "Tuesday", the output will be "0806~0812"; if the start day is "Thursday", the output will be "0801~0807"
- input:
- start_day: the start day of the week, can be a string or an integer
- string: the name of the day, for example, "Monday", "Tuesday", "Wednesday", "Thursday", "Friday", "Saturday", "Sunday"
- integer: the index of the day, 0 is Monday, 1 is Tuesday, 2 is Wednesday, 3 is Thursday, 4 is Friday, 5 is Saturday, 6 is Sunday, invalid index will be mod 7
- today: the date of the day, if None, the current date will be used;
- formart: "%Y%m%d", e.g. "20240807"
"""
weekday_map = {"Monday": 0, "Tuesday": 1, "Wednesday": 2, "Thursday": 3, "Friday": 4, "Saturday": 5, "Sunday": 6}
start_day = weekday_map[start_day] if isinstance(start_day, str) else start_day % 7
# Get the current date
# today = datetime.today()
if today is None:
today = datetime.today()
else:
today = datetime.strptime(today, "%Y%m%d")
# Calculate the current day of the week (0 is Monday, 6 is Sunday)
current_weekday = today.weekday()
# Calculate the number of days to subtract to get to the start day
days_to_subtract = (current_weekday - start_day) % 7
# Subtract the days to get to the start day
start = today - timedelta(days=days_to_subtract)
# the output format is like "0805~0811"
end = start + timedelta(days=6)
return start.strftime("%m%d") + "~" + end.strftime("%m%d")
def run_with_timeout(timeout):
    """
    Decorator factory: run the wrapped function in a worker thread and raise
    TimeoutError if it does not finish within *timeout* seconds.

    Notes:
        - The worker is a daemon thread, so a timed-out call cannot keep
          the interpreter alive at exit (the abandoned call still runs to
          completion in the background, however).
        - Exceptions raised by the wrapped function are re-raised in the
          caller's thread.
    """
    def decorator(func):
        @wraps(func)
        def wrapper(*args, **kwargs):
            # outcome holds one (failed, value) pair so that a function
            # legitimately *returning* an Exception instance is not
            # confused with one that raised it
            outcome = []

            def target():
                try:
                    outcome.append((False, func(*args, **kwargs)))
                except Exception as exc:
                    outcome.append((True, exc))

            worker = threading.Thread(target=target, daemon=True)
            worker.start()
            # Wait for completion, with the specified timeout
            worker.join(timeout)
            if worker.is_alive():
                raise TimeoutError(f"Function call timed out after {timeout} seconds")
            failed, value = outcome[0]
            if failed:
                raise value
            return value
        return wrapper
    return decorator

406
utils/verilator_call.py Normal file
View File

@@ -0,0 +1,406 @@
# """
# Description : Verilator wrapper for CGA (Coverage-Guided Agent)
# Author : CorrectBench Integration
# """
# import os
# import sys
# # === [Path Auto-Configuration] ===
# # 获取当前脚本的目录
# script_dir = os.path.dirname(os.path.abspath(__file__))
# # 智能判断项目根目录:
# # 如果当前目录下有 loader_saver.py说明我们就在根目录
# if os.path.exists(os.path.join(script_dir, "loader_saver.py")):
# project_root = script_dir
# in_utils_folder = False
# else:
# # 否则假设我们在 utils/ 子目录下,根目录在上级
# project_root = os.path.dirname(script_dir)
# in_utils_folder = True
# # 1. 确保项目根目录在 sys.path 中 (以便能 import utils, config 等)
# if project_root not in sys.path:
# sys.path.insert(0, project_root)
# # 2. 只有当我们确实在 utils/ 子目录下运行时,才需要移除 script_dir
# # 这样可以避免 "import utils" 错误地导入了当前目录而不是 utils 包
# if in_utils_folder and script_dir in sys.path:
# try:
# sys.path.remove(script_dir)
# except ValueError:
# pass
# # =================================
# from utils.utils import run_in_dir
# from utils.subproc import subproc_call
# from loader_saver import autologger as logger
# # 假设 Verilator 在系统 PATH 中
# VERILATOR_BIN = "verilator"
# COVERAGE_BIN = "verilator_coverage"
# def verilator_run_coverage(run_dir, dut_file="DUT.v", tb_file="driver.v", top_module="top_module", timeout=120):
# """
# 运行 Verilator 仿真流程:编译 -> 运行 -> 生成覆盖率 -> 标注
# 返回: [success, coverage_score, annotated_file_path]
# """
# # 1. 编译
# # cmd_compile = f"{VERILATOR_BIN} --binary -j 0 --coverage {dut_file} {tb_file} --top-module {top_module}"
# cmd_compile = f"{VERILATOR_BIN} --binary -j 0 --coverage -Wno-TIMESCALEMOD -Wno-fatal {dut_file} {tb_file} --top-module {top_module}"
# # 2. 运行
# cmd_run = f"./obj_dir/V{top_module}"
# # 3. 标注
# cmd_annotate = f"{COVERAGE_BIN} --annotate logs/annotated logs/coverage.dat"
# with run_in_dir(run_dir):
# # Step 1: Compile
# res = subproc_call(cmd_compile, timeout)
# if res["haserror"]:
# logger.error(f"Verilator Compile Failed: {res['err']}")
# return False, 0.0, None
# # Step 2: Run
# res = subproc_call(cmd_run, timeout)
# if res["haserror"]:
# logger.warning(f"Verilator Run Output: {res['err']}")
# # Step 3: Annotate
# if not os.path.exists("logs"):
# os.makedirs("logs")
# res = subproc_call(cmd_annotate, timeout)
# if res["haserror"]:
# logger.error(f"Verilator Annotation Failed: {res['err']}")
# return False, 0.0, None
# # Step 4: Find Annotated File & Calculate Score
# annotated_dir = os.path.join(run_dir, "logs", "annotated")
# target_file = None
# if os.path.exists(annotated_dir):
# for f in os.listdir(annotated_dir):
# # 排除 driver/testbench只找 DUT
# if (os.path.basename(dut_file) in f or "DUT" in f) and "driver" not in f and "tb" not in f.lower():
# target_file = os.path.join(annotated_dir, f)
# break
# if not target_file:
# logger.error(f"Could not find annotated DUT file in {annotated_dir}")
# return False, 0.0, None
# score = _quick_calc_score(target_file)
# return True, score, target_file
# def _quick_calc_score(filepath):
# try:
# with open(filepath, 'r') as f:
# lines = f.readlines()
# total = 0
# covered = 0
# for line in lines:
# if line.startswith('%') or line.strip().startswith('#'):
# total += 1
# if not (line.startswith('%000000') or line.strip().startswith('#')):
# covered += 1
# return (covered / total * 100.0) if total > 0 else 0.0
# except Exception:
# return 0.0
# if __name__ == "__main__":
# print("--- Self-Test Mode: Initializing CorrectBench Environment ---")
# try:
# from config import Config
# from loader_saver import AutoLogger
# # 初始化配置,优先寻找 custom.yaml
# custom_cfg_path = os.path.join(project_root, "config/custom.yaml")
# if os.path.exists(custom_cfg_path):
# Config(custom_cfg_path)
# else:
# Config() # 使用默认配置
# # 启动日志
# AutoLogger()
# print("--- Environment Initialized. Starting Verilator Test ---")
# except Exception as e:
# print(f"Environment Init Failed: {e}")
# # 如果不是在 CorrectBench 环境下,可能无法继续
# sys.exit(1)
# # === 开始测试 ===
# if len(sys.argv) < 3:
# print("Usage: python3 verilator_call.py <run_dir> <dut_file> <tb_file>")
# print("Example: python3 verilator_call.py saves/lemmings4 prob_lemmings4.v final_TB.v")
# else:
# run_dir = sys.argv[1]
# dut = sys.argv[2]
# tb = sys.argv[3]
# success, score, path = verilator_run_coverage(run_dir, dut, tb)
# print(f"\n[Test Result]\nSuccess: {success}\nScore: {score:.2f}%\nAnnotated File: {path}")
"""
Description : Verilator wrapper for CGA - AUTO TOP-MODULE DETECTION
Author : CorrectBench Integration
"""
import os
import sys
import shutil
import re  # regular expressions, used to detect the top-module name
# === [Path Auto-Configuration] ===
# Directory containing this script.
script_dir = os.path.dirname(os.path.abspath(__file__))
# If loader_saver.py sits next to this file we are running from the
# project root; otherwise assume we are inside the utils/ subfolder and
# the root is one level up.
if os.path.exists(os.path.join(script_dir, "loader_saver.py")):
    project_root = script_dir
    in_utils_folder = False
else:
    project_root = os.path.dirname(script_dir)
    in_utils_folder = True
# Make sure the project root is importable (for `utils`, `config`, ...).
if project_root not in sys.path:
    sys.path.insert(0, project_root)
# When running from inside utils/, drop script_dir from sys.path so that
# `import utils` resolves to the package rather than this directory.
if in_utils_folder and script_dir in sys.path:
    try:
        sys.path.remove(script_dir)
    except ValueError:
        pass
# =================================
from utils.utils import run_in_dir
from utils.subproc import subproc_call
from loader_saver import autologger as logger
# External tool executables, assumed to be available on PATH.
VERILATOR_BIN = "verilator"
COVERAGE_BIN = "verilator_coverage"
def get_module_name(file_path):
    """
    Extract the first Verilog module name declared in *file_path*.

    Returns the module identifier, or None if the file is missing,
    unreadable, or contains no `module` declaration.
    """
    if not os.path.exists(file_path):
        return None
    try:
        with open(file_path, 'r') as src:
            text = src.read()
    except Exception:
        return None
    # matches "module <name>" at the start of a (possibly indented) line,
    # covering both "module xxx;" and "module xxx ("
    found = re.search(r'^\s*module\s+(\w+)', text, re.MULTILINE)
    return found.group(1) if found else None
def verilator_run_coverage(run_dir, dut_file="DUT.v", tb_file="driver.v", top_module="top_module", timeout=120):
    """
    Run the full Verilator coverage flow: compile -> simulate -> annotate.

    Args:
        run_dir (str): directory holding the sources; build artifacts
            (obj_dir/, logs/annotated/, coverage.dat) are created inside it.
        dut_file (str): DUT source file name, relative to run_dir.
        tb_file (str): testbench source file name, relative to run_dir.
        top_module (str): fallback top-module name used when none can be
            auto-detected from tb_file.
        timeout (int): per-step timeout in seconds passed to subproc_call.

    Returns:
        tuple: (success: bool, coverage_score: float percent,
        annotated_file_path: str or None).
    """
    abs_run_dir = os.path.abspath(run_dir)
    abs_dut = os.path.abspath(os.path.join(run_dir, dut_file))
    abs_tb = os.path.abspath(os.path.join(run_dir, tb_file))
    abs_obj_dir = os.path.join(abs_run_dir, "obj_dir")
    abs_annotated = os.path.join(abs_run_dir, "logs", "annotated")
    # Prefer the module name found inside tb_file over the default
    # "top_module": we want to simulate the testbench, not the bare DUT.
    detected_top = get_module_name(abs_tb)
    if detected_top:
        print(f"[DEBUG] Auto-detected top module from {os.path.basename(tb_file)}: '{detected_top}'")
        real_top_module = detected_top
    else:
        print(f"[DEBUG] Could not detect top module, using default: '{top_module}'")
        real_top_module = top_module
    # Remove stale build artifacts from a previous compile
    if os.path.exists(abs_obj_dir):
        shutil.rmtree(abs_obj_dir)
    # Build the compile command; the top level is selected solely via
    # --top-module
    cmd_compile = (
        f"{VERILATOR_BIN} --binary -j 0 --coverage --timing "
        f"-Wno-TIMESCALEMOD -Wno-fatal -Wno-STMTDLY "
        f"--Mdir {abs_obj_dir} "
        f"{abs_dut} {abs_tb} --top-module {real_top_module}"
    )
    cmd_run = f"{abs_obj_dir}/V{real_top_module}"
    cmd_annotate = f"{COVERAGE_BIN} --annotate {abs_annotated} coverage.dat"
    with run_in_dir(abs_run_dir):
        # Step 1: Compile (success is judged by the produced binary, since
        # verilator may exit non-zero on warnings)
        if os.path.exists("coverage.dat"): os.remove("coverage.dat")
        res = subproc_call(cmd_compile, timeout)
        if not os.path.exists(f"{abs_obj_dir}/V{real_top_module}"):
            logger.error(f"Verilator Compile Failed.")
            if res['err']: print(f"[COMPILE STDERR]:\n{res['err']}")
            return False, 0.0, None
        # Step 2: Run the simulation binary
        res = subproc_call(cmd_run, timeout)
        # Print the simulation output (useful to confirm simulated time
        # actually advanced); stderr/warnings are intentionally suppressed
        print(f"--- Simulation Output ({real_top_module}) ---")
        if res['out']: print(res['out'])
        if not os.path.exists("coverage.dat"):
            logger.error("coverage.dat not created.")
            return False, 0.0, None
        # Step 3: Annotate the coverage data
        if not os.path.exists(abs_annotated):
            os.makedirs(abs_annotated)
        res = subproc_call(cmd_annotate, timeout)
        # Step 4: Find Annotated File (Target: DUT)
        target_file = None
        generated_files = os.listdir(abs_annotated) if os.path.exists(abs_annotated) else []
        if generated_files:
            for f in generated_files:
                # We want the DUT's coverage; exclude testbench files
                is_dut = (os.path.basename(dut_file) in f) or \
                         (top_module in f) or \
                         ("DUT" in f)
                is_tb = ("driver" in f) or \
                        ("tb" in f.lower() and "tb" not in os.path.basename(dut_file).lower())
                # Also exclude files whose name contains the auto-detected
                # top module (e.g. the testbench source itself)
                if real_top_module in f:
                    is_tb = True
                if is_dut and not is_tb:
                    target_file = os.path.join(abs_annotated, f)
                    break
        if not target_file:
            logger.error(f"Could not find annotated DUT file in {generated_files}")
            return False, 0.0, None
        score = _quick_calc_score(target_file)
        return True, score, target_file
# def _quick_calc_score(filepath):
# try:
# with open(filepath, 'r') as f:
# lines = f.readlines()
# total = 0
# covered = 0
# for line in lines:
# if line.startswith('%') or line.strip().startswith('#'):
# total += 1
# if not (line.startswith('%000000') or line.strip().startswith('#')):
# covered += 1
# return (covered / total * 100.0) if total > 0 else 0.0
# except Exception:
# return 0.0
def _quick_calc_score(filepath):
"""
计算 Verilator 覆盖率文件的覆盖率分数
支持的格式:
- %NNNNNN: 行覆盖计数(%000000 表示未执行)
- ~NNNNNN: 分支/条件覆盖计数
- NNNNNN: 空格开头+数字(某些 Verilator 版本)
- ^NNNNNN: 未覆盖分支标记
"""
import re
try:
with open(filepath, 'r', encoding='utf-8', errors='ignore') as f:
lines = f.readlines()
# 匹配各种覆盖率标记格式
pct_pattern = re.compile(r'^%(\d+)\s+') # %NNNNNN code
tilde_pattern = re.compile(r'^~(\d+)\s+') # ~NNNNNN code
caret_pattern = re.compile(r'^\^(\d+)\s+') # ^NNNNNN code
plain_pattern = re.compile(r'^\s*(\d+)\s+') # "NNNNNN" or " NNNNNN"
# 过滤声明语句(不计入覆盖率)
decl_pattern = re.compile(r'^\s*(input|output|inout|wire|reg|logic|parameter|localparam|assign)\b')
total = 0
covered = 0
for line in lines:
line_stripped = line.strip()
if not line_stripped:
continue
count = -1
is_covered = False
# 尝试匹配各种格式
match_pct = pct_pattern.match(line_stripped)
match_tilde = tilde_pattern.match(line_stripped)
match_caret = caret_pattern.match(line_stripped)
match_plain = plain_pattern.match(line_stripped)
if match_pct:
count = int(match_pct.group(1))
# 获取代码部分用于过滤
code_part = line_stripped[7:].strip() if len(line_stripped) > 7 else ""
if not decl_pattern.match(code_part):
total += 1
if count > 0:
covered += 1
elif match_tilde:
count = int(match_tilde.group(1))
code_part = line_stripped[7:].strip() if len(line_stripped) > 7 else ""
if not decl_pattern.match(code_part):
total += 1
if count > 0:
covered += 1
elif match_caret:
# ^ 表示未覆盖分支
code_part = line_stripped[7:].strip() if len(line_stripped) > 7 else ""
if not decl_pattern.match(code_part):
total += 1
# caret 表示未覆盖,不计入 covered
elif match_plain:
count = int(match_plain.group(1))
# 计算数字部分的长度
num_str = match_plain.group(1)
code_part = line_stripped[len(num_str):].strip()
if not decl_pattern.match(code_part):
total += 1
if count > 0:
covered += 1
return (covered / total * 100.0) if total > 0 else 0.0
except Exception as e:
print(f"[DEBUG] _quick_calc_score error: {e}")
return 0.0
if __name__ == "__main__":
    # Self-test entry point: initialize the CorrectBench environment, then
    # run one Verilator coverage pass on the given DUT/testbench pair.
    try:
        from config import Config
        from loader_saver import AutoLogger
        custom_cfg_path = os.path.join(project_root, "config/custom.yaml")
        # prefer the user's custom.yaml when present
        if os.path.exists(custom_cfg_path):
            Config(custom_cfg_path)
        else:
            Config()
        AutoLogger()
    except Exception as e:
        # report why initialization failed instead of exiting silently
        print(f"Environment Init Failed: {e}")
        sys.exit(1)
    # three positional args are required: run_dir, dut_file, tb_file
    # (the old `< 3` guard still allowed an IndexError on sys.argv[3])
    if len(sys.argv) < 4:
        print("Usage: python3 verilator_call.py <run_dir> <dut_file> <tb_file>")
    else:
        run_dir = sys.argv[1]
        dut = sys.argv[2]
        tb = sys.argv[3]
        success, score, path = verilator_run_coverage(run_dir, dut, tb)
        print(f"\n[Test Result]\nSuccess: {success}\nScore: {score:.2f}%\nAnnotated File: {path}")