上传所有文件
This commit is contained in:
799
LLM_call.py
Normal file
799
LLM_call.py
Normal file
@@ -0,0 +1,799 @@
|
||||
"""
|
||||
Description : This file is related to GPT call, include the function of calling GPT and the function of running GPT in chatgpt mode
|
||||
Author : Ruidi Qiu (ruidi.qiu@tum.de)
|
||||
Time : 2023/11/17 15:01:06
|
||||
LastEdited : 2024/9/3 16:52:31
|
||||
"""
|
||||
from config import Config
|
||||
from openai import OpenAI
|
||||
from anthropic import Anthropic
|
||||
import loader_saver as ls
|
||||
from utils.utils import Timer
|
||||
import tiktoken
|
||||
import copy
|
||||
import re
|
||||
import requests
|
||||
import json
|
||||
import httpx
|
||||
import time
|
||||
# import Image
|
||||
import openai
|
||||
import os
|
||||
from datetime import datetime, timedelta, timezone
|
||||
from config.config import GPT_MODELS
|
||||
from http import HTTPStatus
|
||||
|
||||
|
||||
__all__ = ["llm_call", "gpt_call", "claude_call", "run_like_a_chatgpt"]

# Price table consumed by cost_calculator() and LLM_Manager.update_usage().
PRICING_MODELS = {
    # model: [price_per_1000_prompt_tokens, price_per_1000_completion_tokens]
    # qwen
    "qwen-max": [0.02, 0.06],
    "qwen-plus": [0.004, 0.012],
    # claude
    "claude-3-5-sonnet-20240620": [0.003, 0.015],
    "claude-3-opus-20240229": [0.015, 0.075],
    "claude-3-sonnet-20240229": [0.003, 0.015],
    "claude-3-haiku-20240307": [0.00025, 0.00125],
    "claude-2.1": [0.008, 0.024],
    "claude-2.0": [0.008, 0.024],
    # gpt 4o
    'gpt-4o-2024-08-06' : [0.0025, 0.01],
    'gpt-4o-2024-05-13' : [0.005, 0.015],
    'gpt-4o-mini-2024-07-18' : [0.00015, 0.0006],
    # gpt 4 turbo
    'gpt-4-turbo-2024-04-09': [0.01, 0.03],
    'gpt-4-0125-preview': [0.01, 0.03],
    'gpt-4-1106-preview': [0.01, 0.03],
    'gpt-4-1106-vision-preview': [0.01, 0.03],
    # gpt 4 (old)
    'gpt-4': [0.03, 0.06],
    'gpt-4-32k': [0.06, 0.12],
    # gpt 3.5 turbo
    'gpt-3.5-turbo-0125': [0.0005, 0.0015],
    'gpt-3.5-turbo-instruct': [0.0015, 0.0020],
    # gpt 3.5 turbo old
    'gpt-3.5-turbo-1106': [0.0010, 0.0020],
    'gpt-3.5-turbo-0613': [0.0015, 0.0020],
    'gpt-3.5-turbo-16k-0613': [0.0030, 0.0040],
    'gpt-3.5-turbo-0301': [0.0030, 0.0040]
}

# NOTE(review): judging by the legacy (commented-out) gpt_call, these are models
# for which the JSON response_format kwarg must NOT be sent — confirm intent.
JSON_MODELS = ["gpt-4-0613", "gpt-4-32k-0613", "gpt-3.5-turbo-0613", "gpt-3.5-turbo-16k-0613"]

# MODEL_REDIRECTION is in config

# Default system prompt used when the caller does not supply one.
DEFAULT_SYS_MESSAGE = "You are the strongest AI in the world. I always trust you. You already have the knowledge about python and verilog. Do not save words by discarding information."
# System prompt for the interactive run_like_a_chatgpt() loop.
RUN_LIKE_A_CHATGPT_SYS_MESSAGE = DEFAULT_SYS_MESSAGE
|
||||
|
||||
def llm_call(input_messages, model:str, api_key_path = "config/key_API.json", system_message = None, temperature = None, json_mode = False) -> tuple[str, dict]:
    """
    Dispatch a chat request to the right backend (Claude vs GPT/Qwen) and
    record token usage on the global ``llm_manager`` singleton.

    - input:
        - input_messages: list of dicts like [{"role": "user", "content": "hello"}, ...];
          a bare string is accepted and wrapped as a single user turn
        - model: model name, e.g. "gpt-3.5-turbo-0613", "claude-3-...", "qwen-plus"
        - api_key_path: path to the JSON file holding the API keys
        - system_message: used only when input_messages has no system message;
          None falls back to the backend default
        - temperature: optional sampling temperature
        - json_mode: ask the backend for a JSON-formatted reply
    - output:
        - answer: the model's reply text
        - other_infos: dict with keys "messages", "time", "system_fingerprint",
          "model", "usage" ({"completion_tokens", "prompt_tokens", "total_tokens"})
    - raises:
        - ValueError: when the model prefix is not claude/gpt/qwen
    - note: the original annotation ``list[str, dict]`` was invalid typing —
      this function returns a (answer, other_infos) tuple.
    """
    # Allow a plain prompt string as a convenience.
    if isinstance(input_messages, str):
        input_messages = [{"role": "user", "content": input_messages}]
    if model.startswith("claude"):
        output = claude_call(input_messages, model, api_key_path, system_message, temperature, json_mode)
    elif model.startswith("gpt") or model.startswith("qwen"):
        output = gpt_call(input_messages, model, api_key_path, system_message, temperature, json_mode)
    else:
        raise ValueError("model %s is not supported."%(model))
    # Accumulate token usage/cost for reporting.
    llm_manager.update_usage(output[1]["usage"]["prompt_tokens"], output[1]["usage"]["completion_tokens"], model)
    return output
|
||||
|
||||
|
||||
# def gpt_call(input_messages, model, api_key_path, system_message = None, temperature = None, json_mode = False):
|
||||
# """
|
||||
# This func is used to call gpt
|
||||
# - input:
|
||||
# - input_messages: (not including system message) list of dict like [{"role": "user", "content": "hello"}, {"role": "assistant", "content": "hi"}, ...]
|
||||
# - gpt_model: str like "gpt-3.5-turbo-0613"
|
||||
# - system_message: (valid when input_messages have no sys_message) customized system message, if None, use default system message
|
||||
# - output:
|
||||
# - answer: what gpt returns
|
||||
# - other_infos: dict:
|
||||
# - messages: input_messages + gpt's response, list of dict like [{"role": "user", "content": "hello"}, {"role": "assistant", "content": "hi"}, ...]
|
||||
# - time: time used by gpt
|
||||
# - system_fingerprint: system_fingerprint of gpt's response
|
||||
# - model: model used by gpt
|
||||
# - usage: dict: {"completion_tokens": 17, "prompt_tokens": 57, "total_tokens": 74}
|
||||
# - notes:
|
||||
# - as for the official response format from gpt, see the end of this file
|
||||
# """
|
||||
# client = enter_api_key(api_key_path)
|
||||
# # system message
|
||||
# has_sysmessage = False
|
||||
# for message in input_messages:
|
||||
# if message["role"] == "system":
|
||||
# has_sysmessage = True
|
||||
# break
|
||||
# if not has_sysmessage:
|
||||
# if system_message is None:
|
||||
# messages = [{"role": "system", "content": DEFAULT_SYS_MESSAGE}]
|
||||
# else:
|
||||
# messages = [{"role": "system", "content": system_message}]
|
||||
# else:
|
||||
# messages = []
|
||||
# messages.extend(input_messages)
|
||||
# # other parameters
|
||||
# more_completion_kwargs = {}
|
||||
# if temperature is not None:
|
||||
# more_completion_kwargs["temperature"] = temperature
|
||||
# if json_mode:
|
||||
# if not model in JSON_MODELS:
|
||||
# more_completion_kwargs["response_format"] = {"type": "json_object"}
|
||||
# # call gpt
|
||||
# with Timer(print_en=False) as gpt_response:
|
||||
# completion = client.chat.completions.create(
|
||||
# model=model,
|
||||
# messages=messages,
|
||||
# **more_completion_kwargs
|
||||
# )
|
||||
# answer = completion.choices[0].message.content
|
||||
# messages.append({"role": "assistant", "content": answer})
|
||||
# time = round(gpt_response.interval, 2)
|
||||
# system_fingerprint = completion.system_fingerprint
|
||||
# usage = {"completion_tokens": completion.usage.completion_tokens, "prompt_tokens": completion.usage.prompt_tokens, "total_tokens": completion.usage.total_tokens}
|
||||
# model = completion.model
|
||||
# other_infos = {"messages": messages, "time": time, "system_fingerprint": system_fingerprint, "model": model, "usage": usage}
|
||||
# # return answer, messages, time, system_fingerprint
|
||||
# return answer, other_infos
|
||||
|
||||
|
||||
|
||||
def gpt_call(input_messages, model, api_key_path, system_message=None, temperature=None, json_mode=False):
    """
    Call an OpenAI-compatible chat endpoint (OpenAI GPT or DashScope Qwen) with
    generous timeouts (5 min connect / 30 min read) and up to 5 retries.

    - input:
        - input_messages: list of message dicts; a system message is prepended
          when the list does not already contain one
        - model: model name, e.g. "gpt-4o-2024-08-06" or "qwen-plus"
        - api_key_path: path to a JSON file with keys "openai"/"gpt" or "dashscope"
        - system_message: only used when input_messages has no system message
        - temperature: optional sampling temperature
        - json_mode: request response_format json_object (GPT models only)
    - output:
        - answer: reply text ("" when every attempt failed)
        - other_infos: {"messages", "time", "system_fingerprint", "model", "usage"}
    """
    # --- 1. system message handling ---
    has_sysmessage = any(message["role"] == "system" for message in input_messages)
    if not has_sysmessage:
        sys_content = system_message if system_message is not None else "You are a helpful assistant."
        # Defensive: a non-string system message would make the API reject the request.
        if not isinstance(sys_content, str):
            print(f" [Warning] system_message is not string, got type: {type(sys_content)}")
            sys_content = str(sys_content)
        messages = [{"role": "system", "content": sys_content}]
    else:
        messages = []
    messages.extend(input_messages)

    # Defensive: coerce any non-string message content to str.
    for i, msg in enumerate(messages):
        if not isinstance(msg.get("content"), str):
            print(f" [Error] messages[{i}]['content'] is not string!")
            print(f" Type: {type(msg.get('content'))}")
            print(f" Value: {msg.get('content')}")
            messages[i]["content"] = str(msg.get("content"))

    # --- 2. optional request parameters ---
    more_completion_kwargs = {}
    if temperature is not None:
        more_completion_kwargs["temperature"] = temperature
    if json_mode and "gpt" in model:  # Qwen does not reliably support json_object
        more_completion_kwargs["response_format"] = {"type": "json_object"}

    # Well-formed failure payload so callers (llm_call) can still read
    # other_infos["usage"] — previously a bare {} was returned, which crashed them.
    failure_infos = {
        "messages": messages,
        "time": 0.0,
        "system_fingerprint": "",
        "model": model,
        "usage": {"completion_tokens": 0, "prompt_tokens": 0, "total_tokens": 0},
    }

    # --- 3. load the API key by hand (so we can inject httpx timeouts below) ---
    try:
        with open(api_key_path, 'r') as f:
            keys = json.load(f)
        if model.startswith("qwen"):
            api_key = keys.get("dashscope")  # DashScope key for Qwen models
            base_url = "https://dashscope.aliyuncs.com/compatible-mode/v1"
        else:
            api_key = keys.get("openai") or keys.get("gpt")  # accept either key name
            base_url = None
        if not api_key:
            print(f"❌ [Error] API Key not found in {api_key_path} for model {model}!")
            return "", failure_infos
    except Exception as e:
        print(f"❌ [Error] Failed to load API Key: {e}")
        return "", failure_infos

    # --- 4. client with explicit timeouts ---
    http_client = httpx.Client(
        timeout=httpx.Timeout(
            connect=300.0,   # 5 min to establish the connection
            read=1800.0,     # 30 min for slow generations
            write=60.0,
            pool=60.0
        )
    )
    client = openai.OpenAI(
        api_key=api_key,
        base_url=base_url,
        http_client=http_client
    )

    # --- 5. retry loop ---
    MAX_RETRIES = 5
    answer = ""
    system_fingerprint = ""
    usage = {}
    time_used = 0.0
    try:
        for attempt in range(MAX_RETRIES):
            try:
                with Timer(print_en=False) as gpt_response:
                    completion = client.chat.completions.create(
                        model=model,
                        messages=messages,
                        **more_completion_kwargs
                    )
                answer = completion.choices[0].message.content
                system_fingerprint = completion.system_fingerprint
                usage = {
                    "completion_tokens": completion.usage.completion_tokens,
                    "prompt_tokens": completion.usage.prompt_tokens,
                    "total_tokens": completion.usage.total_tokens
                }
                time_used = round(gpt_response.interval, 2)
                if answer:
                    print(f" [LLM] Success. Time: {time_used}s. Length: {len(answer)}")
                break  # success — stop retrying
            except httpx.ConnectTimeout:
                print(f"[Timeout] Connection failed (>5min). Retrying {attempt+1}/{MAX_RETRIES}...")
                time.sleep(5)
            except httpx.ReadTimeout:
                print(f" [Timeout] Generation too slow (>30min). Retrying {attempt+1}/{MAX_RETRIES}...")
                time.sleep(5)
            except Exception as e:
                error_msg = str(e)
                if 'RequestTimeOut' in error_msg or '500' in error_msg:
                    wait_time = 15 * (attempt + 1)  # server-side timeout: back off longer
                    print(f"🔄 [Server Timeout] API server busy. Retrying...")
                    time.sleep(wait_time)
                else:
                    wait_time = 5 * (attempt + 1)  # linear back-off
                    print(f"⚠️ [Error] Attempt {attempt+1} failed: {e}")
                    time.sleep(wait_time)
    finally:
        # Fix: the httpx client was previously leaked on every call; close its pool.
        http_client.close()

    # --- 6. package the result ---
    if answer:
        messages.append({"role": "assistant", "content": answer})
    else:
        # All attempts failed: report zero usage instead of crashing callers.
        usage = {"completion_tokens": 0, "prompt_tokens": 0, "total_tokens": 0}

    other_infos = {
        "messages": messages,
        "time": time_used,
        "system_fingerprint": system_fingerprint,
        "model": model,
        "usage": usage
    }

    return answer, other_infos
|
||||
|
||||
def claude_call(input_messages, model, api_key_path, system_message = None, temperature = None, json_mode = False):
    """
    Call an Anthropic Claude chat model.
    #### input:
    - input_messages: list of dicts like [{"role": "user", "content": "hello"}, ...]
    - model: Claude model name, e.g. "claude-3-5-sonnet-20240620"
    - api_key_path: path to the JSON key file (key "ANTHROPIC_API_KEY")
    - system_message: used only when input_messages has no system message
    - temperature: optional sampling temperature
    - json_mode: nudge the model toward JSON output via a prompt suffix and a
      prefilled "{" assistant turn (Claude has no native json_object mode)
    #### output:
    - answer: the model's reply text (prefixed with "{" when json_mode prefill was used)
    - other_infos: {"messages", "time", "system_fingerprint" (always ""), "model", "usage"}
    """
    client = enter_api_key(api_key_path, provider="anthropic")
    prefill = None
    # system message: prepend one when the caller did not provide it
    has_sysmessage = False
    for message in input_messages:
        if message["role"] == "system":
            has_sysmessage = True
            break
    if not has_sysmessage:
        if system_message is None:
            messages = [{"role": "system", "content": DEFAULT_SYS_MESSAGE}]
        else:
            messages = [{"role": "system", "content": system_message}]
    else:
        messages = []
    messages.extend(input_messages)
    # The Anthropic messages API rejects "system" roles inside the message list.
    # Fix: build a filtered list instead of list.remove() inside a for-loop over
    # the same list, which skips the element following each removal.
    # NOTE(review): the system prompt is dropped entirely rather than passed via
    # the API's `system=` parameter — confirm this is intended.
    messages = [message for message in messages if message["role"] != "system"]
    # other parameters
    more_completion_kwargs = {}
    if temperature is not None:
        more_completion_kwargs["temperature"] = temperature
    if json_mode:
        messages[-1]["content"] += "\nYour reply should be in JSON format."
        prefill = {"role": "assistant", "content": "{"}
        messages.append(prefill)
    # call claude; 3.5 Sonnet supports a larger completion budget
    if model == "claude-3-5-sonnet-20240620":
        max_tokens = 8192
    else:
        max_tokens = 4096
    with Timer(print_en=False) as gpt_response:
        completion = client.messages.create(
            max_tokens=max_tokens,
            model=model,
            messages=messages,
            **more_completion_kwargs
        )
    answer = completion.content[0].text
    if prefill is not None:
        # Re-attach the prefilled "{" that Claude continues from.
        answer = prefill["content"] + answer
    messages.append({"role": "assistant", "content": answer})
    time = round(gpt_response.interval, 2)
    system_fingerprint = ""  # Anthropic has no fingerprint equivalent
    usage = {"completion_tokens": completion.usage.output_tokens, "prompt_tokens": completion.usage.input_tokens, "total_tokens": completion.usage.input_tokens + completion.usage.output_tokens}
    other_infos = {"messages": messages, "time": time, "system_fingerprint": system_fingerprint, "model": model, "usage": usage}
    return answer, other_infos
|
||||
|
||||
def run_like_a_chatgpt():
    """
    Interactive terminal chat loop driven by the global Config.

    Reads the model/key/temperature from Config, optionally preloads the first
    user message from a prompt file, then loops: read user input, call the LLM,
    print the answer. Typing exit/quit/break (or an empty line) ends the loop.
    Conversation and per-response metadata are saved according to the
    config.save settings.
    """
    config = Config()
    gpt_model = config.gpt.model
    gpt_key_path = config.gpt.key_path
    # Optionally seed the conversation with a prompt loaded from disk.
    if config.gpt.chatgpt.start_form == 'prompt':
        preloaded_prompt = ls.load_txt(config.load.prompt.path)
    else:
        preloaded_prompt = None
    # Display name used when echoing replies.
    if gpt_model.startswith("gpt"):
        llm_name = "ChatGPT"
    elif gpt_model.startswith("claude"):
        llm_name = "Claude"
    else:
        llm_name = "LLM"
    messages = [{"role": "system", "content": RUN_LIKE_A_CHATGPT_SYS_MESSAGE}]
    # Per-response metadata (fingerprint, model, time) — merged back into the
    # transcript by gen_messages_more_info() at the end.
    response_data_dicts = []
    while True:
        # load prompt: the preloaded prompt is consumed exactly once
        if preloaded_prompt is not None:
            content = preloaded_prompt
            preloaded_prompt = None
            print("User (preloaded prompt): %s"%(content))
            ls.save_log_line("(the first user message is from preloaded prompt)", config)
        else:
            content = input("User: ")
        # break loop on any of the quit words or empty input
        if content in ["exit", "quit", "break", "", None]:
            break
        # send prompt to gpt
        messages.append({"role": "user", "content": content})
        # run gpt
        answer, other_infos = llm_call(
            input_messages = messages,
            model = gpt_model,
            api_key_path = gpt_key_path,
            system_message = RUN_LIKE_A_CHATGPT_SYS_MESSAGE,
            temperature = config.gpt.temperature
        )
        messages, time, system_fingerprint = other_infos["messages"], other_infos["time"], other_infos["system_fingerprint"]
        # collect metadata from this response
        data_dict = {}
        data_dict["system_fingerprint"] = system_fingerprint
        data_dict["model"] = gpt_model
        data_dict["time"] = time
        response_data_dicts.append(data_dict)
        # echo the reply
        print(f'{llm_name}: {answer}')
        print("(%ss used)" % (time))
        # one_time_talk: answer a single prompt and exit
        if config.gpt.chatgpt.one_time_talk:
            break
    # Persist the transcript (plain and metadata-enriched variants).
    messages_plus = gen_messages_more_info(messages, response_data_dicts)
    if config.save.log.en:
        ls.save_messages_to_log(messages_plus, config)
    if config.save.message.en:
        ls.gpt_message_individual_save(messages, config, file_name="messages")
        ls.gpt_message_individual_save(messages_plus, config, file_name="messages_plus")
|
||||
|
||||
|
||||
# def enter_api_key(api_key_path, provider="openai"):
|
||||
# if provider == "openai":
|
||||
# key = ls.load_json_dict(api_key_path)["OPENAI_API_KEY"]
|
||||
# client = OpenAI(api_key=key)
|
||||
# elif provider == "anthropic":
|
||||
# key = ls.load_json_dict(api_key_path)["ANTHROPIC_API_KEY"]
|
||||
# client = Anthropic(api_key=key)
|
||||
# else:
|
||||
# raise ValueError("provider %s is not supported."%(provider))
|
||||
# return client
|
||||
|
||||
|
||||
def enter_api_key(api_key_path, provider="openai"):
    """
    Build an API client for *provider* from the key file at *api_key_path*.

    For "openai", the OPENAI_BASE_URL environment variable (when set) redirects
    the client to a proxy/gateway endpoint; without it, requests go straight to
    openai.com, which fails with 401 behind a proxy setup.
    Raises ValueError for an unknown provider.
    """
    if provider == "openai":
        key = ls.load_json_dict(api_key_path)["OPENAI_API_KEY"]
        base_url = os.environ.get("OPENAI_BASE_URL")
        if base_url:
            print(f"DEBUG: Connecting to {base_url}...")  # debugging aid
            return OpenAI(api_key=key, base_url=base_url)
        return OpenAI(api_key=key)
    if provider == "anthropic":
        key = ls.load_json_dict(api_key_path)["ANTHROPIC_API_KEY"]
        return Anthropic(api_key=key)
    raise ValueError("provider %s is not supported."%(provider))
|
||||
|
||||
|
||||
def gen_messages_more_info(original_messages, response_data_dicts):
    """
    Return a deep copy of *original_messages* in which each assistant turn is
    merged with the corresponding dict from *response_data_dicts* (matched in
    order), and every message gains an "idx" key with its list position.
    The input list is not mutated.
    """
    enriched = copy.deepcopy(original_messages)
    hit = 0  # index into response_data_dicts; extra info applies to assistant turns only
    for pos in range(len(enriched)):
        if enriched[pos]["role"] == "assistant":
            enriched[pos] = {**enriched[pos], **response_data_dicts[hit]}
            hit += 1
    # stamp each message with its position
    for pos, msg in enumerate(enriched):
        msg["idx"] = pos
    return enriched
|
||||
|
||||
|
||||
def dalle3():
    """
    Interactive DALL-E 3 demo loop (not well packaged).

    Repeatedly asks for an image name and a prompt, generates one image via the
    OpenAI images API, downloads it into saves/dalle3/ and renames it to
    <name>.png. Typing "exit" at either prompt ends the loop.
    note: free DALL-E 3 is also available via Microsoft Copilot.
    """
    def download_image(url, folder_path):
        # Download *url* into *folder_path*, keeping the URL's basename.
        response = requests.get(url)
        file_path = os.path.join(folder_path, os.path.basename(url))
        with open(file_path, "wb") as file:
            file.write(response.content)
        return file_path

    model_name = "dall-e-3"
    image_size = "1024x1024" # 1792x1024, 1024x1024, 1024x1792
    download_folder = r"saves/dalle3/"
    os.makedirs(download_folder, exist_ok=True)

    while True:
        name = input("please name the generated figure (\"exit\" to exit): ")
        if name == "exit":
            break
        prompt = input("please input the prompt(\"exit\" to exit): ")
        if prompt == "exit":
            break

        try:
            # num_images = int(input("please input the number of figures (default=1):") or "1")
            num_images = 1
            print("generating your figure...")
            client = enter_api_key('config/key_API.json')
            response = client.images.generate(
                model=model_name,
                prompt=prompt,
                size=image_size,
                quality="standard",
                n=num_images,
            )
            image_url = response.data[0].url
            # the name should end with .png
            file_name = name + ".png"
            file_path = download_image(image_url, download_folder)
            new_file_path = os.path.join(download_folder, file_name)
            os.rename(file_path, new_file_path)
            print("figure was downloaded to %s" %(new_file_path))

        except requests.exceptions.HTTPError as err:
            print("Request Error: ", err.response.text)

        except Exception as e:
            print("Error: ", str(e))
|
||||
|
||||
|
||||
|
||||
############### utils of gpt ###############
|
||||
def num_tokens_from_string(string: str, model_name="gpt-4") -> int:
    """
    Count the tokens in a single text string with tiktoken.

    Unknown model names fall back to the cl100k_base encoding.
    https://github.com/openai/openai-cookbook/blob/main/examples/How_to_count_tokens_with_tiktoken.ipynb
    """
    try:
        enc = tiktoken.encoding_for_model(model_name)
    except KeyError:
        enc = tiktoken.get_encoding("cl100k_base")
    return len(enc.encode(string))
|
||||
|
||||
def num_tokens_from_messages(messages, model="gpt-3.5-turbo-0613"):
    """Return the number of tokens used by a list of chat messages."""
    try:
        encoding = tiktoken.encoding_for_model(model)
    except KeyError:
        encoding = tiktoken.get_encoding("cl100k_base")
    if model not in PRICING_MODELS:
        raise NotImplementedError(f"""num_tokens_from_messages() is not presently implemented for model {model}.
See https://github.com/openai/openai-python/blob/main/chatml.md for information on how messages are converted to tokens.""")
    total = 2  # every reply is primed with <im_start>assistant
    for message in messages:
        total += 4  # every message follows <im_start>{role/name}\n{content}<im_end>\n
        for key, value in message.items():
            total += len(encoding.encode(value))
            if key == "name":  # if there's a name, the role is omitted
                total -= 1  # role is always required and always 1 token
    return total
|
||||
|
||||
# def extract_code(text, code_type):
|
||||
# """
|
||||
# #### function:
|
||||
# - extract code from text
|
||||
# #### input:
|
||||
# - text: str, gpt's response
|
||||
# - code_type: str, like "verilog"
|
||||
# #### output:
|
||||
# - list of found code blocks
|
||||
# """
|
||||
# code_type = code_type.lower()
|
||||
# start = "```" + code_type
|
||||
# end = "```"
|
||||
# verilog_blocks = re.findall(start + r'\s*(.*?)'+ end, text, re.DOTALL)
|
||||
# if verilog_blocks:
|
||||
# return verilog_blocks
|
||||
# else:
|
||||
# # return [""]
|
||||
# return [text]
|
||||
|
||||
def extract_code(text, code_type):
    """
    Extract code blocks from an LLM response.

    - input:
        - text: the raw model reply
        - code_type: requested language tag, e.g. "verilog"
          (NOTE: currently unused — any fenced block is accepted, since models
          often omit or mislabel the language tag)
    - output: list of extracted code strings, falling back to:
        1. all fenced Markdown blocks (``` with optional language tag);
        2. the span from the first "module" to the last "endmodule" for
           un-fenced Verilog-style replies;
        3. [text] unchanged when nothing matches.
    - fix: removed the leftover debug prints that dumped the full raw reply to
      stdout on every call.
    """
    # 1. Fenced Markdown blocks, language tag optional.
    fenced = re.findall(r"```(?:\w+)?\s*(.*?)```", text, re.DOTALL)
    if fenced:
        return fenced

    # 2. Un-fenced reply that still contains Verilog: take first `module`
    #    through last `endmodule` (crude but effective safety net).
    if "module" in text and "endmodule" in text:
        start_idx = text.find("module")
        end_idx = text.rfind("endmodule") + len("endmodule")
        return [text[start_idx:end_idx]]

    # 3. Nothing recognizable: hand back the whole response.
    return [text]
|
||||
|
||||
|
||||
|
||||
def get_dict_from_gpt_json(gpt_json_string):
    """
    Parse a dict out of an LLM JSON reply, stripping Markdown code fences.

    - input: gpt_json_string, e.g. '```json\\n{"a": 1}\\n```' or plain JSON
    - output: the parsed object (json.loads result)
    - raises: json.JSONDecodeError when the cleaned text is not valid JSON
    - fix: removed the leftover debug print that dumped the whole payload to
      stdout on every call.
    """
    cleaned = gpt_json_string.replace("```json", "").replace("```", "").strip()
    return json.loads(cleaned)
|
||||
|
||||
def cost_calculator(usages:list, model="gpt-4-0125-preview"):
    """
    Sum the dollar cost of a list of usage dicts for *model*.

    - usages: list of dicts like [{"completion_tokens": 17, "prompt_tokens": 57,
      "total_tokens": 74}, ...]
    - raises ValueError when *model* has no entry in PRICING_MODELS.
    """
    if model not in PRICING_MODELS:
        raise ValueError(f"model {model} is not supported in the pricing calculator.")
    in_rate, out_rate = PRICING_MODELS[model]  # $ per 1000 prompt / completion tokens
    total = 0
    for entry in usages:
        total += entry["prompt_tokens"] * in_rate / 1000.0 + entry["completion_tokens"] * out_rate / 1000.0
    return total
|
||||
|
||||
def message_to_conversation(messages):
    """
    Render a message list as a plain-text transcript.

    A "conversation begin" banner is emitted before every system message;
    each message is printed as a role header followed by its content.
    """
    parts = []
    for msg in messages:
        if msg["role"] == "system":
            parts.append("############################## conversation begin ##############################\n")
        parts.append('########## %s ##########\n%s\n\n' % (msg['role'], msg['content']))
    return "".join(parts)
|
||||
|
||||
class LLM_Manager:
    """
    Process-wide singleton that tracks LLM token usage and cost (total,
    per-section, and per-model) and offers a simple stateful chat() helper.
    """
    _instance = None      # the single shared instance
    _initialized = False  # guards __init__ from running more than once

    def __new__(cls, *args, **kwargs):
        if not cls._instance:
            cls._instance = super(LLM_Manager, cls).__new__(cls)
        return cls._instance

    def __init__(self, api_key="config/key_API.json") -> None:
        if not self._initialized:
            # totals over the whole process lifetime
            self.tokens_in_total = 0
            self.tokens_out_total = 0
            self.tokens_both_total = 0
            self.cost_total = 0
            # totals since the last new_section() call
            self.tokens_in_section = 0
            self.tokens_out_section = 0
            self.tokens_both_section = 0
            self.cost_section = 0
            # per-model usage: {"model1": {...}, "model2": {...}, ...}
            self.usage_info = {}
            # chat state for chat()
            self._llm_model_now = None
            self._temperature = None
            self.messages = [{"role": "system", "content": DEFAULT_SYS_MESSAGE}]
            if not os.path.exists(api_key):
                raise ValueError(f"api key path {api_key} is not valid.")
            self._api_key_path = api_key
            # Fix: this flag was never set, so every LLM_Manager() call re-ran
            # __init__ and silently wiped the accumulated counters and history.
            LLM_Manager._initialized = True

    def update_usage(self, tokens_in:int, tokens_out:int, model:str):
        """Record one request's token counts and accumulate its dollar cost."""
        cost = tokens_in * PRICING_MODELS[model][0] / 1000.0 + tokens_out * PRICING_MODELS[model][1] / 1000.0
        # per-model bucket (created lazily)
        if model not in self.usage_info:
            self.usage_info[model] = {"tokens_in": 0, "tokens_out": 0, "tokens_both": 0, "cost": 0}
        self.usage_info[model]["tokens_in"] += tokens_in
        self.usage_info[model]["tokens_out"] += tokens_out
        self.usage_info[model]["tokens_both"] += tokens_in + tokens_out
        self.usage_info[model]["cost"] += cost
        # total
        self.tokens_in_total += tokens_in
        self.tokens_out_total += tokens_out
        self.tokens_both_total += tokens_in + tokens_out
        self.cost_total += cost
        # section
        self.tokens_in_section += tokens_in
        self.tokens_out_section += tokens_out
        self.tokens_both_section += tokens_in + tokens_out
        self.cost_section += cost

    def new_section(self):
        """
        Start a new usage section (resets only the per-section counters;
        totals and per-model stats are preserved).
        """
        self.tokens_in_section = 0
        self.tokens_out_section = 0
        self.tokens_both_section = 0
        self.cost_section = 0

    def set_model(self, model:str):
        # Default model used by chat() when none is passed explicitly.
        self._llm_model_now = model

    def set_temperature(self, temperature:float):
        # Default temperature used by chat() when none is passed explicitly.
        self._temperature = temperature

    def chat(self, prompt:str, clear_mem:bool=False, model:str=None, temperature:float=None, sys_prompt:str=DEFAULT_SYS_MESSAGE)->str:
        """
        Send *prompt* as a user turn in the manager's running conversation and
        return the model's answer. clear_mem=True restarts the conversation
        with *sys_prompt* as the system message.
        """
        model = self._llm_model_now if model is None else model
        temperature = self._temperature if temperature is None else temperature
        if clear_mem:
            self.messages = [{"role": "system", "content": sys_prompt}]
        self.messages.append({"role": "user", "content": prompt})
        answer, other_infos = llm_call(
            input_messages = self.messages,
            model = model,
            api_key_path = self._api_key_path,
            temperature = temperature
        ) # usage already updated inside llm_call
        # adopt the updated transcript (includes the assistant reply)
        self.messages = other_infos["messages"]
        return answer
|
||||
|
||||
|
||||
llm_manager = LLM_Manager()
|
||||
|
||||
|
||||
|
||||
if __name__ == "__main__":
    # print("GPT_call.py does not support running as a main file.")
    # Running this module directly launches the interactive DALL-E 3 demo loop.
    print('we are testing dalle3')
    dalle3()
|
||||
|
||||
|
||||
"""
|
||||
(see more in https://platform.openai.com/docs/guides/text-generation/chat-completions-api)
|
||||
An example Chat Completions API response looks as follows:
|
||||
{
|
||||
"choices": [
|
||||
{
|
||||
"finish_reason": "stop",
|
||||
"index": 0,
|
||||
"message": {
|
||||
"content": "The 2020 World Series was played in Texas at Globe Life Field in Arlington.",
|
||||
"role": "assistant"
|
||||
}
|
||||
}
|
||||
],
|
||||
"created": 1677664795,
|
||||
"id": "chatcmpl-7QyqpwdfhqwajicIEznoc6Q47XAyW",
|
||||
"model": "gpt-3.5-turbo-0613",
|
||||
"object": "chat.completion",
|
||||
"usage": {
|
||||
"completion_tokens": 17,
|
||||
"prompt_tokens": 57,
|
||||
"total_tokens": 74
|
||||
}
|
||||
}
|
||||
"""
|
||||
BIN
__pycache__/LLM_call.cpython-312.pyc
Normal file
BIN
__pycache__/LLM_call.cpython-312.pyc
Normal file
Binary file not shown.
BIN
__pycache__/analyze.cpython-312.pyc
Normal file
BIN
__pycache__/analyze.cpython-312.pyc
Normal file
Binary file not shown.
BIN
__pycache__/iverilog_call.cpython-312.pyc
Normal file
BIN
__pycache__/iverilog_call.cpython-312.pyc
Normal file
Binary file not shown.
BIN
__pycache__/loader_saver.cpython-312.pyc
Normal file
BIN
__pycache__/loader_saver.cpython-312.pyc
Normal file
Binary file not shown.
BIN
__pycache__/run_tbgen.cpython-312.pyc
Normal file
BIN
__pycache__/run_tbgen.cpython-312.pyc
Normal file
Binary file not shown.
1059
analyze.py
Normal file
1059
analyze.py
Normal file
File diff suppressed because it is too large
Load Diff
43
autoline/TB1_gen.py
Normal file
43
autoline/TB1_gen.py
Normal file
@@ -0,0 +1,43 @@
|
||||
"""
|
||||
Description : The TB generation stage in the autoline. The main TB generation workflow is implemented in prompt_scriptws
|
||||
Author : Ruidi Qiu (r.qiu@tum.de)
|
||||
Time : 2024/7/24 11:27:21
|
||||
LastEdited : 2024/8/12 23:30:30
|
||||
"""
|
||||
|
||||
|
||||
from prompt_scripts import get_script, BaseScript
|
||||
from loader_saver import log_localprefix
|
||||
|
||||
class TaskTBgen():
    # TODO: in the future use pythonized prompt scripts and this class to replace the old TaskTBgen
    """TBgen stage of the autoline: generates a testbench by dispatching to the
    python prompt script selected by ``TBgen_prompt_script``.

    The heavy lifting is delegated to the workflow object returned by
    ``get_script``; this class only wires it up and exposes a few of its
    attributes (``scenario_num``, ``scenario_dict``) to the pipeline.
    """

    def __init__(self, prob_data: dict, TBgen_prompt_script: str, task_dir: str, config):
        """
        - prob_data: the problem data dict handed to the workflow
        - TBgen_prompt_script: name of the prompt script; resolved via get_script
        - task_dir: output directory for this task
        - config: global pipeline configuration object
        """
        self.prob_data = prob_data
        self.prompt_script_name = TBgen_prompt_script
        self.task_dir = task_dir
        self.config = config
        # Resolve the workflow class by name and instantiate it immediately;
        # generation itself only happens when run() is called.
        WorkFlowClass = get_script(TBgen_prompt_script)
        self.workflow = WorkFlowClass(
            prob_data = prob_data,
            task_dir = task_dir,
            config = config
        )

    @log_localprefix("TBgen")
    def run(self):
        """Execute the TB-generation workflow (the workflow object is callable)."""
        self.workflow()

    @property
    def scenario_num(self):
        # None if the chosen workflow does not expose scenario information.
        return self.get_wf_attr("scenario_num")

    @property
    def scenario_dict(self):
        # None if the chosen workflow does not expose scenario information.
        return self.get_wf_attr("scenario_dict")

    def get_wf_attr(self, attr_name: str):
        """Return the workflow's attribute ``attr_name``, or None if absent.

        Uses the three-argument ``getattr`` default instead of the previous
        hasattr/getattr pair (single lookup, same behavior).
        """
        return getattr(self.workflow, attr_name, None)
|
||||
416
autoline/TB2_syncheck.py
Normal file
416
autoline/TB2_syncheck.py
Normal file
@@ -0,0 +1,416 @@
|
||||
"""
|
||||
Description : This is the TB syntactic checking stage in the autoline (previously named as TaskTBsim in autoline.py)
|
||||
Author : Ruidi Qiu (r.qiu@tum.de)
|
||||
Time : 2024/7/24 11:24:31
|
||||
LastEdited : 2024/8/23 15:53:15
|
||||
"""
|
||||
|
||||
import os
|
||||
import LLM_call as llm
|
||||
import iverilog_call as iv
|
||||
import python_call as py
|
||||
import loader_saver as ls
|
||||
from config import Config
|
||||
from loader_saver import autologger as logger
|
||||
from loader_saver import log_localprefix
|
||||
from prompt_scripts import get_script, BaseScript
|
||||
from utils.utils import Timer, get_time
|
||||
|
||||
# Markers used to delimit verilog code in LLM responses (markdown fences).
IDENTIFIER = {
    "tb_start" : "```verilog",
    "tb_end" : "```"
}

# Skeleton shown to the LLM so replies come back wrapped in the fences above.
TESTBENCH_TEMPLATE = """%s
`timescale 1ns / 1ps
(more verilog testbench code here...)
endmodule
%s""" % (IDENTIFIER["tb_start"], IDENTIFIER["tb_end"])

# Debug prompt for verilog testbench repair. NOTE: the "convienient" typo is
# part of the runtime prompt string and is kept verbatim.
DEBUG_TEMPLATE = """please fix the verilog testbench code below according to the error message below. please directly give me the corrected verilog testbench codes.
Attention: never remove the irrelevant codes!!!
your verilog testbench should be like:
%s
please only reply the full code modified. NEVER remove other irrelevant codes!!!
The testbench I give you is the one with error. To be convienient, each of the line begins with a line number. The line number also appears at the error message. You should use the line number to locate the error with the help of error message.
""" % (TESTBENCH_TEMPLATE)

# Closing instruction appended to verilog debug prompts.
DEBUG_FINAL_INSTR = """ please directly give me the corrected verilog codes, no other words needed. Your verilog codes should start with [```verilog] and end with [```]."""

# Debug prompt for python checker repair (python counterpart of DEBUG_TEMPLATE).
DEBUG_TEMPLATE_PY = """please fix the python code below according to the error message below. please directly give me the corrected python codes.
Attention: never remove the irrelevant codes!!!
please only reply the full code modified. NEVER remove other irrelevant codes!!!
The python code I give you is the one with error. To be convienient, each of the line begins with a line number. The line number also appears at the error message. You should use the line number to locate the error with the help of error message.
"""

# Closing instruction appended to python debug prompts.
DEBUG_FINAL_INSTR_PY = """ please directly give me the corrected python codes, no other words needed. Your python codes should start with [```python] and end with [```]."""

# will be discarded by 15/08/2024
# DEBUG_TEMPLATE_END = """
# VERY IMPORTANT: please ONLY reply the full code modified. NEVER remove other irrelevant codes!!!
# Your testbench SHOULD NOT have the line number at the beginning of each line!!!
# """
|
||||
|
||||
class TaskTBsim():
    """
    Syntactic-check stage of the autoline: compiles the generated testbench with
    iverilog and (optionally) runs the python checker, driving an LLM-based
    debug/reboot loop until both pass or the iteration budget is exhausted.

    #### input:
    - ivcode_path:
        - the path of iverilog dir (xxx/TB_gen/), will contain all verilog files. generated .vvp will also be saved here
    #### output:
    - dict of the simulation result
        - "sim_pass" : bool (whether the simulation is successful. This is only the necessary condition of the correctness of the testbench)
        - "debug_iter" : int (the number of debug iterations)
        - "sim_out" : str (the output of the simulation)
        - "sim_err" : str (the error of the simulation)
        - "TB_gen_debugged" : str or None (the testbench code after debug)
    #### iverilog_path:
    - the path of iverilog dir, will contain all verilog files. generated .vvp will also be saved here
    #### task_id:
    - the name of the problem, will be used as the name of generated files
    file structure:
    - original
        - task_id.v
        - task_id_tb.v
        - task_id_vlist.txt
        - task_id.vvp
    - debug_1
        - task_id.v
        - task_id_tb.v
        - task_id_vlist.txt
        - task_id.vvp
    - debug_2
    - ...
    """
    def __init__(self, TBgen: BaseScript, TB_code: str, module_header: str, task_dir: str, task_id: str, config):
        # TBgen is kept so the reboot paths can re-run generation from scratch.
        self.TBgen = TBgen
        self.TB_code_now = TB_code
        self.module_header = module_header
        self.task_dir = task_dir if task_dir.endswith("/") else task_dir + "/" # for the compatibility with the old version
        self.task_id = task_id
        self.config = config
        self.working_dir = TBgen.TB_code_dir if TBgen.TB_code_dir.endswith("/") else TBgen.TB_code_dir + "/" # will change during the debug process
        # Empty DUT stub: header + endmodule, just enough for the TB to compile.
        self.DUT_code = module_header + "\n\nendmodule\n"
        self.debug_iter_max = config.autoline.debug.max
        self.debug_iter_to_reboot = config.autoline.debug.reboot
        self.proc_timeout = config.autoline.timeout
        # self.debug_iter_now = 0 # this is a counter for both iverilog and python so it is possible to be larger than debug_iter_max
        # Separate iteration counters for the verilog and python debug loops.
        self.debug_iter_iv_now = 0
        self.debug_iter_after_reboot_iv = 0
        self.debug_iter_py_now = 0
        self.debug_iter_after_reboot_py = 0
        # When True, the python loop gave up and both TB and checker are regenerated.
        self.reboot_both = False
        # self.debug_iter_after_reboot = 0
        # pychecker related
        self.pychecker_en = self.TBgen.Pychecker_en
        self.PY_code_now = ""
        if self.pychecker_en:
            self.TBout_content = "" # will get after the last iverilog run
            self.PY_code_now = self.TBgen.Pychecker_code
            self.py_fail_reboot_both_iter = config.autoline.debug.py_rollback # will reboot both iv and py if python simulation failed xxx times
            self.py_debug_focus = self.TBgen.py_debug_focus
        # infos
        self.sim_pass = False # this should be com_pass, but it is too late to change it now
        self.py_pass = False
        self.Eval0_pass = False
        self.iverilog_info = None
        self.reboot_both_times = 0
        self.iv_runing_time = 0.0 # the time of running the last iverilog
        self.py_runing_time = 0.0 # the time of running the last python
        # Cumulative LLM token usage of the debug calls made by this stage.
        self.tokens = {"prompt": 0, "completion": 0}

    @log_localprefix("TBsim")
    def run(self):
        """Top-level driver: iverilog-only mode, or the iverilog+python loop.

        In pychecker mode, a python-side failure may set ``reboot_both`` which
        sends control back to the iverilog step; the loop exits when python no
        longer requests a reboot. Sets ``Eval0_pass`` as the stage verdict.
        """
        if not self.pychecker_en:
            self.run_iverilog()
            self.Eval0_pass = self.sim_pass
        else:
            exit_en = False
            while (not exit_en):
                self.run_iverilog()
                if self.sim_pass:
                    self.run_python()
                    # if (self.sim_pass and self.py_pass) or self.exceed_max_debug:
                    if not self.reboot_both:
                        exit_en = True
                else:
                    exit_en = True
                    self.Eval0_pass = False
                    # iverilog never compiled, so there is no TBout.txt for python to read
                    raise ValueError("TBsim: iverilog failed, python simulation is not allowed.")
            self.Eval0_pass = self.sim_pass and self.py_pass
        logger.info("TBsim finished : %s!"%(self.Eval0_pass))

    def run_iverilog(self):
        """
        - the main function of TBsim
        - compile/run the TB with iverilog and LLM-debug (or reboot) on failure
          until it passes or ``debug_iter_max`` is reached
        """
        if not self.reboot_both:
            # this will only be called at the first time of runing run_iverilog
            self._save_code_run_iverilog()
            self.sim_pass = self.iverilog_info[0]
        while (self.debug_iter_iv_now < self.debug_iter_max) and (not self.sim_pass):
            self.debug_iter_iv_now += 1
            if self.debug_iter_after_reboot_iv < self.debug_iter_to_reboot:
                self.debug_iter_after_reboot_iv += 1
                self._debug_iv()
            else:
                self._reboot_iv()
            self.sim_pass = self.iverilog_info[0]
            self.reboot_both = False
        if self.reboot_both:
            # this means didn't enter the while, because debug_iter_max is already reached
            logger.info("iverilog compilation (reboot from python) : failed! iverilog exceeded max debug iteration (%s)"%(self.debug_iter_max))
        if self.sim_pass:
            logger.info("iverilog compilation : passed!")
        else:
            logger.info("iverilog compilation : failed! exceeded max debug iteration (%s)"%(self.debug_iter_max))
        # self.sim_out = self.iverilog_info[4]["out"] if self.iverilog_info[4] is not None else ""
        # self.sim_err = self.iverilog_info[-1]
        # clean .vcd wave files
        self.clean_vcd()

    def run_python(self):
        """Run the python checker on the TB output and LLM-debug it on failure.

        May set ``reboot_both`` (after ``py_fail_reboot_both_iter`` failed
        attempts) to request regeneration of both the TB and the checker.
        """
        # read the TBout.txt into TBout_content in working_dir
        with open(self.TBout_path, "r") as f:
            self.TBout_content = f.read()
        self.debug_iter_after_reboot_py = 0
        py_rollback = 0 # local variable
        self._save_code_run_python()
        # self.debug_iter_py_now
        while (self.debug_iter_py_now < self.debug_iter_max) and (not self.python_info[0]):
            if (not self.python_info[0]) and (py_rollback >= self.py_fail_reboot_both_iter):
                # +1: debug py fail + [generated py fail]
                self.reboot_both = True
                break
            py_rollback += 1
            self.debug_iter_py_now += 1
            if self.debug_iter_after_reboot_py < self.debug_iter_to_reboot:
                self.debug_iter_after_reboot_py += 1
                self._debug_py()
            else:
                self._reboot_py()
            # self._reboot_py() # only reboot, no debugging because python debugging is much harder than verilog
            # currently debug_py doesn't support reboot
        if self.reboot_both:
            self.py_pass = False
            self.sim_pass = False
            # force the iverilog loop to reboot (not debug) on re-entry
            self.debug_iter_after_reboot_iv = self.debug_iter_to_reboot
            logger.info("python simulation : failed! will reboot both iverilog and python")
        elif self.python_info[0]:
            self.py_pass = True
            logger.info("python simulation : passed!")
        else:
            self.py_pass = False
            logger.info("python simulation : failed! exceeded max debug iteration (%s)"%(self.debug_iter_max))
        self.py_out = self.python_info[1]["out"] if self.python_info[1] is not None else ""
        self.py_err = self.python_info[-1]

    def _debug_iv(self):
        """One LLM debug round for the verilog TB: prompt, extract code, rerun."""
        with Timer(print_en=False) as debug_time:
            logger.info("iverilog simulation failed! Debuging... (debug_iter = %s)"%(self.debug_iter_iv_now))
            self.working_dir = self.task_dir + "debug_%s/" % (self.total_debug_iter_now)
            os.makedirs(self.working_dir, exist_ok=True)
            debug_prompt = self._debug_prompt_gen_iv()
            debug_message = [{"role": "user", "content": debug_prompt}]
            gpt_response, info = llm.llm_call(debug_message, self.config.gpt.model, self.config.gpt.key_path)
            debug_message = info["messages"]
            # take the last code fence in the reply and strip the line marks we added
            self.TB_code_now = llm.extract_code(gpt_response, "verilog")[-1]
            self.TB_code_now = self.del_linemark(self.TB_code_now)
            self._save_code_run_iverilog()
        logger.info("%s: verilog DEBUG finished (%ss used)" % (self.debug_iter_info("iv"), round(debug_time.interval, 2)))
        self.tokens["prompt"] += info["usage"]["prompt_tokens"]
        self.tokens["completion"] += info["usage"]["completion_tokens"]
        ls.save_messages_to_txt(debug_message, self.working_dir+"debug_messages.txt")

    def _reboot_iv(self):
        """Regenerate the verilog TB from scratch via TBgen instead of debugging."""
        # change TBgen's code dir
        with Timer (print_en=False) as reboot_time:
            logger.info("iverilog simulation failed! Rebooting... (debug_iter = %s)"%(self.debug_iter_iv_now))
            self.working_dir = self.task_dir + "debug_%s_reboot/" % (self.total_debug_iter_now)
            os.makedirs(self.working_dir, exist_ok=True)
            self.TBgen.run_reboot(self.working_dir, reboot_mode="TB")
            self.TB_code_now = self.TBgen.TB_code
            self._save_code_run_iverilog()
        logger.info("%s: verilog REBOOT finished (%ss used)" % (self.debug_iter_info("iv"), round(reboot_time.interval, 2)))
        # the tokens will be added into TBgen's tokens count, we don't count it again here.
        # reset reboot counter
        self.debug_iter_after_reboot_iv = 0

    def _debug_py(self):
        """One LLM debug round for the python checker: prompt, extract code, rerun."""
        with Timer(print_en=False) as debug_time:
            logger.info("python compilation failed! Debuging python... (debug_iter = %s)"%(self.debug_iter_py_now))
            self.working_dir = self.task_dir + "debug_%s/" % (self.total_debug_iter_now)
            os.makedirs(self.working_dir, exist_ok=True)
            # run gpt
            debug_prompt = self._debug_prompt_gen_py()
            debug_message = [{"role": "user", "content": debug_prompt}]
            gpt_response, info = llm.llm_call(debug_message, self.config.gpt.model, self.config.gpt.key_path)
            debug_message = info["messages"]
            self.PY_code_now = llm.extract_code(gpt_response, "python")[-1]
            self.PY_code_now = self.del_linemark(self.PY_code_now)
            if self.py_debug_focus: # currently only support pychecker SEQ mode
                # re-attach the boilerplate part that was hidden from the LLM
                self.PY_code_now = self._py_focus(self.PY_code_now, before=False)
            self._save_code_run_python()
        logger.info("%s: python DEBUG finished (%ss used)" % (self.debug_iter_info("py"), round(debug_time.interval, 2)))
        self.tokens["prompt"] += info["usage"]["prompt_tokens"]
        self.tokens["completion"] += info["usage"]["completion_tokens"]
        ls.save_messages_to_txt(debug_message, self.working_dir+"debug_messages.txt")

    def _reboot_py(self):
        """Regenerate the python checker from scratch via TBgen instead of debugging."""
        # change TBgen's code dir
        with Timer (print_en=False) as reboot_time:
            logger.info("python compilation failed! Rebooting... (debug_iter = %s)"%(self.debug_iter_py_now))
            self.working_dir = self.task_dir + "debug_%s_reboot/" % (self.total_debug_iter_now)
            os.makedirs(self.working_dir, exist_ok=True)
            self.TBgen.run_reboot(self.working_dir, reboot_mode="PY")
            self.PY_code_now = self.TBgen.Pychecker_code
            self._save_code_run_python()
        logger.info("%s: python REBOOT finished (%ss used)" % (self.debug_iter_info("py"), round(reboot_time.interval, 2)))
        # the tokens will be added into TBgen's tokens count, we don't count it again here.
        # reset reboot counter
        self.debug_iter_after_reboot_py = 0

    def _save_code_run_iverilog(self):
        """Write the current TB and DUT stub to ``working_dir`` and run iverilog.

        Updates ``iverilog_info``, ``iv_runing_time`` and ``error_message_now``;
        on timeout, removes TBout.txt and the compiled .vvp.
        """
        with open(self.TB_path, "w") as f:
            f.write(self.TB_code_now)
        with open(self.DUT_path, "w") as f:
            f.write(self.DUT_code)
        with Timer(print_en=False) as iverilog_time:
            self.iverilog_info = iv.iverilog_call_and_save(self.working_dir, silent=True, timeout=self.proc_timeout)
        self.iv_runing_time = round(iverilog_time.interval, 2)
        self.error_message_now = self.iverilog_info[-1]
        if "program is timeout" in self.error_message_now:
            # if the error message is timeout, we will delete the TBout.txt
            # this is to avoid the situation that infinite loop produces a large TBout.txt
            if os.path.exists(self.TBout_path):
                os.remove(self.TBout_path)
            self.clean_vvp()

    def _save_code_run_python(self):
        """Write the current checker and TBout.txt to ``working_dir`` and run python.

        Updates ``python_info``, ``py_runing_time`` and ``error_message_now``.
        """
        with open(self.PY_path, "w") as f:
            f.write(self.PY_code_now)
        with open(self.TBout_path, "w") as f:
            f.write(self.TBout_content)
        with Timer(print_en=False) as python_time:
            self.python_info = py.python_call_and_save(pypath=self.PY_path, silent=True, timeout=self.proc_timeout)
        self.py_runing_time = round(python_time.interval, 2)
        self.error_message_now = self.python_info[-1]

    def _debug_prompt_gen_iv(self):
        """Build the verilog debug prompt: template + line-numbered TB + error."""
        # NOTE(review): unlike the python variant, DEBUG_FINAL_INSTR is not
        # appended here — presumably intentional, verify against prompt design.
        debug_prompt = DEBUG_TEMPLATE + "\n previous testbench with error:\n" + self.add_linemark(self.TB_code_now) + "\n compiling error message:\n" + self.error_message_now
        return debug_prompt

    def _debug_prompt_gen_py(self):
        """Build the python debug prompt: template + line-numbered code + error."""
        if self.py_debug_focus:
            # only show the LLM the part of the checker worth debugging
            py_code = self._py_focus(self.PY_code_now, before=True)
        else:
            py_code = self.PY_code_now
        if not ("program is timeout" in self.error_message_now):
            self.error_message_now = self._py_error_message_simplify(self.error_message_now)
        debug_prompt = DEBUG_TEMPLATE_PY + "\n previous python code with error:\n" + self.add_linemark(py_code) + "\n compiling error message:\n" + self.error_message_now + DEBUG_FINAL_INSTR_PY
        return debug_prompt

    def _py_focus(self, code:str, before:bool):
        """
        - code: the code under debug / after debug
        - before: True, if before debug, will split the code; False, if after debug, will restore the code
        - splits at a known boilerplate marker; the part after the marker is
          stashed in ``self.py_code_nofocus`` and re-attached afterwards
        """
        # KEY_WORD = "\ndef check_dut"
        KEY_WORDs_1 = "def check_dut(vectors_in):\n    golden_dut = GoldenDUT()\n    failed_scenarios = []"
        KEY_WORDs_2 = "\ndef SignalTxt_to_dictlist"
        if before:
            key_words = KEY_WORDs_1 if KEY_WORDs_1 in code else KEY_WORDs_2
            if key_words not in code:
                # neither marker present: debug the whole file
                py_code_focus = code
                self.py_code_nofocus = ""
            else:
                py_code_focus = code.split(key_words)[0]
                self.py_code_nofocus = key_words + code.split(key_words)[1]
            return py_code_focus
        else:
            return code + self.py_code_nofocus

    @staticmethod
    def _py_error_message_simplify(error_message:str, error_depth:int=1):
        """
        - extract the key point of python error message
        - error_depth: how many (how deep, from bottom to top) error messages to extract
        """
        msg_lines = error_message.split("\n")
        msg_out = ""
        # walk the traceback bottom-up, keeping lines until enough "File" frames seen
        for line in reversed(msg_lines):
            msg_out = line + "\n" + msg_out
            if "File" in line:
                error_depth -= 1
                if error_depth == 0:
                    break
        return msg_out

    @property
    def exceed_max_debug(self):
        # True as soon as either loop has used up its iteration budget.
        return (self.debug_iter_iv_now >= self.debug_iter_max) or (self.debug_iter_py_now >= self.debug_iter_max)

    @property
    def total_debug_iter_now(self):
        # combined counter, used to name the debug_<n>/ directories
        return self.debug_iter_iv_now + self.debug_iter_py_now

    @property
    def TB_path(self):
        # current testbench file path
        return self.working_dir + self.task_id + "_tb.v"

    @property
    def DUT_path(self):
        # current DUT-stub file path
        return self.working_dir + self.task_id + ".v"

    @property
    def PY_path(self):
        # current python checker file path
        return self.working_dir + self.task_id + "_tb.py"

    @property
    def TBout_path(self):
        # simulation-output capture file path
        return self.working_dir + "TBout.txt"

    def debug_iter_info(self, type):
        """return debug iter info string. Type: "iv" or "py" """
        if self.pychecker_en:
            if type == "iv":
                return "verilog iter - %d/%d, total - %d/%d"%(self.debug_iter_iv_now, self.debug_iter_max, self.total_debug_iter_now, self.debug_iter_max*2)
            elif type == "py":
                return "python tier - %d/%d, total - %d/%d"%(self.debug_iter_py_now, self.debug_iter_max, self.total_debug_iter_now, self.debug_iter_max*2)
            else:
                raise ValueError("TaskTBsim.debug_iter_info(type): type should be 'iv' or 'py'")
        else:
            # only iverilog
            return "debug iter %d/%d"%(self.debug_iter_iv_now, self.debug_iter_max)

    @staticmethod
    def add_linemark(code: str):
        """add the line mark (1., 2., ...) to the code at the beginning of each line"""
        code = code.split("\n")
        code = [str(i+1) + ". " + line for i, line in enumerate(code)]
        return "\n".join(code)

    @staticmethod
    def del_linemark(code: str):
        """delete the line mark at the begening of each line if line mark exists"""
        # NOTE(review): assumes the code has at least two lines; a one-line
        # input would raise IndexError on code[1] — confirm inputs upstream.
        code = code.split("\n")
        if code[1].split(".")[0].isdigit(): # use code[1] in case the first line is empty
            code = [line.split(". ")[1:] for line in code]
            for i, line in enumerate(code):
                code[i] = ". ".join(line)
        return "\n".join(code)

    def clean_vcd(self):
        """clean the .vcd files in the task_dir"""
        clean_dir = self.task_dir[:-1] if self.task_dir.endswith("/") else self.task_dir
        for root, dirs, files in os.walk(clean_dir):
            for file in files:
                if file.endswith(".vcd"):
                    os.remove(os.path.join(root, file))

    def clean_vvp(self):
        """clean the .vvp files in the task_dir"""
        clean_dir = self.task_dir[:-1] if self.task_dir.endswith("/") else self.task_dir
        for root, dirs, files in os.walk(clean_dir):
            for file in files:
                if file.endswith(".vvp"):
                    os.remove(os.path.join(root, file))
|
||||
836
autoline/TB3_funccheck.py
Normal file
836
autoline/TB3_funccheck.py
Normal file
@@ -0,0 +1,836 @@
|
||||
"""
|
||||
Description : The functionality checking of the generated TB, the submodule of Autoline
|
||||
Author : Ruidi Qiu (r.qiu@tum.de)
|
||||
Time : 2024/7/22 10:36:06
|
||||
LastEdited : 2025/2/25 22:11:13
|
||||
"""
|
||||
|
||||
import os
|
||||
import LLM_call as llm
|
||||
import iverilog_call as iv
|
||||
import python_call as py
|
||||
import numpy as np
|
||||
import loader_saver as ls
|
||||
import matplotlib.pyplot as plt
|
||||
import matplotlib.colors as mcolors
|
||||
from loader_saver import autologger as logger
|
||||
from loader_saver import log_localprefix
|
||||
|
||||
|
||||
|
||||
class TaskTBcheck():
|
||||
"""
|
||||
### description
|
||||
- this is the self-checking stage of our pipeline; This is the main contribution of AutoBench2。
|
||||
- This stage is to check the functional correctness of the testbench generated by AutoBench.
|
||||
"""
|
||||
|
||||
def __init__(self, task_dir:str, task_id:str, description:str, module_header:str, TB_code_v:str, TB_code_py:str|None=None, rtl_list:list[str]=None, rtl_num:int=20, scenario_num=None, correct_max:int=3, runfiles_save:bool=True, discriminator_mode:str="col_full_wrong", corrector_mode:str="naive", circuit_type:str=None, rtl_compens_max_iter:int=3, rtl_compens_en:bool=True, desc_improve:bool=False, **LLM_kwargs) -> None:
|
||||
"""
|
||||
- input:
|
||||
- task_dir: the root directory of the taskTBcheck
|
||||
- task_id: the name of the problem
|
||||
- description: the description of the problem
|
||||
- module_header: the header of the module
|
||||
- TB_code_v: the generated verilog testbench code
|
||||
- TB_code_py (opt.): the generated python testbench code, if None, the tb is in a pure verilog mode
|
||||
- rtl_list (opt.): the list of llm-generated RTL codes, if None, will generate 20 RTL codes using LLM
|
||||
- rtl_num (opt.): the number of RTL codes to generate, only used when rtl_list is None
|
||||
- scenario_num (opt.): the number of scenarios in the testbench, if None, will be calculated from the failed scenarios (not accurate but won't impact the results)
|
||||
- correct_max (opt.): the maximum number of correction iterations
|
||||
- runfiles_save (opt.): whether to save the compilation files in TB_discrimination
|
||||
- discriminator_mode (default: col_full_wrong): the mode of the discriminator
|
||||
- corrector_mode (default: naive): the mode of the corrector
|
||||
- circuit_type (opt.): the type of the circuit, used in the corrector (better performance if provided)
|
||||
- rtl_compens_max_iter (default: 3): the maximum number of iterations of RTL compensation
|
||||
- rtl_compens_en (default: True): whether to enable RTL compensation
|
||||
- **LLM_kwargs: the keyword arguments for LLM (used in corrector and rtl generation), including:
|
||||
- "main_model": the main llm name used in TB_generation and correction
|
||||
- "rtlgen_model": the llm naem used in RTL generation
|
||||
- "api_key_path": the path of the api key
|
||||
- "temperature": the temperature of LLM
|
||||
"""
|
||||
self.task_dir = task_dir
|
||||
self.working_dir = self.task_dir
|
||||
self.task_id = task_id
|
||||
self.description = description
|
||||
self.module_header = module_header
|
||||
self.TB_code_v = TB_code_v
|
||||
self.TB_code_py = TB_code_py
|
||||
self.pychecker_en = TB_code_py is not None
|
||||
self.rtl_list = rtl_list
|
||||
self.scenario_num = scenario_num
|
||||
self.correct_max = correct_max
|
||||
self.runfiles_save = runfiles_save
|
||||
self.main_model = LLM_kwargs.get("main_model", None)
|
||||
self.rtlgen_model = LLM_kwargs.get("rtlgen_model", None)
|
||||
self.llm_api_key_path = LLM_kwargs.get("api_key_path", "config/key_API.json")
|
||||
self.llm_temperature = LLM_kwargs.get("temperature", None)
|
||||
self.circuit_type = circuit_type
|
||||
self.rtl_compens_max_iter = rtl_compens_max_iter # see self.discriminate_TB for more info
|
||||
self.rtl_compens_en = rtl_compens_en
|
||||
self.desc_improve = desc_improve
|
||||
self.tolerance_for_same_wrong_scen = 2
|
||||
self.same_wrong_scen_times = 0
|
||||
# discriminator and corrector
|
||||
self.discriminator_mode = discriminator_mode
|
||||
self.corrector_mode = corrector_mode
|
||||
self.discriminator = TB_discriminator(discriminator_mode)
|
||||
self.corrector = TB_corrector(self.corrector_mode, self.pychecker_en, self.circuit_type)
|
||||
self.improver = SPEC_improver(description, "naive", self.pychecker_en, self.main_model, self.circuit_type)
|
||||
# rtl list and number
|
||||
self.rtl_newly_gen_num = 0
|
||||
if self.rtl_list is None:
|
||||
# self.rtl_num = rtl_num
|
||||
self.set_rtl_num(rtl_num)
|
||||
self.rtl_list_gen()
|
||||
else:
|
||||
# self.rtl_num = len(self.rtl_list)
|
||||
self.set_rtl_num(len(self.rtl_list))
|
||||
self.scenario_matrix = None
|
||||
self.wrong_scen_num = 0 # a small number as default, will be replaced later
|
||||
self.previous_wrong_scen_num = 9999 # a very large number as default, will be replaced later
|
||||
self.TB_syntax_error = False
|
||||
# tb analysis results
|
||||
self.tb_pass = None
|
||||
self.wrong_col_index = None
|
||||
self.correct_col_index = None
|
||||
self.unsure_col_index = None
|
||||
# next_action
|
||||
self.next_action = None
|
||||
self.iter_now = 0
|
||||
self.corrected = False # this means the TB has been corrected
|
||||
if self.main_model is None:
|
||||
logger.warning("main_model not found, may have trouble while correcting tb")
|
||||
# record and runinfo
|
||||
self.op_record = [] # record the order of the operations, similar to the var in autoline; will be added to the funccheck's op_record in the final runinfo.
|
||||
|
||||
@property
|
||||
def rtl_num(self):
|
||||
"""protected attr. rtl_num is initialized at the beginning and will not be changed"""
|
||||
return self._rtl_num
|
||||
|
||||
@log_localprefix("TBcheck")
|
||||
def run(self):
|
||||
"""
|
||||
- the main function of TaskTBcheck
|
||||
- the TB check stage contains several sub-stages:
|
||||
- 1. TB discriminating
|
||||
- 2. TB correcting
|
||||
- output: will update the next action of the task, including:
|
||||
- "pass": the TB already passed the selfcheck, will go to the evaluation stage
|
||||
- "reboot": the whole pipeline will start from the very beginning
|
||||
- workflow: inital discrimination -> correction-discrimination lloop -> pass or reboot
|
||||
"""
|
||||
# TODO: if error occurs, go to reboot.
|
||||
# initial discrimination
|
||||
tolerance = 1
|
||||
syntax_error = False
|
||||
self.discriminate_TB()
|
||||
if self.TB_syntax_error:
|
||||
logger.negative("Testbench has syntax error, I give up. Reboot the whole process")
|
||||
syntax_error = True
|
||||
self.next_action = "reboot"
|
||||
elif self.tb_pass:
|
||||
logger.info("Testbench passed the funccheck")
|
||||
self.next_action = "pass"
|
||||
else:
|
||||
# enter the correction loop, the initial TB has no syntax error when entering
|
||||
if self.correct_max == 0:
|
||||
logger.negative("No correction is allowed, I give up. Reboot the whole autoline process")
|
||||
self.next_action = "reboot"
|
||||
for self.iter_now in range(1, self.correct_max+1):
|
||||
if (self.iter_now > 1) and (self.wrong_scen_num > self.previous_wrong_scen_num) and (not syntax_error):
|
||||
# give up, the correction makes it worse
|
||||
logger.negative(f"wrong scenarios increased ({self.wrong_scen_num} > {self.previous_wrong_scen_num}), I give up, quiting the funccheck stage...")
|
||||
self.next_action = "reboot"
|
||||
break
|
||||
elif (self.iter_now > 1) and (self.wrong_scen_num == self.previous_wrong_scen_num) and (not syntax_error):
|
||||
self.same_wrong_scen_times += 1
|
||||
if self.same_wrong_scen_times >= self.tolerance_for_same_wrong_scen:
|
||||
logger.info(f"wrong scenarios not decreased for {self.tolerance_for_same_wrong_scen} times ({self.wrong_scen_num} = {self.previous_wrong_scen_num}), I give up, quiting the funccheck stage...")
|
||||
self.next_action = "reboot"
|
||||
break
|
||||
else:
|
||||
logger.info(f"wrong scenarios not decreased for {self.same_wrong_scen_times} times ({self.wrong_scen_num} = {self.previous_wrong_scen_num}), continue the correction")
|
||||
self.correct_TB()
|
||||
self.discriminate_TB()
|
||||
if self.tb_pass:
|
||||
logger.info("Testbench passed the funccheck after correction")
|
||||
self.next_action = "pass"
|
||||
break
|
||||
elif self.TB_syntax_error:
|
||||
# if the syntax error is from the corrector, we should roll back before correction
|
||||
if tolerance > 0:
|
||||
logger.negative(f"Testbench has syntax error after correction, I still have tolerance for that (tolerance={tolerance}). roll back and retry the self correction.")
|
||||
self.TB_code_v, self.TB_code_py = self.TB_code_v_before_cor, self.TB_code_py_before_cor
|
||||
tolerance -= 1
|
||||
syntax_error = True
|
||||
else:
|
||||
logger.negative("Testbench has syntax error after correction, I don't have tolerance. I give up. Reboot the whole autoline process")
|
||||
self.next_action = "reboot"
|
||||
syntax_error = True
|
||||
break
|
||||
self.next_action = "reboot" if self.iter_now == self.correct_max else None
|
||||
if (self.next_action == "reboot") and (not syntax_error) and (self.desc_improve):
|
||||
# the desc improver does not work well so we do not use it in this work
|
||||
self.improve_SPEC()
|
||||
logger.info(f"self funccheck finished. Next Action: [{self.next_action}]")
|
||||
return self.next_action
|
||||
|
||||
@log_localprefix("discriminator")
def discriminate_TB(self, no_any_files:bool=False):
    """
    - check the correctness of the testbench and return the rtl analysis results in matrix form
    - runs every candidate RTL through `run_testbench`, builds the one-hot scenario
      matrix, and lets `self.discriminator` judge it
    - important data: the rtl list, the TB code
    - update the following data: `scenario_matrix`, `tb_pass`, `wrong_col_index`, `correct_col_index`, `unsure_col_index`, `wrong_scen_num`
    - no_any_files: when True, write nothing to disk (no run files, no matrix csv/png)
    - returns the discrimination 4-tuple, or a bare None when the testbench itself
      has a syntax error
    """
    rtl_dir_prefix = "RTL_"
    self.op_record.append("discrim")
    self.working_dir = os.path.join(self.task_dir, f"discrim_{self.iter_now}")
    logger.info(f"Discriminating the testbench, NO.{self.iter_now} discrimination")
    for i in range(self.rtl_compens_max_iter):
        # the loop is for the case that too few RTL passed the syntax check, generate more rtls and recheck
        failed_scenario_matrix = []
        # TB_syntax_error stays True only if *every* RTL run reports [-1],
        # i.e. the failure cannot be blamed on any single RTL
        self.TB_syntax_error = True
        syntax_error_rtl = []
        for rtl_idx, rtl_code in enumerate(self.rtl_list):
            rtl_dir = os.path.join(self.working_dir, f"{rtl_dir_prefix}{rtl_idx+1}")
            scenario_vector = self.run_testbench(rtl_dir, self.TB_code_v, rtl_code, self.TB_code_py, rtl_idx+1, self.runfiles_save and (not no_any_files))
            failed_scenario_matrix.append(scenario_vector) # like [[2, 5], [3, 4, 5]]
            if scenario_vector != [-1]:
                self.TB_syntax_error = False
            else:
                syntax_error_rtl.append(rtl_idx+1)
        if syntax_error_rtl != []:
            logger.info(f"RTL(s) {syntax_error_rtl} have syntax error during discrimination")

        if self.TB_syntax_error:
            # there are two cases for TB syntax error:
            # 1. this syntax error is from the previous stage, if so, we should directly reboot the whole autoline process
            # 2. this syntax error is from the corrector, if so, we roll back to the version before correction and retry the correction
            # NOTE(review): this returns a bare None instead of the usual 4-tuple;
            # callers must read tb_pass / TB_syntax_error attributes instead
            self.tb_pass = False
            return None

        # the len of each scenario vector should be the same, thus we transform each vector into a onehot vector [[1,1,0,1,1,0], [1,1,1,0,0,0]]
        self.scenario_matrix = self.failed_scenarios_to_onehot_array(failed_scenario_matrix, max_scen_idx=self.scenario_num, taskid=self.task_id)
        if not no_any_files:
            # save this matrix into the working dir in a human readable form
            np.savetxt(os.path.join(self.working_dir, "scenario_matrix.csv"), self.scenario_matrix, delimiter=",", fmt="%d")
            # save this matrix in a plot form
            self.draw_scenario_matrix(self.scenario_matrix, self.task_id, os.path.join(self.working_dir, "scenario_matrix.png"))

        # we delete the syntax errored rtl, if no syntax error in TB
        self.rtl_list = [rtl for rtl, scen in zip(self.rtl_list, failed_scenario_matrix) if scen != [-1]]
        failed_scenario_matrix = [scen for scen in failed_scenario_matrix if scen != [-1]]
        if len(self.rtl_list) < 0.5*self.rtl_num:
            # too few RTL passed the syntax check
            logger.info(f"too few RTL passed the syntax check ({len(self.rtl_list)}/{self.rtl_num}), I will generate more and recheck. This is not TB's fault.")
            self.rtl_list, gen_num = self.gen_rtl(self.rtl_num-len(self.rtl_list), self.description, self.module_header, self.rtlgen_model, self.rtl_list)
            self.rtl_newly_gen_num += gen_num
            # delete the previous rtl dirs (dir start with rtl_dir_prefix and under the working dir)
            for subdir in os.listdir(self.working_dir):
                if subdir.startswith(rtl_dir_prefix) and os.path.isdir(os.path.join(self.working_dir, subdir)):
                    os.system(f"rm -rf {os.path.join(self.working_dir, subdir)}")
            logger.info(f"re-discriminate the testbench with updated RTL list")
            if i == self.rtl_compens_max_iter-1:
                # out of compensation iterations: judge the matrix from this
                # last (possibly under-populated) round as-is
                logger.info(f"no re-discrimination since the max iteration reached")
        else:
            break

    # discriminate the testbench according to the one hot matrix
    self.tb_pass, self.wrong_col_index, self.correct_col_index, self.unsure_col_index = self.discriminator.discriminate(self.scenario_matrix)
    self.previous_wrong_scen_num = self.wrong_scen_num
    self.wrong_scen_num = len(self.wrong_col_index)
    return self.tb_pass, self.wrong_col_index, self.correct_col_index, self.unsure_col_index
|
||||
|
||||
@log_localprefix("corrector")
def correct_TB(self):
    """
    Rewrite the testbench with the corrector, driven by the wrong-scenario analysis.

    A snapshot of the pre-correction code is kept so the caller can roll back if
    the corrected testbench turns out to be broken.
    """
    self.op_record.append("correct")
    self.working_dir = os.path.join(self.task_dir, f"correct_{self.iter_now}")
    # snapshot for a potential roll-back by the caller
    self.TB_code_v_before_cor = self.TB_code_v
    self.TB_code_py_before_cor = self.TB_code_py
    self.TB_code_v, self.TB_code_py = self.corrector.correct(
        self.description,
        self.wrong_col_index,
        self.TB_code_v,
        self.main_model,
        self.TB_code_py,
        self.working_dir,
    )
    self.corrected = True
|
||||
|
||||
@log_localprefix("improver")
def improve_SPEC(self):
    """
    Rewrite the task specification from the discrimination and correction results.
    """
    self.op_record.append("improve")
    improve_dir = os.path.join(self.task_dir, "improve_Desc")
    self.working_dir = improve_dir
    self.description = self.improver.improve(
        self.wrong_col_index,
        self.correct_col_index,
        self.TB_code_v,
        self.TB_code_py,
        working_dir=improve_dir,
    )
|
||||
|
||||
|
||||
|
||||
@staticmethod
def run_testbench(dir, driver_code:str, DUT_code:str, checker_code:str, rtl_index:int, save_en:bool=True):
    """
    Run one (driver.v, DUT.v, checker.py) combination and collect failed scenarios.

    - modified from autoline.py TBEval.run_testbench
    - it has two parts: an iverilog simulation pass and a python checker pass
    - input:
        - dir: the dir to save the TB, DUT and pychecker code
        - driver_code: str; the testbench (stimulus driver) code
        - DUT_code: str; the DUT code
        - checker_code: str; the pychecker code
        - rtl_index: int; index of the RTL (kept for logging/debugging)
        - save_en: bool; when False, the working dir is removed afterwards
    - output:
        - a sorted list of failed scenario indexes; [-1] signals a syntax error
          (iverilog compilation failure or a crashing python checker)
    """
    # write the driver and DUT, then compile/simulate with iverilog
    os.makedirs(dir, exist_ok=True)
    v_driver_path = os.path.join(dir, "driver.v")
    py_checker_path = os.path.join(dir, "checker.py")
    dut_path = os.path.join(dir, "DUT.v")
    with open(v_driver_path, "w") as f:
        f.write(driver_code)
    with open(dut_path, "w") as f:
        f.write(DUT_code)
    iv_run_info = iv.iverilog_call_and_save(dir, silent=True)
    if not iv_run_info[0]:
        # iverilog compilation failed -> report as syntax error
        return [-1]
    with open(py_checker_path, "w") as f:
        f.write(checker_code)
    py_run_info = py.python_call_and_save(pypath=py_checker_path, silent=True)
    if not py_run_info[0]:
        # the python checker crashed -> report as syntax error
        return [-1]
    python_info_out : str = py_run_info[1]["out"]
    # the checker prints the failed scenarios as the last [...] in its stdout
    last_bracket_end = python_info_out.rfind("]")
    last_bracket_start = python_info_out.rfind("[")
    if last_bracket_end == last_bracket_start+1:
        # the last bracket pair is "[]": every scenario passed
        return []
    # extract the scenario ids; items such as "2b" are reduced to their digits
    raw_items = python_info_out[last_bracket_start+1:last_bracket_end].replace("'", "").split(",")
    failed_scenarios = []
    for item in raw_items:
        digits = "".join(char for char in item if char.isdigit())
        # skip items with no digits at all instead of crashing on int("")
        if digits:
            failed_scenarios.append(int(digits))
    if not save_en:
        # shutil.rmtree instead of `os.system("rm -rf ...")`: portable and safe
        # against spaces/special characters in the path
        import shutil
        shutil.rmtree(dir, ignore_errors=True)
    # deduplicate and sort so the result is deterministic
    return sorted(set(failed_scenarios))
|
||||
|
||||
def rtl_list_gen(self)->list[str]:
    """
    Build a fresh list of naive RTL designs with the LLM (the old list is discarded).

    - attrs read: rtl_num, description, module_header, rtlgen_model
    - attrs written: rtl_list, rtl_newly_gen_num
    - side effect: persists the designs to <task_dir>/rtl_list.json
    """
    self.rtl_list = []
    logger.info("rtl list not found, generating naive rtls for testbench checking")
    self.rtl_list, newly_generated = self.gen_rtl(self.rtl_num, self.description, self.module_header, self.rtlgen_model, self.rtl_list)
    self.rtl_newly_gen_num += newly_generated
    # persist the generated designs next to the task's other artifacts
    os.makedirs(self.task_dir, exist_ok=True)
    rtl_json_path = os.path.join(self.task_dir, "rtl_list.json")
    ls.save_json_lines([{"task_id": self.task_id, "llmgen_RTL": self.rtl_list}], rtl_json_path)
|
||||
|
||||
@staticmethod
def gen_rtl(num:int, description:str, header:str, llm_mode:str, rtl_list:list|None=None):
    """
    Generate `num` naive RTL designs with the LLM and append them to `rtl_list`.

    - input:
        - num (int): the number of RTLs to generate
        - description (str): the description of the rtl problem
        - header (str): the header of the module
        - llm_mode (str): the llm model to use (official model name)
        - rtl_list (list) [optional]: the newly generated RTLs will be appended
          to this list; defaults to a fresh empty list
    - output:
        - rtl_list (list): the list of the newly generated RTLs (and the old ones, if have)
        - rtl_gen_num (int): how many RTLs this call generated
    """
    # a mutable default argument (rtl_list=[]) is shared across calls and would
    # leak RTLs from one task into the next, so default to None instead
    if rtl_list is None:
        rtl_list = []
    rtl_gen_num = 0
    prompt = "Your task is to write a verilog RTL design according to the design specification. The infomation we have is the problem description that guides student to write the RTL code (DUT) and the header of the desired module. here is the problem description:\n"
    prompt += description
    prompt += "\nHere is the header of the desired module:\n"
    prompt += header
    prompt += "\nPlease only return the module code (header should be included) in verilog, please do not include any other words."
    for _ in range(num):
        # call llm and extract the verilog module from its answer
        answer = llm.llm_call(prompt, llm_mode)[0]
        module_code = llm.extract_code(answer, "verilog")[0]
        rtl_list.append(module_code)
        rtl_gen_num += 1
    logger.info("%d naive rtls generated"%(rtl_gen_num))
    return rtl_list, rtl_gen_num
|
||||
|
||||
@staticmethod
|
||||
def failed_scenarios_to_onehot_array(failed_scenarios:list[list], max_scen_idx:int|None=None, taskid:str=""):
|
||||
"""
|
||||
- input: [failed_scenarios:list[int]] (for example: [[1,2,3], [2,3,4], [1,3,4], [-1]]), if one failed scenario list is [-1], it means the rtl has syntax error, should be skipped
|
||||
- output (np.array): a onehot array (for example: [[0,0,0,1], [1,0,0,0], [0,1,0,0], [-1,-1,-1,-1]]) (1 denots pass, 0 denotes fail, -1 means syntax error)
|
||||
"""
|
||||
|
||||
# find the max scenario index
|
||||
listlen = len(failed_scenarios)
|
||||
max_idx_given = max_scen_idx if max_scen_idx is not None else 1
|
||||
# we calculate the max_index_cal, and define the final max scenario index using max(max_index_cal, max_index_given)
|
||||
max_idx_cal = max(map(lambda x: max(x) if x != [] else 0, failed_scenarios))
|
||||
if max_idx_cal in [-1, 0]:
|
||||
# -1: all the scenarios in this rtl are -1
|
||||
# 0: usually not possible because the scenario index is from 1, but if exists, set to 1.
|
||||
max_idx_cal = 1
|
||||
if failed_scenarios == list(map(lambda x: [], range(listlen))):
|
||||
# this means all rtl passed
|
||||
max_idx_cal = 1 # set to 1 otherwise the plot will be empty
|
||||
max_idx = max(max_idx_cal, max_idx_given)
|
||||
|
||||
# if the failed scenario list is [-1], then all the scenarios in this rtl are -1
|
||||
# create the onehot array
|
||||
grid_map = [[1]*max_idx for _ in range(listlen)]
|
||||
for rtl_idx, failed_scens in enumerate(failed_scenarios):
|
||||
if failed_scens == [-1]:
|
||||
grid_map[rtl_idx] = [-1]*max_idx
|
||||
continue
|
||||
for scen_idx in failed_scens:
|
||||
grid_map[rtl_idx][scen_idx-1] = 0
|
||||
return np.array(grid_map)
|
||||
|
||||
@staticmethod
def draw_scenario_matrix(scenario_matrix:np.ndarray, task_id:str, saving_path:str):
    """
    Render the pass/fail matrix as a colored grid and save it to `saving_path`.

    - 0 (fail) -> salmon, 1 (pass) -> mediumseagreen, -1 (syntax error) -> grey
    - an empty matrix is drawn as a single grey block
    """
    if len(scenario_matrix) == 0:
        scenario_matrix = np.array([[-1]])
    # map any unexpected value onto -1 so the color lookup below cannot fail
    scenario_matrix = np.where(np.isin(scenario_matrix, [0, 1, -1]), scenario_matrix, -1)
    color_mapping = {
        0: mcolors.to_rgb("salmon"),
        1: mcolors.to_rgb("mediumseagreen"),
        -1: mcolors.to_rgb("grey"),
    }
    # build the RGB image in a single pass (the previous version additionally
    # re-assigned the same colors through boolean masks, which was redundant)
    rgb_image = np.array([[color_mapping[value] for value in row] for row in scenario_matrix])
    plt.imshow(rgb_image)
    plt.ylabel("RTL index")
    plt.xlabel("Scenario index")
    # tick labels are 1-based to match scenario/RTL numbering
    current_xticks = np.arange(scenario_matrix.shape[1])
    plt.xticks(current_xticks, current_xticks + 1)
    current_yticks = np.arange(scenario_matrix.shape[0])
    plt.yticks(current_yticks, current_yticks + 1)
    plt.title(f"[{task_id}] - Matrix of RTL-TB Scenario Correctness")
    plt.savefig(saving_path)
    plt.close()
|
||||
|
||||
def update_description(self)->str:
    """
    Hook for rewriting the task description from the discrimination/correction
    results. NOTE(review): currently a stub — it only logs and hands the
    description back unchanged.
    """
    logger.info("the description of the task is updated")
    return self.description
|
||||
|
||||
def set_rtl_num(self, value:int):
    """Explicit setter for the number of RTL candidates used during discrimination."""
    self._rtl_num = value
|
||||
|
||||
def __call__(self, *args, **kwargs):
    """Calling the object is a shorthand for `run()`; any arguments are ignored."""
    return self.run()
|
||||
|
||||
|
||||
class TB_discriminator():
    """
    Discriminate a testbench according to its RTL-vs-scenario pass/fail matrix.

    Every supported mode follows one template, parameterized by two fractions:
      - wrong_frac: a scenario column is judged WRONG when at least `wrong_frac`
        of the RTL rows fail it (1.0 == the column must be fully wrong);
      - row_correct_frac (optional): when at least this fraction of the RTL rows
        pass every scenario, the testbench is forced to PASS regardless of any
        wrong columns.
    A column in which every row passes is judged CORRECT; a column with some
    failures that does not reach `wrong_frac` is UNSURE.
    The previous implementation spelled each mode out as its own `match` case
    with near-identical bodies; the table below replaces that duplication.
    """

    # mode name -> (wrong_frac, row_correct_frac or None)
    _MODE_TABLE = {
        "col_full_wrong": (1.0, None),  # the most naive mode: only fully-wrong columns count
        "col_90_wrong": (0.9, None),
        "col_80_wrong": (0.8, None),
        "col_70_wrong": (0.7, None),
        "col_60_wrong": (0.6, None),
        "col_50_wrong": (0.5, None),
        "col_40_wrong": (0.4, None),
        "col_70_wrong_row_25_correct": (0.7, 0.25),
        "col_70_wrong_row_10_correct": (0.7, 0.1),
        "col_70_wrong_row_1_correct": (0.7, 0.01),
        "col_50_wrong_row_25_correct": (0.5, 0.25),
    }

    def __init__(self, mode:str) -> None:
        self.mode = mode
        if mode not in self._MODE_TABLE:
            # keep the original behavior: only log here, raise in discriminate()
            logger.critical("class discriminator - mode not found!!!")

    def discriminate(self, failed_matrix:np.ndarray)->tuple[bool, list[int], list[int], list[int]]:
        """
        - input: the failed matrix of the testbench in onehot form
        - output:
            - the indexes in the scenario lists start from 1
            - bool: whether the testbench is correct (None when the TB itself has
              a syntax error, i.e. the whole matrix is -1)
            - list[int]: the list of the wrong scenarios
            - list[int]: the list of the correct scenarios
            - list[int]: the list of the scenarios that discriminator are not sure
        - the -1 rows will not be considered here. See function 'failed_scenarios_to_onehot_array'
          (that builder only ever produces *whole* rows of -1, which makes the
          generic threshold test below equivalent to the per-mode originals)
        """
        if self.mode not in self._MODE_TABLE:
            logger.critical("TB discriminator - mode not found!!!")
            raise RuntimeError("TB discriminator - mode not found!!!")
        # first check if the whole matrix is -1, which means the tb has syntax error
        if np.all(failed_matrix == -1):
            return None, [], [], []
        # drop syntax-errored RTL rows: they carry no information
        failed_matrix = failed_matrix[~np.all(failed_matrix == -1, axis=1)]
        wrong_frac, row_correct_frac = self._MODE_TABLE[self.mode]
        n_rtl = len(failed_matrix)
        fails_per_col = np.sum(failed_matrix == 0, axis=0)
        wrong_col_index = np.where(fails_per_col >= wrong_frac * n_rtl)[0] + 1
        correct_col_index = np.where(np.all(np.isin(failed_matrix, [1, -1]), axis=0))[0] + 1
        unsure_col_index = np.where(np.logical_and(fails_per_col < wrong_frac * n_rtl, np.any(failed_matrix == 0, axis=0)))[0] + 1
        # as long as there is no (sufficiently) wrong column, the tb is correct (loose criterion)
        tb_pass = len(wrong_col_index) == 0
        if row_correct_frac is not None:
            # enough fully-correct RTL rows also let the TB pass
            if np.sum(np.all(failed_matrix == 1, axis=1)) >= row_correct_frac * n_rtl:
                tb_pass = True
        logger.match_level(tb_pass, "positive", "negative", f"TB_discriminating finished, TB {'passed' if tb_pass else 'failed'}, wrong scenarios: {wrong_col_index}, scenario pass ratio: {len(correct_col_index)}/{len(failed_matrix[0])}")
        return tb_pass, wrong_col_index, correct_col_index, unsure_col_index
|
||||
|
||||
# Stage-1 correction prompt: ask the LLM to diagnose (not yet fix) the failing python checker.
COR_PROMPT_1 = """Your task is to correct the testbench according to the failing scenarios. the information we have is the failed/passed scenarios of the testbench, the problem description and the testbench code.
the testbench code is consisted of both verilog and python code. The verilog code aims to generate test stimulus (under test scenarios) and drive the DUT to generate the output signal; the python code aims to check if the output vector from the DUT is correct.
ATTENTION: The python code contains error, and your target is to find it and tell me how to correct it (you don't need to give me the code in this stage).
"""

# Hint block describing the GoldenDUT checker structure for SEQUENTIAL circuits.
HINT_SEQ = """
Hints - explaination of the given python code:
the python class "GoldenDUT": This python class can represent the golden DUT (the ideal one). In "GoldenDUT", following methods are defined:
- 1. a method "def __init__(self)": Set the inner states/values of the golden DUT. These values have suffix "_reg". The initial value of these inner values is "x", but later will be digits. The "__init__" method has no input parameters except "self".
- 2. a method "def load(self, signal_vector)": This method is to load the important input signals and the inner values of "GoldenDUT" shall change according to the input signals. There is no clock signal in the input signal vector, every time the "load" method is called, it means a new clock cycle. The initial values "x" should be changed according to the input signals. This method has no return value.
- 3. a method "def check(self, signal_vector)": This method is to determine the expected output values and compare them with output signals from DUT. It should return True or False only.
"""

# Hint block describing the GoldenDUT checker structure for COMBINATIONAL circuits.
HINT_CMB = """
Hints - explaination of the given python code:
The given python code contains one class "GoldenDUT". this python class can represent the golden DUT (the ideal one). By calling the inner method "check", the signal vector from DUT will be checked. The details of the golden DUT are as follows:

- a. a method "def __init__(self)". Set the inner states/values of the golden DUT. The "__init__" method has no input parameters except "self".
- b. a method "def load(self, signal_vector)". This method is to load the important input signals and get the expected output signals. it should return the expected output values. It can call other methods to help computing the expected output. It will be called by other inner methods later.
- c. a method "def check(self, signal_vector)". This method is to call "load" to get the expected output values, and compare them with output signals from DUT. It should return True or False only. It can call other methods to help checking.
- d. other methods, they can be called by "__init__", "load" or "check".
- e. the input of "load" and "check" is the signal vector. The signal vector is a dictionary, the key is the signal name, the value is the signal value.
"""

# Stage-2 correction prompt, part 1: rules the corrected python code must follow.
COR_PROMPT_2_PART1 = """
please correct the python code according to the following rules:

PYTHON code rule: please do not change the original high level structure of the python code. i.e., if python code only contains one class and several functions such as init, load, check and more, only modify the implementation of the function, but do not change the name or delete the functions/class methods. you can add new class methods or functions if needed. you can use python libraries such as numpy or math.

"""
# Stage-2 correction prompt, part 2: required code skeleton and the final request.
COR_PROMPT_2_PART2 = """
i.e., your python code format in response should still be like:

class <class_name>:
    def __init__(self):
        ...(omitted)

    def load(self, ...):
        ...

    def check(self, ...):
        ...

    def <other_functions>(self, ...):
        ...

ATTENTION: please give me the corrected python code according to our previous conversation and the hints above. please give me the corrected full python code (not the part but the whole python code like I give you in our previous conversation).
"""
|
||||
|
||||
class TB_corrector():
|
||||
def __init__(self, mode:str, pychecker_en:bool, circuit_type:str="") -> None:
    """
    Set up the corrector with its strategy, checker language flag and circuit class.

    - mode: correction strategy; only "naive" is supported
    - pychecker_en: whether the checker side of the TB is python code
    - circuit_type: "CMB" or "SEQ"; anything else maps to "unknown"
    """
    self.mode = mode
    self.pychecker_en = pychecker_en
    # translate the short circuit-type tags into the wording used in prompts
    type_names = {"CMB": "combinational", "SEQ": "sequential"}
    self.circuit_type = type_names.get(circuit_type, "unknown")
    if self.mode != "naive":
        logger.critical("TB_corrector class - mode not found!!!")
|
||||
|
||||
def correct(self, description, failed_scenarios, TB_code_v:str, llm_model:str, TB_code_py:str|None=None, working_dir:str=None) -> tuple[str, str]:
|
||||
match self.mode:
|
||||
case "naive":
|
||||
if self.pychecker_en:
|
||||
TB_code_py = self._py_focus(TB_code_py, before=True)
|
||||
py_code_hint = HINT_CMB if self.circuit_type == "combinational" else HINT_SEQ
|
||||
prompt = COR_PROMPT_1
|
||||
prompt += "Here is the problem description:\n"
|
||||
prompt += description
|
||||
prompt += "\nHere is the testbench code:\n"
|
||||
prompt += "ATTENTION: the following scenarios are wrong: " + str(failed_scenarios) + "\n"
|
||||
# circuit type
|
||||
prompt += f"the circuit type of this task is {self.circuit_type}\n"
|
||||
prompt += "Here is the verilog code. it contains the meaning of each scenario. you can combine the wong scenario info above and the following code to better understand the reason of failing:\n"
|
||||
prompt += TB_code_v
|
||||
prompt += "\nHere is the python code, it contains error, please combine it with the wrong scenario info and the verilog code to understand:\n"
|
||||
prompt += TB_code_py
|
||||
prompt += "\nHere is some hints for your better understanding of the python codes above:"
|
||||
prompt += py_code_hint
|
||||
prompt += "\nplease reply me with the following steps:"
|
||||
prompt += "\n1. please analyze the reason of the failed scenarios. If possible, please find the in common between the failed scenarios."
|
||||
prompt += f"\n2. please analyze which part of the python code is related to the failed test scenarios ({str(failed_scenarios)})."
|
||||
prompt += "\n3. please tell me how to correct the wrong part (in natural language, do not give me the complete code implementation. please explain it in English.)"
|
||||
prompt += "\nhere is an example of the reply:"
|
||||
prompt += "\n1. the failed scenarios are all related to the same signal x\n2. the mid part of the function_X is related to the failed scenarios\n3. the correct logic of signal x should be y."
|
||||
logger.info("naive corrector mode begins")
|
||||
answer = llm.llm_call(prompt, llm_model)[0]
|
||||
prompt_2 = COR_PROMPT_2_PART1 + py_code_hint + COR_PROMPT_2_PART2
|
||||
message = [{"role": "user", "content": prompt}, {"role": "assistant", "content": answer}, {"role": "user", "content": prompt_2}]
|
||||
answer_2, more_info = llm.llm_call(message, llm_model)
|
||||
# if "VERILOG" in answer_2:
|
||||
# TB_code_v = llm.extract_code(answer_2, "verilog")[0]
|
||||
# else:
|
||||
# TB_code_py = llm.extract_code(answer_2, "python")[0]
|
||||
TB_code_py = llm.extract_code(answer_2, "python")[0]
|
||||
TB_code_py = self._py_focus(TB_code_py, before=False)
|
||||
if working_dir is not None:
|
||||
ls.save_messages_to_txt(more_info["messages"], os.path.join(working_dir, "conversation.txt"))
|
||||
ls.save_code(TB_code_v, os.path.join(working_dir, "TB.v"))
|
||||
if TB_code_py is not None:
|
||||
ls.save_code(TB_code_py, os.path.join(working_dir, "TB.py"))
|
||||
logger.info("naive corrector mode ends; conversation and codes saved")
|
||||
else:
|
||||
logger.critical("TB_corrector - pychecker not enabled")
|
||||
raise RuntimeError("TB_corrector - pychecker not enabled")
|
||||
return TB_code_v, TB_code_py
|
||||
case _:
|
||||
logger.critical("TB_corrector - mode not found!!!")
|
||||
raise RuntimeError("TB_corrector - mode not found!!!")
|
||||
|
||||
def _py_focus(self, code:str, before:bool):
|
||||
"""
|
||||
- imigrated from TB2_syncheck.py
|
||||
- code: the code under debug / after debug
|
||||
- before: True, if before debug, will split the code; False, if after debug, will restore the code
|
||||
"""
|
||||
KEY_WORDs_1 = "def check_dut(vectors_in):\n golden_dut = GoldenDUT()\n failed_scenarios = []"
|
||||
KEY_WORDs_2 = "\ndef SignalTxt_to_dictlist"
|
||||
if before:
|
||||
key_words = KEY_WORDs_1 if KEY_WORDs_1 in code else KEY_WORDs_2 # for compatibility with the old version
|
||||
if key_words not in code:
|
||||
py_code_focus = code
|
||||
self._py_code_nofocus = ""
|
||||
else:
|
||||
py_code_focus = code.split(key_words)[0]
|
||||
self._py_code_nofocus = key_words + code.split(key_words)[1]
|
||||
return py_code_focus
|
||||
else:
|
||||
return code + self._py_code_nofocus
|
||||
|
||||
# Opening prompt for SPEC_improver: explains the description-improvement task
# and warns that the attached testbench is only partially correct.
IMPR_PROMPT_1 = """Your task is to improve the quality of an RTL problem description using the following given information. Our final target is using the description to generate a testbench for the RTL design. Currently we already have the testbench but it is not perfect correct. Now in this stage the target is to generate a better description.
The information we have the is original RTL description, the testbench code. The testbench code includes the verilog code for test scenario generation, and the python code for checking the output vector.The verilog code aims to generate test stimulus (under test scenarios) and drive the DUT to generate the output signal; the python code aims to check if the output vector from the DUT is correct.
Attention: the testbench we provide is not the perfect one, and it contains error. However, we already know the test scenarios where the testbench works correctly and the scenarios where the testbench has problem. We will provide you the scenarios index later.
Here, firstly, is the problem description to be imporved:
\n"""

# Delimiters the LLM must wrap the improved description with, so it can be
# extracted mechanically by SPEC_improver.improve().
DESC_MARK_BEGIN = "***description begins***"
DESC_MARK_END = "***description ends***"

# Step-by-step reply instructions appended after the testbench codes.
DESC_STEP_INSTRUCT = f"""
please reply me with the following steps:
1. please analyze which part of the testbench (especially the python checker code) is correct and can be used to improve the description.
2. please analyze how can we improve the descriptin. for example, which part of the technical details can be more detailed, which part can be more clear, which part can be more concise.
3. please provide the improved complete description. We will directly use it in the later stages.
the format of description should be like:
{DESC_MARK_BEGIN}
... (the improved description, should be complete)
{DESC_MARK_END}
"""

# Final constraints: the description's function must not change, and the
# begin/end markers are mandatory so the answer can be parsed.
DESC_FINAL_INSTRUCT = f"""
ATTENTION: please know that the provided testbench is not perfect and may contains many errors. Thus, your modification on the description should not change the function of the original description. When there are conflicts between the testbench and the description, always believe the description is correct. Do not delete the information in the description, but you can rewrite it in a better way. You can also add more details to it. But NEVER mention any scenario index because the scenarios will not be the same at the next stage.
when you answer the last question (provide the improved complete description), the descriptino should start with "{DESC_MARK_BEGIN}" and end with "{DESC_MARK_END}". Only in this way can we recognize the improved description.
"""
|
||||
class SPEC_improver():
|
||||
def __init__(self, description, mode:str, pychecker_en:bool, llm_model:str, circuit_type:str="") -> None:
|
||||
self.description = description
|
||||
self.mode = mode
|
||||
self.pychecker_en = pychecker_en
|
||||
circuit_type_dict = {"CMB": "combinational", "SEQ": "sequential"}
|
||||
self.llm_model = llm_model
|
||||
self.circuit_type = circuit_type_dict.get(circuit_type, "unknown")
|
||||
|
||||
def improve(self, wrong_scenarios, correct_scenarios, TB_code_v:str, TB_code_py:str|None=None, working_dir:str|None=None) -> str:
|
||||
# not implemented yet
|
||||
match self.mode:
|
||||
case "naive":
|
||||
logger.info("naive description improver mode begins")
|
||||
prompt = ""
|
||||
prompt += IMPR_PROMPT_1
|
||||
prompt += DESC_MARK_BEGIN + "\n"
|
||||
prompt += self.description + "\n"
|
||||
prompt += DESC_MARK_END + "\n"
|
||||
prompt += "\nHere is the testbench codes:\n"
|
||||
prompt += "ATTENTION: the following scenarios are wrong: " + str(wrong_scenarios) + "\n"
|
||||
prompt += "ATTENTION: the following scenarios are correct, you can rely on these scenarios to improve the description: " + str(correct_scenarios) + "\n"
|
||||
prompt += TB_code_v + "\n"
|
||||
if self.pychecker_en:
|
||||
prompt += f"\nHere is the python code (python checker). please note that the python checker has correct function under the scenarios {str(correct_scenarios)}, but wrong under the scenarios {str(wrong_scenarios)}:\n"
|
||||
prompt += TB_code_py + "\n"
|
||||
prompt += DESC_STEP_INSTRUCT
|
||||
prompt += DESC_FINAL_INSTRUCT
|
||||
message = [{"role": "user", "content": prompt}]
|
||||
answer, more_info = llm.llm_call(message, self.llm_model)
|
||||
try:
|
||||
improved_description = answer.split(DESC_MARK_BEGIN)[1].split(DESC_MARK_END)[0]
|
||||
if improved_description == "":
|
||||
improved_description = self.description
|
||||
except:
|
||||
improved_description = self.description
|
||||
if working_dir is not None:
|
||||
ls.save_messages_to_txt(more_info["messages"], os.path.join(working_dir, "conversation.txt"))
|
||||
# save description
|
||||
with open(os.path.join(working_dir, "description.txt"), "w") as f:
|
||||
f.write(improved_description)
|
||||
logger.info("naive description improver mode ends")
|
||||
return improved_description
|
||||
case "hint":
|
||||
logger.info("description improver 'hint' mode begins")
|
||||
|
||||
return self.description
|
||||
|
||||
def test():
    """Ad-hoc smoke test (debug helper) for the one-hot conversion and the
    discriminator: converts failed-scenario lists to a one-hot array and runs
    the "col_full_wrong" discriminator on it."""
    from loguru import logger
    scenario_failures = [
        [1, 3, 5],
        [-1],
        [2, 3],
    ]
    scenario_cap = 7
    onehot = TaskTBcheck.failed_scenarios_to_onehot_array(scenario_failures, scenario_cap)
    print(np.array(onehot))
    discriminator = TB_discriminator("col_full_wrong")
    print(discriminator.discriminate(np.array(onehot)))
|
||||
|
||||
245
autoline/TB4_eval.py
Normal file
245
autoline/TB4_eval.py
Normal file
@@ -0,0 +1,245 @@
|
||||
"""
|
||||
Description : This is the testbench eval stage in autoline
|
||||
Author : Ruidi Qiu (r.qiu@tum.de)
|
||||
Time : 2024/7/24 11:24:43
|
||||
LastEdited : 2024/8/28 21:08:21
|
||||
"""
|
||||
|
||||
|
||||
import os
|
||||
import iverilog_call as iv
|
||||
import python_call as py
|
||||
from loader_saver import autologger as logger
|
||||
from loader_saver import log_localprefix
|
||||
from utils.utils import Timer, get_time
|
||||
|
||||
# Substrings that mark a PASS in the simulation output, per testbench type
# (consumed by TaskTBeval.TC_pass_from_TC_out):
# - generated TBs print a natural-language "all passed" message (case variants)
TC_PASS_CHECK_LIST_TB_GEN = ["All test cases passed", "all test cases passed", "All Test Cases Passed"]
# - golden TBs report a zero-mismatch summary line
TC_PASS_CHECK_LIST_TB_GOLDEN = ['Mismatches: 0 in ', 'Hint: Total mismatched samples is 0 out of']
# - the python checker prints an empty failed-scenario list "[]" on success
TC_PASS_CHECK_LIST_PYCHECKER = ["[]"]
|
||||
|
||||
class TaskTBeval():
    """
    ### description
    - this is the evaluation stage of our pipeline; the priority of this stage is that TB is generated and the empty DUT compilation is passed;
    - please use `try` to catch the exception of this function.
    - this module is independent from the previous modules.
    #### input
    - task_id: the name of the problem
    - root_dir: the dir of one problem
    - TB_gen: the testbench under evaluation (str)
    - TB_golden: the golden testbench (str)
    - DUT_golden: the golden RTL DUT (str)
    - DUT_mutant_list: the list of RTL DUT mutants modified from DUT_golden;[str]
    #### output
    - dict
        - "Eval1_pass" : bool (whether the golden RTL checking passed)
        - "Eval2_pass" : bool (whether the golden TB comparison on RTL mutants passed)
        - "Eval2_failed_mutant_idxes" : list of int (the index of the failed mutants)
    """
    """main structure: run(), run_Eval1(), run_Eval2()"""

    def __init__(self, task_id: str, task_dir: str, TB_gen: str, TB_golden:str=None, DUT_golden:str=None, DUT_mutant_list:list=None, DUT_gptgen_list:list = None, pychecker_en:bool = False, pychecker_code:str = "", runfiles_save:bool = True):
        self.task_id = task_id
        self.task_dir = task_dir
        self.TB_gen = TB_gen
        self.TB_golden = TB_golden
        self.DUT_golden = DUT_golden
        self.DUT_mutant_list = DUT_mutant_list
        self.DUT_gptgen_list = DUT_gptgen_list
        self.pychecker_en = pychecker_en
        self.save_en = runfiles_save
        # the type label passed to run_testbench for the generated TB
        self.TB_gen_mode = "TB_gen" if not self.pychecker_en else "Pychecker"
        self.pychecker_code = pychecker_code
        # set per-call by run_testbench; the *_path properties derive from it
        self.working_dir = ""
        # Eval1 related
        self.Eval1_exist = False
        # self.Eval1_dir = task_dir + "eval1_GoldenRTL/"
        self.Eval1_dir = os.path.join(task_dir, "eval1_GoldenRTL")
        self.Eval1_results = None
        self.Eval1_pass = None
        # Eval2 related
        self.Eval2_exist = False
        # self.Eval2_dir = task_dir + "eval2_GoldenTB_and_mutants/"
        self.Eval2_dir = os.path.join(task_dir, "eval2_GoldenTB_and_mutants")
        self.Eval2_pass = None
        self.Eval2_failed_mutant_idx = None
        self.Eval2_passed_mutant_idx = None
        # Eval2b related
        self.Eval2b_exist = False
        # self.Eval2b_dir = task_dir + "eval2b_GPTgenTB/"
        self.Eval2b_dir = os.path.join(task_dir, "eval2b_GPTgenTB")
        self.Eval2b_pass = None
        self.Eval2b_failed_mutant_idx = None
        self.Eval2b_passed_mutant_idx = None

    @log_localprefix("TBeval")
    def run(self):
        """Run Eval1 (golden RTL), then Eval2/2b only when Eval1 passed."""
        # Eval 1
        if self.DUT_golden is not None:
            self.run_Eval1()
        if self.Eval1_pass:
            # Eval 2
            if self.TB_golden is not None and self.DUT_mutant_list is not None:
                self.run_Eval2(mode="mutant")
            # Eval 2b
            if self.TB_golden is not None and self.DUT_gptgen_list is not None:
                self.run_Eval2(mode="gptgen")
        else:
            logger.info("[%s] Eval 2/2b is skipped because Eval 1 failed" % (self.task_id))
        self.clean_wave_vcd() # some golden TBs may generate wave.vcd files

    def run_Eval1(self):
        """Eval 1: the generated TB must pass on the golden RTL (raises on
        compilation failure, since a syntactically broken TB is unusable)."""
        logger.info("Eval 1: Golden RTL checking begins")
        self.Eval1_pass = self.run_testbench(self.Eval1_dir, self.TB_gen, self.DUT_golden, self.TB_gen_mode, self.pychecker_code, raise_when_fail=True, save_en=self.save_en)
        logger.match_level(self.Eval1_pass, "positive", "failed", "Eval 1: Golden RTL checking %s!" % ("passed" if self.Eval1_pass else "failed"))
        # my_log = logger.positive if self.Eval1_pass else logger.failed
        # my_log("[%s] Eval 1: Golden RTL checking %s!" % (self.task_id, "passed" if self.Eval1_pass else "failed"))
        self.Eval1_exist = True

    def run_Eval2(self, mode:str="mutant"):
        """ mode: "mutant" or "gptgen" """
        assert mode in ["mutant", "gptgen"], "Invalid mode in run_Eval2: " + mode
        if mode == "mutant": # Eval2
            print_str = "Eval 2: Golden TB checking on RTL mutants"
            mutant_subdir_name = "mutant"
            DUT_list = self.DUT_mutant_list
            eval_dir = self.Eval2_dir
        elif mode == "gptgen": # Eval2b
            print_str = "Eval 2b: Golden TB checking on GPT generated RTL codes"
            mutant_subdir_name = "gptgen_DUT"
            DUT_list = self.DUT_gptgen_list
            eval_dir = self.Eval2b_dir
        ### Eval 2: Golden TB comparison on RTL mutants
        logger.info(print_str)
        mutant_results = []
        for idx, DUT_mutant in enumerate(DUT_list):
            # mutant_subdir = eval_dir + "%s_%d/"%(mutant_subdir_name, idx+1)
            mutant_subdir = os.path.join(eval_dir, "%s_%d"%(mutant_subdir_name, idx+1))
            # GoldenTB_subsubdir = mutant_subdir + "GoldenTB/"
            GoldenTB_subsubdir = os.path.join(mutant_subdir, "GoldenTB")
            # GenedTB_subsubdir = mutant_subdir + "GeneratedTB/"
            GenedTB_subsubdir = os.path.join(mutant_subdir, "GeneratedTB")
            try: #in case the mutant has syntax error
                TBgolden_pass = self.run_testbench(GoldenTB_subsubdir, self.TB_golden, DUT_mutant, "TB_golden", save_en=self.save_en)
            except Exception:  # narrowed from bare except: keep Ctrl-C working
                TBgolden_pass = False
            try:
                TBgen_pass = self.run_testbench(GenedTB_subsubdir, self.TB_gen, DUT_mutant, self.TB_gen_mode, self.pychecker_code, save_en=self.save_en)
            except Exception:
                TBgen_pass = False
            # the generated TB agrees with the golden TB on this mutant when
            # both accept or both reject it
            if not TBgolden_pass and not TBgen_pass:
                mutant_pass = True
            elif TBgolden_pass and TBgen_pass:
                mutant_pass = True
            else:
                mutant_pass = False
            mutant_results.append(mutant_pass)
        eval_pass = all(mutant_results)
        failed_mutant_idx = [idx + 1 for idx, result in enumerate(mutant_results) if not result]
        passed_mutant_idx = [idx + 1 for idx, result in enumerate(mutant_results) if result]
        if mode == "mutant":
            self.Eval2_pass, self.Eval2_failed_mutant_idx, self.Eval2_passed_mutant_idx, self.Eval2_exist = eval_pass, failed_mutant_idx, passed_mutant_idx, True
        elif mode == "gptgen":
            self.Eval2b_pass, self.Eval2b_failed_mutant_idx, self.Eval2b_passed_mutant_idx, self.Eval2b_exist = eval_pass, failed_mutant_idx, passed_mutant_idx, True
        result = "perfectly passed" if eval_pass else ("finished (%d/%d)" % (len(passed_mutant_idx), len(mutant_results)))
        # NOTE(review): divides by len(mutant_results) — an empty DUT_list would
        # raise ZeroDivisionError here; confirm callers never pass an empty list
        my_log = logger.success if (eval_pass or (len(passed_mutant_idx)/len(mutant_results)>=0.8)) else logger.failed
        my_log("%s %s!" % (print_str, result))

    def run_testbench(self, dir, TB_code, DUT_code, TB_type, pychecker_code = "", raise_when_fail = False, save_en = True):
        """
        it has two mode: pychecker mode or verilog testbench mode
        -input:
            - dir: the dir to save the TB, DUT and pychecker code
            - TB_code: str; the testbench code
            - DUT_code: str; the DUT code
            - TB_type: str: TB_gen, TB_golden, Pychecker
            - pychecker_code: str; the pychecker code
        - output:
            - pass: bool; if the DUT passed the testbench
        """
        # iverilog part
        # save the TB and DUT
        assert TB_type in ["TB_gen", "TB_golden", "Pychecker"], "Invalid TB_type in run_testbench: " + TB_type
        os.makedirs(dir, exist_ok=True)
        self.working_dir = dir
        with open(self.TB_path, "w") as f:
            f.write(TB_code)
        with open(self.DUT_path, "w") as f:
            f.write(DUT_code)
        iv_run_info = iv.iverilog_call_and_save(dir, silent=True)
        if raise_when_fail:
            assert iv_run_info[0], "%s Iverilog Compilation Failed: the PREREQUISITE of 'Evaluation' is no syntactic error from Testbench!!!"%(TB_type)
        # pychecker part (if enabled)
        if TB_type == "Pychecker":
            with open(self.PY_path, "w") as f:
                f.write(pychecker_code)
            py_run_info = py.python_call_and_save(pypath=self.PY_path, silent=True)
            if raise_when_fail:
                assert py_run_info[0], "%s Python Compilation Failed: the PREREQUISITE of 'Evaluation' is no syntactic error from Python code!!!"%(TB_type)
            # check if the DUT passed the testbench
            # `&` kept (not `and`): the run-info flags may be non-bool — TODO confirm
            TC_pass = self.TC_pass_from_TC_out(sim_pass=True, sim_out=py_run_info[1]["out"], TB_type="Pychecker") & iv_run_info[0] & py_run_info[0]
        else:
            TC_pass = self.TC_pass_from_TC_out(sim_pass=True, sim_out=iv_run_info[4]["out"], TB_type=TB_type) & iv_run_info[0]
        if not save_en:
            # os.system(f"rm -rf {dir}")
            # keep only the run_info* files, delete every other run artifact
            cmd = f"find {dir} -type f ! -name 'run_info*'" + r" -exec rm -f {} +"
            os.system(cmd)
        return TC_pass

    def clean_wave_vcd(self):
        """clean the .vcd files in the task_dir"""
        # clean_dir = self.task_dir[:-1] if self.task_dir.endswith("/") else self.task_dir
        clean_dir = self.task_dir
        for root, dirs, files in os.walk(clean_dir):
            for file in files:
                # clean wave.vcd
                if file.endswith(".vcd"):
                    os.remove(os.path.join(root, file))

    @property
    def TB_path(self):
        # testbench file path inside the current working_dir
        # return self.working_dir + self.task_id + "_tb.v"
        return os.path.join(self.working_dir, self.task_id + "_tb.v")

    @property
    def DUT_path(self):
        # DUT file path inside the current working_dir
        # return self.working_dir + self.task_id + ".v"
        return os.path.join(self.working_dir, self.task_id + ".v")

    @property
    def PY_path(self):
        # python checker file path inside the current working_dir
        # return self.working_dir + self.task_id + "_tb.py"
        return os.path.join(self.working_dir, self.task_id + "_tb.py")

    @staticmethod
    def TC_pass_from_TC_out(sim_pass: bool, sim_out: str, TB_type="TB_gen"):
        """
        get the information if DUT passed all the test cases from the testbench
        #### input
        - sim_pass: bool; if TB passed the compilation. if not, will return False without check
        - sim_out: the simulation output message;
        - TB_ty: "TB_gen" or "TB_golden" or "Pychecker"; the type of the testbench
        """
        if not sim_pass:
            return False
        assert TB_type in ["TB_gen", "TB_golden", "Pychecker"], "Invalid TB_type during 'TC_pass_from_TC_out': " + TB_type
        tc_pass_check_list_dict = {"TB_gen": TC_PASS_CHECK_LIST_TB_GEN, "TB_golden": TC_PASS_CHECK_LIST_TB_GOLDEN, "Pychecker": TC_PASS_CHECK_LIST_PYCHECKER}
        tc_pass_check_list = tc_pass_check_list_dict[TB_type]
        if TB_type in ["TB_gen", "TB_golden"]:
            # any of the known pass-phrases in the output counts as a pass
            for check_str in tc_pass_check_list:
                if check_str in sim_out:
                    return True
            return False
        elif TB_type in ['Pychecker']:
            # check if the last [] contains any element
            # find the last ] in the out
            last_bracket_end = sim_out.rfind("]")
            # find the last [ in the out
            last_bracket_start = sim_out.rfind("[")
            # check if the last bracket pair is "[]", containing no element
            if (last_bracket_end - last_bracket_start) == 1:
                return True
            else:
                return False
|
||||
537
autoline/TB_autoline.py
Normal file
537
autoline/TB_autoline.py
Normal file
@@ -0,0 +1,537 @@
|
||||
"""
|
||||
Description : The main function of autoline, originally the first part of autoline.py in AutoBench 1.0
|
||||
Author : Ruidi Qiu (r.qiu@tum.de)
|
||||
Time : 2024/7/24 11:44:15
|
||||
LastEdited : 2024/9/1 10:32:18
|
||||
"""
|
||||
import os
|
||||
import analyze as al
|
||||
import loader_saver as ls
|
||||
|
||||
import time
|
||||
|
||||
from config import Config
|
||||
from loader_saver import save_dict_json_form, log_localprefix
|
||||
from data.probset import HDLBitsProbset
|
||||
from loader_saver import autologger as logger
|
||||
from utils.utils import Timer
|
||||
from autoline.TB1_gen import TaskTBgen
|
||||
from autoline.TB2_syncheck import TaskTBsim
|
||||
from autoline.TB3_funccheck import TaskTBcheck
|
||||
from autoline.TB4_eval import TaskTBeval
|
||||
from prompt_scripts import BaseScript
|
||||
from LLM_call import llm_manager
|
||||
|
||||
# [新增] 引入我们刚写的模块
|
||||
from autoline.TB_cga import TaskTBCGA
|
||||
|
||||
|
||||
def run_autoline():
    """Entry point: build the AutoLine pipeline from the global config and run it."""
    pipeline = AutoLine(Config())
    pipeline()
|
||||
|
||||
class AutoLine():
    """the class of the autoline

    Orchestrates the whole pipeline: loads the problem set from the config,
    runs one AutoLine_Task per problem, persists the accumulated run info
    after every task, and finally (optionally) runs the analyzer.
    """

    def __init__(self, config: Config):
        self.config = config
        self.logger = logger
        # promptscript is mandatory for the TBgen stage
        self.logger.assert_(config.get_item("autoline", "promptscript") is not None, "config.autoline.promptscript is None, please check the config file.")
        self.load_data()
        # set run info
        # self.run_info_path = config.save.root + "Chatbench_RunInfo.json"
        self.run_info_path = os.path.join(config.save.root, "Chatbench_RunInfo.json")
        self.run_info = []
        self.analyzer_en = (config.autoline.onlyrun is None) or (config.autoline.onlyrun == "TBgensimeval") # only run the analyzer when not in the onlyrun mode (partial run)

    def run(self):
        """Run every problem in the loaded probset sequentially."""
        for idx, probdata_single in enumerate(self.probset.data):
            task_id = probdata_single["task_id"]
            self.logger.info("")
            self.logger.info("######################### task %d/%d [%s] #########################" % (idx+1, self.probset.num, task_id))
            # run_info_single = pipeline_one_prob(probdata_single, self.config)
            one_task = AutoLine_Task(probdata_single, self.config)
            run_info_single = one_task.run()
            self.run_info.append(run_info_single)
            # save run info: (write to file every iteration and will overwrite the previous one)
            save_dict_json_form(self.run_info, self.run_info_path)
        if self.analyzer_en:
            self.run_analyzer()

    def __call__(self, *args, **kwargs):
        # calling the instance is equivalent to run()
        return self.run(*args, **kwargs)

    def load_data(self):
        # populate self.probset from the config's probset section
        cfg_probset = self.config.autoline.probset
        self.probset = HDLBitsProbset()
        self.probset.load_by_config(cfg_probset)

    def run_analyzer(self):
        # summarize the collected run info with the analyze module
        analyzer = al.Analyzer(self.run_info, self.config.gpt.model)
        analyzer.run()
        logger.info(analyzer.messages)
|
||||
|
||||
|
||||
|
||||
class AutoLine_Task():
|
||||
    def __init__(self, prob_data:dict, config:Config):
        """Set up one pipeline task: unpack the problem data, create the task
        directory, read all stage parameters from the config and initialize
        the per-task state/result bookkeeping."""
        # config:
        self.config = config
        # probdata:
        self.prob_data = prob_data
        self.main_model = self.config.gpt.model # The main llm model used in the autoline (generation, correction...)
        self.task_id = prob_data["task_id"]
        self.task_NO = prob_data["task_number"]
        self.prob_description = prob_data["description"]
        self.header = prob_data["header"]
        self.DUT_golden = prob_data['module_code']
        self.TB_golden = prob_data.get("testbench", None)
        self.mutant_list = prob_data.get("mutants", None)
        self.rtlgen_list = prob_data.get('llmgen_RTL', None)
        self.rtlgen_model = self.config.gpt.rtlgen_model # if llmgen_list is none, this will be used
        self.rtl_num = self.config.autoline.TBcheck.rtl_num # will be covered if llmgen_list is not None
        # system config:
        # self.task_dir = self.config.save.root + self.task_id + "/"
        self.task_dir = os.path.join(self.config.save.root, self.task_id)
        self.working_dir = self.task_dir
        os.makedirs(self.task_dir, exist_ok=True)
        # === [CGA Mod] Save DUT immediately to task dir for CGA access ===
        self.dut_path = os.path.join(self.task_dir, "DUT.v")
        ls.save_code(self.DUT_golden, self.dut_path)
        # ==============================================================
        self.update_desc = config.autoline.update_desc
        self.error_interuption = config.autoline.error_interruption # for debug
        self.save_codes = config.autoline.save_finalcodes
        self.save_compile = self.config.autoline.save_compile # save the compiling codes in TBcheck and TBeval or not.
        # TBgen paras:
        self.TBgen_prompt_script = config.autoline.promptscript
        self.circuit_type = None
        self.scenario_dict = None
        self.scenario_num = None
        self.checklist_worked = None
        # TBcheck paras:
        self.TBcheck_correct_max = self.config.autoline.TBcheck.correct_max
        self.iter_max = config.autoline.itermax
        self.discrim_mode = config.autoline.TBcheck.discrim_mode
        self.correct_mode = config.autoline.TBcheck.correct_mode
        self.rtl_compens_en = config.autoline.TBcheck.rtl_compens_en
        self.rtl_compens_max_iter = config.autoline.TBcheck.rtl_compens_max_iter
        self.cga_enabled = config.autoline.cga.enabled
        # stages:
        self.TBgen_manager:TaskTBgen = None
        self.TBgen:BaseScript = None
        self.TBsim:TaskTBsim = None
        self.TBcheck:TaskTBcheck = None
        self.TBeval:TaskTBeval = None
        self.stage_now = "initialization"
        # changing paras:
        self.autoline_iter_now = 0
        self.TB_code_v = None
        self.TB_code_py = None
        self.next_action = None
        # results:
        self.incomplete_running = True
        self.full_pass = False
        self.TB_corrected = False
        self.run_info = {}
        self.run_info_short = {}
        self.TBcheck_rtl_newly_gen_num = 0 # in autoline, "funccheck" = "TBcheck"
        self.op_record = [] # will record the order of each stage, for example: ["gen", "syncheck", "funccheck", "gen", "syncheck", "funccheck", "eval"]
        self.funccheck_op_record = []
        self.funccheck_iters = []
        # initialize the CGA coverage score
        self.cga_coverage = 0.0
        # === [CGA Mod] Initialize result dictionary for final reporting ===
        self.result_dict = {
            "task_id": self.task_id,
            "stage": "Init",
            "pass": False,
            "coverage": 0.0,
            "cga_enabled": self.cga_enabled
        }
        # =================================================================
        # renew current section of llm_manager and logger
        llm_manager.new_section()
        logger.set_temp_log()
|
||||
|
||||
    def run(self):
        """
        The main function of running the autoline for one problem

        Runs all stages under a task-scoped log prefix, updates and optionally
        saves the final codes, then writes the per-task result dict as JSON
        for the analyzer. Returns the collected run_info dict.
        """
        with log_localprefix(self.task_id):
            self.run_stages()
            self.runinfo_update()
            if self.save_codes:
                self.save_TB_codes()
            # === [CGA Mod] Save Result JSON for Analyzer ===
            self.result_dict['stage'] = self.stage_now

            try:
                result_save_path = self.config.autoline.result_path
            except AttributeError:
                # the config object has no such attribute (or the dict lacks the key)
                result_save_path = "results"

            # make sure the directory exists (absolute, or relative to the project root)
            if not os.path.exists(result_save_path):
                os.makedirs(result_save_path, exist_ok=True)
            ls.save_dict_json_form(self.result_dict, os.path.join(result_save_path, f"{self.task_id}.json"))
            # ===============================================
            return self.run_info
|
||||
|
||||
def run_TBgen(self, subdir:str=None):
|
||||
# TODO: export the circuit type and scenario number
|
||||
self.op_record.append("gen")
|
||||
working_dir = os.path.join(self.task_dir, subdir) if subdir is not None else self.task_dir
|
||||
self.stage_now = "TBgen"
|
||||
self.TBgen_manager = TaskTBgen(self.prob_data, self.TBgen_prompt_script, working_dir, self.config)
|
||||
self.TBgen = self.TBgen_manager.workflow
|
||||
with log_localprefix("TBgen"):
|
||||
self.TBgen()
|
||||
self.TB_code_v = self.TBgen.get_attr("TB_code_v")
|
||||
self.TB_code_py = self.TBgen.get_attr("TB_code_py")
|
||||
self.scenario_dict = self.TBgen.get_attr("scenario_dict")
|
||||
self.scenario_num = self.TBgen.get_attr("scenario_num")
|
||||
self.circuit_type = self.TBgen.get_attr("circuit_type")
|
||||
self.checklist_worked = self.TBgen.get_attr("checklist_worked")
|
||||
self.incomplete_running = True
|
||||
self._blank_log()
|
||||
|
||||
def run_TBsim(self, subdir:str=None):
|
||||
self.op_record.append("syncheck")
|
||||
working_dir = os.path.join(self.task_dir, subdir) if subdir is not None else self.task_dir
|
||||
self.stage_now = "TBsim"
|
||||
self.TBsim = TaskTBsim(
|
||||
self.TBgen,
|
||||
self.TBgen.TB_code,
|
||||
self.header,
|
||||
working_dir,
|
||||
self.task_id,
|
||||
self.config
|
||||
)
|
||||
self.TBsim.run()
|
||||
self.TB_code_v = self.TBsim.TB_code_now
|
||||
self.TB_code_py = self.TBsim.PY_code_now
|
||||
self._blank_log()
|
||||
|
||||
    def run_TBcheck(self, subdir:str=None):
        """TBcheck ("funccheck") stage: functionally check the testbench
        against generated RTL candidates, possibly correcting the TB, and
        record the stage's bookkeeping on this task object."""
        self.op_record.append("funccheck")
        working_dir = os.path.join(self.task_dir, subdir) if subdir is not None else self.task_dir
        self.stage_now = "TBcheck"
        self.TBcheck = TaskTBcheck(
            task_dir = working_dir,
            task_id = self.task_id,
            description = self.prob_description,
            module_header = self.header,
            TB_code_v = self.TB_code_v,
            TB_code_py = self.TB_code_py,
            rtl_list = self.rtlgen_list,
            rtl_num = self.rtl_num,
            scenario_num = self.scenario_num,
            correct_max = self.TBcheck_correct_max,
            runfiles_save=self.save_compile,
            discriminator_mode=self.discrim_mode,
            corrector_mode=self.correct_mode,
            circuit_type=self.circuit_type,
            rtl_compens_en=self.rtl_compens_en,
            rtl_compens_max_iter=self.rtl_compens_max_iter,
            main_model = self.main_model,
            rtlgen_model = self.rtlgen_model,
            desc_improve=self.update_desc
        )
        # keep the (possibly extended) RTL candidate list for later iterations
        self.rtlgen_list = self.TBcheck.rtl_list
        self.TBcheck.run()
        # take over the (possibly corrected) codes and stage statistics
        self.TB_code_v = self.TBcheck.TB_code_v
        self.TB_code_py = self.TBcheck.TB_code_py
        self.TB_corrected = self.TBcheck.corrected
        self.funccheck_op_record.append(self.TBcheck.op_record)
        self.funccheck_iters.append(self.TBcheck.iter_now)
        self.TBcheck_rtl_newly_gen_num += self.TBcheck.rtl_newly_gen_num
        self.next_action = self.TBcheck.next_action
        if self.update_desc:
            # the stage may have improved the problem description; propagate it
            self.prob_data['description'] = self.TBcheck.update_description()
            self.prob_description = self.prob_data['description']
        self._blank_log()
|
||||
|
||||
def run_TBeval(self, subdir:str=None):
|
||||
self.op_record.append("eval")
|
||||
working_dir = os.path.join(self.task_dir, subdir) if subdir is not None else self.task_dir
|
||||
self.stage_now = "TBeval"
|
||||
self.TBeval = TaskTBeval(
|
||||
self.task_id,
|
||||
working_dir,
|
||||
TB_gen=self.TB_code_v,
|
||||
TB_golden=self.TB_golden,
|
||||
DUT_golden=self.DUT_golden,
|
||||
DUT_mutant_list=self.mutant_list,
|
||||
DUT_gptgen_list=None,
|
||||
pychecker_en=self.TBsim.pychecker_en,
|
||||
pychecker_code=self.TB_code_py,
|
||||
runfiles_save=self.save_compile
|
||||
)
|
||||
# attention: the rtls in DUT_gptgen_list are not the same as the rtls used in TBcheck, so currently we just block this feature
|
||||
try:
|
||||
self.TBeval.run()
|
||||
except:
|
||||
logger.failed("error when running TBeval, the autoline for this task stopped.")
|
||||
self.incomplete_running = True
|
||||
self._blank_log()
|
||||
    # new stage, added alongside run_TB4_eval and the other stage runners
    def run_TBCGA(self, work_subdir="CGA", optimize=True, op_name="cga"):
        """
        Coverage-Guided Agent (CGA) stage.

        Runs TaskTBCGA on the current testbench, adopts the (possibly
        rewritten) testbench it returns, records its coverage score, and
        archives the result as final_TB.v in the task directory.

        Args:
            work_subdir: subdirectory name for this stage's files.
            optimize: when False, max_iter is forced to 0 so the agent only
                evaluates coverage without optimizing.
            op_name: label appended to op_record for this stage.
        """
        self.stage_now = "TBCGA"
        self.op_record.append(op_name)

        cga = TaskTBCGA(
            task_dir=self.task_dir,
            task_id=self.task_id,
            header=self.header,
            DUT_code=self.DUT_golden,
            TB_code=self.TB_code_v,
            config=self.config,
            work_subdir=work_subdir,
            max_iter=(self.config.autoline.cga.max_iter if optimize else 0)
        )

        # run() returns the final testbench text and its coverage score
        final_tb, final_score = cga.run()

        self.cga_coverage = final_score
        # adopt the optimized testbench as the current one
        self.TB_code_v = final_tb
        self.result_dict['coverage'] = final_score

        # always archive final_TB.v into the task's working directory
        final_tb_path = os.path.join(self.task_dir, "final_TB.v")
        ls.save_code(final_tb, final_tb_path)
        logger.info(f"Saved optimized TB to: {final_tb_path}")
|
||||
|
||||
    def run_stages(self):
        """Execute the whole stage pipeline for this task, timing the run.

        NOTE(review): the branch condition looks inverted relative to the flag
        name -- when error_interuption is False the core runs WITHOUT a
        try/except (an escaping exception would propagate unlogged), while the
        wrapped branch always re-raises after logging. Presumably the intent
        is "log and mark incomplete before propagating when interruption is
        enabled"; confirm against the config semantics.
        """
        with Timer(print_en=False) as self.running_time:
            if not self.error_interuption:
                self.run_stages_core()
            else:
                try:
                    self.run_stages_core()
                except Exception as e:
                    self.incomplete_running = True
                    logger.error("error when running %s, the autoline for this task stopped. error message: %s"%(self.stage_now, str(e)))
                    if self.error_interuption:
                        # if True, stop the pipeline
                        raise e
            # reached only when run_stages_core finished without raising
            self.incomplete_running = False
|
||||
|
||||
    def run_stages_core(self):
        """Dispatch the pipeline stages according to config.autoline.onlyrun.

        "TBgen" / "TBgensim" / "TBgensimeval" run fixed short pipelines; the
        default case iterates gen -> sim -> check up to iter_max times
        (rebooting on failure), then runs the CGA stage and final evaluation.
        """
        match self.config.autoline.onlyrun:
            case "TBgen":
                self.run_TBgen()
            case "TBgensim":
                self.run_TBgen()
                self.run_TBsim()
            case "TBgensimeval":
                try:
                    self.run_TBgen("1_TBgen")
                    self.run_TBsim("2_TBsim")
                    self.run_TBeval("3_TBeval")
                except Exception as e:
                    self.incomplete_running = True
                    logger.error("error when running %s, the autoline for this task stopped. error message: %s"%(self.stage_now, str(e)))
                else:
                    self.incomplete_running = False
            case _:  # default, run all
                for i in range(self.iter_max):
                    self.autoline_iter_now = i
                    try:
                        self.run_TBgen(f"{i+1}_1_TBgen")
                        self.run_TBsim(f"{i+1}_2_TBsim")
                        self.run_TBcheck(f"{i+1}_3_TBcheck")
                    except Exception as e:
                        err_msg = str(e)
                        logger.error(f"Error when running {self.stage_now}, iter: {i+1}. Message: {err_msg}")

                        # API cool-down: if iverilog failed or the API timed
                        # out, rest 15 seconds to avoid 429 rate-limit errors
                        # or reset connections from the LLM provider
                        logger.warning("⚠️ Pipeline interrupted. Cooling down for 15s to avoid API Rate Limit...")
                        time.sleep(15)

                        # if configured to stop on error, re-raise
                        if getattr(self.config.autoline, 'error_interruption', False):
                            raise e

                        # otherwise mark for reboot and start the next iteration
                        self.next_action = "reboot"
                        self.incomplete_running = True  # current run is incomplete
                        continue

                    match self.next_action:
                        case "pass":
                            break
                        case "reboot":
                            continue

                # === [CGA insertion point START] ===
                # Only when the loop ended with a normal "pass" status
                # NOTE(review): indentation was reconstructed -- this block is
                # placed after the iteration loop; confirm against the
                # original file that it is not meant to run inside the loop.
                if self.next_action == "pass":
                    # mark the run complete before entering CGA so internal
                    # logic does not misjudge the state
                    self.incomplete_running = False
                    try:
                        if self.cga_enabled:
                            self.run_TBCGA(work_subdir="CGA", optimize=True, op_name="cga")
                        else:
                            self.run_TBCGA(work_subdir="CGA_baseline", optimize=False, op_name="coverage_eval")
                    except Exception as e:
                        logger.error(f"CGA Stage Failed: {e}. Fallback to original TB.")
                        self.result_dict['error'] = str(e)
                # === [CGA insertion point END] ===

                try:
                    self.run_TBeval(f"{self.autoline_iter_now+1}_4_TBeval")
                except Exception as e:
                    self.incomplete_running = True
                    logger.error("error when running %s, the autoline for this task stopped. error message: %s"%(self.stage_now, str(e)))
|
||||
|
||||
    def runinfo_update(self):
        """Assemble and persist the per-task run report.

        Builds self.run_info from each executed stage, derives the result
        dict fields, writes run_info.json / run_info_short.json, and dumps
        the accumulated task log. Returns the full run_info dict.
        """
        # general fields; token counts and cost come from llm_manager
        self.run_info = {
            "task_id": self.task_id,
            "task_number": self.task_NO,
            "time": round(self.running_time.interval, 2),
            "prompt_tokens": llm_manager.tokens_in_section,
            "completion_tokens": llm_manager.tokens_out_section,
            "token_cost": llm_manager.cost_section,
            "ERROR(incomplete)": self.incomplete_running,
            "op_record": self.op_record,
            "reboot_times": self.autoline_iter_now,
            "max_iter": self.iter_max,
            # coverage score from the CGA stage goes into the final report
            "coverage": self.cga_coverage
        }
        # TBgen
        if self.TBgen is not None:
            self.run_info["circuit_type"] = self.circuit_type
            self.run_info["checklist_worked"] = self.checklist_worked
            self.run_info["scenario_num"] = self.scenario_num
        # TBsim
        # note: "runing" spelling in the keys below is preserved -- presumably
        # downstream consumers key on it; verify before renaming
        if self.TBsim is not None:
            self.run_info.update({
                "Eval0_pass": self.TBsim.Eval0_pass,
                "Eval0_iv_pass": self.TBsim.sim_pass,
                "debug_iter_iv": self.TBsim.debug_iter_iv_now,
                "iv_runing_time": self.TBsim.iv_runing_time
            })
            if self.TBsim.pychecker_en:
                self.run_info.update({
                    "Eval0_py_pass": self.TBsim.py_pass,
                    "debug_iter_py": self.TBsim.debug_iter_py_now,
                    "py_runing_time": self.TBsim.py_runing_time
                })
        # TODO: TBcheck runinfo update
        if self.TBcheck is not None:
            self.run_info.update({
                "TB_corrected": self.TB_corrected,
                "TBcheck_oprecord": self.funccheck_op_record,
                "rtl_num_newly_gen": self.TBcheck_rtl_newly_gen_num
            })
        # TBeval: each evaluation level is mirrored into both run_info and
        # result_dict
        if self.TBeval is not None:
            if self.TBeval.Eval1_exist:
                self.run_info.update({"Eval1_pass": self.TBeval.Eval1_pass})
                self.result_dict["Eval1_pass"] = self.TBeval.Eval1_pass
            if self.TBeval.Eval2_exist:
                self.run_info.update({
                    "Eval2_pass": self.TBeval.Eval2_pass,
                    "Eval2_ratio": "%d/%d"%(len(self.TBeval.Eval2_passed_mutant_idx), len(self.prob_data['mutants'])),
                    "Eval2_failed_mutant_idxes": self.TBeval.Eval2_failed_mutant_idx
                })
                self.result_dict.update({
                    "Eval2_pass": self.TBeval.Eval2_pass,
                    "Eval2_ratio": "%d/%d"%(len(self.TBeval.Eval2_passed_mutant_idx), len(self.prob_data['mutants'])),
                    "Eval2_failed_mutant_idxes": self.TBeval.Eval2_failed_mutant_idx
                })
            if self.TBeval.Eval2b_exist:
                self.run_info.update({
                    "Eval2b_pass": self.TBeval.Eval2b_pass,
                    "Eval2b_ratio": "%d/%d"%(len(self.TBeval.Eval2b_passed_mutant_idx), len(self.prob_data['gptgen_RTL'])),
                    "Eval2b_failed_mutant_idxes": self.TBeval.Eval2b_failed_mutant_idx
                })
                self.result_dict.update({
                    "Eval2b_pass": self.TBeval.Eval2b_pass,
                    "Eval2b_ratio": "%d/%d"%(len(self.TBeval.Eval2b_passed_mutant_idx), len(self.prob_data['gptgen_RTL'])),
                    "Eval2b_failed_mutant_idxes": self.TBeval.Eval2b_failed_mutant_idx
                })
        # full pass: only meaningful when the run completed
        # NOTE(review): assumes TBsim and TBeval are non-None whenever
        # incomplete_running is False -- confirm for the onlyrun short paths
        if not self.incomplete_running:
            self.full_pass = self.TBsim.sim_pass and self.TBeval.Eval1_pass and self.TBeval.Eval2_pass
            self.run_info.update({
                "full_pass": self.full_pass
            })
            self.result_dict["full_pass"] = self.full_pass
            self.result_dict["pass"] = self.full_pass
        else:
            self.result_dict["full_pass"] = False
            self.result_dict["pass"] = False
        self.result_dict["stage"] = self.stage_now
        self.result_dict["coverage"] = self.cga_coverage
        save_dict_json_form(self.run_info, os.path.join(self.task_dir, "run_info.json"))

        # short run info: a one-line progress label for quick scanning.
        # "Eval1 - failed" below means Eval0 passed but Eval1 did not.
        if "Eval2_ratio" in self.run_info.keys():
            eval_progress = "Eval2 - " + self.run_info["Eval2_ratio"]
        elif "Eval1_pass" in self.run_info.keys() and self.run_info["Eval1_pass"]:
            eval_progress = "Eval1 - passed"
        elif "Eval0_pass" in self.run_info.keys() and self.run_info["Eval0_pass"]:
            eval_progress = "Eval1 - failed"
        elif "Eval0_pass" in self.run_info.keys() and not self.run_info["Eval0_pass"]:
            eval_progress = "Eval0 - failed"
        else:
            eval_progress = "Eval0 - not found"
        self.run_info_short = {
            "task_id": self.run_info.get("task_id", None),
            "eval_progress": eval_progress,
            "TB_corrected": self.run_info.get("TB_corrected", None),
            "reboot_times": self.run_info.get("reboot_times", None),
            "time": self.run_info.get("time", None),
            "cost": self.run_info.get("token_cost", None),
        }
        save_dict_json_form(self.run_info_short, os.path.join(self.task_dir, "run_info_short.json"))

        # run log: flush the buffered per-task log to task_log.log
        running_log = logger.reset_temp_log()
        tasklog_path = os.path.join(self.task_dir, "task_log.log")
        os.makedirs(os.path.dirname(tasklog_path), exist_ok=True)
        with open(tasklog_path, "w") as f:
            f.write(running_log)

        return self.run_info
|
||||
|
||||
def save_TB_codes(self):
|
||||
save_dir = self.task_dir
|
||||
ls.save_code(self.TB_code_v if isinstance(self.TB_code_v, str) else "// TB code (Verilog) unavailable", os.path.join(save_dir, "final_TB.v"))
|
||||
ls.save_code(self.TB_code_py if isinstance(self.TB_code_py, str) else "## TB code (Python) unavailable", os.path.join(save_dir, "final_TB.py"))
|
||||
|
||||
    @staticmethod
    def _blank_log():
        # emit an empty info line as a visual separator between pipeline stages
        logger.info("")
|
||||
|
||||
    def __call__(self, *args, **kwargs):
        """Make the task object callable; delegates directly to self.run()."""
        return self.run(*args, **kwargs)
|
||||
1453
autoline/TB_cga.py
Normal file
1453
autoline/TB_cga.py
Normal file
File diff suppressed because it is too large
Load Diff
22
autoline/__init__.py
Normal file
22
autoline/__init__.py
Normal file
@@ -0,0 +1,22 @@
|
||||
"""
|
||||
Description : Automatic pipeline of Chatbench: from HDLBits problem to simulation
|
||||
Author : Ruidi Qiu (r.qiu@tum.de)
|
||||
Time : 2023/12/7 15:13:00
|
||||
LastEdited : 2024/8/16 13:37:31
|
||||
autoline.py (c) 2023
|
||||
"""
|
||||
|
||||
from autoline.TB_autoline import run_autoline
|
||||
|
||||
from autoline.TB1_gen import TaskTBgen
|
||||
from autoline.TB2_syncheck import TaskTBsim
|
||||
from autoline.TB3_funccheck import TaskTBcheck, TB_corrector, TB_discriminator
|
||||
from autoline.TB4_eval import TaskTBeval
|
||||
|
||||
|
||||
|
||||
if __name__ == "__main__":
    # this module is import-only; executing it directly is a usage error
    raise RuntimeError("you cannot run autoline.py directly!")
|
||||
|
||||
BIN
autoline/__pycache__/TB1_gen.cpython-312.pyc
Normal file
BIN
autoline/__pycache__/TB1_gen.cpython-312.pyc
Normal file
Binary file not shown.
BIN
autoline/__pycache__/TB2_syncheck.cpython-312.pyc
Normal file
BIN
autoline/__pycache__/TB2_syncheck.cpython-312.pyc
Normal file
Binary file not shown.
BIN
autoline/__pycache__/TB3_funccheck.cpython-312.pyc
Normal file
BIN
autoline/__pycache__/TB3_funccheck.cpython-312.pyc
Normal file
Binary file not shown.
BIN
autoline/__pycache__/TB4_eval.cpython-312.pyc
Normal file
BIN
autoline/__pycache__/TB4_eval.cpython-312.pyc
Normal file
Binary file not shown.
BIN
autoline/__pycache__/TB_autoline.cpython-312.pyc
Normal file
BIN
autoline/__pycache__/TB_autoline.cpython-312.pyc
Normal file
Binary file not shown.
BIN
autoline/__pycache__/TB_cga.cpython-312.pyc
Normal file
BIN
autoline/__pycache__/TB_cga.cpython-312.pyc
Normal file
Binary file not shown.
BIN
autoline/__pycache__/__init__.cpython-312.pyc
Normal file
BIN
autoline/__pycache__/__init__.cpython-312.pyc
Normal file
Binary file not shown.
BIN
autoline/__pycache__/cga_utils.cpython-312.pyc
Normal file
BIN
autoline/__pycache__/cga_utils.cpython-312.pyc
Normal file
Binary file not shown.
BIN
autoline/__pycache__/diversity_injector.cpython-312.pyc
Normal file
BIN
autoline/__pycache__/diversity_injector.cpython-312.pyc
Normal file
Binary file not shown.
BIN
autoline/__pycache__/energy_allocator.cpython-312.pyc
Normal file
BIN
autoline/__pycache__/energy_allocator.cpython-312.pyc
Normal file
Binary file not shown.
BIN
autoline/__pycache__/quality_evaluator.cpython-312.pyc
Normal file
BIN
autoline/__pycache__/quality_evaluator.cpython-312.pyc
Normal file
Binary file not shown.
BIN
autoline/__pycache__/semantic_analyzer.cpython-312.pyc
Normal file
BIN
autoline/__pycache__/semantic_analyzer.cpython-312.pyc
Normal file
Binary file not shown.
BIN
autoline/__pycache__/test_history.cpython-312.pyc
Normal file
BIN
autoline/__pycache__/test_history.cpython-312.pyc
Normal file
Binary file not shown.
5444
autoline/cga_utils.py
Normal file
5444
autoline/cga_utils.py
Normal file
File diff suppressed because it is too large
Load Diff
601
autoline/diversity_injector.py
Normal file
601
autoline/diversity_injector.py
Normal file
@@ -0,0 +1,601 @@
|
||||
"""
|
||||
Description : Diversity Constraint Injector (Layer 1)
|
||||
- Analyze existing test sequences
|
||||
- Detect overused patterns
|
||||
- Generate diversity constraints for Prompt
|
||||
- Recommend new test scenarios
|
||||
Author : CGA Enhancement Project
|
||||
Time : 2026/03/16
|
||||
"""
|
||||
|
||||
import logging
|
||||
import re
|
||||
from typing import List, Dict, Optional, Any, Tuple, Set
|
||||
from dataclasses import dataclass, field
|
||||
from enum import Enum
|
||||
|
||||
# 支持两种导入方式:包导入和直接加载
|
||||
try:
|
||||
from .test_history import (
|
||||
TestHistoryManager,
|
||||
TestRecord,
|
||||
InputSequence,
|
||||
SequencePattern
|
||||
)
|
||||
|
||||
except ImportError:
|
||||
from test_history import (
|
||||
TestHistoryManager,
|
||||
TestRecord,
|
||||
InputSequence,
|
||||
SequencePattern
|
||||
)
|
||||
|
||||
logger = logging.getLogger(__name__)
|
||||
|
||||
|
||||
# ============================================================================
|
||||
# 配置常量
|
||||
# ============================================================================
|
||||
|
||||
class DiversityConfig:
    """Tunable thresholds and weights for the diversity-constraint layer."""

    # minimum edit distance a new test sequence must keep from recent ones
    MIN_EDIT_DISTANCE = 3

    # a pattern seen at least this many times counts as overused
    OVERUSE_THRESHOLD = 3

    # number of new scenarios to recommend per round
    NEW_SCENARIO_COUNT = 3

    # cap on sequence length considered when generating constraints
    MAX_SEQUENCE_LENGTH = 10

    # weights for the aggregate diversity score
    # NOTE(review): the three weights sum to 1.0, but evaluate_diversity only
    # uses the first two -- confirm whether COVERAGE_WEIGHT should be applied
    PATTERN_WEIGHT = 0.4
    EDIT_DISTANCE_WEIGHT = 0.3
    COVERAGE_WEIGHT = 0.3
|
||||
|
||||
|
||||
# ============================================================================
|
||||
# 约束类型定义
|
||||
# ============================================================================
|
||||
|
||||
class ConstraintType(Enum):
    """Kinds of diversity constraints that can be injected into a prompt."""
    FORBID_SEQUENCE = "forbid_sequence"      # forbid a specific input sequence
    MIN_EDIT_DISTANCE = "min_edit_distance"  # require distance from past tests
    AVOID_PATTERN = "avoid_pattern"          # avoid an overused signal pattern
    TRY_SCENARIO = "try_scenario"            # suggest a new test scenario
    EXPLORE_RANGE = "explore_range"          # explore a value range for a signal
|
||||
|
||||
|
||||
# ============================================================================
|
||||
# 约束数据结构
|
||||
# ============================================================================
|
||||
|
||||
@dataclass
class DiversityConstraint:
    """One diversity requirement to be rendered into a generation prompt.

    Attributes:
        constraint_type: Kind of requirement (see ConstraintType).
        description: Human-readable summary; fallback prompt text for
            constraint types without a dedicated template.
        details: Type-specific payload (signal names, bounds, counts, ...).
        priority: 1-5, 5 highest; used for ordering in the prompt.
    """
    constraint_type: ConstraintType
    description: str
    details: Dict[str, Any] = field(default_factory=dict)
    priority: int = 3

    def to_prompt_text(self) -> str:
        """Render this constraint as a single prompt bullet line."""
        d = self.details
        kind = self.constraint_type
        if kind == ConstraintType.FORBID_SEQUENCE:
            return f"- AVOID using this sequence pattern: {d.get('pattern', 'unknown')}"
        if kind == ConstraintType.MIN_EDIT_DISTANCE:
            return f"- Your test sequence MUST differ from previous tests (edit distance >= {d.get('min_distance', 3)})"
        if kind == ConstraintType.AVOID_PATTERN:
            return f"- AVOID the pattern '{d.get('pattern', '')}' for signal '{d.get('signal', '')}' (already used {d.get('count', 0)} times)"
        if kind == ConstraintType.TRY_SCENARIO:
            return f"- TRY this new approach: {d.get('scenario', 'unknown')}"
        if kind == ConstraintType.EXPLORE_RANGE:
            return f"- EXPLORE values in range [{d.get('min', 0)}, {d.get('max', 255)}] for {d.get('signal', 'signal')}"
        # fallback for any future constraint type
        return f"- {self.description}"
|
||||
# ============================================================================
|
||||
# 序列分析器
|
||||
# ============================================================================
|
||||
|
||||
class SequenceAnalyzer:
    """Static helpers that characterize an input sequence: value range,
    transition shape, and approximate operation count in testbench code.
    """

    @staticmethod
    def extract_value_range(values: List[Tuple[int, Any]]) -> Tuple[Any, Any]:
        """Return (min, max) of the numeric interpretations of a value trace.

        Args:
            values: list of (time, value) pairs; values may be numbers,
                single-bit literals ('0'/'1'/'x'/'z'), or sized Verilog
                literals such as "8'hFF", "4'b1010", "16'd42".

        Returns:
            (min, max) over all values that could be interpreted numerically,
            or (0, 0) when none could.
        """
        if not values:
            return (0, 0)

        numeric_values = []
        for _, v in values:
            if isinstance(v, (int, float)):
                numeric_values.append(v)
            elif isinstance(v, str):
                if v in ['0', '1', 'x', 'z']:
                    # single-bit literal; unknown ('x') / high-Z ('z') count as 0
                    numeric_values.append(int(v) if v.isdigit() else 0)
                    continue
                # sized Verilog literal: width'radix digits
                match = re.match(r"(\d+)'([bdh])([0-9a-fA-FxXzZ_]+)", v)
                if match:
                    # fix: parse with the base indicated by the radix letter;
                    # the previous code used base 16 for every literal, so
                    # "4'b1010" parsed as 4112 instead of 10
                    base = {'b': 2, 'd': 10, 'h': 16}[match.group(2)]
                    try:
                        numeric_values.append(int(match.group(3), base))
                    except ValueError:
                        # digits contain x/z (unknown bits): no numeric value
                        pass

        if numeric_values:
            return (min(numeric_values), max(numeric_values))
        return (0, 0)

    @staticmethod
    def detect_transition_pattern(values: List[Tuple[int, Any]]) -> str:
        """Classify the shape of a value trace.

        Returns one of "single", "incremental", "decremental", "alternating",
        "pulse", or "random". Ordering checks compare str() forms, so for
        multi-digit numbers this is a lexicographic heuristic, not a strict
        numeric comparison.
        """
        if len(values) < 2:
            return "single"

        # strip timestamps, keep the value sequence
        val_seq = [v for _, v in values]

        # monotonically non-decreasing (lexicographic heuristic)
        if all(str(val_seq[i]) <= str(val_seq[i+1]) for i in range(len(val_seq)-1)):
            return "incremental"

        # monotonically non-increasing
        if all(str(val_seq[i]) >= str(val_seq[i+1]) for i in range(len(val_seq)-1)):
            return "decremental"

        # ABAB-style alternation
        if len(val_seq) >= 4:
            if val_seq[0] == val_seq[2] and val_seq[1] == val_seq[3]:
                return "alternating"

        # pulse: one deviation that returns to the original value
        if len(val_seq) == 3 and val_seq[0] == val_seq[2] != val_seq[1]:
            return "pulse"

        return "random"

    @staticmethod
    def calculate_sequence_length(code: str) -> int:
        """Approximate the number of stimulus operations in testbench code:
        assignment statements plus the cycle counts of repeat(...) blocks.
        """
        assignments = len(re.findall(r'\w+\s*=\s*\S+\s*;', code))
        repeats = re.findall(r'repeat\s*\(\s*(\d+)\s*\)', code)
        repeat_cycles = sum(int(r) for r in repeats)

        return assignments + repeat_cycles
|
||||
|
||||
|
||||
# ============================================================================
|
||||
# 场景推荐器
|
||||
# ============================================================================
|
||||
|
||||
class ScenarioRecommender:
    """Recommends new test scenarios based on test history and the list of
    uncovered function points.
    """

    # per-circuit-type scenario templates; {placeholders} are filled from the
    # function-point dict by _fill_template
    SCENARIO_TEMPLATES = {
        'fsm': [
            "Test state transition from {state_a} to {state_b}",
            "Test illegal state transition handling",
            "Test state machine reset behavior",
            "Test state holding under stable inputs"
        ],
        'counter': [
            "Test counter overflow behavior (count to max value)",
            "Test counter underflow (if applicable)",
            "Test counter reset during counting",
            "Test counter enable/disable control"
        ],
        'branch': [
            "Test boundary condition: {condition} at threshold",
            "Test all branches of nested if-else",
            "Test case statement with all possible values"
        ],
        'protocol': [
            "Test handshake timeout scenario",
            "Test back-to-back transactions",
            "Test protocol violation handling"
        ],
        'general': [
            "Apply random input patterns for extended duration",
            "Test with boundary values (all 0s, all 1s)",
            "Test rapid signal transitions",
            "Test power-on/reset sequence variations"
        ]
    }

    def __init__(self, history_manager: TestHistoryManager):
        # shared history manager; read-only access here
        self.history = history_manager

    def recommend_scenarios(self,
                            uncovered_functions: List[Dict],
                            covered_patterns: Set[str] = None) -> List[str]:
        """
        Recommend new test scenarios.

        Args:
            uncovered_functions: list of uncovered function-point dicts
                (expects optional 'type', 'name', and template-fill keys)
            covered_patterns: set of scenario strings already covered

        Returns:
            exactly NEW_SCENARIO_COUNT recommended scenario strings
        """
        recommendations = []
        covered_patterns = covered_patterns or set()

        # 1) template-based recommendations from uncovered function points
        for func in uncovered_functions[:3]:
            func_type = func.get('type', 'general')
            func_name = func.get('name', '')  # currently unused

            templates = self.SCENARIO_TEMPLATES.get(func_type, self.SCENARIO_TEMPLATES['general'])

            for template in templates[:1]:  # one template per function point
                scenario = self._fill_template(template, func)
                if scenario not in covered_patterns:
                    recommendations.append(scenario)

        # 2) history-based recommendation: suggest a transition pattern that
        # has never been used before
        if self.history.records:
            used_patterns = set()
            for record in self.history.records:
                for seq in record.input_sequences:
                    pattern = SequenceAnalyzer.detect_transition_pattern(seq.values)
                    used_patterns.add(pattern)

            all_patterns = {'incremental', 'decremental', 'alternating', 'pulse', 'random'}
            unused_patterns = all_patterns - used_patterns

            if unused_patterns:
                # NOTE(review): set iteration order is arbitrary, so the
                # chosen pattern is nondeterministic across runs
                recommendations.append(f"Try {list(unused_patterns)[0]} input pattern (different from your usual approach)")

        # 3) pad with a generic suggestion until the quota is met
        while len(recommendations) < DiversityConfig.NEW_SCENARIO_COUNT:
            recommendations.append("Explore a completely different input sequence than before")

        return recommendations[:DiversityConfig.NEW_SCENARIO_COUNT]

    def _fill_template(self, template: str, func: Dict) -> str:
        """Substitute {state_a}/{state_b}/{condition} placeholders in a
        template with values taken from the function-point dict."""
        result = template

        if '{state_a}' in template or '{state_b}' in template:
            states = func.get('states', ['STATE_A', 'STATE_B'])
            if len(states) >= 2:
                result = result.replace('{state_a}', states[0])
                result = result.replace('{state_b}', states[1])

        if '{condition}' in template:
            condition = func.get('condition', 'signal')
            result = result.replace('{condition}', condition)

        return result
|
||||
|
||||
|
||||
# ============================================================================
|
||||
# 约束生成器
|
||||
# ============================================================================
|
||||
|
||||
class ConstraintGenerator:
    """Derives diversity constraints from the recorded test history."""

    def __init__(self, history_manager: TestHistoryManager):
        self.history = history_manager
        self.analyzer = SequenceAnalyzer()

    def generate_constraints(self,
                             target_function: str = None,
                             uncovered_functions: List[Dict] = None) -> List[DiversityConstraint]:
        """
        Generate diversity constraints from history and uncovered functions.

        Args:
            target_function: current target function point (reserved;
                currently unused by this implementation)
            uncovered_functions: list of uncovered function-point dicts

        Returns:
            constraints sorted by descending priority; empty when there is
            no test history yet
        """
        constraints = []

        if not self.history.records:
            return constraints

        # 1. forbid patterns that have been used too often
        overused = self.history.get_overused_patterns(DiversityConfig.OVERUSE_THRESHOLD)
        for pattern in overused[:3]:  # at most 3
            constraints.append(DiversityConstraint(
                constraint_type=ConstraintType.AVOID_PATTERN,
                description=f"Avoid overused pattern for {pattern.signal_name}",
                details={
                    'signal': pattern.signal_name,
                    'pattern': pattern.pattern,
                    'count': pattern.count
                },
                priority=5
            ))

        # 2. require a minimum edit distance from the most recent tests
        recent_count = min(5, len(self.history.records))
        # fix: condition was `> 00` -- a legal but confusing zero literal;
        # behavior is unchanged
        if recent_count > 0:
            constraints.append(DiversityConstraint(
                constraint_type=ConstraintType.MIN_EDIT_DISTANCE,
                description="Maintain minimum edit distance from recent tests",
                details={
                    'min_distance': DiversityConfig.MIN_EDIT_DISTANCE,
                    'reference_count': recent_count
                },
                priority=4
            ))

        # 3. value-range exploration constraints for uncovered function points
        if uncovered_functions:
            for func in uncovered_functions[:2]:
                # only counters get a range constraint for now
                if func.get('type') == 'counter':
                    max_val = func.get('max_value', 255)
                    constraints.append(DiversityConstraint(
                        constraint_type=ConstraintType.EXPLORE_RANGE,
                        description=f"Explore counter boundary values",
                        details={
                            'signal': func.get('name', 'counter'),
                            'min': 0,
                            'max': max_val
                        },
                        priority=3
                    ))

        # highest-priority constraints first
        constraints.sort(key=lambda c: c.priority, reverse=True)

        return constraints

    def generate_forbidden_sequence_prompt(self) -> str:
        """Build a prompt section listing overused patterns to avoid; returns
        an empty string when nothing is overused."""
        overused = self.history.get_overused_patterns(DiversityConfig.OVERUSE_THRESHOLD)

        if not overused:
            return ""

        lines = ["[DIVERSITY CONSTRAINTS - AVOID THESE OVERUSED PATTERNS]"]

        for i, pattern in enumerate(overused[:5], 1):
            lines.append(f"{i}. Signal '{pattern.signal_name}': {pattern.pattern[:50]}")
            lines.append(f"   (This pattern has been used {pattern.count} times already)")

        lines.append("\nPlease create a DIFFERENT input sequence to improve test diversity.")

        return "\n".join(lines)
|
||||
|
||||
|
||||
# ============================================================================
|
||||
# 多样性约束注入器(主入口)
|
||||
# ============================================================================
|
||||
|
||||
class DiversityInjector:
|
||||
"""
|
||||
多样性约束注入器 - 第1层主入口
|
||||
|
||||
整合序列分析、模式检测、约束生成,提供统一的多样性约束接口
|
||||
"""
|
||||
|
||||
def __init__(self, history_manager: TestHistoryManager = None):
|
||||
"""
|
||||
Args:
|
||||
history_manager: 测试历史管理器
|
||||
"""
|
||||
self.history = history_manager or TestHistoryManager()
|
||||
self.constraint_generator = ConstraintGenerator(self.history)
|
||||
self.scenario_recommender = ScenarioRecommender(self.history)
|
||||
|
||||
def inject_diversity_constraints(self,
|
||||
prompt: str,
|
||||
target_function: str = None,
|
||||
uncovered_functions: List[Dict] = None) -> str:
|
||||
"""
|
||||
将多样性约束注入到Prompt中
|
||||
|
||||
Args:
|
||||
prompt: 废始Prompt
|
||||
target_function: 当前目标功能点
|
||||
uncovered_functions: 未覆盖功能点列表
|
||||
|
||||
Returns:
|
||||
注入约束后的Prompt
|
||||
"""
|
||||
if not self.history.records:
|
||||
return prompt # 没有历史记录时不注入
|
||||
|
||||
# 生成约束
|
||||
constraints = self.constraint_generator.generate_constraints(
|
||||
target_function=target_function,
|
||||
uncovered_functions=uncovered_functions
|
||||
)
|
||||
|
||||
# 生成推荐场景
|
||||
recommendations = self.scenario_recommender.recommend_scenarios(
|
||||
uncovered_functions=uncovered_functions or []
|
||||
)
|
||||
|
||||
# 构建约束文本
|
||||
constraint_text = self._build_constraint_section(constraints, recommendations)
|
||||
|
||||
# 找到插入点(在 [OUTPUT REQUIREMENTS] 之前插入)
|
||||
insert_marker = "[OUTPUT REQUIREMENTS"
|
||||
if insert_marker in prompt:
|
||||
parts = prompt.split(insert_marker, 1)
|
||||
enhanced_prompt = parts[0] + constraint_text + "\n\n" + insert_marker + parts[1]
|
||||
else:
|
||||
# 如果找不到标记,追加到末尾
|
||||
enhanced_prompt = prompt + "\n\n" + constraint_text
|
||||
|
||||
return enhanced_prompt
|
||||
|
||||
def _build_constraint_section(self,
|
||||
constraints: List[DiversityConstraint],
|
||||
recommendations: List[str]) -> str:
|
||||
"""构建约束章节"""
|
||||
lines = []
|
||||
lines.append("[DIVERSITY CONSTRAINTS - CRITICAL]")
|
||||
lines.append("To improve test effectiveness, follow these diversity requirements:")
|
||||
lines.append("")
|
||||
|
||||
# 添加约束
|
||||
for constraint in constraints:
|
||||
lines.append(constraint.to_prompt_text())
|
||||
|
||||
lines.append("")
|
||||
|
||||
# 添加推荐场景
|
||||
if recommendations:
|
||||
lines.append("[RECOMMENDED NEW APPROACHES]")
|
||||
for i, rec in enumerate(recommendations, 1):
|
||||
lines.append(f"{i}. {rec}")
|
||||
|
||||
lines.append("")
|
||||
lines.append("IMPORTANT: Repeated test patterns reduce coverage improvement efficiency.")
|
||||
lines.append("Generate a DISTINCTLY DIFFERENT test sequence from previous attempts.")
|
||||
|
||||
return "\n".join(lines)
|
||||
|
||||
def get_diversity_context(self) -> str:
|
||||
"""获取多样性上下文信息(用于Prompt)"""
|
||||
if not self.history.records:
|
||||
return ""
|
||||
|
||||
stats = self.history.get_statistics()
|
||||
overused = self.history.get_overused_patterns(DiversityConfig.OVERUSE_THRESHOLD)
|
||||
|
||||
context_lines = []
|
||||
context_lines.append(f"Test History: {stats['total_tests']} tests generated")
|
||||
context_lines.append(f"Unique Patterns: {stats['total_patterns']}")
|
||||
|
||||
if overused:
|
||||
context_lines.append(f"Overused Patterns: {len(overused)} (avoid these)")
|
||||
|
||||
return "\n".join(context_lines)
|
||||
|
||||
def evaluate_diversity(self,
|
||||
new_code: str,
|
||||
known_signals: List[str] = None) -> Dict[str, float]:
|
||||
"""
|
||||
评估新代码的多样性
|
||||
|
||||
Args:
|
||||
new_code: 新生成的测试代码
|
||||
known_signals: 已知信号列表
|
||||
|
||||
Returns:
|
||||
多样性评估结果
|
||||
"""
|
||||
results = {}
|
||||
|
||||
# 1. 序列多样性
|
||||
if known_signals:
|
||||
self.history.sequence_extractor.set_known_signals(known_signals)
|
||||
new_sequences = self.history.sequence_extractor.extract(new_code)
|
||||
results['sequence_diversity'] = self.history.calculate_sequence_diversity(new_sequences)
|
||||
|
||||
# 2. 编辑距离多样性
|
||||
results['edit_distance_diversity'] = self.history.calculate_edit_distance_diversity(new_code)
|
||||
|
||||
# 3. 综合得分
|
||||
results['overall_diversity'] = (
|
||||
DiversityConfig.PATTERN_WEIGHT * results['sequence_diversity'] +
|
||||
DiversityConfig.EDIT_DISTANCE_WEIGHT * results['edit_distance_diversity']
|
||||
)
|
||||
|
||||
return results
|
||||
|
||||
def record_test(self,
                code: str,
                target_function: str = "",
                coverage_score: float = 0.0,
                success: bool = False,
                iteration: int = 0,
                known_signals: List[str] = None) -> TestRecord:
    """Persist one generated test case into the history manager.

    Thin delegation to ``TestHistoryManager.add_record``.

    Args:
        code: the test code.
        target_function: the targeted function point.
        coverage_score: achieved coverage score.
        success: whether the test met its goal.
        iteration: iteration number that produced the test.
        known_signals: optional list of known signal names.

    Returns:
        The stored test record.
    """
    record_kwargs = dict(
        code=code,
        target_function=target_function,
        coverage_score=coverage_score,
        success=success,
        iteration=iteration,
        known_signals=known_signals,
    )
    return self.history.add_record(**record_kwargs)
|
||||
|
||||
def get_statistics(self) -> Dict[str, Any]:
    """Return aggregate statistics from the underlying history manager."""
    stats = self.history.get_statistics()
    return stats
|
||||
|
||||
def generate_diversity_report(self) -> str:
    """Return the history manager's formatted diversity report."""
    report = self.history.get_diversity_report()
    return report
|
||||
|
||||
|
||||
# ============================================================================
|
||||
# 便捷函数
|
||||
# ============================================================================
|
||||
|
||||
def create_diversity_injector(history_file: str = None) -> DiversityInjector:
    """Create a diversity-constraint injector backed by a history manager.

    Args:
        history_file: optional path to a persisted test-history file.

    Returns:
        A fully initialized DiversityInjector.
    """
    manager = TestHistoryManager(history_file=history_file)
    injector = DiversityInjector(history_manager=manager)
    return injector
|
||||
787
autoline/energy_allocator.py
Normal file
787
autoline/energy_allocator.py
Normal file
@@ -0,0 +1,787 @@
|
||||
"""
|
||||
Description : Energy Allocation Layer (Layer 4)
|
||||
- Adaptive Resource Scheduling
|
||||
- Dynamic energy distribution based on function point importance
|
||||
Author : CGA Enhancement Project
|
||||
Time : 2026/03/11
|
||||
"""
|
||||
|
||||
import logging
|
||||
from typing import List, Dict, Optional, Any, Tuple
|
||||
from dataclasses import dataclass, field
|
||||
from enum import Enum
|
||||
|
||||
logger = logging.getLogger(__name__)
|
||||
|
||||
|
||||
# ============================================================================
|
||||
# 数据结构定义
|
||||
# ============================================================================
|
||||
|
||||
class EnergyState(Enum):
    """Lifecycle states for a function point's energy budget."""
    ACTIVE = "active"        # still has energy left and may be selected
    DEPLETED = "depleted"    # budget exhausted without success
    COMPLETED = "completed"  # coverage goal reached
    SUSPENDED = "suspended"  # paused after too many consecutive failures
|
||||
|
||||
|
||||
@dataclass
class EnergyAllocation:
    """Energy budget and progress bookkeeping for one function point.

    Mutated in place by the selector/tracker/redistributor.
    """
    function_point: str                      # function-point name
    importance: float                        # importance score (0.0 - 1.0)
    allocated: float = 0.0                   # total energy granted so far
    consumed: float = 0.0                    # energy already spent on attempts
    remaining: float = 0.0                   # energy still spendable
    consecutive_failures: int = 0            # current failure streak
    state: EnergyState = EnergyState.ACTIVE  # lifecycle state
    total_attempts: int = 0                  # attempts made against this target
    successful_attempts: int = 0             # attempts that achieved coverage
|
||||
|
||||
|
||||
@dataclass
class GenerationResult:
    """Outcome record of a single test-generation attempt."""
    function_point: str          # targeted function point
    success: bool                # whether the target was covered
    coverage_delta: float = 0.0  # coverage change produced by the attempt
    energy_cost: float = 1.0     # energy charged for the attempt
    code_generated: str = ""     # the generated test code
    quality_score: float = 0.0   # code quality score
|
||||
|
||||
|
||||
# ============================================================================
|
||||
# 能量初始化器
|
||||
# ============================================================================
|
||||
|
||||
class EnergyInitializer:
    """Initialize per-function-point energy budgets.

    Splits a total energy budget across uncovered function points in
    proportion to their importance, reserving a small buffer for later
    redistribution and guaranteeing a minimum budget per target.
    """

    # Default total energy (corresponds to the maximum iteration count).
    DEFAULT_TOTAL_ENERGY = 10.0
    # Minimum energy any single function point receives.
    MIN_ENERGY_PER_FP = 1.0
    # Fraction of the total budget held back for redistribution.
    ENERGY_BUFFER_RATIO = 0.1

    def __init__(self,
                 total_energy: float = None,
                 min_energy: float = None,
                 buffer_ratio: float = None):
        """
        Args:
            total_energy: total energy budget (defaults to DEFAULT_TOTAL_ENERGY).
            min_energy: per-function-point minimum (defaults to MIN_ENERGY_PER_FP).
            buffer_ratio: reserved buffer fraction (defaults to ENERGY_BUFFER_RATIO).
        """
        # BUGFIX: use explicit `is None` checks. The previous `value or DEFAULT`
        # form silently replaced legitimate falsy arguments such as
        # buffer_ratio=0.0 or min_energy=0 with the class defaults.
        self.total_energy = self.DEFAULT_TOTAL_ENERGY if total_energy is None else total_energy
        self.min_energy = self.MIN_ENERGY_PER_FP if min_energy is None else min_energy
        self.buffer_ratio = self.ENERGY_BUFFER_RATIO if buffer_ratio is None else buffer_ratio

    def initialize(self,
                   function_points: List[Dict],
                   max_iterations: int = None) -> Dict[str, EnergyAllocation]:
        """Build the initial allocation table.

        Args:
            function_points: list of dicts, each with at least 'name';
                optional 'importance' (default 0.5) and 'covered'
                (default False).
            max_iterations: when given (and truthy), overrides the total
                energy budget.

        Returns:
            Mapping of function-point name -> EnergyAllocation; empty
            when every function point is already covered.
        """
        # If a max iteration count is supplied, it becomes the total budget.
        if max_iterations:
            self.total_energy = float(max_iterations)

        # Only uncovered function points receive a budget.
        uncovered_fps = [fp for fp in function_points if not fp.get('covered', False)]

        if not uncovered_fps:
            logger.info("All function points are covered. No energy allocation needed.")
            return {}

        total_importance = sum(fp.get('importance', 0.5) for fp in uncovered_fps)

        # Hold back a buffer for later redistribution.
        buffer_energy = self.total_energy * self.buffer_ratio
        available_energy = self.total_energy - buffer_energy

        allocations = {}

        for fp in uncovered_fps:
            name = fp.get('name', 'unknown')
            importance = fp.get('importance', 0.5)

            # Importance-proportional share, falling back to an even split
            # when total importance is zero; never below the minimum.
            if total_importance > 0:
                proportional_energy = (importance / total_importance) * available_energy
            else:
                proportional_energy = available_energy / len(uncovered_fps)

            allocated = max(self.min_energy, proportional_energy)

            allocations[name] = EnergyAllocation(
                function_point=name,
                importance=importance,
                allocated=allocated,
                consumed=0.0,
                remaining=allocated,
                consecutive_failures=0,
                state=EnergyState.ACTIVE,
                total_attempts=0,
                successful_attempts=0
            )

        # Log the resulting allocation summary.
        logger.info(f"Energy initialized: total={self.total_energy:.1f}, "
                    f"allocated={sum(a.allocated for a in allocations.values()):.1f}, "
                    f"buffer={buffer_energy:.1f}, "
                    f"targets={len(allocations)}")

        return allocations
|
||||
|
||||
|
||||
# ============================================================================
|
||||
# 目标选择器
|
||||
# ============================================================================
|
||||
|
||||
class TargetSelector:
    """Pick the next function point to target for test generation.

    Priority = importance x (remaining / allocated) x failure penalty,
    so important, well-funded, not-recently-failing targets come first.
    """

    # Failure-streak threshold (kept for external callers/config; the
    # smooth penalty term in _priority handles streaks internally).
    MAX_CONSECUTIVE_FAILURES = 3

    def __init__(self, allocations: Dict[str, EnergyAllocation]):
        """
        Args:
            allocations: shared mapping of name -> EnergyAllocation.
        """
        self.allocations = allocations

    @staticmethod
    def _priority(alloc: EnergyAllocation) -> float:
        """Scheduling priority for one allocation.

        CONSISTENCY FIX: this formula was previously duplicated verbatim
        inside select_next_target and get_top_candidates; it now lives in
        one place so the two queries cannot drift apart.
        """
        # Fraction of the granted budget still available.
        energy_ratio = alloc.remaining / alloc.allocated if alloc.allocated > 0 else 0
        # Smoothly demote targets with long failure streaks.
        failure_penalty = 1.0 / (1.0 + alloc.consecutive_failures * 0.5)
        return alloc.importance * energy_ratio * failure_penalty

    def _active_candidates(self) -> List[EnergyAllocation]:
        """Allocations that are ACTIVE and still have energy to spend."""
        return [
            alloc for alloc in self.allocations.values()
            if alloc.state == EnergyState.ACTIVE and alloc.remaining > 0
        ]

    def select_next_target(self) -> Optional[EnergyAllocation]:
        """Return the highest-priority candidate, or None if none remain."""
        candidates = self._active_candidates()

        if not candidates:
            logger.info("No active targets with remaining energy.")
            return None

        candidates.sort(key=self._priority, reverse=True)
        selected = candidates[0]

        logger.debug(f"Selected target: {selected.function_point} "
                     f"(importance={selected.importance:.2f}, "
                     f"remaining={selected.remaining:.1f}, "
                     f"failures={selected.consecutive_failures})")

        return selected

    def get_candidates_count(self) -> int:
        """Number of currently selectable targets."""
        return len(self._active_candidates())

    def get_top_candidates(self, n: int = 3) -> List[EnergyAllocation]:
        """Return the N highest-priority candidate targets."""
        candidates = self._active_candidates()
        candidates.sort(key=self._priority, reverse=True)
        return candidates[:n]
|
||||
|
||||
|
||||
# ============================================================================
|
||||
# 能量消耗跟踪器
|
||||
# ============================================================================
|
||||
|
||||
class EnergyConsumptionTracker:
    """Track per-attempt energy consumption and update allocation state.

    Each recorded GenerationResult charges energy against its target's
    allocation; success marks the target COMPLETED, repeated failures
    decay its remaining energy and may SUSPEND it, and running out of
    energy marks it DEPLETED.
    """

    # Multiplier applied to remaining energy once a target reaches
    # 3 consecutive failures (keeps ~70% of the remainder per decay).
    ENERGY_DECAY_FACTOR = 0.7

    def __init__(self, allocations: Dict[str, EnergyAllocation]):
        """
        Args:
            allocations: shared mapping of name -> EnergyAllocation
                (mutated in place by record_generation).
        """
        self.allocations = allocations
        self.history: List[GenerationResult] = []

    def record_generation(self, result: GenerationResult) -> Dict[str, Any]:
        """Record one generation attempt and update the target's state.

        Args:
            result: outcome of a single generation attempt.

        Returns:
            A status dict whose 'status' key is one of 'completed',
            'suspended', 'depleted', 'failed', or 'unknown' (when the
            target name is not in the allocation table).
        """
        self.history.append(result)

        fp_name = result.function_point
        if fp_name not in self.allocations:
            logger.warning(f"Unknown function point: {fp_name}")
            return {'status': 'unknown', 'message': 'Unknown function point'}

        alloc = self.allocations[fp_name]
        alloc.total_attempts += 1

        # Charge the attempt's energy; remaining never goes negative.
        energy_cost = result.energy_cost
        alloc.consumed += energy_cost
        alloc.remaining = max(0, alloc.remaining - energy_cost)

        if result.success:
            # Success: reset the failure streak and mark the target done.
            alloc.consecutive_failures = 0
            alloc.successful_attempts += 1
            alloc.state = EnergyState.COMPLETED

            logger.info(f"[SUCCESS] Target covered: {fp_name} (attempts={alloc.total_attempts}, "
                        f"energy_used={alloc.consumed:.1f})")

            return {
                'status': 'completed',
                'function_point': fp_name,
                'attempts': alloc.total_attempts,
                'energy_used': alloc.consumed
            }
        else:
            # Failure: extend the failure streak.
            alloc.consecutive_failures += 1

            # After 3+ consecutive failures, decay the remaining energy
            # and possibly suspend the target.
            if alloc.consecutive_failures >= 3:
                old_remaining = alloc.remaining
                alloc.remaining *= self.ENERGY_DECAY_FACTOR

                logger.warning(f"Consecutive failures for {fp_name}: {alloc.consecutive_failures}. "
                               f"Energy reduced: {old_remaining:.1f} -> {alloc.remaining:.1f}")

                # Suspend when the decayed budget drops below half a unit.
                if alloc.remaining < 0.5:
                    alloc.state = EnergyState.SUSPENDED
                    logger.warning(f"Target suspended due to low energy: {fp_name}")

                    return {
                        'status': 'suspended',
                        'function_point': fp_name,
                        'consecutive_failures': alloc.consecutive_failures,
                        'remaining_energy': alloc.remaining
                    }

            # Budget fully spent without success -> depleted.
            if alloc.remaining <= 0:
                alloc.state = EnergyState.DEPLETED
                logger.warning(f"Target depleted: {fp_name}")

                return {
                    'status': 'depleted',
                    'function_point': fp_name,
                    'total_attempts': alloc.total_attempts
                }

            # Plain failure: target stays ACTIVE with reduced energy.
            return {
                'status': 'failed',
                'function_point': fp_name,
                'consecutive_failures': alloc.consecutive_failures,
                'remaining_energy': alloc.remaining
            }

    def get_statistics(self) -> Dict[str, Any]:
        """Aggregate attempt counts, success rate, and per-target energy use."""
        total = len(self.history)
        successful = sum(1 for r in self.history if r.success)

        # Fold the attempt history into per-function-point totals.
        energy_by_fp = {}
        for result in self.history:
            fp = result.function_point
            if fp not in energy_by_fp:
                energy_by_fp[fp] = {'consumed': 0, 'attempts': 0, 'success': False}
            energy_by_fp[fp]['consumed'] += result.energy_cost
            energy_by_fp[fp]['attempts'] += 1
            if result.success:
                energy_by_fp[fp]['success'] = True

        return {
            'total_attempts': total,
            'successful_attempts': successful,
            'success_rate': successful / total if total > 0 else 0,
            'energy_by_function_point': energy_by_fp
        }
|
||||
|
||||
|
||||
# ============================================================================
|
||||
# 能量重分配器
|
||||
# ============================================================================
|
||||
|
||||
class EnergyRedistributor:
    """Recycle leftover energy from finished targets into active ones."""

    def __init__(self, allocations: Dict[str, EnergyAllocation]):
        """
        Args:
            allocations: shared mapping of name -> EnergyAllocation
                (mutated in place).
        """
        self.allocations = allocations

    def redistribute(self, completed_fp: str) -> Dict[str, float]:
        """Move a completed target's remaining energy to active targets.

        The recovered energy is split among ACTIVE targets in proportion
        to their importance (even split when total importance is zero).

        Args:
            completed_fp: name of the function point that finished.

        Returns:
            Mapping of target name -> energy gained; empty when there is
            nothing to recover or no active target to receive it.
        """
        if completed_fp not in self.allocations:
            return {}

        completed_alloc = self.allocations[completed_fp]

        # Whatever the finished target did not spend is recyclable.
        recovered_energy = completed_alloc.remaining

        if recovered_energy <= 0:
            logger.debug(f"No remaining energy to recover from {completed_fp}")
            return {}

        # Active, not-yet-finished targets are the recipients.
        active_targets = [
            alloc for alloc in self.allocations.values()
            if alloc.state == EnergyState.ACTIVE and alloc.function_point != completed_fp
        ]

        if not active_targets:
            logger.info(f"No active targets to redistribute energy to.")
            return {}

        # Split proportionally to importance.
        total_importance = sum(a.importance for a in active_targets)
        redistribution = {}

        for alloc in active_targets:
            if total_importance > 0:
                gain = (alloc.importance / total_importance) * recovered_energy
            else:
                gain = recovered_energy / len(active_targets)

            # Grow both the lifetime budget and the spendable remainder.
            alloc.allocated += gain
            alloc.remaining += gain
            redistribution[alloc.function_point] = gain

        # The source target keeps nothing.
        completed_alloc.remaining = 0

        logger.info(f"Redistributed {recovered_energy:.1f} energy from {completed_fp} "
                    f"to {len(redistribution)} targets")

        return redistribution

    def redistribute_all(self) -> Dict[str, Dict[str, float]]:
        """Recycle leftovers from every COMPLETED/SUSPENDED target.

        Returns:
            Mapping of source target name -> its redistribution detail.
        """
        all_redistributions = {}

        # Collect every target with recoverable energy.
        completed_fps = [
            name for name, alloc in self.allocations.items()
            if alloc.state in [EnergyState.COMPLETED, EnergyState.SUSPENDED]
            and alloc.remaining > 0
        ]

        for fp in completed_fps:
            redistribution = self.redistribute(fp)
            if redistribution:
                all_redistributions[fp] = redistribution

        return all_redistributions

    def revive_suspended(self, min_energy: float = 1.0) -> List[str]:
        """Reactivate suspended targets while recovered energy lasts.

        Args:
            min_energy: energy granted to (and required for) each revival.

        Returns:
            Names of the targets that were revived.
        """
        revived = []

        # Energy still parked on completed targets funds the revivals.
        # NOTE(review): this fund is computed but the completed targets'
        # `remaining` is not decremented here — confirm whether a
        # redistribute()/cleanup pass is expected to follow.
        available_energy = sum(
            alloc.remaining for alloc in self.allocations.values()
            if alloc.state == EnergyState.COMPLETED and alloc.remaining > 0
        )

        # Candidates: every currently suspended target.
        suspended = [
            alloc for alloc in self.allocations.values()
            if alloc.state == EnergyState.SUSPENDED
        ]

        for alloc in suspended:
            if available_energy >= min_energy:
                # Reset the target with a fresh minimal budget.
                alloc.state = EnergyState.ACTIVE
                alloc.remaining = min_energy
                alloc.allocated += min_energy
                alloc.consecutive_failures = 0
                available_energy -= min_energy
                revived.append(alloc.function_point)

                logger.info(f"Revived suspended target: {alloc.function_point}")

        return revived
|
||||
|
||||
|
||||
# ============================================================================
|
||||
# 能量分配器(主入口)
|
||||
# ============================================================================
|
||||
|
||||
class EnergyAllocator:
    """Layer-4 entry point: unified adaptive energy management.

    Wires together the initializer, target selector, consumption tracker,
    and redistributor, exposing one interface to the generation loop:
    initialize -> select_next_target -> record_generation -> report.
    """

    def __init__(self,
                 max_iterations: int = 5,
                 total_energy: float = None):
        """
        Args:
            max_iterations: maximum number of generation iterations.
            total_energy: total energy budget (defaults to max_iterations).
        """
        self.max_iterations = max_iterations
        self.total_energy = total_energy or float(max_iterations)

        # Sub-modules; selector/tracker/redistributor are created in
        # initialize() once the allocation table exists.
        self.initializer = EnergyInitializer(total_energy=self.total_energy)
        self.allocations: Dict[str, EnergyAllocation] = {}
        self.selector: Optional[TargetSelector] = None
        self.tracker: Optional[EnergyConsumptionTracker] = None
        self.redistributor: Optional[EnergyRedistributor] = None

        # Runtime state.
        self.initialized = False
        self.current_target: Optional[EnergyAllocation] = None

    def initialize(self, function_points: List[Dict]) -> Dict[str, Any]:
        """Build allocations and wire up the sub-modules.

        Args:
            function_points: list of function-point dicts (see
                EnergyInitializer.initialize for the expected keys).

        Returns:
            Summary dict with the total energy, target count, and
            per-target allocation details.
        """
        self.allocations = self.initializer.initialize(
            function_points,
            max_iterations=self.max_iterations
        )

        # All sub-modules share (and mutate) the same allocation table.
        self.selector = TargetSelector(self.allocations)
        self.tracker = EnergyConsumptionTracker(self.allocations)
        self.redistributor = EnergyRedistributor(self.allocations)
        self.initialized = True

        return {
            'total_energy': self.total_energy,
            'targets': len(self.allocations),
            'allocation_details': {
                name: {
                    'importance': alloc.importance,
                    'allocated': alloc.allocated,
                    'state': alloc.state.value
                }
                for name, alloc in self.allocations.items()
            }
        }

    def select_next_target(self) -> Optional[str]:
        """Select and remember the next generation target.

        Returns:
            Target function-point name, or None when nothing is selectable
            (or the allocator was never initialized).
        """
        if not self.initialized:
            logger.warning("Energy allocator not initialized.")
            return None

        self.current_target = self.selector.select_next_target()
        return self.current_target.function_point if self.current_target else None

    def record_generation(self,
                          success: bool,
                          coverage_delta: float = 0.0,
                          energy_cost: float = 1.0,
                          quality_score: float = 0.0) -> Dict[str, Any]:
        """Record one generation attempt against the current target.

        Args:
            success: whether the current target was covered.
            coverage_delta: coverage change produced.
            energy_cost: energy charged for the attempt.
            quality_score: code quality score.

        Returns:
            The tracker's status dict, or an error dict when no target
            is currently selected.
        """
        if not self.current_target:
            return {'status': 'error', 'message': 'No current target'}

        result = GenerationResult(
            function_point=self.current_target.function_point,
            success=success,
            coverage_delta=coverage_delta,
            energy_cost=energy_cost,
            quality_score=quality_score
        )

        update_result = self.tracker.record_generation(result)

        # On success, recycle the finished target's leftover energy.
        if success:
            self.redistributor.redistribute(self.current_target.function_point)

        return update_result

    def get_status(self) -> Dict[str, Any]:
        """Return a snapshot of the allocator's current state."""
        if not self.initialized:
            return {'initialized': False}

        active_count = sum(1 for a in self.allocations.values()
                           if a.state == EnergyState.ACTIVE and a.remaining > 0)
        completed_count = sum(1 for a in self.allocations.values()
                              if a.state == EnergyState.COMPLETED)

        return {
            'initialized': True,
            'total_energy': self.total_energy,
            'total_targets': len(self.allocations),
            'active_targets': active_count,
            'completed_targets': completed_count,
            'current_target': self.current_target.function_point if self.current_target else None,
            'statistics': self.tracker.get_statistics() if self.tracker else None
        }

    def get_target_context(self, target_name: str = None) -> str:
        """Build a prompt-ready context block for a target function point.

        Args:
            target_name: target name; defaults to the current target.

        Returns:
            Multi-line context string; empty when the target is unknown.
        """
        if not target_name and self.current_target:
            target_name = self.current_target.function_point

        if not target_name or target_name not in self.allocations:
            return ""

        alloc = self.allocations[target_name]

        context = []
        context.append(f"[TARGET: {target_name}]")
        context.append(f"Importance: {alloc.importance:.2f}")
        context.append(f"Remaining Energy: {alloc.remaining:.1f} / {alloc.allocated:.1f}")
        context.append(f"Previous Attempts: {alloc.total_attempts}")

        # Nudge the generator toward a new approach after failures.
        if alloc.consecutive_failures > 0:
            context.append(f"Warning: {alloc.consecutive_failures} consecutive failures")
            context.append("Consider a different approach or sequence")

        return "\n".join(context)

    def mark_targets_completed(self, function_names: List[str]) -> Dict[str, str]:
        """Directly mark confirmed-covered function points as COMPLETED.

        Used for baseline synchronization, or when one iteration covers
        several function points at once, so completion does not depend
        solely on the current target's score-gain signal.

        Args:
            function_names: names of function points confirmed covered.

        Returns:
            Mapping of name -> 'completed' / 'already_completed'
            (unknown names are skipped).
        """
        if not self.initialized:
            return {}

        updates = {}
        for name in function_names:
            if name not in self.allocations:
                continue

            alloc = self.allocations[name]
            if alloc.state == EnergyState.COMPLETED:
                updates[name] = "already_completed"
                continue

            # Mark done and recycle whatever budget it had left.
            alloc.state = EnergyState.COMPLETED
            alloc.consecutive_failures = 0
            alloc.remaining = 0.0
            updates[name] = "completed"
            self.redistributor.redistribute(name)

        return updates

    def generate_report(self) -> str:
        """Render a human-readable energy-allocation report."""
        if not self.initialized:
            return "Energy allocator not initialized."

        lines = []
        lines.append("=" * 60)
        lines.append("ENERGY ALLOCATION REPORT")
        lines.append("=" * 60)
        lines.append(f"Total Energy: {self.total_energy:.1f}")
        lines.append(f"Max Iterations: {self.max_iterations}")
        lines.append("")

        lines.append("FUNCTION POINT STATUS:")
        lines.append("-" * 60)

        # Most important targets first.
        for name, alloc in sorted(self.allocations.items(),
                                  key=lambda x: x[1].importance, reverse=True):
            status_icon = {
                EnergyState.ACTIVE: "🔄",
                EnergyState.COMPLETED: "✅",
                EnergyState.DEPLETED: "❌",
                EnergyState.SUSPENDED: "⏸️"
            }.get(alloc.state, "❓")

            efficiency = (alloc.successful_attempts / alloc.total_attempts * 100
                          if alloc.total_attempts > 0 else 0)

            lines.append(f"{status_icon} {name}")
            lines.append(f"   Importance: {alloc.importance:.2f} | "
                         f"Energy: {alloc.remaining:.1f}/{alloc.allocated:.1f} | "
                         f"Efficiency: {efficiency:.0f}%")
            lines.append(f"   Attempts: {alloc.total_attempts} | "
                         f"Consecutive Failures: {alloc.consecutive_failures}")

        lines.append("")
        lines.append("SUMMARY:")
        lines.append("-" * 60)
        stats = self.tracker.get_statistics()
        lines.append(f"Total Attempts: {stats['total_attempts']}")
        lines.append(f"Successful: {stats['successful_attempts']}")
        lines.append(f"Success Rate: {stats['success_rate']*100:.1f}%")

        completed = sum(1 for a in self.allocations.values()
                        if a.state == EnergyState.COMPLETED)
        lines.append(f"Targets Covered: {completed} / {len(self.allocations)}")

        lines.append("=" * 60)

        return "\n".join(lines)
|
||||
|
||||
|
||||
# ============================================================================
|
||||
# 便捷函数
|
||||
# ============================================================================
|
||||
|
||||
def create_energy_allocator(function_points: List[Dict],
                            max_iterations: int = 5) -> EnergyAllocator:
    """Convenience constructor: build and initialize an EnergyAllocator.

    Args:
        function_points: list of function-point dicts.
        max_iterations: maximum number of generation iterations.

    Returns:
        A fully initialized energy allocator.
    """
    energy_allocator = EnergyAllocator(max_iterations=max_iterations)
    energy_allocator.initialize(function_points)
    return energy_allocator
|
||||
1039
autoline/quality_evaluator.py
Normal file
1039
autoline/quality_evaluator.py
Normal file
File diff suppressed because it is too large
Load Diff
1029
autoline/semantic_analyzer.py
Normal file
1029
autoline/semantic_analyzer.py
Normal file
File diff suppressed because it is too large
Load Diff
580
autoline/test_history.py
Normal file
580
autoline/test_history.py
Normal file
@@ -0,0 +1,580 @@
|
||||
"""
|
||||
Description : Test History Manager (Layer 1 Support Module)
|
||||
- Store and manage test case history
|
||||
- Support sequence pattern analysis
|
||||
- Provide diversity statistics
|
||||
Author : CGA Enhancement Project
|
||||
Time : 2026/03/16
|
||||
"""
|
||||
|
||||
import json
|
||||
import os
|
||||
import logging
|
||||
from typing import List, Dict, Optional, Any, Tuple, Set
|
||||
from dataclasses import dataclass, field, asdict
|
||||
from datetime import datetime
|
||||
from collections import defaultdict
|
||||
import hashlib
|
||||
import re
|
||||
|
||||
logger = logging.getLogger(__name__)
|
||||
|
||||
|
||||
# ============================================================================
|
||||
# 数据结构定义
|
||||
# ============================================================================
|
||||
|
||||
@dataclass
class InputSequence:
    """A recorded assignment sequence for one input signal.

    Attributes:
        signal_name: name of the signal.
        values: ordered (time, value) assignment pairs.
    """
    signal_name: str
    values: List[Tuple[int, Any]] = field(default_factory=list)

    def to_pattern_string(self) -> str:
        """Render the value sequence as 'v1->v2->...' (timestamps dropped)."""
        rendered = [str(value) for _, value in self.values]
        return "->".join(rendered)

    def get_hash(self) -> str:
        """Short stable fingerprint: first 8 hex chars of the pattern's MD5."""
        digest = hashlib.md5(self.to_pattern_string().encode())
        return digest.hexdigest()[:8]
|
||||
|
||||
|
||||
@dataclass
class TestRecord:
    """One generated test case plus its measured outcome."""
    test_id: str                                                # unique test identifier
    code: str = ""                                              # generated test code
    input_sequences: List[InputSequence] = field(default_factory=list)  # extracted stimulus per signal
    target_function: str = ""                                   # function point this test targeted
    covered_lines: List[int] = field(default_factory=list)      # code lines covered
    covered_functions: List[str] = field(default_factory=list)  # function points covered
    coverage_score: float = 0.0                                 # achieved coverage score
    diversity_scores: Dict[str, float] = field(default_factory=dict)  # named diversity scores
    iteration: int = 0                                          # iteration that produced the test
    timestamp: str = field(default_factory=lambda: datetime.now().isoformat())  # creation time (ISO 8601)
    success: bool = False                                       # whether the test met its goal

    def get_sequence_patterns(self) -> Dict[str, str]:
        """Map each stimulus signal name to its 'v1->v2->...' pattern string."""
        return {seq.signal_name: seq.to_pattern_string() for seq in self.input_sequences}
|
||||
|
||||
|
||||
@dataclass
class SequencePattern:
    """Aggregated usage statistics for one input-sequence pattern.

    Attributes:
        pattern: the pattern string.
        count: number of times the pattern has been seen.
        signal_name: signal the pattern belongs to.
        test_ids: IDs of the tests that used this pattern.
    """
    pattern: str
    count: int = 0
    signal_name: str = ""
    test_ids: List[str] = field(default_factory=list)

    def is_overused(self, threshold: int = 3) -> bool:
        """Whether the pattern has been used at least `threshold` times."""
        overused = self.count >= threshold
        return overused
|
||||
|
||||
|
||||
# ============================================================================
|
||||
# 序列提取器
|
||||
# ============================================================================
|
||||
|
||||
class SequenceExtractor:
    """Extract per-signal input assignment sequences from Verilog testbench code."""

    # Assignment statement patterns.
    # BUGFIX: a third pattern, r'(\w+)\s*=\s*(\d+)\s*;', used to follow these
    # two. It was a strict subset of the blocking-assignment pattern (whose
    # value alternation already includes \d+), so every plain decimal
    # blocking assignment matched twice and was appended to the sequence
    # twice (and advanced current_time twice). The redundant pattern has
    # been removed.
    ASSIGNMENT_PATTERNS = [
        # Blocking assignment: signal = value;
        r'(\w+)\s*=\s*([0-9]+\'[bdh][0-9a-fA-FxXzZ_]+|\d+|x|z)\s*;',
        # Non-blocking assignment: signal <= value;
        r'(\w+)\s*<=\s*([0-9]+\'[bdh][0-9a-fA-FxXzZ_]+|\d+|x|z)\s*;',
    ]

    # Delay statement: #N;
    DELAY_PATTERN = r'#\s*(\d+)\s*;'

    # Clock-cycle wait: repeat (N) @(posedge clk)
    CLOCK_WAIT_PATTERN = r'repeat\s*\(\s*(\d+)\s*\)\s*@\s*\(\s*posedge\s+(\w+)\s*\)'

    def __init__(self):
        # When non-empty, only these signal names are recorded.
        self.known_signals: Set[str] = set()

    def set_known_signals(self, signals: List[str]):
        """Restrict extraction to the given signal names."""
        self.known_signals = set(signals)

    def extract(self, code: str) -> List[InputSequence]:
        """Parse the code line by line and collect assignment sequences.

        Args:
            code: Verilog testbench source.

        Returns:
            One InputSequence per assigned signal, each holding ordered
            (time, value) pairs.
        """
        sequences = {}
        current_time = 0

        for line in code.split('\n'):
            line = line.strip()

            # Skip blank lines and line comments.
            if not line or line.startswith('//'):
                continue

            # `#N;` advances simulated time.
            delay_match = re.search(self.DELAY_PATTERN, line)
            if delay_match:
                current_time += int(delay_match.group(1))
                continue

            # `repeat (N) @(posedge clk)` — assume 10 time units per cycle.
            clock_match = re.search(self.CLOCK_WAIT_PATTERN, line, re.IGNORECASE)
            if clock_match:
                cycles = int(clock_match.group(1))
                current_time += cycles * 10
                continue

            # Record every assignment on the line.
            for pattern in self.ASSIGNMENT_PATTERNS:
                for match in re.finditer(pattern, line, re.IGNORECASE):
                    signal = match.group(1)
                    value = match.group(2)

                    # Ignore signals outside the whitelist (when one is set).
                    if self.known_signals and signal not in self.known_signals:
                        continue

                    # Skip obvious loop counters / temporaries.
                    if signal.lower() in ['i', 'j', 'k', 'cnt', 'count', 'temp']:
                        continue

                    if signal not in sequences:
                        sequences[signal] = InputSequence(signal_name=signal)

                    sequences[signal].values.append((current_time, value))
                    current_time += 1  # each assignment consumes one time unit

        return list(sequences.values())
|
||||
|
||||
|
||||
# ============================================================================
|
||||
# 测试历史管理器
|
||||
# ============================================================================
|
||||
|
||||
class TestHistoryManager:
    """
    Test history manager.

    Keeps a record of every generated test case and supports:
      - storing / retrieving test records
      - statistics over input-sequence patterns
      - diversity analysis of new tests against the history
    """

    # Edit-distance comparisons are capped at this many characters per code
    # string to keep the O(n*m) DP affordable.
    _DISTANCE_CAP = 500

    def __init__(self, history_file: str = None):
        """
        Args:
            history_file: optional path to a JSON history file; if it exists
                it is loaded immediately.
        """
        # history_file must be stored first: save() falls back to this path.
        self.history_file = history_file

        self.records: List[TestRecord] = []
        self.patterns: Dict[str, SequencePattern] = {}  # pattern_hash -> SequencePattern
        self.signal_patterns: Dict[str, List[str]] = defaultdict(list)  # signal_name -> [pattern_hashes]
        self.sequence_extractor = SequenceExtractor()

        # Aggregate statistics. 'avg_coverage' is pre-seeded here: the
        # original only created the key lazily in _update_stats(), so
        # get_statistics() exposed an inconsistent key set before the first
        # record was added.
        self.stats = {
            'total_tests': 0,
            'successful_tests': 0,
            'total_coverage': 0.0,
            'avg_coverage': 0.0,
            'avg_diversity': 0.0
        }

        if history_file and os.path.exists(history_file):
            self.load(history_file)

    # ==================== record management ====================

    def add_record(self,
                   code: str,
                   test_id: str = None,
                   target_function: str = "",
                   covered_lines: List[int] = None,
                   covered_functions: List[str] = None,
                   coverage_score: float = 0.0,
                   iteration: int = 0,
                   success: bool = False,
                   known_signals: List[str] = None) -> TestRecord:
        """
        Add a test record.

        Args:
            code: testbench code.
            test_id: record id (auto-generated when omitted).
            target_function: the functional point this test targets.
            covered_lines: covered code line numbers.
            covered_functions: covered functional points.
            coverage_score: coverage score of this test.
            iteration: generation iteration the test came from.
            success: whether the test succeeded.
            known_signals: restrict sequence extraction to these signals.

        Returns:
            The newly created TestRecord.
        """
        if test_id is None:
            test_id = f"test_{len(self.records)}_{datetime.now().strftime('%H%M%S')}"

        # extract the input stimulus sequences from the code
        if known_signals:
            self.sequence_extractor.set_known_signals(known_signals)
        input_sequences = self.sequence_extractor.extract(code)

        record = TestRecord(
            test_id=test_id,
            code=code,
            input_sequences=input_sequences,
            target_function=target_function,
            covered_lines=covered_lines or [],
            covered_functions=covered_functions or [],
            coverage_score=coverage_score,
            iteration=iteration,
            success=success
        )
        self.records.append(record)

        # keep pattern and aggregate statistics in sync with the new record
        self._update_patterns(record)
        self._update_stats()

        logger.debug(f"Added test record: {test_id}, sequences: {len(input_sequences)}")
        return record

    def get_record(self, test_id: str) -> Optional[TestRecord]:
        """Return the record with the given id, or None."""
        for record in self.records:
            if record.test_id == test_id:
                return record
        return None

    def get_recent_records(self, n: int = 10) -> List[TestRecord]:
        """Return (up to) the n most recent records.

        Always returns a fresh list: the original returned the internal
        record list itself when fewer than n records existed, leaking
        mutable internal state to callers.
        """
        return self.records[-n:]

    def get_successful_records(self) -> List[TestRecord]:
        """Return all records marked successful."""
        return [r for r in self.records if r.success]

    # ==================== pattern analysis ====================

    def _update_patterns(self, record: TestRecord):
        """Fold a record's input sequences into the pattern statistics."""
        for seq in record.input_sequences:
            pattern_str = seq.to_pattern_string()
            pattern_hash = seq.get_hash()

            if pattern_hash not in self.patterns:
                self.patterns[pattern_hash] = SequencePattern(
                    pattern=pattern_str,
                    count=1,
                    signal_name=seq.signal_name,
                    test_ids=[record.test_id]
                )
            else:
                self.patterns[pattern_hash].count += 1
                self.patterns[pattern_hash].test_ids.append(record.test_id)

            # secondary index: signal name -> pattern hashes
            if pattern_hash not in self.signal_patterns[seq.signal_name]:
                self.signal_patterns[seq.signal_name].append(pattern_hash)

    def get_overused_patterns(self, threshold: int = 3) -> List[SequencePattern]:
        """
        Patterns used at least `threshold` times.

        Args:
            threshold: overuse threshold.

        Returns:
            The overused patterns (candidates for diversification).
        """
        return [p for p in self.patterns.values() if p.is_overused(threshold)]

    def get_common_patterns(self, top_n: int = 5) -> List[Tuple[str, int]]:
        """
        The `top_n` most frequent patterns.

        Returns:
            [(pattern, count), ...] ordered by descending count.
        """
        ranked = sorted(self.patterns.values(), key=lambda p: p.count, reverse=True)
        return [(p.pattern, p.count) for p in ranked[:top_n]]

    def get_pattern_for_signal(self, signal_name: str) -> List[SequencePattern]:
        """All recorded patterns for one signal."""
        pattern_hashes = self.signal_patterns.get(signal_name, [])
        return [self.patterns[h] for h in pattern_hashes if h in self.patterns]

    # ==================== diversity analysis ====================

    def calculate_sequence_diversity(self, new_sequences: List[InputSequence]) -> float:
        """
        Diversity of `new_sequences` relative to the recorded history.

        Args:
            new_sequences: candidate input sequences.

        Returns:
            Fraction (0.0-1.0) of the new sequence patterns never seen
            before; 1.0 when there is no history at all.
        """
        if not self.records:
            return 1.0  # nothing to compare against: maximally diverse
        if not new_sequences:
            return 0.0  # no stimulus at all: zero diversity

        new_patterns = {seq.get_hash() for seq in new_sequences}
        total_patterns = len(new_patterns)
        if total_patterns == 0:
            return 0.0

        unseen = sum(1 for h in new_patterns if h not in self.patterns)
        return unseen / total_patterns

    def calculate_edit_distance_diversity(self, new_code: str) -> float:
        """
        Diversity of `new_code` versus the 5 most recent codes, as the
        minimum normalized edit distance (0.0 = identical to a recent test).
        """
        if not self.records:
            return 1.0

        recent_records = self.get_recent_records(5)

        min_distance = float('inf')
        for record in recent_records:
            distance = self._levenshtein_distance(new_code, record.code)
            min_distance = min(min_distance, distance)

        # Normalize by the longest *compared* length. The distance is
        # computed on strings truncated to _DISTANCE_CAP characters, so the
        # normalizer must be capped as well (the original divided the
        # truncated distance by the full lengths, deflating the score for
        # long codes).
        max_len = max(len(new_code), max(len(r.code) for r in recent_records))
        max_len = min(max_len, self._DISTANCE_CAP)
        if max_len == 0:
            return 0.0
        return min_distance / max_len

    def _levenshtein_distance(self, s1: str, s2: str) -> int:
        """Levenshtein distance on at most the first _DISTANCE_CAP chars."""
        if len(s1) < len(s2):
            return self._levenshtein_distance(s2, s1)
        if len(s2) == 0:
            return len(s1)

        # cap the inputs to keep the O(n*m) DP cheap
        s1 = s1[:self._DISTANCE_CAP]
        s2 = s2[:self._DISTANCE_CAP]

        previous_row = range(len(s2) + 1)
        for i, c1 in enumerate(s1):
            current_row = [i + 1]
            for j, c2 in enumerate(s2):
                insertions = previous_row[j + 1] + 1
                deletions = current_row[j] + 1
                substitutions = previous_row[j] + (c1 != c2)
                current_row.append(min(insertions, deletions, substitutions))
            previous_row = current_row
        return previous_row[-1]

    # ==================== statistics ====================

    def _update_stats(self):
        """Recompute the aggregate statistics from the record list."""
        self.stats['total_tests'] = len(self.records)
        self.stats['successful_tests'] = sum(1 for r in self.records if r.success)
        if self.records:
            self.stats['total_coverage'] = sum(r.coverage_score for r in self.records)
            self.stats['avg_coverage'] = self.stats['total_coverage'] / len(self.records)

    def get_statistics(self) -> Dict[str, Any]:
        """Aggregate stats plus pattern/signal counts."""
        return {
            **self.stats,
            'total_patterns': len(self.patterns),
            'overused_patterns': len(self.get_overused_patterns()),
            'unique_signals': len(self.signal_patterns)
        }

    def get_diversity_report(self) -> str:
        """Render a human-readable diversity report."""
        lines = []
        lines.append("=" * 50)
        lines.append("TEST HISTORY DIVERSITY REPORT")
        lines.append("=" * 50)
        lines.append(f"Total Tests: {self.stats['total_tests']}")
        lines.append(f"Successful Tests: {self.stats['successful_tests']}")
        lines.append(f"Total Patterns: {len(self.patterns)}")
        lines.append("")

        lines.append("TOP 5 COMMON PATTERNS:")
        for i, (pattern, count) in enumerate(self.get_common_patterns(5), 1):
            lines.append(f"  {i}. {pattern[:40]}... (x{count})")

        overused = self.get_overused_patterns()
        if overused:
            lines.append("")
            lines.append("OVERUSED PATTERNS (need diversification):")
            for p in overused[:5]:
                lines.append(f"  - {p.signal_name}: {p.pattern[:30]}... (used {p.count} times)")

        lines.append("=" * 50)
        return "\n".join(lines)

    # ==================== persistence ====================

    def save(self, filepath: str = None):
        """Persist the history as JSON to `filepath` or self.history_file."""
        filepath = filepath or self.history_file
        if not filepath:
            return  # nowhere to save to: silently skip (best-effort behavior)

        # Build a plain-JSON structure by hand: records hold objects that
        # json cannot serialize directly.
        records_data = []
        for r in self.records:
            record_dict = {
                'test_id': r.test_id,
                'code': r.code,
                'input_sequences': [
                    {'signal_name': seq.signal_name, 'values': seq.values}
                    for seq in r.input_sequences
                ],
                'target_function': r.target_function,
                'covered_lines': r.covered_lines,
                'covered_functions': r.covered_functions,
                'coverage_score': r.coverage_score,
                'diversity_scores': r.diversity_scores,
                'iteration': r.iteration,
                'timestamp': r.timestamp,
                'success': r.success
            }
            records_data.append(record_dict)

        data = {
            'records': records_data,
            'stats': self.stats
        }
        with open(filepath, 'w', encoding='utf-8') as f:
            json.dump(data, f, indent=2, ensure_ascii=False)

        logger.info(f"Test history saved to {filepath}")

    def load(self, filepath: str):
        """Load history from a JSON file, replacing the current state."""
        if not os.path.exists(filepath):
            return

        with open(filepath, 'r', encoding='utf-8') as f:
            data = json.load(f)

        # Reset derived state before re-populating. The original appended
        # into the existing pattern tables, so loading on top of a non-empty
        # manager (or calling load twice) double-counted every pattern.
        self.records = []
        self.patterns = {}
        self.signal_patterns = defaultdict(list)

        for r in data.get('records', []):
            sequences = []
            for s in r.get('input_sequences', []):
                seq = InputSequence(**s)
                # JSON turns the (time, value) tuples into lists; restore the
                # in-memory tuple convention so pattern hashes stay stable
                # across a save/load round-trip.
                seq.values = [tuple(v) for v in seq.values]
                sequences.append(seq)
            record = TestRecord(
                test_id=r['test_id'],
                code=r['code'],
                input_sequences=sequences,
                target_function=r.get('target_function', ''),
                covered_lines=r.get('covered_lines', []),
                covered_functions=r.get('covered_functions', []),
                coverage_score=r.get('coverage_score', 0.0),
                iteration=r.get('iteration', 0),
                timestamp=r.get('timestamp', ''),
                success=r.get('success', False)
            )
            self.records.append(record)
            self._update_patterns(record)

        self.stats = data.get('stats', self.stats)
        logger.info(f"Loaded {len(self.records)} test records from {filepath}")
|
||||
|
||||
|
||||
# ============================================================================
|
||||
# 便捷函数
|
||||
# ============================================================================
|
||||
|
||||
def create_test_history(history_file: str = None) -> TestHistoryManager:
    """Convenience factory: build a TestHistoryManager for `history_file`."""
    manager = TestHistoryManager(history_file=history_file)
    return manager
|
||||
8
config/__init__.py
Normal file
8
config/__init__.py
Normal file
@@ -0,0 +1,8 @@
|
||||
"""
|
||||
Description : description
|
||||
Author : Ruidi Qiu (r.qiu@tum.de)
|
||||
Time : 2024/7/3 18:17:10
|
||||
LastEdited : 2024/7/3 23:25:11
|
||||
"""
|
||||
|
||||
from .config import *
|
||||
BIN
config/__pycache__/__init__.cpython-312.pyc
Normal file
BIN
config/__pycache__/__init__.cpython-312.pyc
Normal file
Binary file not shown.
BIN
config/__pycache__/config.cpython-312.pyc
Normal file
BIN
config/__pycache__/config.cpython-312.pyc
Normal file
Binary file not shown.
330
config/config.py
Normal file
330
config/config.py
Normal file
@@ -0,0 +1,330 @@
|
||||
"""
|
||||
Description : This is the config module of the project. This file is copied and modified from OplixNet project.
|
||||
Author : Ruidi Qiu (ruidi.qiu@tum.de)
|
||||
Time : 2023/11/28 11:20:33
|
||||
LastEdited : 2024/9/16 09:39:02
|
||||
"""
|
||||
from datetime import datetime
|
||||
import yaml
|
||||
import json
|
||||
import os
|
||||
import socket
|
||||
import sys
|
||||
import utils.utils as utils
|
||||
sys.path.append("..")
|
||||
|
||||
CONFIG_FORMAT = "yaml" # "yaml" or "json"
|
||||
|
||||
# path:
|
||||
DIR_PATH = 'config' # to ONN
|
||||
CFG_DEF_NAME = 'default' + "." + CONFIG_FORMAT
|
||||
CFG_DEF_PATH = '%s/%s' % (DIR_PATH, CFG_DEF_NAME)
|
||||
CFG_CUS_NAME = 'custom' + "." + CONFIG_FORMAT
|
||||
CFG_CUS_PATH = '%s/%s' % (DIR_PATH, CFG_CUS_NAME)
|
||||
|
||||
# discarded on 2024/2/1 23:16:25
|
||||
# GPT_MODELS = {
|
||||
# "3.5" : "gpt-3.5-turbo-1106",
|
||||
# "4" : "gpt-4-1106-preview"
|
||||
# }
|
||||
|
||||
GPT_MODELS = {
|
||||
"4o" : "gpt-4o-2024-08-06",
|
||||
"4omini" : "gpt-4o-mini-2024-07-18",
|
||||
"4t" : "gpt-4-turbo-2024-04-09",
|
||||
"3.5" : "gpt-3.5-turbo-0125",
|
||||
"4" : "gpt-4-0125-preview",
|
||||
"3.5old" : "gpt-3.5-turbo-1106",
|
||||
"4old" : "gpt-4-1106-preview",
|
||||
"o1": "o1-preview-2024-09-12",
|
||||
"o1mini": "o1-mini-2024-09-12"
|
||||
}
|
||||
|
||||
CLAUDE_MODELS = {
|
||||
"sonnet3.5": "claude-3-5-sonnet-20240620",
|
||||
"3.5sonnet": "claude-3-5-sonnet-20240620",
|
||||
"claude3.5sonnet": "claude-3-5-sonnet-20240620",
|
||||
"claude3.5": "claude-3-5-sonnet-20240620",
|
||||
|
||||
"opus":"claude-3-opus-20240229",
|
||||
"sonnet": "claude-3-sonnet-20240229",
|
||||
"haiku": "claude-3-haiku-20240307",
|
||||
"claude3_opus":"claude-3-opus-20240229",
|
||||
"claude3_sonnet": "claude-3-sonnet-20240229",
|
||||
"claude3_haiku": "claude-3-haiku-20240307",
|
||||
|
||||
"claude2.1": "claude-2.1",
|
||||
"claude2.0": "claude-2.0",
|
||||
"claude2": "claude-2.0"
|
||||
}
|
||||
|
||||
LLM_MODEL_REDIRECTION = {
|
||||
'4omini' : GPT_MODELS["4omini"],
|
||||
'4o' : GPT_MODELS["4o"],
|
||||
'4t' : GPT_MODELS["4t"],
|
||||
'3.5' : GPT_MODELS["3.5"],
|
||||
3.5 : GPT_MODELS["3.5"],
|
||||
'4' : GPT_MODELS["4"],
|
||||
'4.0' : GPT_MODELS["4"],
|
||||
4 : GPT_MODELS["4"],
|
||||
"3.5old" : GPT_MODELS["3.5old"],
|
||||
"4old" : GPT_MODELS["4old"],
|
||||
"o1": GPT_MODELS["o1"],
|
||||
"o1mini": GPT_MODELS["o1mini"]
|
||||
}
|
||||
|
||||
LLM_MODEL_REDIRECTION = {**LLM_MODEL_REDIRECTION, **CLAUDE_MODELS}
|
||||
WEEKRANGEMARK = "$weekrange$"
|
||||
|
||||
######################################## utils ###########################################
|
||||
def str2bool(v):
    """Coerce a bool, or a yes/no-style string, to a bool.

    Raises:
        TypeError: if the value is not a recognised boolean spelling.
    """
    if isinstance(v, bool):
        return v
    normalized = v.lower()
    if normalized in ('yes', 'true', 't', 'y', '1'):
        return True
    if normalized in ('no', 'false', 'f', 'n', '0'):
        return False
    raise TypeError('Boolean value expected.')
|
||||
|
||||
def get_time():
    """Current local time as a 'YYYYmmdd_HHMMSS' string (used in run ids)."""
    return datetime.now().strftime("%Y%m%d_%H%M%S")
|
||||
|
||||
def get_runinfo():
    """Return (hostname, pid) identifying the current run.

    The original left trailing commas on the intermediate assignments,
    creating throwaway 1-tuples that were immediately unpacked with [0];
    this returns the same two values directly.
    """
    return socket.gethostname(), os.getpid()
|
||||
|
||||
################################ agparser (for future) ###################################
|
||||
################################## load .yaml/.json config #####################################
|
||||
def load_yaml_dict(path: str):
    """Parse a YAML file into a plain dict (safe_load: no arbitrary tags)."""
    with open(path, 'rb') as fh:
        return yaml.safe_load(fh)
|
||||
|
||||
def load_json_dict(path: str):
    """Parse a JSON file into a dict."""
    with open(path, 'r') as fh:
        return json.load(fh)
|
||||
|
||||
def merge_config_dict(old_dict: dict, new_dict):
    """Recursively overlay new_dict (custom config) onto old_dict (defaults).

    Rules:
      - keys present only in the defaults are kept as-is;
      - a key present in the custom config must also exist in the defaults,
        otherwise an exception is raised;
      - dict values are merged recursively; scalar values from the custom
        config win unless they are None, in which case the default survives.
    """
    if new_dict is None:
        return old_dict
    merged = {}
    # A key that only exists in the custom config is a configuration error.
    for key in new_dict.keys():
        if key not in old_dict.keys():
            raise Exception("%s is in custom_config but not in default_config, which is forbidden. Please modify related tree structure or add it to %s"%(key, CFG_DEF_PATH))
    for key, old_val in old_dict.items():
        if key not in new_dict:
            merged[key] = old_val
            continue
        new_val = new_dict[key]
        if isinstance(old_val, dict):
            # both sides must be dicts to recurse
            if not isinstance(new_val, dict):
                raise TypeError("value of %s isn't a dict in custom_config but is a dict in default_config"%(key))
            merged[key] = merge_config_dict(old_val, new_val)
        else:
            if isinstance(new_val, dict):
                raise TypeError("value of %s is a dict in custom_config but isn't a dict in default_config"%(key))
            # None in the custom config means "keep the default"
            merged[key] = old_val if new_val is None else new_val
    return merged
|
||||
|
||||
def load_config_dict(mode='merge', config_old_path = CFG_DEF_PATH, config_new_path = CFG_CUS_PATH, config_format = CONFIG_FORMAT):
    """Load the default and custom config files and combine them.

    mode 'merge' returns the recursively merged dict; mode 'split' returns
    the untouched (custom, default) pair.
    """
    # pick the parser for the configured file format
    loaders = {"yaml": load_yaml_dict, "json": load_json_dict}
    if config_format not in loaders:
        raise Exception("wrong config format input: %s (can only be yaml or json)"%(config_format))
    loader = loaders[config_format]
    config_old = loader(config_old_path)
    config_new = loader(config_new_path)
    if mode == "merge":
        return merge_config_dict(config_old, config_new)
    if mode == "split":
        return config_new, config_old
    raise Exception("wrong mode input: %s"%(mode))
|
||||
|
||||
################################### dict to object ######################################
|
||||
class Dict(dict):
    """A dict whose keys are also reachable as attributes."""
    # Route attribute writes/reads straight to the underlying mapping.
    __setattr__ = dict.__setitem__
    __getattr__ = dict.__getitem__

    def get_copy(self):
        """Deep copy of this tree, returned as a fresh Dict."""
        plain = DictTodict(self)
        return dictToObj(plain)

    def get_item(self, *key_iters, default=None):
        """Walk nested keys; return `default` as soon as one is missing."""
        node = DictTodict(self)
        for key in key_iters:
            if key not in node:
                return default
            node = node[key]
        return node


def dictToObj(dictObj):
    """Convert a plain dict (recursively) into a Dict; rejects non-dicts."""
    if not isinstance(dictObj, dict):
        raise TypeError("this variable is not a instance of 'dict' type")
    converted = Dict()
    for key, value in dictObj.items():
        converted[key] = recur_dictToObj(value)
    return converted


def recur_dictToObj(dictObj):
    """Like dictToObj, but passes non-dict leaves through unchanged."""
    if not isinstance(dictObj, dict):
        return dictObj
    converted = Dict()
    for key, value in dictObj.items():
        converted[key] = recur_dictToObj(value)
    return converted


def DictTodict(Dict_obj):
    """Convert a Dict tree back to plain nested dicts; rejects non-Dicts."""
    if not isinstance(Dict_obj, Dict):
        raise TypeError("this variable is not a instance of 'Dict' type")
    plain = dict()
    for key, value in Dict_obj.items():
        plain[key] = recur_DictTodict(value)
    return plain


def recur_DictTodict(Dict_obj):
    """Like DictTodict, but passes non-Dict leaves through unchanged."""
    if not isinstance(Dict_obj, Dict):
        return Dict_obj
    return {key: recur_DictTodict(value) for key, value in Dict_obj.items()}
|
||||
|
||||
def load_config_obj(custom_config_path):
    """Load the merged config for `custom_config_path`, wrapped as a Dict."""
    merged = load_config_dict(config_new_path = custom_config_path)
    return dictToObj(merged)
|
||||
|
||||
################################# config validation #####################################
|
||||
def config_val(config_obj):
    """Validate a loaded config object.

    Currently a stub that always passes; the commented lines sketch the
    intended shape of future validation.
    """
    # run_mode_val = ['normal', 'postproc', 'para', 'sensprune', 'custom']
    # cvnn_dinmode_val = [] # has val func in ComplexNN
    # assert config_obj.run.mode in run_mode_val, "There is no run mode named %s, only names in %s are valid"%(config_obj.run.mode, str_list(run_mode_val))
    return True
|
||||
|
||||
####################################### get config ######################################
|
||||
class Config(Dict):
    """Process-wide configuration object.

    A singleton Dict: the first Config(path) call loads and post-processes
    the configuration; later constructions return the same, already
    initialized instance.
    """
    # singleton pattern
    _instance = None
    _initialized = False

    def __new__(cls, *args, **kwargs):
        # always hand back the one shared instance
        if not cls._instance:
            cls._instance = super(Config, cls).__new__(cls)
        return cls._instance

    def __init__(self, custom_config_path=None, *args, **kwargs):
        if not self._initialized:
            super(Config, self).__init__(*args, **kwargs)
            if custom_config_path is not None:
                self.load_config(custom_config_path)
            # BUGFIX: latch the flag on the class, not via `self`. Dict
            # overrides __setattr__ with dict.__setitem__, so the original
            # `self._initialized = True` merely created an "_initialized"
            # *item* in the mapping while attribute lookup kept finding the
            # class-level False — the guard never latched and the config
            # (including run.time) was reloaded on every construction.
            Config._initialized = True

    def load_config(self, custom_config_path):
        """Load the merged config from `custom_config_path`, post-process it
        (run metadata, save switches, model alias resolution) and store it
        into this object."""
        time = get_time()
        hostname, pid = get_runinfo()
        config = load_config_obj(custom_config_path)
        # record run metadata on the config itself
        config.run.custom_path = custom_config_path
        config.run.time = time
        config.run.hostname = hostname
        config.run.pid = pid
        config.run.pyversion = sys.version
        # expand the $weekrange$ placeholder in the public save dir
        current_week = utils.get_week_range()
        config.save.pub.dir = config.save.pub.dir.replace(WEEKRANGEMARK, current_week)
        if not config.save.en:
            # saving globally disabled: force every save.*.en switch off
            for attr in DictTodict(config.save).keys():
                sub_config = getattr(config.save, attr)
                if isinstance(sub_config, Dict):
                    try:
                        # hasattr() swallows only AttributeError, while Dict's
                        # __getattr__ is dict.__getitem__ which raises KeyError
                        # — so a sub-config without an "en" key raises here
                        # and we skip it.
                        hasattr(sub_config, "en")
                    except:
                        continue
                    setattr(sub_config, "en", False)
        # resolve model aliases ("4o", "3.5", ...) to concrete model names
        if config.gpt.model in LLM_MODEL_REDIRECTION.keys():
            config.gpt.model = LLM_MODEL_REDIRECTION[config.gpt.model]
        if config.gpt.rtlgen_model is None:
            # default the RTL-generation model to the main model
            config.gpt.rtlgen_model = config.gpt.model
        else:
            if config.gpt.rtlgen_model in LLM_MODEL_REDIRECTION.keys():
                config.gpt.rtlgen_model = LLM_MODEL_REDIRECTION[config.gpt.rtlgen_model]
        # config_val(config)
        self.update(config)
|
||||
|
||||
# def load_config(custom_config_path)->Dict: #str = CFG_CUS_PATH):
|
||||
# time = get_time()
|
||||
# hostname, pid = get_runinfo()
|
||||
# config = load_config_obj(custom_config_path)
|
||||
# config.run.custom_path = custom_config_path
|
||||
# config.run.time = time
|
||||
# config.run.hostname = hostname
|
||||
# config.run.pid = pid
|
||||
# if not config.save.en:
|
||||
# # config.save.log.en = False
|
||||
# # config.save.message.en = False
|
||||
# # iterate the attr of config.save, if they have en attr, set it to False
|
||||
# for attr in DictTodict(config.save).keys():
|
||||
# sub_config = getattr(config.save, attr)
|
||||
# if isinstance(sub_config, Dict):
|
||||
# try: # will raise keyerror if sub_config doesn't have en attr (it will only return false when come across AttributeError, but Dict doesn't have en attr, so it will raise KeyError)
|
||||
# hasattr(sub_config, "en")
|
||||
# except:
|
||||
# continue
|
||||
# setattr(sub_config, "en", False)
|
||||
# if config.gpt.model in LLM_MODEL_REDIRECTION.keys():
|
||||
# config.gpt.model = LLM_MODEL_REDIRECTION[config.gpt.model]
|
||||
# # config_val(config)
|
||||
# return config
|
||||
|
||||
def load_split_config(custom_config_path = CFG_CUS_PATH, default_config_path = CFG_DEF_PATH):
    """Load the custom and default configs separately (no merge, no
    validation). Used in loader_saver.py purely for printing."""
    (custom_cfg_dict, default_cfg_dict) = load_config_dict('split', default_config_path, custom_config_path)
    # the custom config file may legitimately be empty (parsed as None)
    custom_cfg = None if custom_cfg_dict is None else dictToObj(custom_cfg_dict)
    default_cfg = dictToObj(default_cfg_dict)
    return custom_cfg, default_cfg
|
||||
|
||||
def get_cfg_path_from_alias(custom_cfg_alias: str|int) -> str:
|
||||
if isinstance(custom_cfg_alias, str):
|
||||
if custom_cfg_alias.isdigit():
|
||||
custom_cfg_filename = "custom" + str(custom_cfg_alias) + "." + "yaml"
|
||||
config_path = os.path.join("config/configs", custom_cfg_filename)
|
||||
elif "/" in custom_cfg_alias and ".yaml" in custom_cfg_alias:
|
||||
config_path = custom_cfg_alias
|
||||
else:
|
||||
config_path = os.path.join("config/configs", custom_cfg_alias + "." + "yaml")
|
||||
elif isinstance(custom_cfg_alias, int):
|
||||
custom_cfg_filename = "custom" + str(custom_cfg_alias) + "." + "yaml"
|
||||
config_path = os.path.join("config/configs", custom_cfg_filename)
|
||||
else:
|
||||
raise ValueError("Invalid custom config alias: '--config %s'"%(custom_cfg_alias))
|
||||
if not os.path.exists(config_path):
|
||||
raise FileNotFoundError("alias [%s]: The custom config file %s doesn't exist"%(custom_cfg_alias, config_path))
|
||||
return config_path
|
||||
|
||||
### test ###
|
||||
if __name__ == "__main__":
    # ad-hoc smoke test: load the default custom config and poke at it
    cfg = Config(CFG_CUS_PATH)
    print(cfg.get_item("run", "mode"))
    print(cfg.run.mode)
    print(cfg.get_item("run"))
|
||||
15
config/custom.yaml
Normal file
15
config/custom.yaml
Normal file
@@ -0,0 +1,15 @@
|
||||
|
||||
run:
|
||||
mode: 'autoline'
|
||||
gpt:
|
||||
model: "qwen-max"
|
||||
key_path: "config/key_API.json"
|
||||
save:
|
||||
en: True
|
||||
root: "/home/zhang/CorrectBench/TBgen_App/output/example_mul/"
|
||||
autoline:
|
||||
cga:
|
||||
enabled: True
|
||||
max_iter: 10
|
||||
promptscript: "pychecker"
|
||||
onlyrun: "TBgensimeval"
|
||||
96
config/default.yaml
Normal file
96
config/default.yaml
Normal file
@@ -0,0 +1,96 @@
|
||||
################# running #################
|
||||
run:
|
||||
version: '2.0' # last stable version of the code
|
||||
time: ~ # preserved, will be set in program
|
||||
custom_path: ~ #preserved, will be set in program
|
||||
mode: 'qwen-max' # 'chatgpt': like a chatgpt but can load prompt or previous messages. 'autoline': run pipeline of Chatbench automatically. 'iverilog': run iverilog. 'test': test mode.
|
||||
|
||||
################# saving ##################
|
||||
save:
|
||||
en: True # True -> save according to settings below; False -> do not save anything.
|
||||
root: ~ # the root saving dir ends with '/'; will be set in program.
|
||||
pub:
|
||||
prefix: # the real name of one experiment, fill it in custom_path;
|
||||
dir: 'saves/$weekrange$/' # Ended with '/'; if not empty, log/tb/data will share one dir, recommended.
|
||||
subdir: '' # the final dir of one experiment would be dir + subdir;
|
||||
|
||||
log:
|
||||
en: True
|
||||
dir: 'logs/'
|
||||
notes: ~
|
||||
cfg_pmode: iwantall # available: ['split', 'merge', 'iwantall'] # config print mode; see loader_saver.py;
|
||||
debug_en: False
|
||||
level: "TRACE"
|
||||
|
||||
message:
|
||||
en: True
|
||||
dir: 'messages/'
|
||||
format: 'json' # 'json' or 'txt'
|
||||
|
||||
iverilog:
|
||||
en: True
|
||||
subdir: 'ivcode_nodebug' # for pub mode
|
||||
|
||||
################# loading #################
|
||||
load:
|
||||
prompt:
|
||||
path: "config/initial_prompts/prompt1.txt" # Only valid when gpt.start_form = "prompt"; path to the prompt file
|
||||
pick_idx: [] # TODO: valid only when path is ended with '.json', else ignored
|
||||
|
||||
stage_template:
|
||||
path: "config/templates/stage_template0301.txt"
|
||||
|
||||
############### GPT related ###############
|
||||
gpt:
|
||||
model: "4o" # model name. Now we only use "gpt-4-1106-preview" or "gpt-3.5-turbo-1106" (20231128). "3.5" or "4" will be redirected to the above two models. see https://platform.openai.com/docs/models.
|
||||
key_path: "config/key_API.json" # path to the key file
|
||||
temperature: ~ # currently only in mode "chatgpt". not valid in mode "autoline" because it is too complicated. For chatgpt, it is default to 1.0. see https://platform.openai.com/docs/api-reference/chat/create.
|
||||
json_mode: False # if True, the output of GPT will be in json format. Don't forget to ask GPT to return the json format.
|
||||
chatgpt: # settings below are valid only at mode "chatgpt";
|
||||
start_form: "chat" # "chat": input messages by hand; "prompt": input messages from file; only valid for the first message. In the future there will be a new start_form "competion".
|
||||
# follow_form: "chat" #
|
||||
one_time_talk: False # will not continue to talk after the first message.
|
||||
rtlgen_model: ~ # model used in autoline-3-TBcheck-discriminator, if None, use the same model as gpt.model.
|
||||
|
||||
################# iverilog ################
|
||||
iverilog:
|
||||
dir: "" # valid at mode "iverilog"; path to the dir of the iverilog files
|
||||
task_id: "" # valid at mode "iverilog"; the task id of the iverilog task
|
||||
|
||||
################# autoline ################
|
||||
autoline:
|
||||
result_path: "results"
|
||||
cga:
|
||||
enabled: True
|
||||
max_iter: 10
|
||||
target_coverage: 100.0
|
||||
probset: # integrated probset/mutantset/prob_except; 20240304
|
||||
path: ~ # default:"data/HDLBits/HDLBits_data.jsonl", you must enter this path in custom.yaml to avoid unconscious use.
|
||||
mutant_path: ~ # used in TB_eval2; you must enter this path in custom.yaml to avoid unconscious use.
|
||||
gptgenRTL_path: ~ # used in TB_eval2b; you must enter this path in custom.yaml to avoid unconscious use.
|
||||
more_info_paths: [] # more info added to the probset;
|
||||
only: ['lemmings3','lemmings4','ece241_2013_q8','2014_q3fsm','m2014_q6','review2015_fsm','rule110','fsm_ps2'] # only import data in the list by task_id;
|
||||
exclude: [] # excludes data in the list by task_id;
|
||||
exclude_json: ~ # excludes data in the json file by task_id;
|
||||
filter: [{}] # only imports data containing key-value pairs; example: {"circuit_type": "CMB"} means only import data with "circuit_type" = "CMB"; x
|
||||
checklist:
|
||||
max: 3
|
||||
debug:
|
||||
max: 5 # max iterations of debug; if 0, no debug.
|
||||
reboot: 1 # rerun stage 4 after every x iterations of debug. if 0, not debug but only reboot
|
||||
py_rollback: 2 # reboot both after every x iterations of py_debug; if 0, no py debug; if 1, only debug 1 time and then reboot;
|
||||
onlyrun: ~ # valid: [~, "TBgen", "TBgensim", "TBgensimeval"] ; if none, run all tasks; if not none, run only the tasks in the list.
|
||||
promptscript: ~
|
||||
timeout: 300 # timeout for run a python or iverilog code; unit: second.
|
||||
TBcheck:
|
||||
rtl_num: 20 # the number of rtls used in the TB_check; will be ignored if the llmgen_rtls are provided in probset.
|
||||
correct_max: 3
|
||||
discrim_mode: "col_full_wrong"
|
||||
correct_mode: "naive"
|
||||
rtl_compens_en: True # if True, when the half of the rtls contain syntax error, will generate more rtls to compensate.
|
||||
rtl_compens_max_iter: 3 # the max iteration of generating rtls to compensate.
|
||||
itermax: 10 # the max reboot times of the whole program; this reboot is triggered by TBcheck's next action
|
||||
update_desc: False # if True, when reboot the program, will use the updated description of the task (from TBcheck)
|
||||
save_compile: True # if True, save the compiling codes and files (codes in TBeval and TBcheck.discriminator); if False, not save.
|
||||
save_finalcodes: True # if True, save the eventually generated Testbench codes (Verilog + Python); if False, not save.
|
||||
error_interruption: False # if True, the program will stop when error occurs; Usually used in debugging.
|
||||
5
config/key_API.json
Normal file
5
config/key_API.json
Normal file
@@ -0,0 +1,5 @@
|
||||
{
|
||||
"OPENAI_API_KEY" : "",
|
||||
"ANTHROPIC_API_KEY" : "",
|
||||
"dashscope" : ""
|
||||
}
|
||||
0
data/__init__.py
Normal file
0
data/__init__.py
Normal file
BIN
data/__pycache__/__init__.cpython-312.pyc
Normal file
BIN
data/__pycache__/__init__.cpython-312.pyc
Normal file
Binary file not shown.
BIN
data/__pycache__/probset.cpython-312.pyc
Normal file
BIN
data/__pycache__/probset.cpython-312.pyc
Normal file
Binary file not shown.
268
data/probset.py
Normal file
268
data/probset.py
Normal file
@@ -0,0 +1,268 @@
|
||||
"""
|
||||
Description : the class to manage the problem set data
|
||||
Author : Ruidi Qiu (r.qiu@tum.de)
|
||||
Time : 2024/3/6 14:01:22
|
||||
LastEdited : 2024/8/13 00:56:57
|
||||
"""
|
||||
|
||||
# When this file is executed directly (not imported as a package module),
# add the repository root to sys.path so the top-level imports below resolve.
if __name__ == "__main__":
    import sys
    sys.path.append(".") # add the root folder to the python path
|
||||
from copy import deepcopy
|
||||
import loader_saver as ls
|
||||
from config import Config
|
||||
|
||||
def main():
    """Smoke-test HDLBitsProbset: load the HDLBits problem set together with the
    circuit-type metadata, restricted to one task, and print what was loaded."""
    HDLBITS_DATA_PATH = "data/HDLBits/HDLBits_data.jsonl"
    CIRCUIT_TYPE_PATH = "data/HDLBits/HDLBits_circuit_type.jsonl"
    # BUGFIX: HDLBitsProbset has no `circuit_type_path` keyword (that raised a
    # TypeError); extra info files go through `more_info_paths`, whose records
    # are merged into the base data by task_id.
    probset = HDLBitsProbset(HDLBITS_DATA_PATH, more_info_paths=[CIRCUIT_TYPE_PATH], only_tasks=["rule110"])
    print(probset.num)
    print(probset.data[0])
|
||||
|
||||
class dictlist:
    """
    Manage a list of dict records that share one identifying key.

    Record form::

        {
            id_key: "xxx",  # the key that identifies the record
            content1: xxx,
            content2: xxx,
            ...
        }
    """
    def __init__(self, id_key:str, path:str=None, moreinfo_path_list:list=None, only:list=None, exclude:list=None, filter:dict=None):
        """
        - id_key: name of the key that identifies each record (e.g. "task_id")
        - path: jsonl (or plain json) file to load; if None, no data is loaded
        - moreinfo_path_list: extra jsonl/json files merged into the data by id
        - only / exclude / filter: initial cleaning (see data_clean)
        """
        # BUGFIX: the defaults used to be mutable ([] / {}), which Python shares
        # across every call of __init__; normalize None sentinels instead.
        moreinfo_path_list = [] if moreinfo_path_list is None else moreinfo_path_list
        exclude = [] if exclude is None else exclude
        filter = {} if filter is None else filter
        self.id_key = id_key
        if path is not None:
            try:
                self.data = ls.load_json_lines(path)
            except Exception:  # not jsonl -> fall back to plain json (was a bare except:)
                self.data = ls.load_json_dict(path)
            if moreinfo_path_list != []:
                try:
                    moreinfo = [ls.load_json_lines(moreinfo_path) for moreinfo_path in moreinfo_path_list]
                except Exception:
                    moreinfo = [ls.load_json_dict(moreinfo_path) for moreinfo_path in moreinfo_path_list]
                for info in moreinfo:
                    self.merge(info)
            self.filter(filter)
            self.del_items(only, del_by_list=False)
            self.del_items(exclude)
        else:
            self.data = []

    @property
    def num(self):
        """Number of records currently held."""
        return len(self.data)

    def data_clean(self, only=None, exclude=None, filter=None):
        """Keep only ids in `only`, drop ids in `exclude`, then apply `filter`."""
        self.del_items(only, del_by_list=False)
        self.del_items([] if exclude is None else exclude)
        self.filter({} if filter is None else filter)

    def find_data_by_id(self, id):
        """Return the record whose id_key equals `id`; raise ValueError if absent."""
        for prob_data in self.data:
            if prob_data[self.id_key] == id:
                return prob_data
        raise ValueError("Cannot find the problem infomation with %s: "%(self.id_key) + id + ".")

    def merge(self, additional_data):
        """merge additional data into the original data (matched by id_key)"""
        for data in self.data:
            for add_data in additional_data:
                if data[self.id_key] == add_data[self.id_key]:
                    for key, value in add_data.items():
                        if key != self.id_key:
                            data[key] = value

    def filter(self, filter_dict, del_en=True):
        """
        #### Function
        - filtering the data by the key and value.
        - only the data that has the key and value will remain
        - the output will always be the filtered data, but I recommend to directly use `self.data` to get the filtered data if del_en is True
        #### Input
        - filter_dict: dict; the key and value to filter the data
        - del_en: bool; if True, the data that doesn't have the key and value will be deleted from the data. If False, the data will not change but output the filtered data
        """
        if del_en:
            for key, value in filter_dict.items():
                self.data = [prob_data for prob_data in self.data if prob_data.get(key) == value]
        else:
            filtered_data = deepcopy(self.data)
            for key, value in filter_dict.items():
                filtered_data = [prob_data for prob_data in filtered_data if prob_data.get(key) == value]
            return filtered_data

    def del_items(self, id_list, del_by_list=True):
        """
        - id_list: list of ids
        - del_by_list: bool; if True, data having the task_id in the list will be deleted. If False, the data that doesn't have the task_id in the list will be deleted
        """
        # avoid default list = [] and del_by_list = False to del all the data
        if id_list is not None and id_list != []:
            if del_by_list:
                self.data = [prob_data for prob_data in self.data if prob_data[self.id_key] not in id_list]
            else: # del the data that doesn't have the task_id in the list
                self.data = [prob_data for prob_data in self.data if prob_data[self.id_key] in id_list]
|
||||
|
||||
class muti_dictlist:
    """
    A group of `dictlist` objects driven through one interface; every cleaning
    and merging method below is broadcast to each contained dictlist.

    - self.dictlists: the list of dictlist instances
    """
    def __init__(self, id_key:str, path_list:list=None, moreinfo_path_list:list=None, only:list=None, exclude:list=None, filter:dict=None):
        """
        Only id_key (mostly "task_id") must be decided here; the dictlists can
        be added later. BUGFIX: the list/dict defaults used to be mutable and
        shared across calls; they are now None sentinels normalized on use.
        """
        self.dictlists = []
        self.id_key = id_key
        if path_list is not None:
            self.load_dictlists(id_key, path_list, moreinfo_path_list, only, exclude, filter)

    def load_dictlists(self, id_key, path_list, moreinfo_path_list=None, only=None, exclude=None, filter=None):
        """Replace the current group with one dictlist per path in path_list."""
        moreinfo_path_list = [] if moreinfo_path_list is None else moreinfo_path_list
        exclude = [] if exclude is None else exclude
        filter = {} if filter is None else filter
        self.dictlists = [dictlist(id_key, path, moreinfo_path_list, only, exclude, filter) for path in path_list]

    def load_dictlist(self, id_key, path, moreinfo_path_list=None, only=None, exclude=None, filter=None):
        """Append one more dictlist loaded from `path` to the group."""
        moreinfo_path_list = [] if moreinfo_path_list is None else moreinfo_path_list
        exclude = [] if exclude is None else exclude
        filter = {} if filter is None else filter
        self.dictlists.append(dictlist(id_key, path, moreinfo_path_list, only, exclude, filter))

    def data_clean(self, only=None, exclude=None, filter=None):
        """Broadcast dictlist.data_clean to every contained dictlist."""
        exclude = [] if exclude is None else exclude
        filter = {} if filter is None else filter
        for dlist in self.dictlists:
            dlist.data_clean(only, exclude, filter)

    def merge(self, additional_data):
        """merge additional data into the original data of every dictlist"""
        for dlist in self.dictlists:
            dlist.merge(additional_data)

    def filter(self, filter_dict, del_en=True):
        """
        #### Function
        - filtering the data by the key and value.
        - only the data that has the key and value will remain
        #### Input
        - filter_dict: dict; the key and value to filter the data
        - del_en: bool; if True, non-matching records are deleted in place. If
          False each dictlist computes (but this wrapper discards) the filtered
          copy — use do("filter", ...) to collect the per-dictlist results.
        """
        for dlist in self.dictlists:
            dlist.filter(filter_dict, del_en)

    def del_items(self, id_list, del_by_list=True):
        """
        - id_list: list of ids
        - del_by_list: if True, drop records whose id is in id_list; if False,
          keep only records whose id is in id_list
        """
        for dlist in self.dictlists:
            dlist.del_items(id_list, del_by_list)

    def do(self, func:str, *args, **kwargs):
        """
        Perform method `func` on all the dictlists and collect the results.
        e.g. do("del_items", task_id_list)
        """
        result_list = []
        for dlist in self.dictlists:
            # BUGFIX: resolve the attribute separately from calling it; the old
            # try/except AttributeError around the *call* also swallowed (and
            # misreported) AttributeErrors raised inside the method itself.
            method = getattr(dlist, func, None)
            if method is None:
                print("The function '%s' is not in dictlist"%func)
                continue
            result_list.append(method(*args, **kwargs))
        return result_list

    def access(self, attr:str):
        """Return the list of attribute `attr` taken from each dictlist."""
        return [getattr(dlist, attr) for dlist in self.dictlists]

    def all_equal(self, attr:str):
        """True if attribute `attr` has the same value on every dictlist."""
        attr_list = self.access(attr)
        # (the old loop variable shadowed the `attr` parameter; renamed)
        return all(attr_value == attr_list[0] for attr_value in attr_list)

    @property
    def num(self):
        """List of record counts, one per contained dictlist."""
        return [dlist.num for dlist in self.dictlists]

    @property
    def datasets(self):
        """Alias for self.dictlists."""
        return self.dictlists

    @property
    def groups(self):
        """Alias for self.dictlists."""
        return self.dictlists
|
||||
|
||||
|
||||
class HDLBitsProbset(dictlist):

    """
    The HDLBits problem set: a `dictlist` keyed by "task_id".
    (Has many similarities with HDLBitsData in HDLBits_data_manager.py.)
    """
    def __init__(self, path:str=None, more_info_paths:list=None, only_tasks:list=None, exclude_tasks:list=None, filter_content:dict=None):
        # BUGFIX: the defaults used to be mutable ([]/{}) and therefore shared
        # across all instances; normalize None sentinels to fresh empties.
        more_info_paths = [] if more_info_paths is None else more_info_paths
        exclude_tasks = [] if exclude_tasks is None else exclude_tasks
        filter_content = {} if filter_content is None else filter_content
        super().__init__("task_id", path=path, moreinfo_path_list=more_info_paths, only=only_tasks, exclude=exclude_tasks, filter=filter_content)

    @property
    def task_id_list(self):
        """
        return a list of task_id
        """
        return [i["task_id"] for i in self.data]

    def create_empty_set_via_taskids(self, task_id_list):
        """
        reset self.data to bare records containing only a task_id each
        """
        self.data = [{"task_id": i} for i in task_id_list]

    def access_data_via_taskid(self, task_id):
        """
        return the full record (dict) of `task_id`; raise ValueError if absent
        """
        for i in self.data:
            if i["task_id"] == task_id:
                return i
        raise ValueError("task_id %s not found!!!" % (task_id))

    def load_by_config(self, probset_config:Config):
        """
        - if use this, please leave path as Empty when initializing
        - probset_config = config.autoline.probset
        """
        # BUGFIX: copy the config's list instead of aliasing it; the old code
        # appended gptgenRTL/mutant paths into probset_config.more_info_paths
        # itself, so every call grew the shared config object.
        more_info_paths = list(probset_config.more_info_paths) if probset_config.more_info_paths is not None else []
        if probset_config.gptgenRTL_path is not None:
            more_info_paths.append(probset_config.gptgenRTL_path)
        if probset_config.mutant_path is not None:
            more_info_paths.append(probset_config.mutant_path)
        paras = {
            "path": probset_config.path,
            "more_info_paths": more_info_paths,
            "only_tasks": probset_config.only,
            "exclude_tasks": probset_config.exclude,
            "filter_content": probset_config.filter[0]
        }
        self.__init__(**paras)
        if probset_config.exclude_json is not None:
            # accept either a single path or a list of paths (was two near-
            # duplicate type(...) == ... branches)
            exclude_jsons = probset_config.exclude_json if isinstance(probset_config.exclude_json, list) else [probset_config.exclude_json]
            for exclude_json in exclude_jsons:
                exclude_tasks = HDLBitsProbset()
                exclude_tasks.data = ls.load_json_dict(exclude_json)
                self.del_items(exclude_tasks.task_id_list, del_by_list=True)

    def random_task(self):
        """
        return a random task
        """
        import random
        return random.choice(self.data)
|
||||
|
||||
# Script entry point: run the smoke test defined in main() above.
if __name__ == "__main__":
    main() # run the main function
|
||||
156
iverilog_call.py
Normal file
156
iverilog_call.py
Normal file
@@ -0,0 +1,156 @@
|
||||
"""
|
||||
Description : This file is related to iverilog calling. Some codes are modified from autosim.py v0.2 by Rain Bellinsky.
|
||||
Author : Ruidi Qiu (r.qiu@tum.de)
|
||||
Time : 2023/12/9 23:22:51
|
||||
LastEdited : 2025/2/22 16:13:21
|
||||
"""
|
||||
|
||||
import os
|
||||
import sys
|
||||
from utils.utils import run_in_dir
|
||||
from utils.subproc import subproc_call
|
||||
|
||||
# Path separator for the current OS ("IC" = Interval Character in this codebase).
if os.name == 'nt':
    IC = '\\' # IC: Interval Character (path separator on Windows)
else:
    IC = '/'

# Default project directory, used only when this file is executed directly.
RUN_DIR = "ipynb_demo/verilog_test/" # this is only used when directly run this file

# Absolute paths of the Icarus Verilog compiler and its vvp runtime.
# NOTE(review): hard-coded; assumes a standard Linux install — confirm per host.
IVERILOG_PATH = "/usr/bin/iverilog"
IVERILOG_VVP_PATH = "/usr/bin/vvp"
|
||||
|
||||
def iverilog_call(dir, silent = False, timeout = 120):
    """
    #### input:
    - dir: the name of the directory that contains all verilog files; can end with or without "/"
    - silent: if True, suppress the progress prints
    - timeout: per-command timeout in seconds (compile and run each get it)

    #### output:
    return a list of 6 elements:
    - [0] (pass or not): bool, whether the simulation is successful
    - [1] (cmd1): str, the iverilog compiling command
    - [2] (run1_info): dict, the iverilog compiling result {"out": out_reg, "err": err_reg, "haserror": error_exist}
    - [3] (cmd2): str, the vvp running command (None if compiling failed)
    - [4] (run2_info): dict, the vvp running result {"out": out_reg, "err": err_reg, "haserror": error_exist} (None if compiling failed)
    - [5]/[-1] (error_msg): str, the error message if there is any error; This is for convenience, the error message is also included in [2] or [4]

    #### functionality:
    given the name of the directory that contains all verilog files, create a vvp file and run it.

    #### iverilog command explanation:
    - -o + file_path: output file name (.vvp's name)
    - -c + list_file_path: read file list from file
    """
    def s_print(*args, **kwargs):
        # print only when not running silently
        if not silent:
            print(*args, **kwargs)

    if not dir.endswith(IC):
        dir += IC
    # collect every .v file under dir into one space-separated argument string
    vlist_data = vList_gen(dir)["data"]
    vlist_str = "".join(vlist_data).replace("\n", " ") # eg: saves/1204~1210/test/Mux256to1v/Mux256to1v.v saves/1204~1210/test/Mux256to1v/Mux256to1v_tb.v
    # vvp_filename = "%s.vvp"%(task_id)
    vvp_filename = "run.vvp"
    # step 1: compile all sources into run.vvp (-g2012 enables SystemVerilog-2012)
    # cmd1 = "iverilog -g2012 -o %s %s"%(vvp_filename, vlist_str) # used to be vvp_path
    cmd1 = "%s -g2012 -o %s %s"%(IVERILOG_PATH, vvp_filename, vlist_str) # used to be vvp_path
    s_print(cmd1)
    with run_in_dir(dir):
        run1_info = subproc_call(cmd1, timeout) # {"out": out_reg, "err": err_reg, "haserror": error_exist}
    if run1_info["haserror"]:
        s_print("iverilog compiling failed")
        return [False, cmd1, run1_info, None, None, run1_info["err"]]
    # step 2: run the compiled vvp file in the same directory
    cmd2 = "%s %s"%(IVERILOG_VVP_PATH, vvp_filename) # used to be vvp_path
    s_print(cmd2)
    with run_in_dir(dir):
        run2_info = subproc_call(cmd2, timeout)
    if run2_info["haserror"]:
        s_print("vvp failed")
        return [False, cmd1, run1_info, cmd2, run2_info, run2_info["err"]]
    return [True, cmd1, run1_info, cmd2, run2_info, '']
|
||||
|
||||
def save_iv_runinfo(ivrun_info, dir):
    """
    Persist the result of an iverilog run (the 6-element list produced by
    iverilog_call) to <dir>/run_info.txt: pass/fail verdict, both commands,
    and each command's captured stdout/stderr.
    """
    report = []
    # overall verdict
    report.append("iverilog simulation passed!\n\n" if ivrun_info[0] else "iverilog simulation failed!\n\n")
    # compile command + its output/error (present only if it was attempted)
    if ivrun_info[1] is not None:
        report.append("iverilog cmd 1:\n%s\n" % (ivrun_info[1]))
    if ivrun_info[2] is not None:
        report.append("iverilog cmd 1 output:\n%s\n" % (ivrun_info[2]["out"]))
        report.append("iverilog cmd 1 error:\n%s\n" % (ivrun_info[2]["err"]))
    # run command + its output/error (None when compiling failed)
    if ivrun_info[3] is not None:
        report.append("iverilog cmd 2:\n%s\n" % (ivrun_info[3]))
    if ivrun_info[4] is not None:
        report.append("iverilog cmd 2 output:\n%s\n" % (ivrun_info[4]["out"]))
        report.append("iverilog cmd 2 error:\n%s\n" % (ivrun_info[4]["err"]))
    with open(os.path.join(dir, "run_info.txt"), "w") as report_file:
        report_file.write("".join(report))
|
||||
|
||||
def iverilog_call_and_save(dir, silent = False, timeout = 120):
    """
    Run iverilog on all .v files in `dir` (see iverilog_call), then persist the
    run report to <dir>/run_info.txt (see save_iv_runinfo).

    Returns the same 6-element result list produced by iverilog_call.
    """
    iv_run_result = iverilog_call(dir, silent, timeout)
    save_iv_runinfo(iv_run_result, dir)
    return iv_run_result
|
||||
|
||||
def getVerilog(dir):
    """
    Recursively collect the bare filenames (newline-terminated, no directory
    component) of every .v file under *dir*.
    """
    collected = []
    for _root, _subdirs, filenames in os.walk(dir):
        collected.extend(fname + '\n' for fname in filenames if fname.endswith(".v"))
    return collected
|
||||
|
||||
def vList_gen(dir):
    """
    Write a list of all .v files under `dir` to <dir>vlist.txt and return it.

    - dir: directory to search (expected to already end with the separator —
      this function is called by iverilog_call after normalization)
    - returns {"path": <list-file path>, "data": <newline-terminated names>}
    """
    list_path = "%svlist.txt"%(dir)
    verilog_names = getVerilog(dir) # all .v files under dir
    with open(list_path, "w") as list_file:
        list_file.writelines(verilog_names)
    return {"path": list_path, "data": verilog_names}
|
||||
|
||||
def run_iverilog(config):
    """
    Entry point for main.py to directly call: run iverilog on the directory
    named in config.iverilog.dir. The detailed run result is discarded here.
    """
    iverilog_call(config.iverilog.dir)
|
||||
|
||||
def main(dir=None):
    """
    Interactive entry point when this file is run directly: compile and
    simulate the verilog sources in `dir` (prompting for it when None), save
    the run report next to them, and echo the raw result.
    """
    target_dir = dir if dir is not None else input("Please enter project name (dir name):\n>> ")
    run_result = iverilog_call(target_dir)
    save_iv_runinfo(run_result, target_dir)
    print(run_result)
|
||||
|
||||
# Usage: python iverilog_call.py [project_dir]; falls back to RUN_DIR.
if __name__ == '__main__':
    if len(sys.argv) == 2:
        main(sys.argv[1])
    else:
        # BUGFIX: dir_path (RUN_DIR normalized to end with the separator) was
        # computed but then RUN_DIR was passed instead; pass dir_path.
        dir_path = RUN_DIR if RUN_DIR.endswith(IC) else RUN_DIR + IC
        main(dir_path)
|
||||
584
loader_saver.py
Normal file
584
loader_saver.py
Normal file
@@ -0,0 +1,584 @@
|
||||
"""
|
||||
Description : This module contains all functions related to loading and saving (except config loading).
|
||||
Author : Ruidi Qiu (ruidi.qiu@tum.de)
|
||||
Time : 2023/11/28 14:03:56
|
||||
LastEdited : 2024/8/23 12:37:25
|
||||
"""
|
||||
|
||||
import os
|
||||
import sys
|
||||
import inspect
|
||||
from config.config import Dict
|
||||
import config as cfg
|
||||
from config import Config
|
||||
# import matplotlib.pyplot as plt
|
||||
import yaml
|
||||
import json
|
||||
from io import StringIO
|
||||
from loguru import logger as logurulogger
|
||||
from utils.utils import print_time, Timer, str_list
|
||||
|
||||
ORIGINAL_RUN_PATH = os.getcwd()
|
||||
|
||||
########################## yaml/json utils ############################
|
||||
def load_yaml_dict(path: str):
    """Parse the YAML file at *path* (safe loader) and return the result."""
    with open(path, 'rb') as yaml_file:
        return yaml.safe_load(yaml_file)
|
||||
|
||||
def load_json_dict(path: str):
    """Read the JSON file at *path* and return the parsed object."""
    with open(path, 'r') as json_file:
        return json.load(json_file)
|
||||
|
||||
def save_dict_json_form(json_dict, path: str):
    """Serialize *json_dict* to *path* as pretty-printed JSON (indent=4)."""
    serialized = json.dumps(json_dict, indent=4)
    with open(path, 'w') as json_file:
        json_file.write(serialized)
|
||||
|
||||
def load_txt(path: str):
    """Return the entire contents of the text file at *path* as one string."""
    with open(path, 'r') as text_file:
        return text_file.read()
|
||||
|
||||
# jsonl related:
|
||||
def load_json_lines(path: str):
    """Parse a jsonl file (one JSON document per line) into a list of objects."""
    records = []
    with open(path, 'r') as jsonl_file:
        for raw_line in jsonl_file:
            records.append(json.loads(raw_line))
    return records
|
||||
|
||||
def save_json_lines(data, path: str):
    """Write each record in *data* to *path* as one JSON document per line (jsonl)."""
    with open(path, 'w') as jsonl_file:
        jsonl_file.writelines(json.dumps(record) + '\n' for record in data)
|
||||
|
||||
def find_line_jsonl(id_or_number, data):
    """
    Quickly find a record in jsonl-style *data*: match on "task_id" when given
    a str, on "task_number" when given an int. Returns None when nothing
    matches (or when id_or_number is neither type).
    """
    if isinstance(id_or_number, str):
        lookup_key = "task_id"
    elif isinstance(id_or_number, int):
        lookup_key = "task_number"
    else:
        return None
    return next((record for record in data if record[lookup_key] == id_or_number), None)
|
||||
|
||||
########################## set save path ############################
|
||||
def save_path_setting(config, save_type, custom_name=''):
    '''
    Build the save path for one artifact type, creating the directory for file
    outputs. Supports log/message/tb saving plus 'dir' for the run directory.

    - config: the global Config object (reads config.run and config.save.*)
    - save_type: 'log' | 'message' | 'tb' | 'dir'
        - 'dir' returns the run's save directory itself (ends with '/')
        - 'message' returns a path without a suffix (caller appends its own)
    - custom_name: optional leading name component for the file
    '''
    SAVE_TYPE_DICT = {
        'log': config.save.log,
        'message': config.save.message,
        'tb': config.save.iverilog,
        'dir': config.save.pub
    }
    # BUGFIX: validate *before* any lookup by save_type; the old code indexed
    # SAVE_TYPE_DICT first, so an unknown type raised a bare KeyError and this
    # explicit error was unreachable. (The old `type_config` was never used.)
    if save_type not in SAVE_TYPE_DICT:
        raise Exception('no such saving type named \"%s\"' % (save_type))
    run = config.run
    pub = config.save.pub
    # file name: <custom_name>_<prefix>_<start time> (prefix part is optional)
    if pub.prefix is None:
        unique_name = '%s'%(run.time)
    else:
        unique_name = '%s_%s'%(str(pub.prefix), run.time)
    if custom_name != '':
        custom_name = '%s_'%(custom_name)
    file_name = custom_name + unique_name
    # dir: <pub.dir><pub.subdir><unique_name>/
    # BUGFIX: normalize into a local variable instead of writing back into the
    # config object, and treat a None subdir as '' (it used to be formatted
    # literally as the string "None" in the path).
    subdir = '' if pub.subdir is None else pub.subdir
    if subdir != '' and not subdir.endswith('/'): # in case of missing '/'
        subdir = subdir + '/'
    save_dir = '%s%s%s/'%(pub.dir, subdir, unique_name)
    if save_type == 'dir':
        return save_dir
    if not os.path.exists(save_dir): # make sure the dir exists for file outputs
        os.makedirs(save_dir)
    # suffix: 'message' deliberately gets none (the caller chooses it)
    suffix_dict = {'log': '.log', 'tb': '.v'}
    suffix = '' if save_type == 'message' else suffix_dict[save_type]
    return save_dir + file_name + suffix
|
||||
|
||||
def add_save_root_to(config):
    """
    Attach this run's save directory to the config as config.save.root
    (computed by save_path_setting; ends with '/') and return the config.
    """
    config.save.root = save_path_setting(config, 'dir')
    return config
|
||||
|
||||
############################# log save ###############################
|
||||
class AutoLogger:
    """
    Singleton project logger backed by loguru.

    Initialization only completes once the Config() singleton exists; until
    then __init__ silently does nothing, so the first effective AutoLogger()
    call must happen after the config has been loaded.
    """
    _instance = None      # singleton instance holder
    _initialized = False  # True once __init__ has fully run (post-config)
    def __new__(cls, *args, **kwargs):
        # classic singleton: always hand back the same instance
        if cls._instance is None:
            cls._instance = super(AutoLogger, cls).__new__(cls)
        return cls._instance

    def __init__(self):
        if not AutoLogger._initialized:
            if Config._instance is not None: # make sure that the initialization of logger is after the config
                AutoLogger._initialized = True
                self.config = Config()
                self.debug_en = self.config.save.log.debug_en
                self.log_level = self.config.save.log.level
                self.logger = logurulogger
                # self.log_root_dir = ""
                # self.runing_log_path = os.path.join(self.log_root_dir, 'running.log')
                self.running_log_path = save_path_setting(self.config, 'log')
                self._save_config() # now the log is always saved
                self._init_levels()
                self._init_handlers()
                self.log_stream = None
                # NOTE(review): _init_handlers() above just stored a handler id
                # in running_log_id; this line resets it to None — confirm this
                # ordering is intended (the id is not read elsewhere in view).
                self.running_log_id = None
                self.temp_log_id = None
                self.logline_prefix_list:list[str] = []

    def __call__(self, string:str):
        # calling the logger instance directly logs at INFO level
        self.info(string)

    def _init_levels(self):
        """initialize the customized log levels"""
        self.logger.level("POSITIVE", no=22, color="<fg 194>") # #d7ffd7 in console
        self.logger.level("NEGATIVE", no=23, color="<fg 225>") # #ffd7ff in console
        self.logger.level("FAILED", no=26, color="<red>")

    def _init_handlers(self):
        """set the loggers for the project"""
        self.logger.remove(0) # remove the original stderr handler (console handler)
        ##### console logger:
        console_level = "INFO" if not self.debug_en else "DEBUG"
        self.logger.add(sys.stdout, level=console_level, format="<green>{time:YYYY-MM-DD HH:mm:ss.S}</> | <level>{level: <8}</> | <level>{message}</>") # <color>...</> means colorize the text
        ##### running logger:
        self.running_log_id = self._set_handler(self.running_log_path, level=self.log_level)

    def _set_handler(self, path:str|StringIO, level:str=None, filter=None, format=None) -> int:
        """
        - set one handler for the logger (see loguru documentation to know what is a handler)
        - input:
            - path: a file path (rotated at 500 MB, utf-8) or an in-memory StringIO sink
            - level: the lowest level of the log that will be saved for this handler
            - filter: the customized filter (func/dict/str) to match what to be saved
            - format: the format of each record
        - returns the loguru handler id (needed to remove the handler later)
        """
        if format is None:
            format = "{time:YYYY-MM-DD HH:mm:ss} | {level: <8} | {message}"
            # format = "{time: HH:mm:ss} | {level} | {message}"
        format : str
        paras = {
            "sink": path,
            "format": format,
            "level": level,
            "filter": filter
        }
        if isinstance(path, str):
            # file sinks get rotation and an explicit encoding
            paras["rotation"] = '500 MB'
            paras["encoding"] = 'utf-8'
        handler_id = self.logger.add(**paras)
        return handler_id

    # methods for temporary logging
    def set_temp_log(self):
        """initialize a temporary log stream, if already exists, reset it first"""
        if self.temp_log_id is not None:
            self.reset_temp_log()
        self.log_stream = StringIO()
        self.temp_log_id = self._set_handler(self.log_stream, level=self.log_level) # same as the running logger

    def reset_temp_log(self):
        """delete the temporary log and return the content captured so far"""
        if self.temp_log_id is not None:
            log_content = self.get_temp_log()
            self.logger.remove(self.temp_log_id)
            self.log_stream.close()
            self.log_stream = None
            self.temp_log_id = None
            return log_content
        else:
            self.logger.warning("logger warning: no temp log to reset")
            return ""

    def get_temp_log(self)->str:
        """get the content of the temporary log (without removing it)"""
        if self.log_stream is not None:
            return self.log_stream.getvalue()
        else:
            return ""

    # methods for prefix of a line of log
    def set_prefix(self, prefix:str):
        """
        - set the prefix for the log line. the prefix will be stacked.
        - for example, if you set prefix1, prefix2, prefix3, the log line will be like: [prefix1] (prefix2) (prefix3) log_content
        """
        self.logline_prefix_list.append(prefix)

    def pop_prefix(self):
        """pop the last prefix out of the prefix stack"""
        if len(self.logline_prefix_list) > 0:
            self.logline_prefix_list.pop()
        else:
            self.logger.warning("logger warning: no prefix to pop")

    def clear_prefix(self):
        """clear all the prefixes"""
        self.logline_prefix_list.clear()

    def show_prefix(self):
        """log (at INFO) the currently stacked prefixes"""
        self.logger.info(f"current prefixes: {self.logline_prefix}")

    def check_prefix(self, prefix:str):
        """check if the prefix is in the prefix stack"""
        return prefix in self.logline_prefix_list

    @property
    def logline_prefix(self):
        """
        - render the prefix stack, e.g. "[prefix1] [prefix2] [prefix3] "
          (each prefix wrapped in brackets, trailing space included)
        - if there is no prefix, return "" (empty string)
        """
        first_bracker = "[]"
        other_bracker = "[]"
        if len(self.logline_prefix_list) > 0:
            prefix_first = first_bracker[0] + self.logline_prefix_list[0] + first_bracker[-1] + " "
            if len(self.logline_prefix_list) > 1:
                prefix_other = " ".join([(other_bracker[0] + prefix + other_bracker[-1]) for prefix in self.logline_prefix_list[1:]]) + " "
            else:
                prefix_other = ""
            return prefix_first + prefix_other
        else:
            return ""

    # methods for logging
    def trace(self, string:str):
        """ the most trival info, level value: 5"""
        self.logger.trace(self.logline_prefix + string)

    def debug(self, string:str):
        """ debug level log, some point info; level value: 10"""
        # debug records carry the caller's file/function/line for traceability
        string = string + " | " + self._get_caller_location()
        self.logger.debug(self.logline_prefix + string)

    def info(self, string:str):
        """info level log; level value: 20"""
        self.logger.info(self.logline_prefix + string)

    def positive(self, string:str):
        """positive level log; level value: 22"""
        self.logger.log("POSITIVE", self.logline_prefix + string)

    def negative(self, string:str):
        """negative level log; level value: 23"""
        self.logger.log("NEGATIVE", self.logline_prefix + string)

    def success(self, string:str):
        """success level log; level value: 25"""
        self.logger.success(self.logline_prefix + string)

    def failed(self, string:str):
        """failed level log; level value: 26"""
        self.logger.log("FAILED", self.logline_prefix + string)

    def warning(self, string:str):
        """warning level log; level value: 30"""
        # warnings include both this frame's caller and its caller's caller
        string = string + " | " + self._get_caller_location() + " | caller: " + self._get_caller_location(3)
        self.logger.warning(self.logline_prefix + string)

    def error(self, string:str):
        """error level log; level value: 40"""
        string = string + " | " + self._get_caller_location() + " | caller: " + self._get_caller_location(3)
        self.logger.error(self.logline_prefix + string)

    def critical(self, string:str):
        """critical level log; level value: 50"""
        string = string + " | " + self._get_caller_location() + " | caller: " + self._get_caller_location(3)
        self.logger.critical(self.logline_prefix + string)

    def match_level(self, condition:bool, yeslevel:str, nolevel:str, string:str):
        """will log the string with yeslevel if condition is True, else with nolevel"""
        if condition:
            getattr(self, yeslevel)(string)
        else:
            getattr(self, nolevel)(string)

    def assert_(self, condition:bool, string:str, level:str='critical'):
        """
        - similar to the assert statement, but will save the log before raising the AssertionError
        - you can customize the level of the log, default: critical; you should use lower case
        """
        if not condition:
            getattr(self, level)(string)
            raise AssertionError(string)

    @staticmethod
    def _get_caller_location(level:int=2):
        # walk `level` frames up the stack to describe where the log call came
        # from (level=2: the caller of the logging method; 3: its caller)
        caller_file = inspect.stack()[level].filename
        caller_file = os.path.relpath(caller_file, ORIGINAL_RUN_PATH)
        caller_func = inspect.stack()[level].function
        caller_line = inspect.stack()[level].lineno
        return f"location: {caller_file}, func: {caller_func}, line: {caller_line}"

    def _save_config(self):
        """append the run notes (if any) and the full config dump to the log file"""
        with open(self.running_log_path, 'a') as file:
            #notes:
            if not self.config.save.log.notes is None:
                file.write('%s\n\n' % (self.config.save.log.notes))
            #config information:
            print_config(file, self.config)
|
||||
|
||||
"""
|
||||
loguru level table
|
||||
level | value
|
||||
TRACE | 5
|
||||
DEBUG | 10
|
||||
INFO | 20
|
||||
SUCCESS| 25
|
||||
WARNING| 30
|
||||
ERROR | 40
|
||||
CRITICAL| 50
|
||||
|
||||
my level table
|
||||
level | value
|
||||
POSITIVE| 22
|
||||
NEGATIVE| 23
|
||||
FAILED | 26
|
||||
"""
|
||||
|
||||
class log_localprefix:
    """
    Temporarily push a prefix onto the AutoLogger prefix stack.

    usage as a context manager

    ::

        with log_localprefix('prefix'):
            logger.info('log content')

    -> "[prefix] log content"


    usage as a decorator

    ::

        @log_localprefix('prefix')
        def func():
            logger.info('log content')

    -> "[prefix] log content

    """
    def __init__(self, prefix:str):
        self.prefix = prefix
        self.logger = AutoLogger()  # singleton: shared project logger

    def __enter__(self):
        self.logger.set_prefix(self.prefix)

    def __exit__(self, exc_type, exc_val, exc_tb):
        # pop even when the body raised; returning None propagates the exception
        self.logger.pop_prefix()

    def __call__(self, func):
        from functools import wraps  # local import: keep module-level deps unchanged
        # BUGFIX: preserve the wrapped function's metadata (__name__, __doc__,
        # signature); the bare wrapper used to hide them from introspection.
        @wraps(func)
        def wrapper(*args, **kwargs):
            with self:
                return func(*args, **kwargs)
        return wrapper
|
||||
|
||||
def print_dict(file, input_dict, indent='', ind_style=' '):
    """
    Recursively pretty-print a (possibly nested) Dict/dict to an open *file*
    handle, one "key: value" line per leaf, nesting shown by indentation.
    Raises TypeError for any other input type.
    """
    if not (isinstance(input_dict, Dict) or isinstance(input_dict, dict)):
        raise TypeError("input type of func 'print_dict' is not Dict(obj) or dict")
    for key, value in input_dict.items():
        if isinstance(value, Dict) or isinstance(value, dict):
            # nested mapping: print the key alone, then recurse one level deeper
            file.write('%s%s: \n' % (indent, key))
            print_dict(file, value, '%s %s'%(ind_style, indent), ind_style)
        else:
            file.write('%s%s: %s\n' % (indent, key, value))
|
||||
|
||||
def print_config(file, config):
    """
    Write the configuration report to an already-open *file* handle.

    Output layout depends on config.save.log.cfg_pmode:

    split mode: `running info` + `custom` config + `default` config
    iwantall mode: `custom` config + `merged` config + `default` config
    merge mode: `merged` config
    """
    mode = config.save.log.cfg_pmode
    indent_style = ' '
    if mode == 'split':
        # running information: (only split mode)
        file.write('---------------running info---------------\n')
        file.write('last version: %s\n' % (config.run.version))
        file.write('custom config file: %s\n' % (config.run.custom_path))
        file.write('starting time: %s\n' % (config.run.time))
        file.write('host name: %s\n' % (config.run.hostname))
        file.write('process ID: %s\n' % (config.run.pid))
        file.write('------------------------------------------\n')
    if mode in ['split', 'iwantall']:
        # custom config:
        # NOTE: default_cfg loaded here is also consumed by the
        # 'default config' branch below, which runs under the same
        # mode condition -- keep these two branches in sync.
        custom_cfg, default_cfg = cfg.load_split_config(custom_config_path=config.run.custom_path)
        file.write('\n---------------custom config--------------\n')
        if custom_cfg is None:
            file.write('\nNo customized configuration\n')
        else:
            print_dict(file, custom_cfg, ind_style = indent_style)
        file.write('------------------------------------------\n')
    if mode in ['merge', 'iwantall']:
        # merged config:
        file.write('------config info (custom + default)------\n')
        print_dict(file, config, ind_style = indent_style)
        file.write('------------------------------------------\n')
    if mode in ['split', 'iwantall']:
        # default config:
        file.write('\n--------------default config--------------\n')
        print_dict(file, default_cfg, ind_style = indent_style)
        file.write('------------------------------------------\n')
|
||||
def save_config():
    """Append the optional notes and the full configuration dump to the log file."""
    cfg_obj = Config()
    log_path = save_path_setting(cfg_obj, 'log')
    with open(log_path, 'a') as log_file:
        # notes come first, when present
        notes = cfg_obj.save.log.notes
        if notes is not None:
            log_file.write('%s\n\n' % (notes))
        # then the configuration report
        print_config(log_file, cfg_obj)
|
||||
def save_log_line(line, config):
    """Log *line* at INFO level via the shared AutoLogger.

    *config* is kept for backward compatibility with older callers; file
    persistence is now handled by the logger itself, so the previous
    manual file-append code has been removed.
    """
    autologger = AutoLogger()
    autologger.info(line)
|
||||
def print_and_save(line, config):
    """Emit *line* through the shared AutoLogger (prints and persists).

    *config* is kept for backward compatibility with older callers; the
    dead commented-out print/save code has been removed -- AutoLogger
    handles both concerns now.
    """
    autologger = AutoLogger()
    autologger.info(line)
||||
|
||||
############################# message/code save #############################
|
||||
def save_messages_to_txt(messages, save_path):
    """Append a chat transcript to a text file.

    Each message dict must carry 'role' and 'content'; an optional 'time'
    (seconds used) is included in the section header when present. Parent
    directories are created as needed. The file is opened with explicit
    UTF-8 encoding so multi-byte content is written identically on every
    platform.
    """
    parent = os.path.dirname(save_path)
    if parent:  # bare file names have no parent; os.makedirs('') would raise
        os.makedirs(parent, exist_ok=True)
    with open(save_path, 'a', encoding='utf-8') as file:
        for message in messages:
            if "time" in message:
                file.write('########## %s (%ss used) ##########\n%s\n\n' % (message['role'], message['time'], message['content']))
            else:
                file.write('########## %s ##########\n%s\n\n' % (message['role'], message['content']))
        # blank separator line after the whole transcript
        file.write('\n')
||||
|
||||
def print_messages(messages):
    """Print each message to stdout in the same layout used by save_messages_to_txt."""
    for msg in messages:
        if "time" in msg:
            header = '########## %s (%ss used) ##########' % (msg['role'], msg['time'])
        else:
            header = '########## %s ##########' % (msg['role'])
        # header line, content line, then a trailing blank line from print
        print('%s\n%s\n' % (header, msg['content']))
|
||||
def save_messages_to_log(messages, config):
    """Append *messages* to the run's log file (path derived from *config*)."""
    save_messages_to_txt(messages, save_path_setting(config, 'log'))
|
||||
def gpt_message_individual_save(messages, config, file_name = None, file_format = "json", silent = False):
    """Save one conversation to its own file in the message directory.

    When *file_name* is given it replaces the last path component of the
    default message path (keeping its directory). Supported formats are
    'txt' and 'json'; anything else is silently not written.
    """
    target = save_path_setting(config, 'message')
    # swap the default file name for the caller-supplied one (xxx/xx.json -> xxx/file_name.format)
    if file_name is not None:
        parts = target.split('/')
        parts[-1] = file_name + "." + file_format
        target = '/'.join(parts)
    # dispatch on the requested format
    writers = {'txt': save_messages_to_txt, 'json': save_dict_json_form}
    writer = writers.get(file_format)
    if writer is not None:
        writer(messages, target)
    # report
    if silent:
        return
    print("\n")
    if file_name is not None:
        print("(file name: %s)" % (file_name))
    print('your conversation with ChatGPT has been successfully saved to "%s"\n' % (target))
|
||||
# def save_TB_code(code_txt, task_id, config):
|
||||
# """save the verilog testbench code to a .v file."""
|
||||
# iverilog_dir = config.save.iverilog.subdir
|
||||
# save_path = save_path_setting(config, 'tb')
|
||||
# save_path = save_path.split('/')
|
||||
# # insert iverilog dir
|
||||
# save_path.insert(-1, iverilog_dir)
|
||||
# tb_name = task_id + '_tb.v'
|
||||
# save_path[-1] = tb_name
|
||||
# save_dir = '/'.join(save_path[:-1]) + '/'
|
||||
# save_path = '/'.join(save_path)
|
||||
# if os.path.exists(save_dir) != True: #dir check
|
||||
# os.makedirs(save_dir)
|
||||
# with open(save_path, 'a') as file:
|
||||
# file.write(code_txt)
|
||||
# print("\n")
|
||||
# print('your testbench code has been successfully saved to "%s"\n' % (save_path))
|
||||
# return {'name': tb_name, 'dir': save_dir, 'path': save_path}
|
||||
|
||||
|
||||
# will be discarded in the future
|
||||
def save_code_iv(code_txt, task_id, code_type, config, iverilog_dir = None, silent = False):
    """
    save the verilog TB/DUT code to a .v file. This func is for iverilog call.
    #### input:
    - code_txt: the verilog code in string format
    - task_id: the task id of the problem
    - code_type: 'TB' or 'DUT'
    - config: the config object
    - iverilog_dir: the directory to save the code. If None, use the default directory.
    - silent: suppress the confirmation print when True
    #### return:
    - dict with keys 'name', 'dir', 'path', 'code_type'
    """
    assert code_type in ["TB", "DUT"], "code_type should be 'TB' or 'DUT'"
    # file-name suffix chosen by code kind
    suffix_dict = {'TB': '_tb.v', 'DUT': '.v'}
    if iverilog_dir is None:
        # derive the path from config and inject the iverilog sub-directory
        iverilog_subdir = config.save.iverilog.subdir
        save_path = save_path_setting(config, 'tb')
        save_path = save_path.split('/')
        # insert iverilog dir
        save_path.insert(-1, iverilog_subdir)
    else:
        # caller-supplied directory; "name.v" is a placeholder file name
        # that is replaced by code_name below
        if not iverilog_dir.endswith('/'):
            iverilog_dir += '/'
        iverilog_path = iverilog_dir + "name.v"
        save_path = iverilog_path.split('/')
    code_name = task_id + suffix_dict[code_type]
    save_path[-1] = code_name
    save_dir = '/'.join(save_path[:-1]) + '/'
    save_path = '/'.join(save_path)
    os.makedirs(save_dir, exist_ok=True)
    # NOTE(review): append mode means repeated runs accumulate code in the
    # same .v file -- confirm this is intended rather than 'w'
    with open(save_path, 'a') as file:
        file.write(code_txt)
    if not silent:
        print("\n")
        print('your %s code has been successfully saved to "%s"\n' % (code_type, save_path))
    return {'name': code_name, 'dir': save_dir, 'path': save_path, 'code_type': code_type}
||||
|
||||
def save_code(code, path):
    """Write *code* to *path* (overwriting), creating parent dirs as needed.

    Uses explicit UTF-8 encoding so output is platform-independent, and
    guards against a bare file name: os.path.dirname('file.v') is '' and
    os.makedirs('') raises FileNotFoundError.
    """
    parent = os.path.dirname(path)
    if parent:
        os.makedirs(parent, exist_ok=True)
    with open(path, 'w', encoding='utf-8') as file:
        file.write(code)
|
||||
autologger = AutoLogger()
|
||||
|
||||
############################# __main__ ################################
|
||||
if __name__ == "__main__":
    # this module is a library of save/log helpers; no standalone behavior
    pass
98
main.py
Normal file
98
main.py
Normal file
@@ -0,0 +1,98 @@
|
||||
"""
|
||||
TBgen_App - 入口文件
|
||||
|
||||
用法:
|
||||
python main.py -d <dut.v> -w <description.txt> -o <output_dir> -m <model>
|
||||
|
||||
示例:
|
||||
python main.py -d dut.v -w description.txt --header "module example(...)" -m qwen-max
|
||||
"""
|
||||
import sys
|
||||
import getopt
|
||||
import os
|
||||
from pathlib import Path
|
||||
|
||||
# 添加项目路径
|
||||
PROJECT_ROOT = Path(__file__).parent
|
||||
sys.path.insert(0, str(PROJECT_ROOT))
|
||||
|
||||
from run_tbgen import generate_tb
|
||||
|
||||
|
||||
def main():
    """Parse CLI options, read the DUT and description files, and run TB generation.

    Exits 0 after printing help, 2 on bad/missing arguments.
    """
    try:
        # NOTE: 'h' must NOT be followed by ':' -- it is a flag, not an
        # option taking a value. The original "h:d:w:o:m:" made a bare
        # "-h" raise GetoptError.
        opts, args = getopt.getopt(sys.argv[1:], "hd:w:o:m:",
                                   ["help", "dut=", "desc=", "header=", "output=", "model="])
    except getopt.GetoptError as err:
        print(err)
        sys.exit(2)

    dut_path = None
    desc_path = None
    header = None
    output = "./output"
    model = "qwen-max"

    for opt, arg in opts:
        if opt in ("-h", "--help"):
            print("Usage: python main.py -d <dut.v> -w <description.txt> [options]")
            print("")
            print("Options:")
            print(" -d, --dut <file> DUT文件路径 (必需)")
            print(" -w, --desc <file> 描述文件路径 (必需)")
            print(" --header <string> Module header (必需)")
            print(" -o, --output <dir> 输出目录 (默认: ./output)")
            print(" -m, --model <model> 使用的模型 (默认: qwen-max)")
            print(" -h, --help 显示帮助信息")
            sys.exit(0)
        elif opt in ("-d", "--dut"):
            dut_path = arg
        elif opt in ("-w", "--desc"):
            desc_path = arg
        elif opt == "--header":
            # was `opt in ("--header")`: that is a substring test on a
            # plain string, not tuple membership
            header = arg
        elif opt in ("-o", "--output"):
            output = arg
        elif opt in ("-m", "--model"):
            model = arg

    if not dut_path or not desc_path:
        print("Error: -d (dut) and -w (desc) are required")
        print("Use -h or --help for usage information")
        sys.exit(2)

    if not header:
        print("Error: --header is required")
        print("Use -h or --help for usage information")
        sys.exit(2)

    # read input files (explicit UTF-8 so Verilog/description text decodes
    # the same way on every platform)
    with open(dut_path, "r", encoding="utf-8") as f:
        dut_code = f.read()
    with open(desc_path, "r", encoding="utf-8") as f:
        description = f.read()

    # generate the testbench; the task id is the DUT file's stem
    task_id = Path(dut_path).stem
    print(f"Generating TB for task: {task_id}")
    print(f"Model: {model}")
    print(f"Output directory: {output}")

    tb_path, result = generate_tb(
        dut_code=dut_code,
        description=description,
        header=header,
        task_id=task_id,
        model=model,
        output_dir=output
    )

    print(f"\nTB generated successfully: {tb_path}")
    if result.get("cga_coverage"):
        print(f"Coverage: {result['cga_coverage']}")
    if result.get("full_pass"):
        print(f"Full Pass: {result['full_pass']}")
||||
|
||||
# CLI entry point
if __name__ == "__main__":
    main()
||||
30
output/example_mul/example_mul/1_TBgen/TBgen_codes/TBout.txt
Normal file
30
output/example_mul/example_mul/1_TBgen/TBgen_codes/TBout.txt
Normal file
@@ -0,0 +1,30 @@
|
||||
scenario: 1, clk = 0, rst = 1, a = 0, b = 0, y = z
|
||||
[check]scenario: 1, clk = 1, rst = 1, a = 0, b = 0, y = z
|
||||
scenario: 2, clk = 1, rst = 0, a = 1, b = 1, y = z
|
||||
[check]scenario: 2, clk = 1, rst = 0, a = 1, b = 1, y = z
|
||||
scenario: 3, clk = 1, rst = 0, a = 0, b = 255, y = z
|
||||
[check]scenario: 3, clk = 1, rst = 0, a = 0, b = 255, y = z
|
||||
scenario: 4, clk = 1, rst = 0, a = 255, b = 0, y = z
|
||||
[check]scenario: 4, clk = 1, rst = 0, a = 255, b = 0, y = z
|
||||
scenario: 5, clk = 1, rst = 0, a = 255, b = 255, y = z
|
||||
[check]scenario: 5, clk = 1, rst = 0, a = 255, b = 255, y = z
|
||||
scenario: 6, clk = 1, rst = 0, a = 128, b = 128, y = z
|
||||
[check]scenario: 6, clk = 1, rst = 0, a = 128, b = 128, y = z
|
||||
scenario: 7, clk = 1, rst = 0, a = 1, b = 128, y = z
|
||||
[check]scenario: 7, clk = 1, rst = 0, a = 1, b = 128, y = z
|
||||
scenario: 8, clk = 1, rst = 0, a = 128, b = 1, y = z
|
||||
[check]scenario: 8, clk = 1, rst = 0, a = 128, b = 1, y = z
|
||||
scenario: 9, clk = 1, rst = 0, a = 10, b = 11, y = z
|
||||
[check]scenario: 9, clk = 1, rst = 0, a = 10, b = 11, y = z
|
||||
scenario: 10, clk = 1, rst = 0, a = 127, b = 127, y = z
|
||||
[check]scenario: 10, clk = 1, rst = 0, a = 127, b = 127, y = z
|
||||
scenario: 11, clk = 1, rst = 0, a = 15, b = 240, y = z
|
||||
[check]scenario: 11, clk = 1, rst = 0, a = 15, b = 240, y = z
|
||||
scenario: 12, clk = 1, rst = 0, a = 0, b = 0, y = z
|
||||
[check]scenario: 12, clk = 1, rst = 0, a = 0, b = 0, y = z
|
||||
scenario: 13, clk = 1, rst = 0, a = 15, b = 15, y = z
|
||||
[check]scenario: 13, clk = 1, rst = 0, a = 15, b = 15, y = z
|
||||
scenario: 14, clk = 1, rst = 0, a = 14, b = 14, y = z
|
||||
[check]scenario: 14, clk = 1, rst = 0, a = 14, b = 14, y = z
|
||||
scenario: 15, clk = 1, rst = 0, a = 13, b = 13, y = z
|
||||
[check]scenario: 15, clk = 1, rst = 0, a = 13, b = 13, y = z
|
||||
@@ -0,0 +1,3 @@
|
||||
module example(input clk, input rst, input [7:0] a, input [7:0] b, output [15:0] y);
|
||||
|
||||
endmodule
|
||||
@@ -0,0 +1,82 @@
|
||||
class GoldenDUT:
    """Python reference model of the registered 8x8 -> 16-bit multiplier DUT."""

    def __init__(self):
        # registered operand/result state; reset is asserted at power-on
        self.a_reg = 0
        self.b_reg = 0
        self.y_reg = 0
        self.rst_reg = 1

    def load(self, signal_vector):
        """Clock one input vector into the model, honouring synchronous reset."""
        self.a_reg = signal_vector['a'] & 0xFF
        self.b_reg = signal_vector['b'] & 0xFF
        self.rst_reg = signal_vector['rst']
        if self.rst_reg == 1:
            # reset clears all registered state
            self.a_reg = 0
            self.b_reg = 0
            self.y_reg = 0
        else:
            self.y_reg = self.multiply_8bit(self.a_reg, self.b_reg)

    def check(self, signal_vector):
        """Compare observed y against the product of the registered operands."""
        expected_y = self.multiply_8bit(self.a_reg, self.b_reg)
        observed_y = signal_vector['y'] & 0xFFFF
        if expected_y == observed_y:
            return True
        print(f"Scenario: {signal_vector['scenario']}, expected: y=0x{expected_y:04X}, observed: y=0x{observed_y:04X}")
        return False

    def multiply_8bit(self, a, b):
        """Return (a & 0xFF) * (b & 0xFF), truncated to 16 bits."""
        return ((a & 0xFF) * (b & 0xFF)) & 0xFFFF
|
||||
def check_dut(vectors_in):
    """Replay *vectors_in* through the golden model; return the failed scenario ids.

    For every vector, check-enabled entries are compared against the model
    BEFORE the vector is loaded (the model holds the previous cycle's state).
    """
    golden_dut = GoldenDUT()
    failed_scenarios = []
    for vec in vectors_in:
        if vec["check_en"]:
            if golden_dut.check(vec):
                print(f"Passed; vector: {vec}")
            else:
                print(f"Failed; vector: {vec}")
                failed_scenarios.append(vec["scenario"])
        # always clock the vector in afterwards
        golden_dut.load(vec)
    return failed_scenarios
||||
def SignalTxt_to_dictlist(txt: str):
    """Parse the testbench trace text into a list of signal dicts.

    Lines beginning with "[check]" produce check_en=True entries, lines
    beginning with "scenario" produce check_en=False entries, anything else
    is skipped. Numeric fields become ints; x/X/z values map to 0. The
    scenario id is kept as a (space-stripped) string.
    """
    signals = []
    for raw in txt.strip().split("\n"):
        if raw.startswith("[check]"):
            entry = {"check_en": True}
            raw = raw[7:]
        elif raw.startswith("scenario"):
            entry = {"check_en": False}
        else:
            # not a trace line
            continue
        for field in raw.strip().split(", "):
            if "scenario" in field:
                entry["scenario"] = field.split(": ")[1].replace(" ", "")
                continue
            key, value = field.split(" = ")
            if ("x" in value) or ("X" in value) or ("z" in value):
                entry[key] = 0  # unknown (x/X) or high-impedance (z) -> 0
            else:
                entry[key] = int(value)
        signals.append(entry)
    return signals
# Script entry: read the simulator trace and replay it through the golden model.
with open("TBout.txt", "r") as f:
    txt = f.read()
vectors_in = SignalTxt_to_dictlist(txt)
# check_dut returns the list of FAILED scenario ids, so despite the name,
# an empty "tb_pass" list means every checked vector passed
tb_pass = check_dut(vectors_in)
print(tb_pass)
@@ -0,0 +1,138 @@
|
||||
`timescale 1ns / 1ps
|
||||
module testbench;
|
||||
reg clk;
|
||||
reg rst;
|
||||
reg [7:0] a;
|
||||
reg [7:0] b;
|
||||
wire [15:0] y;
|
||||
|
||||
integer file, scenario;
|
||||
// DUT instantiation
|
||||
example DUT (
|
||||
.clk(clk),
|
||||
.rst(rst),
|
||||
.a(a),
|
||||
.b(b),
|
||||
.y(y)
|
||||
);
|
||||
// Clock generation
|
||||
initial begin
|
||||
clk = 0;
|
||||
forever #5 clk = ~clk;
|
||||
end
|
||||
|
||||
initial begin
|
||||
file = $fopen("TBout.txt", "w");
|
||||
end
|
||||
// Scenario Based Test
|
||||
initial begin
|
||||
// scenario 1
|
||||
scenario = 1;
|
||||
rst = 1; a = 8'b0000_0000; b = 8'b0000_0000;
|
||||
$fdisplay(file, "scenario: %d, clk = %d, rst = %d, a = %d, b = %d, y = %d", scenario, clk, rst, a, b, y);
|
||||
#10;
|
||||
$fdisplay(file, "[check]scenario: %d, clk = %d, rst = %d, a = %d, b = %d, y = %d", scenario, clk, rst, a, b, y); #10;
|
||||
|
||||
// scenario 2
|
||||
scenario = 2;
|
||||
rst = 0; a = 8'b0000_0001; b = 8'b0000_0001;
|
||||
$fdisplay(file, "scenario: %d, clk = %d, rst = %d, a = %d, b = %d, y = %d", scenario, clk, rst, a, b, y);
|
||||
#10;
|
||||
$fdisplay(file, "[check]scenario: %d, clk = %d, rst = %d, a = %d, b = %d, y = %d", scenario, clk, rst, a, b, y); #10;
|
||||
|
||||
// scenario 3
|
||||
scenario = 3;
|
||||
a = 8'b0000_0000; b = 8'b1111_1111;
|
||||
$fdisplay(file, "scenario: %d, clk = %d, rst = %d, a = %d, b = %d, y = %d", scenario, clk, rst, a, b, y);
|
||||
#10;
|
||||
$fdisplay(file, "[check]scenario: %d, clk = %d, rst = %d, a = %d, b = %d, y = %d", scenario, clk, rst, a, b, y); #10;
|
||||
|
||||
// scenario 4
|
||||
scenario = 4;
|
||||
a = 8'b1111_1111; b = 8'b0000_0000;
|
||||
$fdisplay(file, "scenario: %d, clk = %d, rst = %d, a = %d, b = %d, y = %d", scenario, clk, rst, a, b, y);
|
||||
#10;
|
||||
$fdisplay(file, "[check]scenario: %d, clk = %d, rst = %d, a = %d, b = %d, y = %d", scenario, clk, rst, a, b, y); #10;
|
||||
|
||||
// scenario 5
|
||||
scenario = 5;
|
||||
a = 8'b1111_1111; b = 8'b1111_1111;
|
||||
$fdisplay(file, "scenario: %d, clk = %d, rst = %d, a = %d, b = %d, y = %d", scenario, clk, rst, a, b, y);
|
||||
#10;
|
||||
$fdisplay(file, "[check]scenario: %d, clk = %d, rst = %d, a = %d, b = %d, y = %d", scenario, clk, rst, a, b, y); #10;
|
||||
|
||||
// scenario 6
|
||||
scenario = 6;
|
||||
a = 8'b1000_0000; b = 8'b1000_0000;
|
||||
$fdisplay(file, "scenario: %d, clk = %d, rst = %d, a = %d, b = %d, y = %d", scenario, clk, rst, a, b, y);
|
||||
#10;
|
||||
$fdisplay(file, "[check]scenario: %d, clk = %d, rst = %d, a = %d, b = %d, y = %d", scenario, clk, rst, a, b, y); #10;
|
||||
|
||||
// scenario 7
|
||||
scenario = 7;
|
||||
a = 8'b0000_0001; b = 8'b1000_0000;
|
||||
$fdisplay(file, "scenario: %d, clk = %d, rst = %d, a = %d, b = %d, y = %d", scenario, clk, rst, a, b, y);
|
||||
#10;
|
||||
$fdisplay(file, "[check]scenario: %d, clk = %d, rst = %d, a = %d, b = %d, y = %d", scenario, clk, rst, a, b, y); #10;
|
||||
|
||||
// scenario 8
|
||||
scenario = 8;
|
||||
a = 8'b1000_0000; b = 8'b0000_0001;
|
||||
$fdisplay(file, "scenario: %d, clk = %d, rst = %d, a = %d, b = %d, y = %d", scenario, clk, rst, a, b, y);
|
||||
#10;
|
||||
$fdisplay(file, "[check]scenario: %d, clk = %d, rst = %d, a = %d, b = %d, y = %d", scenario, clk, rst, a, b, y); #10;
|
||||
|
||||
// scenario 9
|
||||
scenario = 9;
|
||||
a = 8'b0000_1010; b = 8'b0000_1011;
|
||||
$fdisplay(file, "scenario: %d, clk = %d, rst = %d, a = %d, b = %d, y = %d", scenario, clk, rst, a, b, y);
|
||||
#10;
|
||||
$fdisplay(file, "[check]scenario: %d, clk = %d, rst = %d, a = %d, b = %d, y = %d", scenario, clk, rst, a, b, y); #10;
|
||||
|
||||
// scenario 10
|
||||
scenario = 10;
|
||||
a = 8'b0111_1111; b = 8'b0111_1111;
|
||||
$fdisplay(file, "scenario: %d, clk = %d, rst = %d, a = %d, b = %d, y = %d", scenario, clk, rst, a, b, y);
|
||||
#10;
|
||||
$fdisplay(file, "[check]scenario: %d, clk = %d, rst = %d, a = %d, b = %d, y = %d", scenario, clk, rst, a, b, y); #10;
|
||||
|
||||
// scenario 11
|
||||
scenario = 11;
|
||||
a = 8'b0000_1111; b = 8'b1111_0000;
|
||||
$fdisplay(file, "scenario: %d, clk = %d, rst = %d, a = %d, b = %d, y = %d", scenario, clk, rst, a, b, y);
|
||||
#10;
|
||||
$fdisplay(file, "[check]scenario: %d, clk = %d, rst = %d, a = %d, b = %d, y = %d", scenario, clk, rst, a, b, y); #10;
|
||||
|
||||
// scenario 12
|
||||
scenario = 12;
|
||||
a = 8'b0000_0000; b = 8'b0000_0000;
|
||||
$fdisplay(file, "scenario: %d, clk = %d, rst = %d, a = %d, b = %d, y = %d", scenario, clk, rst, a, b, y);
|
||||
#10;
|
||||
$fdisplay(file, "[check]scenario: %d, clk = %d, rst = %d, a = %d, b = %d, y = %d", scenario, clk, rst, a, b, y); #10;
|
||||
|
||||
// scenario 13
|
||||
scenario = 13;
|
||||
a = 8'b0000_1111; b = 8'b0000_1111;
|
||||
$fdisplay(file, "scenario: %d, clk = %d, rst = %d, a = %d, b = %d, y = %d", scenario, clk, rst, a, b, y);
|
||||
#10;
|
||||
$fdisplay(file, "[check]scenario: %d, clk = %d, rst = %d, a = %d, b = %d, y = %d", scenario, clk, rst, a, b, y); #10;
|
||||
|
||||
// scenario 14
|
||||
scenario = 14;
|
||||
a = 8'b0000_1110; b = 8'b0000_1110;
|
||||
$fdisplay(file, "scenario: %d, clk = %d, rst = %d, a = %d, b = %d, y = %d", scenario, clk, rst, a, b, y);
|
||||
#10;
|
||||
$fdisplay(file, "[check]scenario: %d, clk = %d, rst = %d, a = %d, b = %d, y = %d", scenario, clk, rst, a, b, y); #10;
|
||||
|
||||
// scenario 15
|
||||
scenario = 15;
|
||||
a = 8'b0000_1101; b = 8'b0000_1101;
|
||||
$fdisplay(file, "scenario: %d, clk = %d, rst = %d, a = %d, b = %d, y = %d", scenario, clk, rst, a, b, y);
|
||||
#10;
|
||||
$fdisplay(file, "[check]scenario: %d, clk = %d, rst = %d, a = %d, b = %d, y = %d", scenario, clk, rst, a, b, y);
|
||||
|
||||
$fclose(file);
|
||||
$finish;
|
||||
end
|
||||
|
||||
endmodule
|
||||
218
output/example_mul/example_mul/1_TBgen/TBgen_codes/run.vvp
Normal file
218
output/example_mul/example_mul/1_TBgen/TBgen_codes/run.vvp
Normal file
@@ -0,0 +1,218 @@
|
||||
#! /usr/bin/vvp
|
||||
:ivl_version "12.0 (stable)";
|
||||
:ivl_delay_selection "TYPICAL";
|
||||
:vpi_time_precision - 12;
|
||||
:vpi_module "/usr/lib/x86_64-linux-gnu/ivl/system.vpi";
|
||||
:vpi_module "/usr/lib/x86_64-linux-gnu/ivl/vhdl_sys.vpi";
|
||||
:vpi_module "/usr/lib/x86_64-linux-gnu/ivl/vhdl_textio.vpi";
|
||||
:vpi_module "/usr/lib/x86_64-linux-gnu/ivl/v2005_math.vpi";
|
||||
:vpi_module "/usr/lib/x86_64-linux-gnu/ivl/va_math.vpi";
|
||||
:vpi_module "/usr/lib/x86_64-linux-gnu/ivl/v2009.vpi";
|
||||
S_0x5f1b861d6b90 .scope package, "$unit" "$unit" 2 1;
|
||||
.timescale 0 0;
|
||||
S_0x5f1b861b9000 .scope module, "testbench" "testbench" 3 2;
|
||||
.timescale -9 -12;
|
||||
v0x5f1b861f7200_0 .var "a", 7 0;
|
||||
v0x5f1b861f72e0_0 .var "b", 7 0;
|
||||
v0x5f1b861f73b0_0 .var "clk", 0 0;
|
||||
v0x5f1b861f74b0_0 .var/i "file", 31 0;
|
||||
v0x5f1b861f7550_0 .var "rst", 0 0;
|
||||
v0x5f1b861f7640_0 .var/i "scenario", 31 0;
|
||||
o0x70ab3a8a60d8 .functor BUFZ 16, C4<zzzzzzzzzzzzzzzz>; HiZ drive
|
||||
v0x5f1b861f76e0_0 .net "y", 15 0, o0x70ab3a8a60d8; 0 drivers
|
||||
S_0x5f1b861b9190 .scope module, "DUT" "example" 3 11, 4 1 0, S_0x5f1b861b9000;
|
||||
.timescale 0 0;
|
||||
.port_info 0 /INPUT 1 "clk";
|
||||
.port_info 1 /INPUT 1 "rst";
|
||||
.port_info 2 /INPUT 8 "a";
|
||||
.port_info 3 /INPUT 8 "b";
|
||||
.port_info 4 /OUTPUT 16 "y";
|
||||
v0x5f1b861d47e0_0 .net "a", 7 0, v0x5f1b861f7200_0; 1 drivers
|
||||
v0x5f1b861f6dc0_0 .net "b", 7 0, v0x5f1b861f72e0_0; 1 drivers
|
||||
v0x5f1b861f6ea0_0 .net "clk", 0 0, v0x5f1b861f73b0_0; 1 drivers
|
||||
v0x5f1b861f6f70_0 .net "rst", 0 0, v0x5f1b861f7550_0; 1 drivers
|
||||
v0x5f1b861f7030_0 .net "y", 15 0, o0x70ab3a8a60d8; alias, 0 drivers
|
||||
.scope S_0x5f1b861b9000;
|
||||
T_0 ;
|
||||
%pushi/vec4 0, 0, 1;
|
||||
%store/vec4 v0x5f1b861f73b0_0, 0, 1;
|
||||
T_0.0 ;
|
||||
%delay 5000, 0;
|
||||
%load/vec4 v0x5f1b861f73b0_0;
|
||||
%inv;
|
||||
%store/vec4 v0x5f1b861f73b0_0, 0, 1;
|
||||
%jmp T_0.0;
|
||||
%end;
|
||||
.thread T_0;
|
||||
.scope S_0x5f1b861b9000;
|
||||
T_1 ;
|
||||
%vpi_func 3 25 "$fopen" 32, "TBout.txt", "w" {0 0 0};
|
||||
%store/vec4 v0x5f1b861f74b0_0, 0, 32;
|
||||
%end;
|
||||
.thread T_1;
|
||||
.scope S_0x5f1b861b9000;
|
||||
T_2 ;
|
||||
%pushi/vec4 1, 0, 32;
|
||||
%store/vec4 v0x5f1b861f7640_0, 0, 32;
|
||||
%pushi/vec4 1, 0, 1;
|
||||
%store/vec4 v0x5f1b861f7550_0, 0, 1;
|
||||
%pushi/vec4 0, 0, 8;
|
||||
%store/vec4 v0x5f1b861f7200_0, 0, 8;
|
||||
%pushi/vec4 0, 0, 8;
|
||||
%store/vec4 v0x5f1b861f72e0_0, 0, 8;
|
||||
%vpi_call/w 3 32 "$fdisplay", v0x5f1b861f74b0_0, "scenario: %d, clk = %d, rst = %d, a = %d, b = %d, y = %d", v0x5f1b861f7640_0, v0x5f1b861f73b0_0, v0x5f1b861f7550_0, v0x5f1b861f7200_0, v0x5f1b861f72e0_0, v0x5f1b861f76e0_0 {0 0 0};
|
||||
%delay 10000, 0;
|
||||
%vpi_call/w 3 34 "$fdisplay", v0x5f1b861f74b0_0, "[check]scenario: %d, clk = %d, rst = %d, a = %d, b = %d, y = %d", v0x5f1b861f7640_0, v0x5f1b861f73b0_0, v0x5f1b861f7550_0, v0x5f1b861f7200_0, v0x5f1b861f72e0_0, v0x5f1b861f76e0_0 {0 0 0};
|
||||
%delay 10000, 0;
|
||||
%pushi/vec4 2, 0, 32;
|
||||
%store/vec4 v0x5f1b861f7640_0, 0, 32;
|
||||
%pushi/vec4 0, 0, 1;
|
||||
%store/vec4 v0x5f1b861f7550_0, 0, 1;
|
||||
%pushi/vec4 1, 0, 8;
|
||||
%store/vec4 v0x5f1b861f7200_0, 0, 8;
|
||||
%pushi/vec4 1, 0, 8;
|
||||
%store/vec4 v0x5f1b861f72e0_0, 0, 8;
|
||||
%vpi_call/w 3 39 "$fdisplay", v0x5f1b861f74b0_0, "scenario: %d, clk = %d, rst = %d, a = %d, b = %d, y = %d", v0x5f1b861f7640_0, v0x5f1b861f73b0_0, v0x5f1b861f7550_0, v0x5f1b861f7200_0, v0x5f1b861f72e0_0, v0x5f1b861f76e0_0 {0 0 0};
|
||||
%delay 10000, 0;
|
||||
%vpi_call/w 3 41 "$fdisplay", v0x5f1b861f74b0_0, "[check]scenario: %d, clk = %d, rst = %d, a = %d, b = %d, y = %d", v0x5f1b861f7640_0, v0x5f1b861f73b0_0, v0x5f1b861f7550_0, v0x5f1b861f7200_0, v0x5f1b861f72e0_0, v0x5f1b861f76e0_0 {0 0 0};
|
||||
%delay 10000, 0;
|
||||
%pushi/vec4 3, 0, 32;
|
||||
%store/vec4 v0x5f1b861f7640_0, 0, 32;
|
||||
%pushi/vec4 0, 0, 8;
|
||||
%store/vec4 v0x5f1b861f7200_0, 0, 8;
|
||||
%pushi/vec4 255, 0, 8;
|
||||
%store/vec4 v0x5f1b861f72e0_0, 0, 8;
|
||||
%vpi_call/w 3 46 "$fdisplay", v0x5f1b861f74b0_0, "scenario: %d, clk = %d, rst = %d, a = %d, b = %d, y = %d", v0x5f1b861f7640_0, v0x5f1b861f73b0_0, v0x5f1b861f7550_0, v0x5f1b861f7200_0, v0x5f1b861f72e0_0, v0x5f1b861f76e0_0 {0 0 0};
|
||||
%delay 10000, 0;
|
||||
%vpi_call/w 3 48 "$fdisplay", v0x5f1b861f74b0_0, "[check]scenario: %d, clk = %d, rst = %d, a = %d, b = %d, y = %d", v0x5f1b861f7640_0, v0x5f1b861f73b0_0, v0x5f1b861f7550_0, v0x5f1b861f7200_0, v0x5f1b861f72e0_0, v0x5f1b861f76e0_0 {0 0 0};
|
||||
%delay 10000, 0;
|
||||
%pushi/vec4 4, 0, 32;
|
||||
%store/vec4 v0x5f1b861f7640_0, 0, 32;
|
||||
%pushi/vec4 255, 0, 8;
|
||||
%store/vec4 v0x5f1b861f7200_0, 0, 8;
|
||||
%pushi/vec4 0, 0, 8;
|
||||
%store/vec4 v0x5f1b861f72e0_0, 0, 8;
|
||||
%vpi_call/w 3 53 "$fdisplay", v0x5f1b861f74b0_0, "scenario: %d, clk = %d, rst = %d, a = %d, b = %d, y = %d", v0x5f1b861f7640_0, v0x5f1b861f73b0_0, v0x5f1b861f7550_0, v0x5f1b861f7200_0, v0x5f1b861f72e0_0, v0x5f1b861f76e0_0 {0 0 0};
|
||||
%delay 10000, 0;
|
||||
%vpi_call/w 3 55 "$fdisplay", v0x5f1b861f74b0_0, "[check]scenario: %d, clk = %d, rst = %d, a = %d, b = %d, y = %d", v0x5f1b861f7640_0, v0x5f1b861f73b0_0, v0x5f1b861f7550_0, v0x5f1b861f7200_0, v0x5f1b861f72e0_0, v0x5f1b861f76e0_0 {0 0 0};
|
||||
%delay 10000, 0;
|
||||
%pushi/vec4 5, 0, 32;
|
||||
%store/vec4 v0x5f1b861f7640_0, 0, 32;
|
||||
%pushi/vec4 255, 0, 8;
|
||||
%store/vec4 v0x5f1b861f7200_0, 0, 8;
|
||||
%pushi/vec4 255, 0, 8;
|
||||
%store/vec4 v0x5f1b861f72e0_0, 0, 8;
|
||||
%vpi_call/w 3 60 "$fdisplay", v0x5f1b861f74b0_0, "scenario: %d, clk = %d, rst = %d, a = %d, b = %d, y = %d", v0x5f1b861f7640_0, v0x5f1b861f73b0_0, v0x5f1b861f7550_0, v0x5f1b861f7200_0, v0x5f1b861f72e0_0, v0x5f1b861f76e0_0 {0 0 0};
|
||||
%delay 10000, 0;
|
||||
%vpi_call/w 3 62 "$fdisplay", v0x5f1b861f74b0_0, "[check]scenario: %d, clk = %d, rst = %d, a = %d, b = %d, y = %d", v0x5f1b861f7640_0, v0x5f1b861f73b0_0, v0x5f1b861f7550_0, v0x5f1b861f7200_0, v0x5f1b861f72e0_0, v0x5f1b861f76e0_0 {0 0 0};
|
||||
%delay 10000, 0;
|
||||
%pushi/vec4 6, 0, 32;
|
||||
%store/vec4 v0x5f1b861f7640_0, 0, 32;
|
||||
%pushi/vec4 128, 0, 8;
|
||||
%store/vec4 v0x5f1b861f7200_0, 0, 8;
|
||||
%pushi/vec4 128, 0, 8;
|
||||
%store/vec4 v0x5f1b861f72e0_0, 0, 8;
|
||||
%vpi_call/w 3 67 "$fdisplay", v0x5f1b861f74b0_0, "scenario: %d, clk = %d, rst = %d, a = %d, b = %d, y = %d", v0x5f1b861f7640_0, v0x5f1b861f73b0_0, v0x5f1b861f7550_0, v0x5f1b861f7200_0, v0x5f1b861f72e0_0, v0x5f1b861f76e0_0 {0 0 0};
|
||||
%delay 10000, 0;
|
||||
%vpi_call/w 3 69 "$fdisplay", v0x5f1b861f74b0_0, "[check]scenario: %d, clk = %d, rst = %d, a = %d, b = %d, y = %d", v0x5f1b861f7640_0, v0x5f1b861f73b0_0, v0x5f1b861f7550_0, v0x5f1b861f7200_0, v0x5f1b861f72e0_0, v0x5f1b861f76e0_0 {0 0 0};
|
||||
%delay 10000, 0;
|
||||
%pushi/vec4 7, 0, 32;
|
||||
%store/vec4 v0x5f1b861f7640_0, 0, 32;
|
||||
%pushi/vec4 1, 0, 8;
|
||||
%store/vec4 v0x5f1b861f7200_0, 0, 8;
|
||||
%pushi/vec4 128, 0, 8;
|
||||
%store/vec4 v0x5f1b861f72e0_0, 0, 8;
|
||||
%vpi_call/w 3 74 "$fdisplay", v0x5f1b861f74b0_0, "scenario: %d, clk = %d, rst = %d, a = %d, b = %d, y = %d", v0x5f1b861f7640_0, v0x5f1b861f73b0_0, v0x5f1b861f7550_0, v0x5f1b861f7200_0, v0x5f1b861f72e0_0, v0x5f1b861f76e0_0 {0 0 0};
|
||||
%delay 10000, 0;
|
||||
%vpi_call/w 3 76 "$fdisplay", v0x5f1b861f74b0_0, "[check]scenario: %d, clk = %d, rst = %d, a = %d, b = %d, y = %d", v0x5f1b861f7640_0, v0x5f1b861f73b0_0, v0x5f1b861f7550_0, v0x5f1b861f7200_0, v0x5f1b861f72e0_0, v0x5f1b861f76e0_0 {0 0 0};
|
||||
%delay 10000, 0;
|
||||
%pushi/vec4 8, 0, 32;
|
||||
%store/vec4 v0x5f1b861f7640_0, 0, 32;
|
||||
%pushi/vec4 128, 0, 8;
|
||||
%store/vec4 v0x5f1b861f7200_0, 0, 8;
|
||||
%pushi/vec4 1, 0, 8;
|
||||
%store/vec4 v0x5f1b861f72e0_0, 0, 8;
|
||||
%vpi_call/w 3 81 "$fdisplay", v0x5f1b861f74b0_0, "scenario: %d, clk = %d, rst = %d, a = %d, b = %d, y = %d", v0x5f1b861f7640_0, v0x5f1b861f73b0_0, v0x5f1b861f7550_0, v0x5f1b861f7200_0, v0x5f1b861f72e0_0, v0x5f1b861f76e0_0 {0 0 0};
|
||||
%delay 10000, 0;
|
||||
%vpi_call/w 3 83 "$fdisplay", v0x5f1b861f74b0_0, "[check]scenario: %d, clk = %d, rst = %d, a = %d, b = %d, y = %d", v0x5f1b861f7640_0, v0x5f1b861f73b0_0, v0x5f1b861f7550_0, v0x5f1b861f7200_0, v0x5f1b861f72e0_0, v0x5f1b861f76e0_0 {0 0 0};
|
||||
%delay 10000, 0;
|
||||
%pushi/vec4 9, 0, 32;
|
||||
%store/vec4 v0x5f1b861f7640_0, 0, 32;
|
||||
%pushi/vec4 10, 0, 8;
|
||||
%store/vec4 v0x5f1b861f7200_0, 0, 8;
|
||||
%pushi/vec4 11, 0, 8;
|
||||
%store/vec4 v0x5f1b861f72e0_0, 0, 8;
|
||||
%vpi_call/w 3 88 "$fdisplay", v0x5f1b861f74b0_0, "scenario: %d, clk = %d, rst = %d, a = %d, b = %d, y = %d", v0x5f1b861f7640_0, v0x5f1b861f73b0_0, v0x5f1b861f7550_0, v0x5f1b861f7200_0, v0x5f1b861f72e0_0, v0x5f1b861f76e0_0 {0 0 0};
|
||||
%delay 10000, 0;
|
||||
%vpi_call/w 3 90 "$fdisplay", v0x5f1b861f74b0_0, "[check]scenario: %d, clk = %d, rst = %d, a = %d, b = %d, y = %d", v0x5f1b861f7640_0, v0x5f1b861f73b0_0, v0x5f1b861f7550_0, v0x5f1b861f7200_0, v0x5f1b861f72e0_0, v0x5f1b861f76e0_0 {0 0 0};
|
||||
%delay 10000, 0;
|
||||
%pushi/vec4 10, 0, 32;
|
||||
%store/vec4 v0x5f1b861f7640_0, 0, 32;
|
||||
%pushi/vec4 127, 0, 8;
|
||||
%store/vec4 v0x5f1b861f7200_0, 0, 8;
|
||||
%pushi/vec4 127, 0, 8;
|
||||
%store/vec4 v0x5f1b861f72e0_0, 0, 8;
|
||||
%vpi_call/w 3 95 "$fdisplay", v0x5f1b861f74b0_0, "scenario: %d, clk = %d, rst = %d, a = %d, b = %d, y = %d", v0x5f1b861f7640_0, v0x5f1b861f73b0_0, v0x5f1b861f7550_0, v0x5f1b861f7200_0, v0x5f1b861f72e0_0, v0x5f1b861f76e0_0 {0 0 0};
|
||||
%delay 10000, 0;
|
||||
%vpi_call/w 3 97 "$fdisplay", v0x5f1b861f74b0_0, "[check]scenario: %d, clk = %d, rst = %d, a = %d, b = %d, y = %d", v0x5f1b861f7640_0, v0x5f1b861f73b0_0, v0x5f1b861f7550_0, v0x5f1b861f7200_0, v0x5f1b861f72e0_0, v0x5f1b861f76e0_0 {0 0 0};
|
||||
%delay 10000, 0;
|
||||
%pushi/vec4 11, 0, 32;
|
||||
%store/vec4 v0x5f1b861f7640_0, 0, 32;
|
||||
%pushi/vec4 15, 0, 8;
|
||||
%store/vec4 v0x5f1b861f7200_0, 0, 8;
|
||||
%pushi/vec4 240, 0, 8;
|
||||
%store/vec4 v0x5f1b861f72e0_0, 0, 8;
|
||||
%vpi_call/w 3 102 "$fdisplay", v0x5f1b861f74b0_0, "scenario: %d, clk = %d, rst = %d, a = %d, b = %d, y = %d", v0x5f1b861f7640_0, v0x5f1b861f73b0_0, v0x5f1b861f7550_0, v0x5f1b861f7200_0, v0x5f1b861f72e0_0, v0x5f1b861f76e0_0 {0 0 0};
|
||||
%delay 10000, 0;
|
||||
%vpi_call/w 3 104 "$fdisplay", v0x5f1b861f74b0_0, "[check]scenario: %d, clk = %d, rst = %d, a = %d, b = %d, y = %d", v0x5f1b861f7640_0, v0x5f1b861f73b0_0, v0x5f1b861f7550_0, v0x5f1b861f7200_0, v0x5f1b861f72e0_0, v0x5f1b861f76e0_0 {0 0 0};
|
||||
%delay 10000, 0;
|
||||
%pushi/vec4 12, 0, 32;
|
||||
%store/vec4 v0x5f1b861f7640_0, 0, 32;
|
||||
%pushi/vec4 0, 0, 8;
|
||||
%store/vec4 v0x5f1b861f7200_0, 0, 8;
|
||||
%pushi/vec4 0, 0, 8;
|
||||
%store/vec4 v0x5f1b861f72e0_0, 0, 8;
|
||||
%vpi_call/w 3 109 "$fdisplay", v0x5f1b861f74b0_0, "scenario: %d, clk = %d, rst = %d, a = %d, b = %d, y = %d", v0x5f1b861f7640_0, v0x5f1b861f73b0_0, v0x5f1b861f7550_0, v0x5f1b861f7200_0, v0x5f1b861f72e0_0, v0x5f1b861f76e0_0 {0 0 0};
|
||||
%delay 10000, 0;
|
||||
%vpi_call/w 3 111 "$fdisplay", v0x5f1b861f74b0_0, "[check]scenario: %d, clk = %d, rst = %d, a = %d, b = %d, y = %d", v0x5f1b861f7640_0, v0x5f1b861f73b0_0, v0x5f1b861f7550_0, v0x5f1b861f7200_0, v0x5f1b861f72e0_0, v0x5f1b861f76e0_0 {0 0 0};
|
||||
%delay 10000, 0;
|
||||
%pushi/vec4 13, 0, 32;
|
||||
%store/vec4 v0x5f1b861f7640_0, 0, 32;
|
||||
%pushi/vec4 15, 0, 8;
|
||||
%store/vec4 v0x5f1b861f7200_0, 0, 8;
|
||||
%pushi/vec4 15, 0, 8;
|
||||
%store/vec4 v0x5f1b861f72e0_0, 0, 8;
|
||||
%vpi_call/w 3 116 "$fdisplay", v0x5f1b861f74b0_0, "scenario: %d, clk = %d, rst = %d, a = %d, b = %d, y = %d", v0x5f1b861f7640_0, v0x5f1b861f73b0_0, v0x5f1b861f7550_0, v0x5f1b861f7200_0, v0x5f1b861f72e0_0, v0x5f1b861f76e0_0 {0 0 0};
|
||||
%delay 10000, 0;
|
||||
%vpi_call/w 3 118 "$fdisplay", v0x5f1b861f74b0_0, "[check]scenario: %d, clk = %d, rst = %d, a = %d, b = %d, y = %d", v0x5f1b861f7640_0, v0x5f1b861f73b0_0, v0x5f1b861f7550_0, v0x5f1b861f7200_0, v0x5f1b861f72e0_0, v0x5f1b861f76e0_0 {0 0 0};
|
||||
%delay 10000, 0;
|
||||
%pushi/vec4 14, 0, 32;
|
||||
%store/vec4 v0x5f1b861f7640_0, 0, 32;
|
||||
%pushi/vec4 14, 0, 8;
|
||||
%store/vec4 v0x5f1b861f7200_0, 0, 8;
|
||||
%pushi/vec4 14, 0, 8;
|
||||
%store/vec4 v0x5f1b861f72e0_0, 0, 8;
|
||||
%vpi_call/w 3 123 "$fdisplay", v0x5f1b861f74b0_0, "scenario: %d, clk = %d, rst = %d, a = %d, b = %d, y = %d", v0x5f1b861f7640_0, v0x5f1b861f73b0_0, v0x5f1b861f7550_0, v0x5f1b861f7200_0, v0x5f1b861f72e0_0, v0x5f1b861f76e0_0 {0 0 0};
|
||||
%delay 10000, 0;
|
||||
%vpi_call/w 3 125 "$fdisplay", v0x5f1b861f74b0_0, "[check]scenario: %d, clk = %d, rst = %d, a = %d, b = %d, y = %d", v0x5f1b861f7640_0, v0x5f1b861f73b0_0, v0x5f1b861f7550_0, v0x5f1b861f7200_0, v0x5f1b861f72e0_0, v0x5f1b861f76e0_0 {0 0 0};
|
||||
%delay 10000, 0;
|
||||
%pushi/vec4 15, 0, 32;
|
||||
%store/vec4 v0x5f1b861f7640_0, 0, 32;
|
||||
%pushi/vec4 13, 0, 8;
|
||||
%store/vec4 v0x5f1b861f7200_0, 0, 8;
|
||||
%pushi/vec4 13, 0, 8;
|
||||
%store/vec4 v0x5f1b861f72e0_0, 0, 8;
|
||||
%vpi_call/w 3 130 "$fdisplay", v0x5f1b861f74b0_0, "scenario: %d, clk = %d, rst = %d, a = %d, b = %d, y = %d", v0x5f1b861f7640_0, v0x5f1b861f73b0_0, v0x5f1b861f7550_0, v0x5f1b861f7200_0, v0x5f1b861f72e0_0, v0x5f1b861f76e0_0 {0 0 0};
|
||||
%delay 10000, 0;
|
||||
%vpi_call/w 3 132 "$fdisplay", v0x5f1b861f74b0_0, "[check]scenario: %d, clk = %d, rst = %d, a = %d, b = %d, y = %d", v0x5f1b861f7640_0, v0x5f1b861f73b0_0, v0x5f1b861f7550_0, v0x5f1b861f7200_0, v0x5f1b861f72e0_0, v0x5f1b861f76e0_0 {0 0 0};
|
||||
%vpi_call/w 3 134 "$fclose", v0x5f1b861f74b0_0 {0 0 0};
|
||||
%vpi_call/w 3 135 "$finish" {0 0 0};
|
||||
%end;
|
||||
.thread T_2;
|
||||
# The file index is used to find the file name in the following table.
|
||||
:file_names 5;
|
||||
"N/A";
|
||||
"<interactive>";
|
||||
"-";
|
||||
"example_mul_tb.v";
|
||||
"example_mul.v";
|
||||
@@ -0,0 +1,15 @@
|
||||
iverilog simulation passed!
|
||||
|
||||
iverilog cmd 1:
|
||||
/usr/bin/iverilog -g2012 -o run.vvp example_mul.v example_mul_tb.v
|
||||
iverilog cmd 1 output:
|
||||
|
||||
iverilog cmd 1 error:
|
||||
|
||||
iverilog cmd 2:
|
||||
/usr/bin/vvp run.vvp
|
||||
iverilog cmd 2 output:
|
||||
example_mul_tb.v:135: $finish called at 290000 (1ps)
|
||||
|
||||
iverilog cmd 2 error:
|
||||
|
||||
@@ -0,0 +1,35 @@
|
||||
python compilation passed!
|
||||
|
||||
###output:
|
||||
Passed; vector: {'check_en': True, 'scenario': '1', 'clk': 1, 'rst': 1, 'a': 0, 'b': 0, 'y': 0}
|
||||
Scenario: 2, expected: y=0x0001, observed: y=0x0000
|
||||
Failed; vector: {'check_en': True, 'scenario': '2', 'clk': 1, 'rst': 0, 'a': 1, 'b': 1, 'y': 0}
|
||||
Passed; vector: {'check_en': True, 'scenario': '3', 'clk': 1, 'rst': 0, 'a': 0, 'b': 255, 'y': 0}
|
||||
Passed; vector: {'check_en': True, 'scenario': '4', 'clk': 1, 'rst': 0, 'a': 255, 'b': 0, 'y': 0}
|
||||
Scenario: 5, expected: y=0xFE01, observed: y=0x0000
|
||||
Failed; vector: {'check_en': True, 'scenario': '5', 'clk': 1, 'rst': 0, 'a': 255, 'b': 255, 'y': 0}
|
||||
Scenario: 6, expected: y=0x4000, observed: y=0x0000
|
||||
Failed; vector: {'check_en': True, 'scenario': '6', 'clk': 1, 'rst': 0, 'a': 128, 'b': 128, 'y': 0}
|
||||
Scenario: 7, expected: y=0x0080, observed: y=0x0000
|
||||
Failed; vector: {'check_en': True, 'scenario': '7', 'clk': 1, 'rst': 0, 'a': 1, 'b': 128, 'y': 0}
|
||||
Scenario: 8, expected: y=0x0080, observed: y=0x0000
|
||||
Failed; vector: {'check_en': True, 'scenario': '8', 'clk': 1, 'rst': 0, 'a': 128, 'b': 1, 'y': 0}
|
||||
Scenario: 9, expected: y=0x006E, observed: y=0x0000
|
||||
Failed; vector: {'check_en': True, 'scenario': '9', 'clk': 1, 'rst': 0, 'a': 10, 'b': 11, 'y': 0}
|
||||
Scenario: 10, expected: y=0x3F01, observed: y=0x0000
|
||||
Failed; vector: {'check_en': True, 'scenario': '10', 'clk': 1, 'rst': 0, 'a': 127, 'b': 127, 'y': 0}
|
||||
Scenario: 11, expected: y=0x0E10, observed: y=0x0000
|
||||
Failed; vector: {'check_en': True, 'scenario': '11', 'clk': 1, 'rst': 0, 'a': 15, 'b': 240, 'y': 0}
|
||||
Passed; vector: {'check_en': True, 'scenario': '12', 'clk': 1, 'rst': 0, 'a': 0, 'b': 0, 'y': 0}
|
||||
Scenario: 13, expected: y=0x00E1, observed: y=0x0000
|
||||
Failed; vector: {'check_en': True, 'scenario': '13', 'clk': 1, 'rst': 0, 'a': 15, 'b': 15, 'y': 0}
|
||||
Scenario: 14, expected: y=0x00C4, observed: y=0x0000
|
||||
Failed; vector: {'check_en': True, 'scenario': '14', 'clk': 1, 'rst': 0, 'a': 14, 'b': 14, 'y': 0}
|
||||
Scenario: 15, expected: y=0x00A9, observed: y=0x0000
|
||||
Failed; vector: {'check_en': True, 'scenario': '15', 'clk': 1, 'rst': 0, 'a': 13, 'b': 13, 'y': 0}
|
||||
['2', '5', '6', '7', '8', '9', '10', '11', '13', '14', '15']
|
||||
|
||||
|
||||
###error:
|
||||
|
||||
|
||||
@@ -0,0 +1,2 @@
|
||||
example_mul.v
|
||||
example_mul_tb.v
|
||||
37
output/example_mul/example_mul/1_TBgen/stage_0.txt
Normal file
37
output/example_mul/example_mul/1_TBgen/stage_0.txt
Normal file
@@ -0,0 +1,37 @@
|
||||
############################## conversation begin ##############################
|
||||
########## system ##########
|
||||
You are the strongest AI in the world. You alraedy have the knowledge of verilog, python and hardware designing. Do not save words by discarding information. I will tip you 200$ if you can fullfill the tasks I give you.
|
||||
|
||||
########## user ##########
|
||||
Please generate the verilog RTL code according to the following description and header information:
|
||||
problem description:
|
||||
一个8位乘法器,输入两个8位无符号数,输出16位乘积
|
||||
RTL header:
|
||||
module example(input clk, input rst, input [7:0] a, input [7:0] b, output [15:0] y);
|
||||
please only reply verilog codes. reply_format:
|
||||
```verilog
|
||||
your_code_here...
|
||||
```
|
||||
|
||||
|
||||
########## assistant ##########
|
||||
```verilog
|
||||
module example(
|
||||
input clk,
|
||||
input rst,
|
||||
input [7:0] a,
|
||||
input [7:0] b,
|
||||
output reg [15:0] y
|
||||
);
|
||||
|
||||
always @(posedge clk or posedge rst) begin
|
||||
if (rst) begin
|
||||
y <= 16'b0;
|
||||
end else begin
|
||||
y <= a * b;
|
||||
end
|
||||
end
|
||||
|
||||
endmodule
|
||||
```
|
||||
|
||||
42
output/example_mul/example_mul/1_TBgen/stage_1.txt
Normal file
42
output/example_mul/example_mul/1_TBgen/stage_1.txt
Normal file
@@ -0,0 +1,42 @@
|
||||
############################## conversation begin ##############################
|
||||
########## system ##########
|
||||
You are the strongest AI in the world. You alraedy have the knowledge of verilog, python and hardware designing. Do not save words by discarding information. I will tip you 200$ if you can fullfill the tasks I give you.
|
||||
|
||||
########## user ##########
|
||||
1. Your task is to write a verilog testbench for an verilog RTL module code (we call it as "DUT", device under test). The infomation we have is the problem description that guides student to write the RTL code (DUT) and the header of the "DUT". Our target is to generate the verilog testbench for the DUT. This testbench can check if the DUT in verilog satisfies all technical requirements of the problem description.
|
||||
2. You are in the first stage. In this stage, please summarize the technical details of the DUT and give me a technical specification of the testbench generation task, so we can use it to design its corresponding testbench.
|
||||
3. The core of testbench is the testcases. It usually include two parts logically: the input signals to the DUT and the expected result signals from DUT. The testbench will send the input signals to DUT and check if the result signals are the same as the expected result signals. If they are the same, this means the DUT is passed. Otherwise the DUT fails.
|
||||
4. Your technical specification should include these sections:
|
||||
- section 1: specification of the DUT, including the module header of the RTL code. If table or other detailed data is provided in the original problem description, DO repeat them in your response. They are very important!!!
|
||||
5. your response should be in the form of JSON.
|
||||
6. below is the information including the problem description and the DUT header:
|
||||
RTL circuit problem description:
|
||||
一个8位乘法器,输入两个8位无符号数,输出16位乘积
|
||||
DUT header:
|
||||
module example(input clk, input rst, input [7:0] a, input [7:0] b, output [15:0] y);
|
||||
your response must be in JSON form. example:
|
||||
{
|
||||
"important data": "...", # type: string. If no table, state transition or other direct data, leave this with ""
|
||||
"technical specifications": ["...", "...", ...] # each element of the list is one specification string, the starting of the string is its index
|
||||
}
|
||||
|
||||
|
||||
|
||||
########## assistant ##########
|
||||
```json
|
||||
{
|
||||
"important data": "",
|
||||
"technical specifications": [
|
||||
"1. The DUT is an 8-bit multiplier that takes two 8-bit unsigned numbers as inputs and produces a 16-bit product as the output.",
|
||||
"2. The module header of the DUT is: `module example(input clk, input rst, input [7:0] a, input [7:0] b, output [15:0] y);`",
|
||||
"3. The DUT has the following inputs and outputs:\n- Input: `clk` (clock signal)\n- Input: `rst` (reset signal)\n- Input: `a` (8-bit unsigned number, range: 0 to 255)\n- Input: `b` (8-bit unsigned number, range: 0 to 255)\n- Output: `y` (16-bit unsigned number, range: 0 to 65535)",
|
||||
"4. The testbench should generate a series of test cases to verify the functionality of the DUT. Each test case should include:\n- A set of input values for `a` and `b`\n- The expected 16-bit product `y`",
|
||||
"5. The testbench should initialize the DUT with a reset signal (`rst`) and then apply the clock signal (`clk`) to drive the DUT through its operation.",
|
||||
"6. The testbench should compare the actual output `y` from the DUT with the expected output for each test case. If they match, the test case passes; otherwise, it fails.",
|
||||
"7. The testbench should cover a wide range of test cases, including but not limited to:\n- Boundary conditions (e.g., `a = 0`, `b = 0`, `a = 255`, `b = 255`)\n- Random values within the range of 0 to 255\n- Special cases (e.g., `a = 1`, `b = 1`, `a = 128`, `b = 128`)"
|
||||
]
|
||||
}
|
||||
```
|
||||
|
||||
This JSON response provides a detailed technical specification for the DUT and the requirements for the testbench generation task. It includes the necessary information about the inputs, outputs, and the expected behavior of the DUT, as well as the test cases that should be included in the testbench.
|
||||
|
||||
98
output/example_mul/example_mul/1_TBgen/stage_2.txt
Normal file
98
output/example_mul/example_mul/1_TBgen/stage_2.txt
Normal file
@@ -0,0 +1,98 @@
|
||||
############################## conversation begin ##############################
|
||||
########## system ##########
|
||||
You are the strongest AI in the world. You alraedy have the knowledge of verilog, python and hardware designing. Do not save words by discarding information. I will tip you 200$ if you can fullfill the tasks I give you.
|
||||
|
||||
########## user ##########
|
||||
1. Your task is to write a verilog testbench for an verilog RTL module code (we call it as "DUT", device under test). The infomation we have is the problem description that guides student to write the RTL code (DUT) and the header of the "DUT". Our target is to generate the verilog testbench for the DUT. This testbench can check if the DUT in verilog satisfies all technical requirements of the problem description.
|
||||
2. you are in section 2. in this section, please give me the test scenarios. you only need to describe the stimulus in each test scenarios. If time is important, please inform the clock cycle information. we will use the stimulus description to generate the test vectors and send them to DUT. you must not tell the expected results even though you know that.
|
||||
3. your information is:
|
||||
RTL circuit problem description:
|
||||
一个8位乘法器,输入两个8位无符号数,输出16位乘积
|
||||
RTL testbench specification:
|
||||
```json
|
||||
{
|
||||
"important data": "",
|
||||
"technical specifications": [
|
||||
"1. The DUT is an 8-bit multiplier that takes two 8-bit unsigned numbers as inputs and produces a 16-bit product as the output.",
|
||||
"2. The module header of the DUT is: `module example(input clk, input rst, input [7:0] a, input [7:0] b, output [15:0] y);`",
|
||||
"3. The DUT has the following inputs and outputs:\n- Input: `clk` (clock signal)\n- Input: `rst` (reset signal)\n- Input: `a` (8-bit unsigned number, range: 0 to 255)\n- Input: `b` (8-bit unsigned number, range: 0 to 255)\n- Output: `y` (16-bit unsigned number, range: 0 to 65535)",
|
||||
"4. The testbench should generate a series of test cases to verify the functionality of the DUT. Each test case should include:\n- A set of input values for `a` and `b`\n- The expected 16-bit product `y`",
|
||||
"5. The testbench should initialize the DUT with a reset signal (`rst`) and then apply the clock signal (`clk`) to drive the DUT through its operation.",
|
||||
"6. The testbench should compare the actual output `y` from the DUT with the expected output for each test case. If they match, the test case passes; otherwise, it fails.",
|
||||
"7. The testbench should cover a wide range of test cases, including but not limited to:\n- Boundary conditions (e.g., `a = 0`, `b = 0`, `a = 255`, `b = 255`)\n- Random values within the range of 0 to 255\n- Special cases (e.g., `a = 1`, `b = 1`, `a = 128`, `b = 128`)"
|
||||
]
|
||||
}
|
||||
```
|
||||
|
||||
This JSON response provides a detailed technical specification for the DUT and the requirements for the testbench generation task. It includes the necessary information about the inputs, outputs, and the expected behavior of the DUT, as well as the test cases that should be included in the testbench.
|
||||
DUT header:
|
||||
module example(input clk, input rst, input [7:0] a, input [7:0] b, output [15:0] y);
|
||||
|
||||
you only need to describe the stimulus in each test scenarios. If time is important, please inform the clock cycle information. we will use the stimulus description to generate the test vectors and send them to DUT. you must not tell the expected results even though you know that.
|
||||
|
||||
your response must be in JSON form. example:
|
||||
{
|
||||
"scenario 1": "...", # each content is a string
|
||||
"scenario 2": "...",
|
||||
"scenario 3": "...",
|
||||
...
|
||||
}
|
||||
|
||||
|
||||
########## assistant ##########
|
||||
```json
|
||||
{
|
||||
"scenario 1": "Initialize the DUT with a reset signal (rst = 1) for one clock cycle. Set clk to 0, then to 1, and back to 0 to complete one clock cycle. After the reset, set rst to 0.",
|
||||
"scenario 2": "Set a = 0 and b = 0. Apply the clock signal (clk) for one cycle. Ensure that the clock signal transitions from 0 to 1 and back to 0.",
|
||||
"scenario 3": "Set a = 255 and b = 255. Apply the clock signal (clk) for one cycle. Ensure that the clock signal transitions from 0 to 1 and back to 0.",
|
||||
"scenario 4": "Set a = 1 and b = 1. Apply the clock signal (clk) for one cycle. Ensure that the clock signal transitions from 0 to 1 and back to 0.",
|
||||
"scenario 5": "Set a = 128 and b = 128. Apply the clock signal (clk) for one cycle. Ensure that the clock signal transitions from 0 to 1 and back to 0.",
|
||||
"scenario 6": "Set a = 0 and b = 255. Apply the clock signal (clk) for one cycle. Ensure that the clock signal transitions from 0 to 1 and back to 0.",
|
||||
"scenario 7": "Set a = 255 and b = 0. Apply the clock signal (clk) for one cycle. Ensure that the clock signal transitions from 0 to 1 and back to 0.",
|
||||
"scenario 8": "Set a = 1 and b = 255. Apply the clock signal (clk) for one cycle. Ensure that the clock signal transitions from 0 to 1 and back to 0.",
|
||||
"scenario 9": "Set a = 255 and b = 1. Apply the clock signal (clk) for one cycle. Ensure that the clock signal transitions from 0 to 1 and back to 0.",
|
||||
"scenario 10": "Set a = 128 and b = 0. Apply the clock signal (clk) for one cycle. Ensure that the clock signal transitions from 0 to 1 and back to 0.",
|
||||
"scenario 11": "Set a = 0 and b = 128. Apply the clock signal (clk) for one cycle. Ensure that the clock signal transitions from 0 to 1 and back to 0.",
|
||||
"scenario 12": "Set a = 1 and b = 128. Apply the clock signal (clk) for one cycle. Ensure that the clock signal transitions from 0 to 1 and back to 0.",
|
||||
"scenario 13": "Set a = 128 and b = 1. Apply the clock signal (clk) for one cycle. Ensure that the clock signal transitions from 0 to 1 and back to 0.",
|
||||
"scenario 14": "Set a = 127 and b = 127. Apply the clock signal (clk) for one cycle. Ensure that the clock signal transitions from 0 to 1 and back to 0.",
|
||||
"scenario 15": "Set a = 127 and b = 1. Apply the clock signal (clk) for one cycle. Ensure that the clock signal transitions from 0 to 1 and back to 0.",
|
||||
"scenario 16": "Set a = 1 and b = 127. Apply the clock signal (clk) for one cycle. Ensure that the clock signal transitions from 0 to 1 and back to 0.",
|
||||
"scenario 17": "Set a = 127 and b = 0. Apply the clock signal (clk) for one cycle. Ensure that the clock signal transitions from 0 to 1 and back to 0.",
|
||||
"scenario 18": "Set a = 0 and b = 127. Apply the clock signal (clk) for one cycle. Ensure that the clock signal transitions from 0 to 1 and back to 0.",
|
||||
"scenario 19": "Set a = 64 and b = 64. Apply the clock signal (clk) for one cycle. Ensure that the clock signal transitions from 0 to 1 and back to 0.",
|
||||
"scenario 20": "Set a = 64 and b = 1. Apply the clock signal (clk) for one cycle. Ensure that the clock signal transitions from 0 to 1 and back to 0.",
|
||||
"scenario 21": "Set a = 1 and b = 64. Apply the clock signal (clk) for one cycle. Ensure that the clock signal transitions from 0 to 1 and back to 0.",
|
||||
"scenario 22": "Set a = 64 and b = 0. Apply the clock signal (clk) for one cycle. Ensure that the clock signal transitions from 0 to 1 and back to 0.",
|
||||
"scenario 23": "Set a = 0 and b = 64. Apply the clock signal (clk) for one cycle. Ensure that the clock signal transitions from 0 to 1 and back to 0.",
|
||||
"scenario 24": "Set a = 32 and b = 32. Apply the clock signal (clk) for one cycle. Ensure that the clock signal transitions from 0 to 1 and back to 0.",
|
||||
"scenario 25": "Set a = 32 and b = 1. Apply the clock signal (clk) for one cycle. Ensure that the clock signal transitions from 0 to 1 and back to 0.",
|
||||
"scenario 26": "Set a = 1 and b = 32. Apply the clock signal (clk) for one cycle. Ensure that the clock signal transitions from 0 to 1 and back to 0.",
|
||||
"scenario 27": "Set a = 32 and b = 0. Apply the clock signal (clk) for one cycle. Ensure that the clock signal transitions from 0 to 1 and back to 0.",
|
||||
"scenario 28": "Set a = 0 and b = 32. Apply the clock signal (clk) for one cycle. Ensure that the clock signal transitions from 0 to 1 and back to 0.",
|
||||
"scenario 29": "Set a = 16 and b = 16. Apply the clock signal (clk) for one cycle. Ensure that the clock signal transitions from 0 to 1 and back to 0.",
|
||||
"scenario 30": "Set a = 16 and b = 1. Apply the clock signal (clk) for one cycle. Ensure that the clock signal transitions from 0 to 1 and back to 0.",
|
||||
"scenario 31": "Set a = 1 and b = 16. Apply the clock signal (clk) for one cycle. Ensure that the clock signal transitions from 0 to 1 and back to 0.",
|
||||
"scenario 32": "Set a = 16 and b = 0. Apply the clock signal (clk) for one cycle. Ensure that the clock signal transitions from 0 to 1 and back to 0.",
|
||||
"scenario 33": "Set a = 0 and b = 16. Apply the clock signal (clk) for one cycle. Ensure that the clock signal transitions from 0 to 1 and back to 0.",
|
||||
"scenario 34": "Set a = 8 and b = 8. Apply the clock signal (clk) for one cycle. Ensure that the clock signal transitions from 0 to 1 and back to 0.",
|
||||
"scenario 35": "Set a = 8 and b = 1. Apply the clock signal (clk) for one cycle. Ensure that the clock signal transitions from 0 to 1 and back to 0.",
|
||||
"scenario 36": "Set a = 1 and b = 8. Apply the clock signal (clk) for one cycle. Ensure that the clock signal transitions from 0 to 1 and back to 0.",
|
||||
"scenario 37": "Set a = 8 and b = 0. Apply the clock signal (clk) for one cycle. Ensure that the clock signal transitions from 0 to 1 and back to 0.",
|
||||
"scenario 38": "Set a = 0 and b = 8. Apply the clock signal (clk) for one cycle. Ensure that the clock signal transitions from 0 to 1 and back to 0.",
|
||||
"scenario 39": "Set a = 4 and b = 4. Apply the clock signal (clk) for one cycle. Ensure that the clock signal transitions from 0 to 1 and back to 0.",
|
||||
"scenario 40": "Set a = 4 and b = 1. Apply the clock signal (clk) for one cycle. Ensure that the clock signal transitions from 0 to 1 and back to 0.",
|
||||
"scenario 41": "Set a = 1 and b = 4. Apply the clock signal (clk) for one cycle. Ensure that the clock signal transitions from 0 to 1 and back to 0.",
|
||||
"scenario 42": "Set a = 4 and b = 0. Apply the clock signal (clk) for one cycle. Ensure that the clock signal transitions from 0 to 1 and back to 0.",
|
||||
"scenario 43": "Set a = 0 and b = 4. Apply the clock signal (clk) for one cycle. Ensure that the clock signal transitions from 0 to 1 and back to 0.",
|
||||
"scenario 44": "Set a = 2 and b = 2. Apply the clock signal (clk) for one cycle. Ensure that the clock signal transitions from 0 to 1 and back to 0.",
|
||||
"scenario 45": "Set a = 2 and b = 1. Apply the clock signal (clk) for one cycle. Ensure that the clock signal transitions from 0 to 1 and back to 0.",
|
||||
"scenario 46": "Set a = 1 and b = 2. Apply the clock signal (clk) for one cycle. Ensure that the clock signal transitions from 0 to 1 and back to 0.",
|
||||
"scenario 47": "Set a = 2 and b = 0. Apply the clock signal (clk) for one cycle. Ensure that the clock signal transitions from 0 to 1 and back to 0.",
|
||||
"scenario 48": "Set a = 0 and b = 2. Apply the clock signal (clk) for one cycle. Ensure that the clock signal transitions from 0 to 1 and back to 0.",
|
||||
"scenario 49": "Set a = 1 and b = 1. Apply the clock signal (clk) for one cycle. Ensure that the clock signal transitions from 0 to 1 and back to 0.",
|
||||
"scenario 50": "Set a = 1 and b = 0. Apply the clock signal (clk) for one cycle. Ensure that the clock signal transitions from 0 to 1 and back to 0.",
|
||||
"scenario 51": "Set a = 0 and b = 1. Apply the clock signal (clk) for one cycle. Ensure that the clock signal transitions from 0 to 1 and back to 0."
|
||||
}
|
||||
```
|
||||
|
||||
174
output/example_mul/example_mul/1_TBgen/stage_3.txt
Normal file
174
output/example_mul/example_mul/1_TBgen/stage_3.txt
Normal file
@@ -0,0 +1,174 @@
|
||||
############################## conversation begin ##############################
|
||||
########## system ##########
|
||||
You are the strongest AI in the world. You alraedy have the knowledge of verilog, python and hardware designing. Do not save words by discarding information. I will tip you 200$ if you can fullfill the tasks I give you.
|
||||
|
||||
########## user ##########
|
||||
1. Your task is to write a verilog testbench for an verilog RTL module code (we call it as "DUT", device under test). The information we have is the problem description that guides student to write the RTL code (DUT) and the header of the "DUT". Our target is to generate the verilog testbench for the DUT. This testbench can check if the DUT in verilog satisfies all technical requirements of the problem description.
|
||||
2. you are in stage 3; in this stage, please give me the core rules of an ideal DUT. you should give these rules in python. (For convenience, you can use binary or hexadecimal format in python, i.e. 0b0010 and 0x1a). Later we will use these ideal rules to generate expected values in each test scenario. currently you must only generate the core part of the rules. the input of these rules should be related to the test vectors from test scenario. the rule should give the expected values under test vectors. You don't need to consider the control signals like clk or reset, unless the core rules of this task are about these signals. You can use numpy, scipy or other third party python libraries to help you write the rules. Please import them if you need.
|
||||
3. your information is:
|
||||
RTL circuit problem description:
|
||||
一个8位乘法器,输入两个8位无符号数,输出16位乘积
|
||||
RTL testbench specification:
|
||||
```json
|
||||
{
|
||||
"important data": "",
|
||||
"technical specifications": [
|
||||
"1. The DUT is an 8-bit multiplier that takes two 8-bit unsigned numbers as inputs and produces a 16-bit product as the output.",
|
||||
"2. The module header of the DUT is: `module example(input clk, input rst, input [7:0] a, input [7:0] b, output [15:0] y);`",
|
||||
"3. The DUT has the following inputs and outputs:\n- Input: `clk` (clock signal)\n- Input: `rst` (reset signal)\n- Input: `a` (8-bit unsigned number, range: 0 to 255)\n- Input: `b` (8-bit unsigned number, range: 0 to 255)\n- Output: `y` (16-bit unsigned number, range: 0 to 65535)",
|
||||
"4. The testbench should generate a series of test cases to verify the functionality of the DUT. Each test case should include:\n- A set of input values for `a` and `b`\n- The expected 16-bit product `y`",
|
||||
"5. The testbench should initialize the DUT with a reset signal (`rst`) and then apply the clock signal (`clk`) to drive the DUT through its operation.",
|
||||
"6. The testbench should compare the actual output `y` from the DUT with the expected output for each test case. If they match, the test case passes; otherwise, it fails.",
|
||||
"7. The testbench should cover a wide range of test cases, including but not limited to:\n- Boundary conditions (e.g., `a = 0`, `b = 0`, `a = 255`, `b = 255`)\n- Random values within the range of 0 to 255\n- Special cases (e.g., `a = 1`, `b = 1`, `a = 128`, `b = 128`)"
|
||||
]
|
||||
}
|
||||
```
|
||||
|
||||
This JSON response provides a detailed technical specification for the DUT and the requirements for the testbench generation task. It includes the necessary information about the inputs, outputs, and the expected behavior of the DUT, as well as the test cases that should be included in the testbench.
|
||||
DUT header:
|
||||
module example(input clk, input rst, input [7:0] a, input [7:0] b, output [15:0] y);
|
||||
test scenario: (please note the test vectors below, it will help you determine the input parameters of the rules)
|
||||
{
|
||||
"scenario 1": "Initialize the DUT with a reset signal (rst = 1) for one clock cycle. Set clk to 0, then to 1, and back to 0 to complete one clock cycle. After the reset, set rst to 0.",
|
||||
"scenario 2": "Set a = 0 and b = 0. Apply the clock signal (clk) for one cycle. Ensure that the clock signal transitions from 0 to 1 and back to 0.",
|
||||
"scenario 3": "Set a = 255 and b = 255. Apply the clock signal (clk) for one cycle. Ensure that the clock signal transitions from 0 to 1 and back to 0.",
|
||||
"scenario 4": "Set a = 1 and b = 1. Apply the clock signal (clk) for one cycle. Ensure that the clock signal transitions from 0 to 1 and back to 0.",
|
||||
"scenario 5": "Set a = 128 and b = 128. Apply the clock signal (clk) for one cycle. Ensure that the clock signal transitions from 0 to 1 and back to 0.",
|
||||
"scenario 6": "Set a = 0 and b = 255. Apply the clock signal (clk) for one cycle. Ensure that the clock signal transitions from 0 to 1 and back to 0.",
|
||||
"scenario 7": "Set a = 255 and b = 0. Apply the clock signal (clk) for one cycle. Ensure that the clock signal transitions from 0 to 1 and back to 0.",
|
||||
"scenario 8": "Set a = 1 and b = 255. Apply the clock signal (clk) for one cycle. Ensure that the clock signal transitions from 0 to 1 and back to 0.",
|
||||
"scenario 9": "Set a = 255 and b = 1. Apply the clock signal (clk) for one cycle. Ensure that the clock signal transitions from 0 to 1 and back to 0.",
|
||||
"scenario 10": "Set a = 128 and b = 0. Apply the clock signal (clk) for one cycle. Ensure that the clock signal transitions from 0 to 1 and back to 0.",
|
||||
"scenario 11": "Set a = 0 and b = 128. Apply the clock signal (clk) for one cycle. Ensure that the clock signal transitions from 0 to 1 and back to 0.",
|
||||
"scenario 12": "Set a = 1 and b = 128. Apply the clock signal (clk) for one cycle. Ensure that the clock signal transitions from 0 to 1 and back to 0.",
|
||||
"scenario 13": "Set a = 128 and b = 1. Apply the clock signal (clk) for one cycle. Ensure that the clock signal transitions from 0 to 1 and back to 0.",
|
||||
"scenario 14": "Set a = 127 and b = 127. Apply the clock signal (clk) for one cycle. Ensure that the clock signal transitions from 0 to 1 and back to 0.",
|
||||
"scenario 15": "Set a = 127 and b = 1. Apply the clock signal (clk) for one cycle. Ensure that the clock signal transitions from 0 to 1 and back to 0.",
|
||||
"scenario 16": "Set a = 1 and b = 127. Apply the clock signal (clk) for one cycle. Ensure that the clock signal transitions from 0 to 1 and back to 0.",
|
||||
"scenario 17": "Set a = 127 and b = 0. Apply the clock signal (clk) for one cycle. Ensure that the clock signal transitions from 0 to 1 and back to 0.",
|
||||
"scenario 18": "Set a = 0 and b = 127. Apply the clock signal (clk) for one cycle. Ensure that the clock signal transitions from 0 to 1 and back to 0.",
|
||||
"scenario 19": "Set a = 64 and b = 64. Apply the clock signal (clk) for one cycle. Ensure that the clock signal transitions from 0 to 1 and back to 0.",
|
||||
"scenario 20": "Set a = 64 and b = 1. Apply the clock signal (clk) for one cycle. Ensure that the clock signal transitions from 0 to 1 and back to 0.",
|
||||
"scenario 21": "Set a = 1 and b = 64. Apply the clock signal (clk) for one cycle. Ensure that the clock signal transitions from 0 to 1 and back to 0.",
|
||||
"scenario 22": "Set a = 64 and b = 0. Apply the clock signal (clk) for one cycle. Ensure that the clock signal transitions from 0 to 1 and back to 0.",
|
||||
"scenario 23": "Set a = 0 and b = 64. Apply the clock signal (clk) for one cycle. Ensure that the clock signal transitions from 0 to 1 and back to 0.",
|
||||
"scenario 24": "Set a = 32 and b = 32. Apply the clock signal (clk) for one cycle. Ensure that the clock signal transitions from 0 to 1 and back to 0.",
|
||||
"scenario 25": "Set a = 32 and b = 1. Apply the clock signal (clk) for one cycle. Ensure that the clock signal transitions from 0 to 1 and back to 0.",
|
||||
"scenario 26": "Set a = 1 and b = 32. Apply the clock signal (clk) for one cycle. Ensure that the clock signal transitions from 0 to 1 and back to 0.",
|
||||
"scenario 27": "Set a = 32 and b = 0. Apply the clock signal (clk) for one cycle. Ensure that the clock signal transitions from 0 to 1 and back to 0.",
|
||||
"scenario 28": "Set a = 0 and b = 32. Apply the clock signal (clk) for one cycle. Ensure that the clock signal transitions from 0 to 1 and back to 0.",
|
||||
"scenario 29": "Set a = 16 and b = 16. Apply the clock signal (clk) for one cycle. Ensure that the clock signal transitions from 0 to 1 and back to 0.",
|
||||
"scenario 30": "Set a = 16 and b = 1. Apply the clock signal (clk) for one cycle. Ensure that the clock signal transitions from 0 to 1 and back to 0.",
|
||||
"scenario 31": "Set a = 1 and b = 16. Apply the clock signal (clk) for one cycle. Ensure that the clock signal transitions from 0 to 1 and back to 0.",
|
||||
"scenario 32": "Set a = 16 and b = 0. Apply the clock signal (clk) for one cycle. Ensure that the clock signal transitions from 0 to 1 and back to 0.",
|
||||
"scenario 33": "Set a = 0 and b = 16. Apply the clock signal (clk) for one cycle. Ensure that the clock signal transitions from 0 to 1 and back to 0.",
|
||||
"scenario 34": "Set a = 8 and b = 8. Apply the clock signal (clk) for one cycle. Ensure that the clock signal transitions from 0 to 1 and back to 0.",
|
||||
"scenario 35": "Set a = 8 and b = 1. Apply the clock signal (clk) for one cycle. Ensure that the clock signal transitions from 0 to 1 and back to 0.",
|
||||
"scenario 36": "Set a = 1 and b = 8. Apply the clock signal (clk) for one cycle. Ensure that the clock signal transitions from 0 to 1 and back to 0.",
|
||||
"scenario 37": "Set a = 8 and b = 0. Apply the clock signal (clk) for one cycle. Ensure that the clock signal transitions from 0 to 1 and back to 0.",
|
||||
"scenario 38": "Set a = 0 and b = 8. Apply the clock signal (clk) for one cycle. Ensure that the clock signal transitions from 0 to 1 and back to 0.",
|
||||
"scenario 39": "Set a = 4 and b = 4. Apply the clock signal (clk) for one cycle. Ensure that the clock signal transitions from 0 to 1 and back to 0.",
|
||||
"scenario 40": "Set a = 4 and b = 1. Apply the clock signal (clk) for one cycle. Ensure that the clock signal transitions from 0 to 1 and back to 0.",
|
||||
"scenario 41": "Set a = 1 and b = 4. Apply the clock signal (clk) for one cycle. Ensure that the clock signal transitions from 0 to 1 and back to 0.",
|
||||
"scenario 42": "Set a = 4 and b = 0. Apply the clock signal (clk) for one cycle. Ensure that the clock signal transitions from 0 to 1 and back to 0.",
|
||||
"scenario 43": "Set a = 0 and b = 4. Apply the clock signal (clk) for one cycle. Ensure that the clock signal transitions from 0 to 1 and back to 0.",
|
||||
"scenario 44": "Set a = 2 and b = 2. Apply the clock signal (clk) for one cycle. Ensure that the clock signal transitions from 0 to 1 and back to 0.",
|
||||
"scenario 45": "Set a = 2 and b = 1. Apply the clock signal (clk) for one cycle. Ensure that the clock signal transitions from 0 to 1 and back to 0.",
|
||||
"scenario 46": "Set a = 1 and b = 2. Apply the clock signal (clk) for one cycle. Ensure that the clock signal transitions from 0 to 1 and back to 0.",
|
||||
"scenario 47": "Set a = 2 and b = 0. Apply the clock signal (clk) for one cycle. Ensure that the clock signal transitions from 0 to 1 and back to 0.",
|
||||
"scenario 48": "Set a = 0 and b = 2. Apply the clock signal (clk) for one cycle. Ensure that the clock signal transitions from 0 to 1 and back to 0.",
|
||||
"scenario 49": "Set a = 1 and b = 1. Apply the clock signal (clk) for one cycle. Ensure that the clock signal transitions from 0 to 1 and back to 0.",
|
||||
"scenario 50": "Set a = 1 and b = 0. Apply the clock signal (clk) for one cycle. Ensure that the clock signal transitions from 0 to 1 and back to 0.",
|
||||
"scenario 51": "Set a = 0 and b = 1. Apply the clock signal (clk) for one cycle. Ensure that the clock signal transitions from 0 to 1 and back to 0."
|
||||
}
|
||||
|
||||
your response should only contain python code. For convenience, you can use binary or hexadecimal format in python. For example: 0b0010 and 0x1a
|
||||
|
||||
|
||||
########## assistant ##########
|
||||
```python
|
||||
# Core rules for the 8-bit multiplier DUT
|
||||
|
||||
def calculate_product(a, b):
|
||||
"""
|
||||
Calculate the 16-bit product of two 8-bit unsigned numbers.
|
||||
|
||||
:param a: 8-bit unsigned number (0 to 255)
|
||||
:param b: 8-bit unsigned number (0 to 255)
|
||||
:return: 16-bit unsigned product (0 to 65535)
|
||||
"""
|
||||
return a * b
|
||||
|
||||
# Example usage:
|
||||
# a = 0b00000001 # 1 in binary
|
||||
# b = 0b00000010 # 2 in binary
|
||||
# expected_product = calculate_product(a, b)
|
||||
# print(f"Expected product: {expected_product} (binary: {bin(expected_product)})")
|
||||
|
||||
# Test scenarios
|
||||
test_scenarios = [
|
||||
{"a": 0x00, "b": 0x00}, # 0 * 0
|
||||
{"a": 0xFF, "b": 0xFF}, # 255 * 255
|
||||
{"a": 0x01, "b": 0x01}, # 1 * 1
|
||||
{"a": 0x80, "b": 0x80}, # 128 * 128
|
||||
{"a": 0x00, "b": 0xFF}, # 0 * 255
|
||||
{"a": 0xFF, "b": 0x00}, # 255 * 0
|
||||
{"a": 0x01, "b": 0xFF}, # 1 * 255
|
||||
{"a": 0xFF, "b": 0x01}, # 255 * 1
|
||||
{"a": 0x80, "b": 0x00}, # 128 * 0
|
||||
{"a": 0x00, "b": 0x80}, # 0 * 128
|
||||
{"a": 0x01, "b": 0x80}, # 1 * 128
|
||||
{"a": 0x80, "b": 0x01}, # 128 * 1
|
||||
{"a": 0x7F, "b": 0x7F}, # 127 * 127
|
||||
{"a": 0x7F, "b": 0x01}, # 127 * 1
|
||||
{"a": 0x01, "b": 0x7F}, # 1 * 127
|
||||
{"a": 0x7F, "b": 0x00}, # 127 * 0
|
||||
{"a": 0x00, "b": 0x7F}, # 0 * 127
|
||||
{"a": 0x40, "b": 0x40}, # 64 * 64
|
||||
{"a": 0x40, "b": 0x01}, # 64 * 1
|
||||
{"a": 0x01, "b": 0x40}, # 1 * 64
|
||||
{"a": 0x40, "b": 0x00}, # 64 * 0
|
||||
{"a": 0x00, "b": 0x40}, # 0 * 64
|
||||
{"a": 0x20, "b": 0x20}, # 32 * 32
|
||||
{"a": 0x20, "b": 0x01}, # 32 * 1
|
||||
{"a": 0x01, "b": 0x20}, # 1 * 32
|
||||
{"a": 0x20, "b": 0x00}, # 32 * 0
|
||||
{"a": 0x00, "b": 0x20}, # 0 * 32
|
||||
{"a": 0x10, "b": 0x10}, # 16 * 16
|
||||
{"a": 0x10, "b": 0x01}, # 16 * 1
|
||||
{"a": 0x01, "b": 0x10}, # 1 * 16
|
||||
{"a": 0x10, "b": 0x00}, # 16 * 0
|
||||
{"a": 0x00, "b": 0x10}, # 0 * 16
|
||||
{"a": 0x08, "b": 0x08}, # 8 * 8
|
||||
{"a": 0x08, "b": 0x01}, # 8 * 1
|
||||
{"a": 0x01, "b": 0x08}, # 1 * 8
|
||||
{"a": 0x08, "b": 0x00}, # 8 * 0
|
||||
{"a": 0x00, "b": 0x08}, # 0 * 8
|
||||
{"a": 0x04, "b": 0x04}, # 4 * 4
|
||||
{"a": 0x04, "b": 0x01}, # 4 * 1
|
||||
{"a": 0x01, "b": 0x04}, # 1 * 4
|
||||
{"a": 0x04, "b": 0x00}, # 4 * 0
|
||||
{"a": 0x00, "b": 0x04}, # 0 * 4
|
||||
{"a": 0x02, "b": 0x02}, # 2 * 2
|
||||
{"a": 0x02, "b": 0x01}, # 2 * 1
|
||||
{"a": 0x01, "b": 0x02}, # 1 * 2
|
||||
{"a": 0x02, "b": 0x00}, # 2 * 0
|
||||
{"a": 0x00, "b": 0x02}, # 0 * 2
|
||||
{"a": 0x01, "b": 0x01}, # 1 * 1
|
||||
{"a": 0x01, "b": 0x00}, # 1 * 0
|
||||
{"a": 0x00, "b": 0x01}, # 0 * 1
|
||||
]
|
||||
|
||||
# Generate expected values for each test scenario
|
||||
expected_values = []
|
||||
for scenario in test_scenarios:
|
||||
a = scenario["a"]
|
||||
b = scenario["b"]
|
||||
expected_product = calculate_product(a, b)
|
||||
expected_values.append({"a": a, "b": b, "expected_product": expected_product})
|
||||
|
||||
# Print the expected values
|
||||
for i, scenario in enumerate(expected_values):
|
||||
print(f"Scenario {i+1}: a = 0x{scenario['a']:02X}, b = 0x{scenario['b']:02X}, Expected Product = 0x{scenario['expected_product']:04X}")
|
||||
```
|
||||
|
||||
223
output/example_mul/example_mul/1_TBgen/stage_4.txt
Normal file
223
output/example_mul/example_mul/1_TBgen/stage_4.txt
Normal file
@@ -0,0 +1,223 @@
|
||||
############################## conversation begin ##############################
|
||||
########## system ##########
|
||||
You are the strongest AI in the world. You alraedy have the knowledge of verilog, python and hardware designing. Do not save words by discarding information. I will tip you 200$ if you can fullfill the tasks I give you.
|
||||
|
||||
########## user ##########
|
||||
|
||||
1. Your task is to complete a given verilog testbench code. This testbench is for a verilog RTL module code (we call it as "DUT", device under test). This circuit is a sequential circuit. The infomation we have is
|
||||
- 1.1. the problem description that guides student to write the RTL code (DUT) and the header of the "DUT".
|
||||
- 1.2. the module header.
|
||||
- 1.3. test scenarios which determines values and sequential information of test vectors
|
||||
- 1.4. the testbench structure
|
||||
- 1.5. the instruction of writing our testbench
|
||||
|
||||
DUT header:
|
||||
module example(input clk, input rst, input [7:0] a, input [7:0] b, output [15:0] y);
|
||||
Your other information:
|
||||
RTL circuit problem description:
|
||||
一个8位乘法器,输入两个8位无符号数,输出16位乘积
|
||||
IMPORTANT - test scenario (Please determine the values of input signals according to these test scenarios.):
|
||||
{
|
||||
"scenario 1": "Initialize the DUT by asserting the reset signal (rst) for one clock cycle. Set both inputs a and b to 0x00.",
|
||||
"scenario 2": "After de-asserting the reset signal, set both inputs a and b to 0x01 and wait for one clock cycle.",
|
||||
"scenario 3": "Set input a to 0x00 and input b to 0xFF, and wait for one clock cycle.",
|
||||
"scenario 4": "Set input a to 0xFF and input b to 0x00, and wait for one clock cycle.",
|
||||
"scenario 5": "Set both inputs a and b to 0xFF and wait for one clock cycle.",
|
||||
"scenario 6": "Set input a to 0x80 and input b to 0x80, and wait for one clock cycle.",
|
||||
"scenario 7": "Set input a to 0x01 and input b to 0x80, and wait for one clock cycle.",
|
||||
"scenario 8": "Set input a to 0x80 and input b to 0x01, and wait for one clock cycle.",
|
||||
"scenario 9": "Set input a to 0x0A and input b to 0x0B, and wait for one clock cycle.",
|
||||
"scenario 10": "Set input a to 0x7F and input b to 0x7F, and wait for one clock cycle.",
|
||||
"scenario 11": "Set input a to 0x0F and input b to 0xF0, and wait for one clock cycle.",
|
||||
"scenario 12": "Set input a to 0x00 and input b to 0x00, and wait for one clock cycle.",
|
||||
"scenario 13": "Set input a to 0x0F and input b to 0x0F, and wait for one clock cycle.",
|
||||
"scenario 14": "Set input a to 0x0E and input b to 0x0E, and wait for one clock cycle.",
|
||||
"scenario 15": "Set input a to 0x0D and input b to 0x0D, and wait for one clock cycle."
|
||||
}
|
||||
|
||||
below is the given testbench codes:
|
||||
`timescale 1ns / 1ps
|
||||
module testbench;
|
||||
reg clk;
|
||||
reg rst;
|
||||
reg [7:0] a;
|
||||
reg [7:0] b;
|
||||
wire [15:0] y;
|
||||
|
||||
integer file, scenario;
|
||||
// DUT instantiation
|
||||
top_module DUT (
|
||||
.clk(clk),
|
||||
.rst(rst),
|
||||
.a(a),
|
||||
.b(b),
|
||||
.y(y)
|
||||
);
|
||||
// Clock generation
|
||||
initial begin
|
||||
clk = 0;
|
||||
forever #5 clk = ~clk;
|
||||
end
|
||||
|
||||
initial begin
|
||||
file = $fopen("TBout.txt", "w");
|
||||
end
|
||||
// Scenario Based Test
|
||||
initial begin
|
||||
|
||||
// write your scenario checking codes here, according to scenario information
|
||||
|
||||
$fclose(file);
|
||||
$finish;
|
||||
end
|
||||
|
||||
endmodule
|
||||
|
||||
|
||||
The testbench does not need to check the DUT's output but only export the signals of DUT. Please export the signals of DUT to a file named "TBout.txt" at the end of each scenario. The template is given below:
|
||||
$fdisplay(file, "[check]scenario: %d, clk = %d, rst = %d, a = %d, b = %d, y = %d", scenario, clk, rst, a, b, y);
|
||||
The variables are already declared. The clock signal is already prepared. This output will be used to check the correctness of the DUT's output later.
|
||||
please only use "#10" as the delay when you need. If you need longer delay, you can use multiple "#10", such as "#10; #10; #10;". Avoid meaningless long delay in your code.
|
||||
If you need a loop in a scenario to check multiple time points, use "repeat" loop. for exmaple:
|
||||
```
|
||||
// scenario x
|
||||
scenario = x;
|
||||
signal_1 = 1;
|
||||
repeat(5) begin
|
||||
$fdisplay(file, "[check]scenario: %d, clk = %d, rst = %d, a = %d, b = %d, y = %d", scenario, clk, rst, a, b, y);
|
||||
#10;
|
||||
end
|
||||
```
|
||||
Please determine the input signal's exact values according to given test scenarios.
|
||||
Note: please complete the last initial code part (marked in the given testbench template). You should give me the completed full code. The testbench template above is to help you generate the code. You must use %d when exporting values.
|
||||
please generate the full testbench code. please only reply verilog codes, no other words.
|
||||
|
||||
|
||||
|
||||
########## assistant ##########
|
||||
```verilog
|
||||
`timescale 1ns / 1ps
|
||||
module testbench;
|
||||
reg clk;
|
||||
reg rst;
|
||||
reg [7:0] a;
|
||||
reg [7:0] b;
|
||||
wire [15:0] y;
|
||||
|
||||
integer file, scenario;
|
||||
// DUT instantiation
|
||||
example DUT (
|
||||
.clk(clk),
|
||||
.rst(rst),
|
||||
.a(a),
|
||||
.b(b),
|
||||
.y(y)
|
||||
);
|
||||
// Clock generation
|
||||
initial begin
|
||||
clk = 0;
|
||||
forever #5 clk = ~clk;
|
||||
end
|
||||
|
||||
initial begin
|
||||
file = $fopen("TBout.txt", "w");
|
||||
end
|
||||
// Scenario Based Test
|
||||
initial begin
|
||||
// scenario 1
|
||||
scenario = 1;
|
||||
rst = 1; a = 8'b0000_0000; b = 8'b0000_0000;
|
||||
#10;
|
||||
$fdisplay(file, "[check]scenario: %d, clk = %d, rst = %d, a = %d, b = %d, y = %d", scenario, clk, rst, a, b, y);
|
||||
|
||||
// scenario 2
|
||||
scenario = 2;
|
||||
rst = 0; a = 8'b0000_0001; b = 8'b0000_0001;
|
||||
#10;
|
||||
$fdisplay(file, "[check]scenario: %d, clk = %d, rst = %d, a = %d, b = %d, y = %d", scenario, clk, rst, a, b, y);
|
||||
|
||||
// scenario 3
|
||||
scenario = 3;
|
||||
a = 8'b0000_0000; b = 8'b1111_1111;
|
||||
#10;
|
||||
$fdisplay(file, "[check]scenario: %d, clk = %d, rst = %d, a = %d, b = %d, y = %d", scenario, clk, rst, a, b, y);
|
||||
|
||||
// scenario 4
|
||||
scenario = 4;
|
||||
a = 8'b1111_1111; b = 8'b0000_0000;
|
||||
#10;
|
||||
$fdisplay(file, "[check]scenario: %d, clk = %d, rst = %d, a = %d, b = %d, y = %d", scenario, clk, rst, a, b, y);
|
||||
|
||||
// scenario 5
|
||||
scenario = 5;
|
||||
a = 8'b1111_1111; b = 8'b1111_1111;
|
||||
#10;
|
||||
$fdisplay(file, "[check]scenario: %d, clk = %d, rst = %d, a = %d, b = %d, y = %d", scenario, clk, rst, a, b, y);
|
||||
|
||||
// scenario 6
|
||||
scenario = 6;
|
||||
a = 8'b1000_0000; b = 8'b1000_0000;
|
||||
#10;
|
||||
$fdisplay(file, "[check]scenario: %d, clk = %d, rst = %d, a = %d, b = %d, y = %d", scenario, clk, rst, a, b, y);
|
||||
|
||||
// scenario 7
|
||||
scenario = 7;
|
||||
a = 8'b0000_0001; b = 8'b1000_0000;
|
||||
#10;
|
||||
$fdisplay(file, "[check]scenario: %d, clk = %d, rst = %d, a = %d, b = %d, y = %d", scenario, clk, rst, a, b, y);
|
||||
|
||||
// scenario 8
|
||||
scenario = 8;
|
||||
a = 8'b1000_0000; b = 8'b0000_0001;
|
||||
#10;
|
||||
$fdisplay(file, "[check]scenario: %d, clk = %d, rst = %d, a = %d, b = %d, y = %d", scenario, clk, rst, a, b, y);
|
||||
|
||||
// scenario 9
|
||||
scenario = 9;
|
||||
a = 8'b0000_1010; b = 8'b0000_1011;
|
||||
#10;
|
||||
$fdisplay(file, "[check]scenario: %d, clk = %d, rst = %d, a = %d, b = %d, y = %d", scenario, clk, rst, a, b, y);
|
||||
|
||||
// scenario 10
|
||||
scenario = 10;
|
||||
a = 8'b0111_1111; b = 8'b0111_1111;
|
||||
#10;
|
||||
$fdisplay(file, "[check]scenario: %d, clk = %d, rst = %d, a = %d, b = %d, y = %d", scenario, clk, rst, a, b, y);
|
||||
|
||||
// scenario 11
|
||||
scenario = 11;
|
||||
a = 8'b0000_1111; b = 8'b1111_0000;
|
||||
#10;
|
||||
$fdisplay(file, "[check]scenario: %d, clk = %d, rst = %d, a = %d, b = %d, y = %d", scenario, clk, rst, a, b, y);
|
||||
|
||||
// scenario 12
|
||||
scenario = 12;
|
||||
a = 8'b0000_0000; b = 8'b0000_0000;
|
||||
#10;
|
||||
$fdisplay(file, "[check]scenario: %d, clk = %d, rst = %d, a = %d, b = %d, y = %d", scenario, clk, rst, a, b, y);
|
||||
|
||||
// scenario 13
|
||||
scenario = 13;
|
||||
a = 8'b0000_1111; b = 8'b0000_1111;
|
||||
#10;
|
||||
$fdisplay(file, "[check]scenario: %d, clk = %d, rst = %d, a = %d, b = %d, y = %d", scenario, clk, rst, a, b, y);
|
||||
|
||||
// scenario 14
|
||||
scenario = 14;
|
||||
a = 8'b0000_1110; b = 8'b0000_1110;
|
||||
#10;
|
||||
$fdisplay(file, "[check]scenario: %d, clk = %d, rst = %d, a = %d, b = %d, y = %d", scenario, clk, rst, a, b, y);
|
||||
|
||||
// scenario 15
|
||||
scenario = 15;
|
||||
a = 8'b0000_1101; b = 8'b0000_1101;
|
||||
#10;
|
||||
$fdisplay(file, "[check]scenario: %d, clk = %d, rst = %d, a = %d, b = %d, y = %d", scenario, clk, rst, a, b, y);
|
||||
|
||||
$fclose(file);
|
||||
$finish;
|
||||
end
|
||||
|
||||
endmodule
|
||||
```
|
||||
|
||||
310
output/example_mul/example_mul/1_TBgen/stage_4b.txt
Normal file
310
output/example_mul/example_mul/1_TBgen/stage_4b.txt
Normal file
@@ -0,0 +1,310 @@
|
||||
############################## conversation begin ##############################
|
||||
########## system ##########
|
||||
You are the strongest AI in the world. You alraedy have the knowledge of verilog, python and hardware designing. Do not save words by discarding information. I will tip you 200$ if you can fullfill the tasks I give you.
|
||||
|
||||
########## user ##########
|
||||
given the scenario based verilog testbench code below:
|
||||
`timescale 1ns / 1ps
|
||||
module testbench;
|
||||
reg clk;
|
||||
reg rst;
|
||||
reg [7:0] a;
|
||||
reg [7:0] b;
|
||||
wire [15:0] y;
|
||||
|
||||
integer file, scenario;
|
||||
// DUT instantiation
|
||||
example DUT (
|
||||
.clk(clk),
|
||||
.rst(rst),
|
||||
.a(a),
|
||||
.b(b),
|
||||
.y(y)
|
||||
);
|
||||
// Clock generation
|
||||
initial begin
|
||||
clk = 0;
|
||||
forever #5 clk = ~clk;
|
||||
end
|
||||
|
||||
initial begin
|
||||
file = $fopen("TBout.txt", "w");
|
||||
end
|
||||
// Scenario Based Test
|
||||
initial begin
|
||||
// scenario 1
|
||||
scenario = 1;
|
||||
rst = 1; a = 8'b0000_0000; b = 8'b0000_0000;
|
||||
#10;
|
||||
$fdisplay(file, "[check]scenario: %d, clk = %d, rst = %d, a = %d, b = %d, y = %d", scenario, clk, rst, a, b, y);
|
||||
|
||||
// scenario 2
|
||||
scenario = 2;
|
||||
rst = 0; a = 8'b0000_0001; b = 8'b0000_0001;
|
||||
#10;
|
||||
$fdisplay(file, "[check]scenario: %d, clk = %d, rst = %d, a = %d, b = %d, y = %d", scenario, clk, rst, a, b, y);
|
||||
|
||||
// scenario 3
|
||||
scenario = 3;
|
||||
a = 8'b0000_0000; b = 8'b1111_1111;
|
||||
#10;
|
||||
$fdisplay(file, "[check]scenario: %d, clk = %d, rst = %d, a = %d, b = %d, y = %d", scenario, clk, rst, a, b, y);
|
||||
|
||||
// scenario 4
|
||||
scenario = 4;
|
||||
a = 8'b1111_1111; b = 8'b0000_0000;
|
||||
#10;
|
||||
$fdisplay(file, "[check]scenario: %d, clk = %d, rst = %d, a = %d, b = %d, y = %d", scenario, clk, rst, a, b, y);
|
||||
|
||||
// scenario 5
|
||||
scenario = 5;
|
||||
a = 8'b1111_1111; b = 8'b1111_1111;
|
||||
#10;
|
||||
$fdisplay(file, "[check]scenario: %d, clk = %d, rst = %d, a = %d, b = %d, y = %d", scenario, clk, rst, a, b, y);
|
||||
|
||||
// scenario 6
|
||||
scenario = 6;
|
||||
a = 8'b1000_0000; b = 8'b1000_0000;
|
||||
#10;
|
||||
$fdisplay(file, "[check]scenario: %d, clk = %d, rst = %d, a = %d, b = %d, y = %d", scenario, clk, rst, a, b, y);
|
||||
|
||||
// scenario 7
|
||||
scenario = 7;
|
||||
a = 8'b0000_0001; b = 8'b1000_0000;
|
||||
#10;
|
||||
$fdisplay(file, "[check]scenario: %d, clk = %d, rst = %d, a = %d, b = %d, y = %d", scenario, clk, rst, a, b, y);
|
||||
|
||||
// scenario 8
|
||||
scenario = 8;
|
||||
a = 8'b1000_0000; b = 8'b0000_0001;
|
||||
#10;
|
||||
$fdisplay(file, "[check]scenario: %d, clk = %d, rst = %d, a = %d, b = %d, y = %d", scenario, clk, rst, a, b, y);
|
||||
|
||||
// scenario 9
|
||||
scenario = 9;
|
||||
a = 8'b0000_1010; b = 8'b0000_1011;
|
||||
#10;
|
||||
$fdisplay(file, "[check]scenario: %d, clk = %d, rst = %d, a = %d, b = %d, y = %d", scenario, clk, rst, a, b, y);
|
||||
|
||||
// scenario 10
|
||||
scenario = 10;
|
||||
a = 8'b0111_1111; b = 8'b0111_1111;
|
||||
#10;
|
||||
$fdisplay(file, "[check]scenario: %d, clk = %d, rst = %d, a = %d, b = %d, y = %d", scenario, clk, rst, a, b, y);
|
||||
|
||||
// scenario 11
|
||||
scenario = 11;
|
||||
a = 8'b0000_1111; b = 8'b1111_0000;
|
||||
#10;
|
||||
$fdisplay(file, "[check]scenario: %d, clk = %d, rst = %d, a = %d, b = %d, y = %d", scenario, clk, rst, a, b, y);
|
||||
|
||||
// scenario 12
|
||||
scenario = 12;
|
||||
a = 8'b0000_0000; b = 8'b0000_0000;
|
||||
#10;
|
||||
$fdisplay(file, "[check]scenario: %d, clk = %d, rst = %d, a = %d, b = %d, y = %d", scenario, clk, rst, a, b, y);
|
||||
|
||||
// scenario 13
|
||||
scenario = 13;
|
||||
a = 8'b0000_1111; b = 8'b0000_1111;
|
||||
#10;
|
||||
$fdisplay(file, "[check]scenario: %d, clk = %d, rst = %d, a = %d, b = %d, y = %d", scenario, clk, rst, a, b, y);
|
||||
|
||||
// scenario 14
|
||||
scenario = 14;
|
||||
a = 8'b0000_1110; b = 8'b0000_1110;
|
||||
#10;
|
||||
$fdisplay(file, "[check]scenario: %d, clk = %d, rst = %d, a = %d, b = %d, y = %d", scenario, clk, rst, a, b, y);
|
||||
|
||||
// scenario 15
|
||||
scenario = 15;
|
||||
a = 8'b0000_1101; b = 8'b0000_1101;
|
||||
#10;
|
||||
$fdisplay(file, "[check]scenario: %d, clk = %d, rst = %d, a = %d, b = %d, y = %d", scenario, clk, rst, a, b, y);
|
||||
|
||||
$fclose(file);
|
||||
$finish;
|
||||
end
|
||||
|
||||
endmodule
|
||||
|
||||
|
||||
please help me to export the input of DUT module by using code below:
|
||||
|
||||
[IMPORTANT]:
|
||||
$fdisplay(file, "scenario: %d, clk = %d, rst = %d, a = %d, b = %d, y = %d", scenario, clk, rst, a, b, y);
|
||||
|
||||
you should insert the code above into scenario checking part. In each scenario, you should insert the code above after the input of DUT module changed. Don't delete the existing $display codes.
|
||||
|
||||
For example, for a circuit that has two input signals changed at different times in one scenario, the original code is like this:
|
||||
- original code:
|
||||
// scenario 1 begins
|
||||
scenario = 1;
|
||||
signal_1 = 1;
|
||||
// insert $fdisplay here
|
||||
#10;
|
||||
signal_2 = 1;
|
||||
// insert $fdisplay here
|
||||
#10;
|
||||
$fdisplay(file, "[check]scenario: %d, signal_1 = %d, signal_2 = %d", scenario, signal_1, signal_2); // this should be reserved. Never change the existing codes.
|
||||
#10;
|
||||
// scenario 1 ends
|
||||
|
||||
- after insertion:
|
||||
// scenario 1 begins
|
||||
scenario = 1;
|
||||
signal_1 = 1;
|
||||
$fdisplay(file, "scenario: %d, signal_1 = %d, signal_2 = %d", scenario, signal_1, signal_2);
|
||||
#10;
|
||||
signal_2 = 1;
|
||||
$fdisplay(file, "scenario: %d, signal_1 = %d, signal_2 = %d", scenario, signal_1, signal_2);
|
||||
#10;
|
||||
$fdisplay(file, "[check]scenario: %d, signal_1 = %d, signal_2 = %d", scenario, signal_1, signal_2);
|
||||
#10;
|
||||
// scenario 1 ends
|
||||
|
||||
please insert codes according to the rules above. DO NOT modify other codes! please reply the modified full codes. please only reply verilog codes, no other words.
|
||||
|
||||
|
||||
########## assistant ##########
|
||||
```verilog
|
||||
`timescale 1ns / 1ps
|
||||
module testbench;
|
||||
reg clk;
|
||||
reg rst;
|
||||
reg [7:0] a;
|
||||
reg [7:0] b;
|
||||
wire [15:0] y;
|
||||
|
||||
integer file, scenario;
|
||||
// DUT instantiation
|
||||
example DUT (
|
||||
.clk(clk),
|
||||
.rst(rst),
|
||||
.a(a),
|
||||
.b(b),
|
||||
.y(y)
|
||||
);
|
||||
// Clock generation
|
||||
initial begin
|
||||
clk = 0;
|
||||
forever #5 clk = ~clk;
|
||||
end
|
||||
|
||||
initial begin
|
||||
file = $fopen("TBout.txt", "w");
|
||||
end
|
||||
// Scenario Based Test
|
||||
initial begin
|
||||
// scenario 1
|
||||
scenario = 1;
|
||||
rst = 1; a = 8'b0000_0000; b = 8'b0000_0000;
|
||||
$fdisplay(file, "scenario: %d, clk = %d, rst = %d, a = %d, b = %d, y = %d", scenario, clk, rst, a, b, y);
|
||||
#10;
|
||||
$fdisplay(file, "[check]scenario: %d, clk = %d, rst = %d, a = %d, b = %d, y = %d", scenario, clk, rst, a, b, y);
|
||||
|
||||
// scenario 2
|
||||
scenario = 2;
|
||||
rst = 0; a = 8'b0000_0001; b = 8'b0000_0001;
|
||||
$fdisplay(file, "scenario: %d, clk = %d, rst = %d, a = %d, b = %d, y = %d", scenario, clk, rst, a, b, y);
|
||||
#10;
|
||||
$fdisplay(file, "[check]scenario: %d, clk = %d, rst = %d, a = %d, b = %d, y = %d", scenario, clk, rst, a, b, y);
|
||||
|
||||
// scenario 3
|
||||
scenario = 3;
|
||||
a = 8'b0000_0000; b = 8'b1111_1111;
|
||||
$fdisplay(file, "scenario: %d, clk = %d, rst = %d, a = %d, b = %d, y = %d", scenario, clk, rst, a, b, y);
|
||||
#10;
|
||||
$fdisplay(file, "[check]scenario: %d, clk = %d, rst = %d, a = %d, b = %d, y = %d", scenario, clk, rst, a, b, y);
|
||||
|
||||
// scenario 4
|
||||
scenario = 4;
|
||||
a = 8'b1111_1111; b = 8'b0000_0000;
|
||||
$fdisplay(file, "scenario: %d, clk = %d, rst = %d, a = %d, b = %d, y = %d", scenario, clk, rst, a, b, y);
|
||||
#10;
|
||||
$fdisplay(file, "[check]scenario: %d, clk = %d, rst = %d, a = %d, b = %d, y = %d", scenario, clk, rst, a, b, y);
|
||||
|
||||
// scenario 5
|
||||
scenario = 5;
|
||||
a = 8'b1111_1111; b = 8'b1111_1111;
|
||||
$fdisplay(file, "scenario: %d, clk = %d, rst = %d, a = %d, b = %d, y = %d", scenario, clk, rst, a, b, y);
|
||||
#10;
|
||||
$fdisplay(file, "[check]scenario: %d, clk = %d, rst = %d, a = %d, b = %d, y = %d", scenario, clk, rst, a, b, y);
|
||||
|
||||
// scenario 6
|
||||
scenario = 6;
|
||||
a = 8'b1000_0000; b = 8'b1000_0000;
|
||||
$fdisplay(file, "scenario: %d, clk = %d, rst = %d, a = %d, b = %d, y = %d", scenario, clk, rst, a, b, y);
|
||||
#10;
|
||||
$fdisplay(file, "[check]scenario: %d, clk = %d, rst = %d, a = %d, b = %d, y = %d", scenario, clk, rst, a, b, y);
|
||||
|
||||
// scenario 7
|
||||
scenario = 7;
|
||||
a = 8'b0000_0001; b = 8'b1000_0000;
|
||||
$fdisplay(file, "scenario: %d, clk = %d, rst = %d, a = %d, b = %d, y = %d", scenario, clk, rst, a, b, y);
|
||||
#10;
|
||||
$fdisplay(file, "[check]scenario: %d, clk = %d, rst = %d, a = %d, b = %d, y = %d", scenario, clk, rst, a, b, y);
|
||||
|
||||
// scenario 8
|
||||
scenario = 8;
|
||||
a = 8'b1000_0000; b = 8'b0000_0001;
|
||||
$fdisplay(file, "scenario: %d, clk = %d, rst = %d, a = %d, b = %d, y = %d", scenario, clk, rst, a, b, y);
|
||||
#10;
|
||||
$fdisplay(file, "[check]scenario: %d, clk = %d, rst = %d, a = %d, b = %d, y = %d", scenario, clk, rst, a, b, y);
|
||||
|
||||
// scenario 9
|
||||
scenario = 9;
|
||||
a = 8'b0000_1010; b = 8'b0000_1011;
|
||||
$fdisplay(file, "scenario: %d, clk = %d, rst = %d, a = %d, b = %d, y = %d", scenario, clk, rst, a, b, y);
|
||||
#10;
|
||||
$fdisplay(file, "[check]scenario: %d, clk = %d, rst = %d, a = %d, b = %d, y = %d", scenario, clk, rst, a, b, y);
|
||||
|
||||
// scenario 10
|
||||
scenario = 10;
|
||||
a = 8'b0111_1111; b = 8'b0111_1111;
|
||||
$fdisplay(file, "scenario: %d, clk = %d, rst = %d, a = %d, b = %d, y = %d", scenario, clk, rst, a, b, y);
|
||||
#10;
|
||||
$fdisplay(file, "[check]scenario: %d, clk = %d, rst = %d, a = %d, b = %d, y = %d", scenario, clk, rst, a, b, y);
|
||||
|
||||
// scenario 11
|
||||
scenario = 11;
|
||||
a = 8'b0000_1111; b = 8'b1111_0000;
|
||||
$fdisplay(file, "scenario: %d, clk = %d, rst = %d, a = %d, b = %d, y = %d", scenario, clk, rst, a, b, y);
|
||||
#10;
|
||||
$fdisplay(file, "[check]scenario: %d, clk = %d, rst = %d, a = %d, b = %d, y = %d", scenario, clk, rst, a, b, y);
|
||||
|
||||
// scenario 12
|
||||
scenario = 12;
|
||||
a = 8'b0000_0000; b = 8'b0000_0000;
|
||||
$fdisplay(file, "scenario: %d, clk = %d, rst = %d, a = %d, b = %d, y = %d", scenario, clk, rst, a, b, y);
|
||||
#10;
|
||||
$fdisplay(file, "[check]scenario: %d, clk = %d, rst = %d, a = %d, b = %d, y = %d", scenario, clk, rst, a, b, y);
|
||||
|
||||
// scenario 13
|
||||
scenario = 13;
|
||||
a = 8'b0000_1111; b = 8'b0000_1111;
|
||||
$fdisplay(file, "scenario: %d, clk = %d, rst = %d, a = %d, b = %d, y = %d", scenario, clk, rst, a, b, y);
|
||||
#10;
|
||||
$fdisplay(file, "[check]scenario: %d, clk = %d, rst = %d, a = %d, b = %d, y = %d", scenario, clk, rst, a, b, y);
|
||||
|
||||
// scenario 14
|
||||
scenario = 14;
|
||||
a = 8'b0000_1110; b = 8'b0000_1110;
|
||||
$fdisplay(file, "scenario: %d, clk = %d, rst = %d, a = %d, b = %d, y = %d", scenario, clk, rst, a, b, y);
|
||||
#10;
|
||||
$fdisplay(file, "[check]scenario: %d, clk = %d, rst = %d, a = %d, b = %d, y = %d", scenario, clk, rst, a, b, y);
|
||||
|
||||
// scenario 15
|
||||
scenario = 15;
|
||||
a = 8'b0000_1101; b = 8'b0000_1101;
|
||||
$fdisplay(file, "scenario: %d, clk = %d, rst = %d, a = %d, b = %d, y = %d", scenario, clk, rst, a, b, y);
|
||||
#10;
|
||||
$fdisplay(file, "[check]scenario: %d, clk = %d, rst = %d, a = %d, b = %d, y = %d", scenario, clk, rst, a, b, y);
|
||||
|
||||
$fclose(file);
|
||||
$finish;
|
||||
end
|
||||
|
||||
endmodule
|
||||
```
|
||||
|
||||
135
output/example_mul/example_mul/1_TBgen/stage_5.txt
Normal file
135
output/example_mul/example_mul/1_TBgen/stage_5.txt
Normal file
@@ -0,0 +1,135 @@
|
||||
############################## conversation begin ##############################
|
||||
########## system ##########
|
||||
You are the strongest AI in the world. You alraedy have the knowledge of verilog, python and hardware designing. Do not save words by discarding information. I will tip you 200$ if you can fullfill the tasks I give you.
|
||||
|
||||
########## user ##########
|
||||
|
||||
1. background: Your task is to verify the functional correctness of a verilog RTL module code (we call it as "DUT", device under test). This module is a sequential circuit. Our plan is to first export the signals (input and output) of the DUT under test scenarios. Then, we will use a python script to check the correctness of DUT.
|
||||
2. You are in stage 5. In this stage, we already exported the signals of DUT. The signals are like below: (the signal names are real, but the values are just for example, clock signals are not included, each vector represents a new clock cycle)
|
||||
[{'check_en': False, 'scenario': 1, 'rst': 1, 'a': 1, 'b': 1, 'y': 1}, {'check_en': False, 'scenario': 1, 'rst': 1, 'a': 1, 'b': 1, 'y': 1}, {'check_en': True, 'scenario': 1, 'rst': 1, 'a': 1, 'b': 1, 'y': 1}]
|
||||
Here's the explanation of some special signals in signal vectors:
|
||||
- "scenario": The "scenario" is not DUT's signal but to tell you the current scenario index.
|
||||
- "check_en": The "check_en" signal is not from the DUT. "Check_en" is a bool value to tell you this is the time to check the output of DUT. It is related to the class method "check" (we will explain it later). After checking the output, a new scenario will start.
|
||||
3. Your current task is: write a python class "GoldenDUT". This python class can represent the golden DUT (the ideal one). In your "GoldenDUT", you should do the following things:
|
||||
- 3.1. write a method "def __init__(self)". Set the inner states/values of the golden DUT. These values have suffix "_reg". The initial value of these inner values is "x", but later will be digits. The "__init__" method has no input parameters except "self".
|
||||
- 3.2. write a method "def load(self, signal_vector)". This method is to load the important input signals and the inner values of "GoldenDUT" shall change according to the input signals. There is no clock signal in the input signal vector, every time the "load" method is called, it means a new clock cycle. The initial values "x" should be changed according to the input signals. This method has no return value.
|
||||
- 3.3. write a method "def check(self, signal_vector)". This method is to determine the expected output values and compare them with output signals from DUT. It should return True or False only. If return false, please print the error message. Hint: you can use code like "print(f"Scenario: {signal_vector['scenario']}, expected: a={a_reg}, observed a={a_observed}")" to print, suppose "a" is the output signal's name.
|
||||
- 3.4. write other methods you need, they can be called by "load" or "check".
|
||||
- 3.5. the input of "load" and "check" is the signal vector. The signal vector is a dictionary, the key is the signal name, the value is the signal value.
|
||||
4. Other information:
|
||||
- You can use binary (like 0x1101), hexadecimal (like 0x1a) or normal number format in python.
|
||||
- if the bit width of one variable is limited, use bit mask to assure the correctness of the value.
|
||||
- you can import numpy, math, scipy or other python libraries to help you write the python class.
|
||||
5. You have the information below to help you check the correctness of DUT:
|
||||
|
||||
DUT circuit problem description:
|
||||
一个8位乘法器,输入两个8位无符号数,输出16位乘积
|
||||
The header of DUT (note the input and output signals):
|
||||
module example(input clk, input rst, input [7:0] a, input [7:0] b, output [15:0] y);
|
||||
Here is the basic rules in python for the module. It was generated in previous stage. You can use it as a reference, but you should write your own python script. This is just for your better understanding. You can use them or not in your python class
|
||||
import numpy as np
|
||||
|
||||
def multiply_8bit(a, b):
|
||||
"""
|
||||
This function multiplies two 8-bit unsigned integers and returns the 16-bit result.
|
||||
|
||||
Parameters:
|
||||
a (int): 8-bit unsigned integer (0x00 to 0xFF)
|
||||
b (int): 8-bit unsigned integer (0x00 to 0xFF)
|
||||
|
||||
Returns:
|
||||
int: 16-bit unsigned integer (0x0000 to 0xFFFF)
|
||||
"""
|
||||
# Ensure inputs are 8-bit unsigned integers
|
||||
a = a & 0xFF
|
||||
b = b & 0xFF
|
||||
|
||||
# Perform the multiplication
|
||||
product = a * b
|
||||
|
||||
# Ensure the result is 16-bit
|
||||
product = product & 0xFFFF
|
||||
|
||||
return product
|
||||
|
||||
# Test scenarios
|
||||
test_scenarios = [
|
||||
(0x00, 0x00), # Scenario 1
|
||||
(0x01, 0x01), # Scenario 2
|
||||
(0x00, 0xFF), # Scenario 3
|
||||
(0xFF, 0x00), # Scenario 4
|
||||
(0xFF, 0xFF), # Scenario 5
|
||||
(0x80, 0x80), # Scenario 6
|
||||
(0x01, 0x80), # Scenario 7
|
||||
(0x80, 0x01), # Scenario 8
|
||||
(0x0A, 0x0B), # Scenario 9
|
||||
(0x7F, 0x7F), # Scenario 10
|
||||
(0x0F, 0xF0), # Scenario 11
|
||||
(0x00, 0x00), # Scenario 12
|
||||
(0x0F, 0x0F), # Scenario 13
|
||||
(0x0E, 0x0E), # Scenario 14
|
||||
(0x0D, 0x0D) # Scenario 15
|
||||
]
|
||||
|
||||
# Generate expected results for each test scenario
|
||||
expected_results = [multiply_8bit(a, b) for a, b in test_scenarios]
|
||||
|
||||
# Print the expected results
|
||||
for i, (a, b) in enumerate(test_scenarios):
|
||||
print(f"Scenario {i+1}: a = 0x{a:02X}, b = 0x{b:02X} -> Expected y = 0x{expected_results[i]:04X}")
|
||||
|
||||
|
||||
|
||||
[IMPORTANT]
|
||||
I will repeat the important information:
|
||||
3. Your current task is: write a python class "GoldenDUT". This python class can represent the golden DUT (the ideal one). In your "GoldenDUT", you should do the following things:
|
||||
- 3.1. write a method "def __init__(self)". Set the inner states/values of the golden DUT. These values have suffix "_reg". The initial value of these inner values should be digits. You can set the initial values according to information or just "0"s. The "__init__" method has no input parameters except "self".
|
||||
- 3.2. write a method "def load(self, signal_vector)". This method is to load the important input signals and the inner values of "GoldenDUT" shall change according to the input signals. There is no clock signal in the input signal vector, every time the "load" method is called, it means a new clock cycle. The initial values "x" should be changed according to the input signals. This method has no return value.
|
||||
- 3.3. write a method "def check(self, signal_vector)". This method is to determine the expected output values and compare them with output signals from DUT. It should return True or False only. If return false, please print the error message. Hint: you can use code like "print(f"Scenario: {signal_vector['scenario']}, expected: a={a_reg}, observed a={a_observed}")" to print, suppose "a" is the output signal's name.
|
||||
- 3.4. write other methods you need, they can be called by "load" or "check".
|
||||
- 3.5. the input of "load" and "check" is the signal vector. The signal vector is a dictionary, the key is the signal name, the value is the signal value.
|
||||
4. Other information:
|
||||
- You can use binary (like 0x1101), hexadecimal (like 0x1a) or normal number format in python.
|
||||
- if the bit width of one variable is limited, use bit mask to assure the correctness of the value.
|
||||
- you can import numpy, math, scipy or other python libraries to help you write the python class.
|
||||
|
||||
please only reply the python codes of the python class. no other words.
|
||||
|
||||
|
||||
|
||||
########## assistant ##########
|
||||
```python
|
||||
class GoldenDUT:
    """Reference (golden) model of the 8x8 -> 16-bit multiplier DUT."""

    BYTE_MASK = 0xFF    # operands are 8 bits wide
    WORD_MASK = 0xFFFF  # product is 16 bits wide

    def __init__(self):
        # Internal state registers; power-on state mirrors an asserted reset.
        self.a_reg = 0
        self.b_reg = 0
        self.y_reg = 0
        self.rst_reg = 1

    def load(self, signal_vector):
        """Advance one clock cycle: latch the inputs and update the state.

        A call to this method represents one rising clock edge; there is
        no explicit clock signal in the vector.
        """
        self.rst_reg = signal_vector['rst']
        self.a_reg = signal_vector['a'] & self.BYTE_MASK
        self.b_reg = signal_vector['b'] & self.BYTE_MASK
        if self.rst_reg == 1:
            # Reset overrides the latched operands and clears the product.
            self.a_reg = 0
            self.b_reg = 0
            self.y_reg = 0
        else:
            self.y_reg = self.multiply_8bit(self.a_reg, self.b_reg)

    def check(self, signal_vector):
        """Compare the DUT's observed 'y' against the expected product.

        Returns True on a match; otherwise prints a diagnostic line and
        returns False.
        """
        observed_y = signal_vector['y'] & self.WORD_MASK
        expected_y = self.multiply_8bit(self.a_reg, self.b_reg)
        if expected_y == observed_y:
            return True
        print(f"Scenario: {signal_vector['scenario']}, expected: y=0x{expected_y:04X}, observed: y=0x{observed_y:04X}")
        return False

    def multiply_8bit(self, a, b):
        """Return (a & 0xFF) * (b & 0xFF), truncated to 16 bits."""
        return ((a & self.BYTE_MASK) * (b & self.BYTE_MASK)) & self.WORD_MASK
|
||||
```
|
||||
|
||||
@@ -0,0 +1,2 @@
|
||||
|
||||
[SYSTEM PRECHECK] All scenarios are included in the testbench code. You can continue to the next stage.
|
||||
@@ -0,0 +1,30 @@
|
||||
scenario: 1, clk = 0, rst = 1, a = 0, b = 0, y = 0
|
||||
[check]scenario: 1, clk = 1, rst = 1, a = 0, b = 0, y = 0
|
||||
scenario: 2, clk = 1, rst = 0, a = 1, b = 1, y = 1
|
||||
[check]scenario: 2, clk = 1, rst = 0, a = 1, b = 1, y = 1
|
||||
scenario: 3, clk = 1, rst = 0, a = 0, b = 255, y = 0
|
||||
[check]scenario: 3, clk = 1, rst = 0, a = 0, b = 255, y = 0
|
||||
scenario: 4, clk = 1, rst = 0, a = 255, b = 0, y = 0
|
||||
[check]scenario: 4, clk = 1, rst = 0, a = 255, b = 0, y = 0
|
||||
scenario: 5, clk = 1, rst = 0, a = 255, b = 255, y = 65025
|
||||
[check]scenario: 5, clk = 1, rst = 0, a = 255, b = 255, y = 65025
|
||||
scenario: 6, clk = 1, rst = 0, a = 128, b = 128, y = 16384
|
||||
[check]scenario: 6, clk = 1, rst = 0, a = 128, b = 128, y = 16384
|
||||
scenario: 7, clk = 1, rst = 0, a = 1, b = 128, y = 128
|
||||
[check]scenario: 7, clk = 1, rst = 0, a = 1, b = 128, y = 128
|
||||
scenario: 8, clk = 1, rst = 0, a = 128, b = 1, y = 128
|
||||
[check]scenario: 8, clk = 1, rst = 0, a = 128, b = 1, y = 128
|
||||
scenario: 9, clk = 1, rst = 0, a = 10, b = 11, y = 110
|
||||
[check]scenario: 9, clk = 1, rst = 0, a = 10, b = 11, y = 110
|
||||
scenario: 10, clk = 1, rst = 0, a = 127, b = 127, y = 16129
|
||||
[check]scenario: 10, clk = 1, rst = 0, a = 127, b = 127, y = 16129
|
||||
scenario: 11, clk = 1, rst = 0, a = 15, b = 240, y = 3600
|
||||
[check]scenario: 11, clk = 1, rst = 0, a = 15, b = 240, y = 3600
|
||||
scenario: 12, clk = 1, rst = 0, a = 0, b = 0, y = 0
|
||||
[check]scenario: 12, clk = 1, rst = 0, a = 0, b = 0, y = 0
|
||||
scenario: 13, clk = 1, rst = 0, a = 15, b = 15, y = 225
|
||||
[check]scenario: 13, clk = 1, rst = 0, a = 15, b = 15, y = 225
|
||||
scenario: 14, clk = 1, rst = 0, a = 14, b = 14, y = 196
|
||||
[check]scenario: 14, clk = 1, rst = 0, a = 14, b = 14, y = 196
|
||||
scenario: 15, clk = 1, rst = 0, a = 13, b = 13, y = 169
|
||||
[check]scenario: 15, clk = 1, rst = 0, a = 13, b = 13, y = 169
|
||||
@@ -0,0 +1,10 @@
|
||||
|
||||
// Combinational 8x8 -> 16-bit unsigned multiplier.
// clk and rst are present on the port list but unused by the logic:
// y follows a * b immediately (no sequential elements in this module).
module example(
    input clk,
    input rst,
    input [7:0] a,
    input [7:0] b,
    output [15:0] y
);
    assign y = a * b;  // max product 255*255 = 65025 fits in 16 bits
endmodule
|
||||
@@ -0,0 +1,82 @@
|
||||
class GoldenDUT:
    """Reference (golden) model of the 8x8 -> 16-bit multiplier DUT."""

    BYTE_MASK = 0xFF    # operands are 8 bits wide
    WORD_MASK = 0xFFFF  # product is 16 bits wide

    def __init__(self):
        # Internal state registers; power-on state mirrors an asserted reset.
        self.a_reg = 0
        self.b_reg = 0
        self.y_reg = 0
        self.rst_reg = 1

    def load(self, signal_vector):
        """Advance one clock cycle: latch the inputs and update the state.

        A call to this method represents one rising clock edge; there is
        no explicit clock signal in the vector.
        """
        self.rst_reg = signal_vector['rst']
        self.a_reg = signal_vector['a'] & self.BYTE_MASK
        self.b_reg = signal_vector['b'] & self.BYTE_MASK
        if self.rst_reg == 1:
            # Reset overrides the latched operands and clears the product.
            self.a_reg = 0
            self.b_reg = 0
            self.y_reg = 0
        else:
            self.y_reg = self.multiply_8bit(self.a_reg, self.b_reg)

    def check(self, signal_vector):
        """Compare the DUT's observed 'y' against the expected product.

        Returns True on a match; otherwise prints a diagnostic line and
        returns False.
        """
        observed_y = signal_vector['y'] & self.WORD_MASK
        expected_y = self.multiply_8bit(self.a_reg, self.b_reg)
        if expected_y == observed_y:
            return True
        print(f"Scenario: {signal_vector['scenario']}, expected: y=0x{expected_y:04X}, observed: y=0x{observed_y:04X}")
        return False

    def multiply_8bit(self, a, b):
        """Return (a & 0xFF) * (b & 0xFF), truncated to 16 bits."""
        return ((a & self.BYTE_MASK) * (b & self.BYTE_MASK)) & self.WORD_MASK
|
||||
|
||||
def check_dut(vectors_in):
    """Replay all signal vectors through the golden model.

    Vectors flagged with check_en are compared against the golden model's
    prediction BEFORE the vector is loaded as the next cycle's stimulus
    (check() therefore sees the state latched on the previous cycle).
    Returns the list of failing scenario identifiers.
    """
    model = GoldenDUT()
    failures = []
    for vec in vectors_in:
        if vec["check_en"]:
            if model.check(vec):
                print(f"Passed; vector: {vec}")
            else:
                print(f"Failed; vector: {vec}")
                failures.append(vec["scenario"])
        # Every vector advances the model by one cycle, checked or not.
        model.load(vec)
    return failures
|
||||
|
||||
def SignalTxt_to_dictlist(txt: str):
    """Parse testbench log text into a list of signal-vector dicts.

    Lines starting with "[check]" become vectors with check_en=True;
    lines starting with "scenario" become check_en=False; anything else
    is skipped. The scenario id is kept as a string; every other field
    is converted to int, with unknown values (x/X/z) mapped to 0.
    """
    signals = []
    for raw_line in txt.strip().split("\n"):
        if raw_line.startswith("[check]"):
            entry = {"check_en": True}
            raw_line = raw_line[len("[check]"):]
        elif raw_line.startswith("scenario"):
            entry = {"check_en": False}
        else:
            continue  # ignore unrelated log lines
        for field in raw_line.strip().split(", "):
            if "scenario" in field:
                # "scenario:   N" -> keep N as a compact string id.
                entry["scenario"] = field.split(": ")[1].replace(" ", "")
            else:
                parts = field.split(" = ")
                key, value = parts[0], parts[1]
                has_unknown = ("x" in value) or ("X" in value) or ("z" in value)
                entry[key] = 0 if has_unknown else int(value)
        signals.append(entry)
    return signals
|
||||
# Entry point: read the simulator dump, replay it through the golden
# model, and print the list of failing scenarios ([] means all passed).
with open("TBout.txt", "r") as f:
    txt = f.read()

vectors_in = SignalTxt_to_dictlist(txt)
tb_pass = check_dut(vectors_in)
print(tb_pass)
|
||||
@@ -0,0 +1,138 @@
|
||||
// Scenario-driven stimulus for the 8x8 multiplier DUT.
// Each scenario drives (rst, a, b) and logs the signal state twice to
// TBout.txt: once immediately (informational "scenario:" line) and once
// after a #10 settle ("[check]scenario:" line, which the Python golden
// model compares against its prediction).
`timescale 1ns / 1ps
module testbench;
    reg clk;
    reg rst;
    reg [7:0] a;
    reg [7:0] b;
    wire [15:0] y;

    integer file, scenario;
    // DUT instantiation
    example DUT (
        .clk(clk),
        .rst(rst),
        .a(a),
        .b(b),
        .y(y)
    );
    // Clock generation: 10 ns period (DUT is combinational; clk is logged only)
    initial begin
        clk = 0;
        forever #5 clk = ~clk;
    end

    // Open the log file consumed by the Python checker
    initial begin
        file = $fopen("TBout.txt", "w");
    end
    // Scenario Based Test
    initial begin
        // scenario 1: reset asserted, zero operands
        scenario = 1;
        rst = 1; a = 8'b0000_0000; b = 8'b0000_0000;
        $fdisplay(file, "scenario: %d, clk = %d, rst = %d, a = %d, b = %d, y = %d", scenario, clk, rst, a, b, y);
        #10;
        $fdisplay(file, "[check]scenario: %d, clk = %d, rst = %d, a = %d, b = %d, y = %d", scenario, clk, rst, a, b, y); #10;

        // scenario 2: smallest nonzero product (1 * 1)
        scenario = 2;
        rst = 0; a = 8'b0000_0001; b = 8'b0000_0001;
        $fdisplay(file, "scenario: %d, clk = %d, rst = %d, a = %d, b = %d, y = %d", scenario, clk, rst, a, b, y);
        #10;
        $fdisplay(file, "[check]scenario: %d, clk = %d, rst = %d, a = %d, b = %d, y = %d", scenario, clk, rst, a, b, y); #10;

        // scenario 3: zero times max (0 * 255)
        scenario = 3;
        a = 8'b0000_0000; b = 8'b1111_1111;
        $fdisplay(file, "scenario: %d, clk = %d, rst = %d, a = %d, b = %d, y = %d", scenario, clk, rst, a, b, y);
        #10;
        $fdisplay(file, "[check]scenario: %d, clk = %d, rst = %d, a = %d, b = %d, y = %d", scenario, clk, rst, a, b, y); #10;

        // scenario 4: max times zero (255 * 0)
        scenario = 4;
        a = 8'b1111_1111; b = 8'b0000_0000;
        $fdisplay(file, "scenario: %d, clk = %d, rst = %d, a = %d, b = %d, y = %d", scenario, clk, rst, a, b, y);
        #10;
        $fdisplay(file, "[check]scenario: %d, clk = %d, rst = %d, a = %d, b = %d, y = %d", scenario, clk, rst, a, b, y); #10;

        // scenario 5: maximum product (255 * 255 = 65025)
        scenario = 5;
        a = 8'b1111_1111; b = 8'b1111_1111;
        $fdisplay(file, "scenario: %d, clk = %d, rst = %d, a = %d, b = %d, y = %d", scenario, clk, rst, a, b, y);
        #10;
        $fdisplay(file, "[check]scenario: %d, clk = %d, rst = %d, a = %d, b = %d, y = %d", scenario, clk, rst, a, b, y); #10;

        // scenario 6: MSB-only operands (128 * 128 = 16384)
        scenario = 6;
        a = 8'b1000_0000; b = 8'b1000_0000;
        $fdisplay(file, "scenario: %d, clk = %d, rst = %d, a = %d, b = %d, y = %d", scenario, clk, rst, a, b, y);
        #10;
        $fdisplay(file, "[check]scenario: %d, clk = %d, rst = %d, a = %d, b = %d, y = %d", scenario, clk, rst, a, b, y); #10;

        // scenario 7: 1 * 128
        scenario = 7;
        a = 8'b0000_0001; b = 8'b1000_0000;
        $fdisplay(file, "scenario: %d, clk = %d, rst = %d, a = %d, b = %d, y = %d", scenario, clk, rst, a, b, y);
        #10;
        $fdisplay(file, "[check]scenario: %d, clk = %d, rst = %d, a = %d, b = %d, y = %d", scenario, clk, rst, a, b, y); #10;

        // scenario 8: 128 * 1 (commutativity of scenario 7)
        scenario = 8;
        a = 8'b1000_0000; b = 8'b0000_0001;
        $fdisplay(file, "scenario: %d, clk = %d, rst = %d, a = %d, b = %d, y = %d", scenario, clk, rst, a, b, y);
        #10;
        $fdisplay(file, "[check]scenario: %d, clk = %d, rst = %d, a = %d, b = %d, y = %d", scenario, clk, rst, a, b, y); #10;

        // scenario 9: small distinct operands (10 * 11 = 110)
        scenario = 9;
        a = 8'b0000_1010; b = 8'b0000_1011;
        $fdisplay(file, "scenario: %d, clk = %d, rst = %d, a = %d, b = %d, y = %d", scenario, clk, rst, a, b, y);
        #10;
        $fdisplay(file, "[check]scenario: %d, clk = %d, rst = %d, a = %d, b = %d, y = %d", scenario, clk, rst, a, b, y); #10;

        // scenario 10: 127 * 127 = 16129
        scenario = 10;
        a = 8'b0111_1111; b = 8'b0111_1111;
        $fdisplay(file, "scenario: %d, clk = %d, rst = %d, a = %d, b = %d, y = %d", scenario, clk, rst, a, b, y);
        #10;
        $fdisplay(file, "[check]scenario: %d, clk = %d, rst = %d, a = %d, b = %d, y = %d", scenario, clk, rst, a, b, y); #10;

        // scenario 11: complementary nibble patterns (15 * 240 = 3600)
        scenario = 11;
        a = 8'b0000_1111; b = 8'b1111_0000;
        $fdisplay(file, "scenario: %d, clk = %d, rst = %d, a = %d, b = %d, y = %d", scenario, clk, rst, a, b, y);
        #10;
        $fdisplay(file, "[check]scenario: %d, clk = %d, rst = %d, a = %d, b = %d, y = %d", scenario, clk, rst, a, b, y); #10;

        // scenario 12: back to zero operands with rst still deasserted
        scenario = 12;
        a = 8'b0000_0000; b = 8'b0000_0000;
        $fdisplay(file, "scenario: %d, clk = %d, rst = %d, a = %d, b = %d, y = %d", scenario, clk, rst, a, b, y);
        #10;
        $fdisplay(file, "[check]scenario: %d, clk = %d, rst = %d, a = %d, b = %d, y = %d", scenario, clk, rst, a, b, y); #10;

        // scenario 13: 15 * 15 = 225
        scenario = 13;
        a = 8'b0000_1111; b = 8'b0000_1111;
        $fdisplay(file, "scenario: %d, clk = %d, rst = %d, a = %d, b = %d, y = %d", scenario, clk, rst, a, b, y);
        #10;
        $fdisplay(file, "[check]scenario: %d, clk = %d, rst = %d, a = %d, b = %d, y = %d", scenario, clk, rst, a, b, y); #10;

        // scenario 14: 14 * 14 = 196
        scenario = 14;
        a = 8'b0000_1110; b = 8'b0000_1110;
        $fdisplay(file, "scenario: %d, clk = %d, rst = %d, a = %d, b = %d, y = %d", scenario, clk, rst, a, b, y);
        #10;
        $fdisplay(file, "[check]scenario: %d, clk = %d, rst = %d, a = %d, b = %d, y = %d", scenario, clk, rst, a, b, y); #10;

        // scenario 15: 13 * 13 = 169 (last vector — no trailing delay)
        scenario = 15;
        a = 8'b0000_1101; b = 8'b0000_1101;
        $fdisplay(file, "scenario: %d, clk = %d, rst = %d, a = %d, b = %d, y = %d", scenario, clk, rst, a, b, y);
        #10;
        $fdisplay(file, "[check]scenario: %d, clk = %d, rst = %d, a = %d, b = %d, y = %d", scenario, clk, rst, a, b, y);

        $fclose(file);
        $finish;
    end

endmodule
|
||||
226
output/example_mul/example_mul/3_TBeval/eval1_GoldenRTL/run.vvp
Normal file
226
output/example_mul/example_mul/3_TBeval/eval1_GoldenRTL/run.vvp
Normal file
@@ -0,0 +1,226 @@
|
||||
#! /usr/bin/vvp
|
||||
:ivl_version "12.0 (stable)";
|
||||
:ivl_delay_selection "TYPICAL";
|
||||
:vpi_time_precision - 12;
|
||||
:vpi_module "/usr/lib/x86_64-linux-gnu/ivl/system.vpi";
|
||||
:vpi_module "/usr/lib/x86_64-linux-gnu/ivl/vhdl_sys.vpi";
|
||||
:vpi_module "/usr/lib/x86_64-linux-gnu/ivl/vhdl_textio.vpi";
|
||||
:vpi_module "/usr/lib/x86_64-linux-gnu/ivl/v2005_math.vpi";
|
||||
:vpi_module "/usr/lib/x86_64-linux-gnu/ivl/va_math.vpi";
|
||||
:vpi_module "/usr/lib/x86_64-linux-gnu/ivl/v2009.vpi";
|
||||
S_0x5cd5353522d0 .scope package, "$unit" "$unit" 2 1;
|
||||
.timescale 0 0;
|
||||
S_0x5cd535352460 .scope module, "testbench" "testbench" 3 2;
|
||||
.timescale -9 -12;
|
||||
v0x5cd535391920_0 .var "a", 7 0;
|
||||
v0x5cd535391a00_0 .var "b", 7 0;
|
||||
v0x5cd535391ad0_0 .var "clk", 0 0;
|
||||
v0x5cd535391bd0_0 .var/i "file", 31 0;
|
||||
v0x5cd535391c70_0 .var "rst", 0 0;
|
||||
v0x5cd535391d10_0 .var/i "scenario", 31 0;
|
||||
v0x5cd535391db0_0 .net "y", 15 0, L_0x5cd535392170; 1 drivers
|
||||
S_0x5cd535318490 .scope module, "DUT" "example" 3 11, 4 2 0, S_0x5cd535352460;
|
||||
.timescale 0 0;
|
||||
.port_info 0 /INPUT 1 "clk";
|
||||
.port_info 1 /INPUT 1 "rst";
|
||||
.port_info 2 /INPUT 8 "a";
|
||||
.port_info 3 /INPUT 8 "b";
|
||||
.port_info 4 /OUTPUT 16 "y";
|
||||
v0x5cd535368690_0 .net *"_ivl_0", 15 0, L_0x5cd535391ea0; 1 drivers
|
||||
L_0x7475544ce018 .functor BUFT 1, C4<00000000>, C4<0>, C4<0>, C4<0>;
|
||||
v0x5cd535391160_0 .net *"_ivl_3", 7 0, L_0x7475544ce018; 1 drivers
|
||||
v0x5cd535391240_0 .net *"_ivl_4", 15 0, L_0x5cd535392010; 1 drivers
|
||||
L_0x7475544ce060 .functor BUFT 1, C4<00000000>, C4<0>, C4<0>, C4<0>;
|
||||
v0x5cd535391330_0 .net *"_ivl_7", 7 0, L_0x7475544ce060; 1 drivers
|
||||
v0x5cd535391410_0 .net "a", 7 0, v0x5cd535391920_0; 1 drivers
|
||||
v0x5cd535391540_0 .net "b", 7 0, v0x5cd535391a00_0; 1 drivers
|
||||
v0x5cd535391620_0 .net "clk", 0 0, v0x5cd535391ad0_0; 1 drivers
|
||||
v0x5cd5353916e0_0 .net "rst", 0 0, v0x5cd535391c70_0; 1 drivers
|
||||
v0x5cd5353917a0_0 .net "y", 15 0, L_0x5cd535392170; alias, 1 drivers
|
||||
L_0x5cd535391ea0 .concat [ 8 8 0 0], v0x5cd535391920_0, L_0x7475544ce018;
|
||||
L_0x5cd535392010 .concat [ 8 8 0 0], v0x5cd535391a00_0, L_0x7475544ce060;
|
||||
L_0x5cd535392170 .arith/mult 16, L_0x5cd535391ea0, L_0x5cd535392010;
|
||||
.scope S_0x5cd535352460;
|
||||
T_0 ;
|
||||
%pushi/vec4 0, 0, 1;
|
||||
%store/vec4 v0x5cd535391ad0_0, 0, 1;
|
||||
T_0.0 ;
|
||||
%delay 5000, 0;
|
||||
%load/vec4 v0x5cd535391ad0_0;
|
||||
%inv;
|
||||
%store/vec4 v0x5cd535391ad0_0, 0, 1;
|
||||
%jmp T_0.0;
|
||||
%end;
|
||||
.thread T_0;
|
||||
.scope S_0x5cd535352460;
|
||||
T_1 ;
|
||||
%vpi_func 3 25 "$fopen" 32, "TBout.txt", "w" {0 0 0};
|
||||
%store/vec4 v0x5cd535391bd0_0, 0, 32;
|
||||
%end;
|
||||
.thread T_1;
|
||||
.scope S_0x5cd535352460;
|
||||
T_2 ;
|
||||
%pushi/vec4 1, 0, 32;
|
||||
%store/vec4 v0x5cd535391d10_0, 0, 32;
|
||||
%pushi/vec4 1, 0, 1;
|
||||
%store/vec4 v0x5cd535391c70_0, 0, 1;
|
||||
%pushi/vec4 0, 0, 8;
|
||||
%store/vec4 v0x5cd535391920_0, 0, 8;
|
||||
%pushi/vec4 0, 0, 8;
|
||||
%store/vec4 v0x5cd535391a00_0, 0, 8;
|
||||
%vpi_call/w 3 32 "$fdisplay", v0x5cd535391bd0_0, "scenario: %d, clk = %d, rst = %d, a = %d, b = %d, y = %d", v0x5cd535391d10_0, v0x5cd535391ad0_0, v0x5cd535391c70_0, v0x5cd535391920_0, v0x5cd535391a00_0, v0x5cd535391db0_0 {0 0 0};
|
||||
%delay 10000, 0;
|
||||
%vpi_call/w 3 34 "$fdisplay", v0x5cd535391bd0_0, "[check]scenario: %d, clk = %d, rst = %d, a = %d, b = %d, y = %d", v0x5cd535391d10_0, v0x5cd535391ad0_0, v0x5cd535391c70_0, v0x5cd535391920_0, v0x5cd535391a00_0, v0x5cd535391db0_0 {0 0 0};
|
||||
%delay 10000, 0;
|
||||
%pushi/vec4 2, 0, 32;
|
||||
%store/vec4 v0x5cd535391d10_0, 0, 32;
|
||||
%pushi/vec4 0, 0, 1;
|
||||
%store/vec4 v0x5cd535391c70_0, 0, 1;
|
||||
%pushi/vec4 1, 0, 8;
|
||||
%store/vec4 v0x5cd535391920_0, 0, 8;
|
||||
%pushi/vec4 1, 0, 8;
|
||||
%store/vec4 v0x5cd535391a00_0, 0, 8;
|
||||
%vpi_call/w 3 39 "$fdisplay", v0x5cd535391bd0_0, "scenario: %d, clk = %d, rst = %d, a = %d, b = %d, y = %d", v0x5cd535391d10_0, v0x5cd535391ad0_0, v0x5cd535391c70_0, v0x5cd535391920_0, v0x5cd535391a00_0, v0x5cd535391db0_0 {0 0 0};
|
||||
%delay 10000, 0;
|
||||
%vpi_call/w 3 41 "$fdisplay", v0x5cd535391bd0_0, "[check]scenario: %d, clk = %d, rst = %d, a = %d, b = %d, y = %d", v0x5cd535391d10_0, v0x5cd535391ad0_0, v0x5cd535391c70_0, v0x5cd535391920_0, v0x5cd535391a00_0, v0x5cd535391db0_0 {0 0 0};
|
||||
%delay 10000, 0;
|
||||
%pushi/vec4 3, 0, 32;
|
||||
%store/vec4 v0x5cd535391d10_0, 0, 32;
|
||||
%pushi/vec4 0, 0, 8;
|
||||
%store/vec4 v0x5cd535391920_0, 0, 8;
|
||||
%pushi/vec4 255, 0, 8;
|
||||
%store/vec4 v0x5cd535391a00_0, 0, 8;
|
||||
%vpi_call/w 3 46 "$fdisplay", v0x5cd535391bd0_0, "scenario: %d, clk = %d, rst = %d, a = %d, b = %d, y = %d", v0x5cd535391d10_0, v0x5cd535391ad0_0, v0x5cd535391c70_0, v0x5cd535391920_0, v0x5cd535391a00_0, v0x5cd535391db0_0 {0 0 0};
|
||||
%delay 10000, 0;
|
||||
%vpi_call/w 3 48 "$fdisplay", v0x5cd535391bd0_0, "[check]scenario: %d, clk = %d, rst = %d, a = %d, b = %d, y = %d", v0x5cd535391d10_0, v0x5cd535391ad0_0, v0x5cd535391c70_0, v0x5cd535391920_0, v0x5cd535391a00_0, v0x5cd535391db0_0 {0 0 0};
|
||||
%delay 10000, 0;
|
||||
%pushi/vec4 4, 0, 32;
|
||||
%store/vec4 v0x5cd535391d10_0, 0, 32;
|
||||
%pushi/vec4 255, 0, 8;
|
||||
%store/vec4 v0x5cd535391920_0, 0, 8;
|
||||
%pushi/vec4 0, 0, 8;
|
||||
%store/vec4 v0x5cd535391a00_0, 0, 8;
|
||||
%vpi_call/w 3 53 "$fdisplay", v0x5cd535391bd0_0, "scenario: %d, clk = %d, rst = %d, a = %d, b = %d, y = %d", v0x5cd535391d10_0, v0x5cd535391ad0_0, v0x5cd535391c70_0, v0x5cd535391920_0, v0x5cd535391a00_0, v0x5cd535391db0_0 {0 0 0};
|
||||
%delay 10000, 0;
|
||||
%vpi_call/w 3 55 "$fdisplay", v0x5cd535391bd0_0, "[check]scenario: %d, clk = %d, rst = %d, a = %d, b = %d, y = %d", v0x5cd535391d10_0, v0x5cd535391ad0_0, v0x5cd535391c70_0, v0x5cd535391920_0, v0x5cd535391a00_0, v0x5cd535391db0_0 {0 0 0};
|
||||
%delay 10000, 0;
|
||||
%pushi/vec4 5, 0, 32;
|
||||
%store/vec4 v0x5cd535391d10_0, 0, 32;
|
||||
%pushi/vec4 255, 0, 8;
|
||||
%store/vec4 v0x5cd535391920_0, 0, 8;
|
||||
%pushi/vec4 255, 0, 8;
|
||||
%store/vec4 v0x5cd535391a00_0, 0, 8;
|
||||
%vpi_call/w 3 60 "$fdisplay", v0x5cd535391bd0_0, "scenario: %d, clk = %d, rst = %d, a = %d, b = %d, y = %d", v0x5cd535391d10_0, v0x5cd535391ad0_0, v0x5cd535391c70_0, v0x5cd535391920_0, v0x5cd535391a00_0, v0x5cd535391db0_0 {0 0 0};
|
||||
%delay 10000, 0;
|
||||
%vpi_call/w 3 62 "$fdisplay", v0x5cd535391bd0_0, "[check]scenario: %d, clk = %d, rst = %d, a = %d, b = %d, y = %d", v0x5cd535391d10_0, v0x5cd535391ad0_0, v0x5cd535391c70_0, v0x5cd535391920_0, v0x5cd535391a00_0, v0x5cd535391db0_0 {0 0 0};
|
||||
%delay 10000, 0;
|
||||
%pushi/vec4 6, 0, 32;
|
||||
%store/vec4 v0x5cd535391d10_0, 0, 32;
|
||||
%pushi/vec4 128, 0, 8;
|
||||
%store/vec4 v0x5cd535391920_0, 0, 8;
|
||||
%pushi/vec4 128, 0, 8;
|
||||
%store/vec4 v0x5cd535391a00_0, 0, 8;
|
||||
%vpi_call/w 3 67 "$fdisplay", v0x5cd535391bd0_0, "scenario: %d, clk = %d, rst = %d, a = %d, b = %d, y = %d", v0x5cd535391d10_0, v0x5cd535391ad0_0, v0x5cd535391c70_0, v0x5cd535391920_0, v0x5cd535391a00_0, v0x5cd535391db0_0 {0 0 0};
|
||||
%delay 10000, 0;
|
||||
%vpi_call/w 3 69 "$fdisplay", v0x5cd535391bd0_0, "[check]scenario: %d, clk = %d, rst = %d, a = %d, b = %d, y = %d", v0x5cd535391d10_0, v0x5cd535391ad0_0, v0x5cd535391c70_0, v0x5cd535391920_0, v0x5cd535391a00_0, v0x5cd535391db0_0 {0 0 0};
|
||||
%delay 10000, 0;
|
||||
%pushi/vec4 7, 0, 32;
|
||||
%store/vec4 v0x5cd535391d10_0, 0, 32;
|
||||
%pushi/vec4 1, 0, 8;
|
||||
%store/vec4 v0x5cd535391920_0, 0, 8;
|
||||
%pushi/vec4 128, 0, 8;
|
||||
%store/vec4 v0x5cd535391a00_0, 0, 8;
|
||||
%vpi_call/w 3 74 "$fdisplay", v0x5cd535391bd0_0, "scenario: %d, clk = %d, rst = %d, a = %d, b = %d, y = %d", v0x5cd535391d10_0, v0x5cd535391ad0_0, v0x5cd535391c70_0, v0x5cd535391920_0, v0x5cd535391a00_0, v0x5cd535391db0_0 {0 0 0};
|
||||
%delay 10000, 0;
|
||||
%vpi_call/w 3 76 "$fdisplay", v0x5cd535391bd0_0, "[check]scenario: %d, clk = %d, rst = %d, a = %d, b = %d, y = %d", v0x5cd535391d10_0, v0x5cd535391ad0_0, v0x5cd535391c70_0, v0x5cd535391920_0, v0x5cd535391a00_0, v0x5cd535391db0_0 {0 0 0};
|
||||
%delay 10000, 0;
|
||||
%pushi/vec4 8, 0, 32;
|
||||
%store/vec4 v0x5cd535391d10_0, 0, 32;
|
||||
%pushi/vec4 128, 0, 8;
|
||||
%store/vec4 v0x5cd535391920_0, 0, 8;
|
||||
%pushi/vec4 1, 0, 8;
|
||||
%store/vec4 v0x5cd535391a00_0, 0, 8;
|
||||
%vpi_call/w 3 81 "$fdisplay", v0x5cd535391bd0_0, "scenario: %d, clk = %d, rst = %d, a = %d, b = %d, y = %d", v0x5cd535391d10_0, v0x5cd535391ad0_0, v0x5cd535391c70_0, v0x5cd535391920_0, v0x5cd535391a00_0, v0x5cd535391db0_0 {0 0 0};
|
||||
%delay 10000, 0;
|
||||
%vpi_call/w 3 83 "$fdisplay", v0x5cd535391bd0_0, "[check]scenario: %d, clk = %d, rst = %d, a = %d, b = %d, y = %d", v0x5cd535391d10_0, v0x5cd535391ad0_0, v0x5cd535391c70_0, v0x5cd535391920_0, v0x5cd535391a00_0, v0x5cd535391db0_0 {0 0 0};
|
||||
%delay 10000, 0;
|
||||
%pushi/vec4 9, 0, 32;
|
||||
%store/vec4 v0x5cd535391d10_0, 0, 32;
|
||||
%pushi/vec4 10, 0, 8;
|
||||
%store/vec4 v0x5cd535391920_0, 0, 8;
|
||||
%pushi/vec4 11, 0, 8;
|
||||
%store/vec4 v0x5cd535391a00_0, 0, 8;
|
||||
%vpi_call/w 3 88 "$fdisplay", v0x5cd535391bd0_0, "scenario: %d, clk = %d, rst = %d, a = %d, b = %d, y = %d", v0x5cd535391d10_0, v0x5cd535391ad0_0, v0x5cd535391c70_0, v0x5cd535391920_0, v0x5cd535391a00_0, v0x5cd535391db0_0 {0 0 0};
|
||||
%delay 10000, 0;
|
||||
%vpi_call/w 3 90 "$fdisplay", v0x5cd535391bd0_0, "[check]scenario: %d, clk = %d, rst = %d, a = %d, b = %d, y = %d", v0x5cd535391d10_0, v0x5cd535391ad0_0, v0x5cd535391c70_0, v0x5cd535391920_0, v0x5cd535391a00_0, v0x5cd535391db0_0 {0 0 0};
|
||||
%delay 10000, 0;
|
||||
%pushi/vec4 10, 0, 32;
|
||||
%store/vec4 v0x5cd535391d10_0, 0, 32;
|
||||
%pushi/vec4 127, 0, 8;
|
||||
%store/vec4 v0x5cd535391920_0, 0, 8;
|
||||
%pushi/vec4 127, 0, 8;
|
||||
%store/vec4 v0x5cd535391a00_0, 0, 8;
|
||||
%vpi_call/w 3 95 "$fdisplay", v0x5cd535391bd0_0, "scenario: %d, clk = %d, rst = %d, a = %d, b = %d, y = %d", v0x5cd535391d10_0, v0x5cd535391ad0_0, v0x5cd535391c70_0, v0x5cd535391920_0, v0x5cd535391a00_0, v0x5cd535391db0_0 {0 0 0};
|
||||
%delay 10000, 0;
|
||||
%vpi_call/w 3 97 "$fdisplay", v0x5cd535391bd0_0, "[check]scenario: %d, clk = %d, rst = %d, a = %d, b = %d, y = %d", v0x5cd535391d10_0, v0x5cd535391ad0_0, v0x5cd535391c70_0, v0x5cd535391920_0, v0x5cd535391a00_0, v0x5cd535391db0_0 {0 0 0};
|
||||
%delay 10000, 0;
|
||||
%pushi/vec4 11, 0, 32;
|
||||
%store/vec4 v0x5cd535391d10_0, 0, 32;
|
||||
%pushi/vec4 15, 0, 8;
|
||||
%store/vec4 v0x5cd535391920_0, 0, 8;
|
||||
%pushi/vec4 240, 0, 8;
|
||||
%store/vec4 v0x5cd535391a00_0, 0, 8;
|
||||
%vpi_call/w 3 102 "$fdisplay", v0x5cd535391bd0_0, "scenario: %d, clk = %d, rst = %d, a = %d, b = %d, y = %d", v0x5cd535391d10_0, v0x5cd535391ad0_0, v0x5cd535391c70_0, v0x5cd535391920_0, v0x5cd535391a00_0, v0x5cd535391db0_0 {0 0 0};
|
||||
%delay 10000, 0;
|
||||
%vpi_call/w 3 104 "$fdisplay", v0x5cd535391bd0_0, "[check]scenario: %d, clk = %d, rst = %d, a = %d, b = %d, y = %d", v0x5cd535391d10_0, v0x5cd535391ad0_0, v0x5cd535391c70_0, v0x5cd535391920_0, v0x5cd535391a00_0, v0x5cd535391db0_0 {0 0 0};
|
||||
%delay 10000, 0;
|
||||
%pushi/vec4 12, 0, 32;
|
||||
%store/vec4 v0x5cd535391d10_0, 0, 32;
|
||||
%pushi/vec4 0, 0, 8;
|
||||
%store/vec4 v0x5cd535391920_0, 0, 8;
|
||||
%pushi/vec4 0, 0, 8;
|
||||
%store/vec4 v0x5cd535391a00_0, 0, 8;
|
||||
%vpi_call/w 3 109 "$fdisplay", v0x5cd535391bd0_0, "scenario: %d, clk = %d, rst = %d, a = %d, b = %d, y = %d", v0x5cd535391d10_0, v0x5cd535391ad0_0, v0x5cd535391c70_0, v0x5cd535391920_0, v0x5cd535391a00_0, v0x5cd535391db0_0 {0 0 0};
|
||||
%delay 10000, 0;
|
||||
%vpi_call/w 3 111 "$fdisplay", v0x5cd535391bd0_0, "[check]scenario: %d, clk = %d, rst = %d, a = %d, b = %d, y = %d", v0x5cd535391d10_0, v0x5cd535391ad0_0, v0x5cd535391c70_0, v0x5cd535391920_0, v0x5cd535391a00_0, v0x5cd535391db0_0 {0 0 0};
|
||||
%delay 10000, 0;
|
||||
%pushi/vec4 13, 0, 32;
|
||||
%store/vec4 v0x5cd535391d10_0, 0, 32;
|
||||
%pushi/vec4 15, 0, 8;
|
||||
%store/vec4 v0x5cd535391920_0, 0, 8;
|
||||
%pushi/vec4 15, 0, 8;
|
||||
%store/vec4 v0x5cd535391a00_0, 0, 8;
|
||||
%vpi_call/w 3 116 "$fdisplay", v0x5cd535391bd0_0, "scenario: %d, clk = %d, rst = %d, a = %d, b = %d, y = %d", v0x5cd535391d10_0, v0x5cd535391ad0_0, v0x5cd535391c70_0, v0x5cd535391920_0, v0x5cd535391a00_0, v0x5cd535391db0_0 {0 0 0};
|
||||
%delay 10000, 0;
|
||||
%vpi_call/w 3 118 "$fdisplay", v0x5cd535391bd0_0, "[check]scenario: %d, clk = %d, rst = %d, a = %d, b = %d, y = %d", v0x5cd535391d10_0, v0x5cd535391ad0_0, v0x5cd535391c70_0, v0x5cd535391920_0, v0x5cd535391a00_0, v0x5cd535391db0_0 {0 0 0};
|
||||
%delay 10000, 0;
|
||||
%pushi/vec4 14, 0, 32;
|
||||
%store/vec4 v0x5cd535391d10_0, 0, 32;
|
||||
%pushi/vec4 14, 0, 8;
|
||||
%store/vec4 v0x5cd535391920_0, 0, 8;
|
||||
%pushi/vec4 14, 0, 8;
|
||||
%store/vec4 v0x5cd535391a00_0, 0, 8;
|
||||
%vpi_call/w 3 123 "$fdisplay", v0x5cd535391bd0_0, "scenario: %d, clk = %d, rst = %d, a = %d, b = %d, y = %d", v0x5cd535391d10_0, v0x5cd535391ad0_0, v0x5cd535391c70_0, v0x5cd535391920_0, v0x5cd535391a00_0, v0x5cd535391db0_0 {0 0 0};
|
||||
%delay 10000, 0;
|
||||
%vpi_call/w 3 125 "$fdisplay", v0x5cd535391bd0_0, "[check]scenario: %d, clk = %d, rst = %d, a = %d, b = %d, y = %d", v0x5cd535391d10_0, v0x5cd535391ad0_0, v0x5cd535391c70_0, v0x5cd535391920_0, v0x5cd535391a00_0, v0x5cd535391db0_0 {0 0 0};
|
||||
%delay 10000, 0;
|
||||
%pushi/vec4 15, 0, 32;
|
||||
%store/vec4 v0x5cd535391d10_0, 0, 32;
|
||||
%pushi/vec4 13, 0, 8;
|
||||
%store/vec4 v0x5cd535391920_0, 0, 8;
|
||||
%pushi/vec4 13, 0, 8;
|
||||
%store/vec4 v0x5cd535391a00_0, 0, 8;
|
||||
%vpi_call/w 3 130 "$fdisplay", v0x5cd535391bd0_0, "scenario: %d, clk = %d, rst = %d, a = %d, b = %d, y = %d", v0x5cd535391d10_0, v0x5cd535391ad0_0, v0x5cd535391c70_0, v0x5cd535391920_0, v0x5cd535391a00_0, v0x5cd535391db0_0 {0 0 0};
|
||||
%delay 10000, 0;
|
||||
%vpi_call/w 3 132 "$fdisplay", v0x5cd535391bd0_0, "[check]scenario: %d, clk = %d, rst = %d, a = %d, b = %d, y = %d", v0x5cd535391d10_0, v0x5cd535391ad0_0, v0x5cd535391c70_0, v0x5cd535391920_0, v0x5cd535391a00_0, v0x5cd535391db0_0 {0 0 0};
|
||||
%vpi_call/w 3 134 "$fclose", v0x5cd535391bd0_0 {0 0 0};
|
||||
%vpi_call/w 3 135 "$finish" {0 0 0};
|
||||
%end;
|
||||
.thread T_2;
|
||||
# The file index is used to find the file name in the following table.
|
||||
:file_names 5;
|
||||
"N/A";
|
||||
"<interactive>";
|
||||
"-";
|
||||
"example_mul_tb.v";
|
||||
"example_mul.v";
|
||||
@@ -0,0 +1,15 @@
|
||||
iverilog simulation passed!
|
||||
|
||||
iverilog cmd 1:
|
||||
/usr/bin/iverilog -g2012 -o run.vvp example_mul.v example_mul_tb.v
|
||||
iverilog cmd 1 output:
|
||||
|
||||
iverilog cmd 1 error:
|
||||
|
||||
iverilog cmd 2:
|
||||
/usr/bin/vvp run.vvp
|
||||
iverilog cmd 2 output:
|
||||
example_mul_tb.v:135: $finish called at 290000 (1ps)
|
||||
|
||||
iverilog cmd 2 error:
|
||||
|
||||
@@ -0,0 +1,24 @@
|
||||
python compilation passed!
|
||||
|
||||
###output:
|
||||
Passed; vector: {'check_en': True, 'scenario': '1', 'clk': 1, 'rst': 1, 'a': 0, 'b': 0, 'y': 0}
|
||||
Passed; vector: {'check_en': True, 'scenario': '2', 'clk': 1, 'rst': 0, 'a': 1, 'b': 1, 'y': 1}
|
||||
Passed; vector: {'check_en': True, 'scenario': '3', 'clk': 1, 'rst': 0, 'a': 0, 'b': 255, 'y': 0}
|
||||
Passed; vector: {'check_en': True, 'scenario': '4', 'clk': 1, 'rst': 0, 'a': 255, 'b': 0, 'y': 0}
|
||||
Passed; vector: {'check_en': True, 'scenario': '5', 'clk': 1, 'rst': 0, 'a': 255, 'b': 255, 'y': 65025}
|
||||
Passed; vector: {'check_en': True, 'scenario': '6', 'clk': 1, 'rst': 0, 'a': 128, 'b': 128, 'y': 16384}
|
||||
Passed; vector: {'check_en': True, 'scenario': '7', 'clk': 1, 'rst': 0, 'a': 1, 'b': 128, 'y': 128}
|
||||
Passed; vector: {'check_en': True, 'scenario': '8', 'clk': 1, 'rst': 0, 'a': 128, 'b': 1, 'y': 128}
|
||||
Passed; vector: {'check_en': True, 'scenario': '9', 'clk': 1, 'rst': 0, 'a': 10, 'b': 11, 'y': 110}
|
||||
Passed; vector: {'check_en': True, 'scenario': '10', 'clk': 1, 'rst': 0, 'a': 127, 'b': 127, 'y': 16129}
|
||||
Passed; vector: {'check_en': True, 'scenario': '11', 'clk': 1, 'rst': 0, 'a': 15, 'b': 240, 'y': 3600}
|
||||
Passed; vector: {'check_en': True, 'scenario': '12', 'clk': 1, 'rst': 0, 'a': 0, 'b': 0, 'y': 0}
|
||||
Passed; vector: {'check_en': True, 'scenario': '13', 'clk': 1, 'rst': 0, 'a': 15, 'b': 15, 'y': 225}
|
||||
Passed; vector: {'check_en': True, 'scenario': '14', 'clk': 1, 'rst': 0, 'a': 14, 'b': 14, 'y': 196}
|
||||
Passed; vector: {'check_en': True, 'scenario': '15', 'clk': 1, 'rst': 0, 'a': 13, 'b': 13, 'y': 169}
|
||||
[]
|
||||
|
||||
|
||||
###error:
|
||||
|
||||
|
||||
@@ -0,0 +1,2 @@
|
||||
example_mul.v
|
||||
example_mul_tb.v
|
||||
10
output/example_mul/example_mul/DUT.v
Normal file
10
output/example_mul/example_mul/DUT.v
Normal file
@@ -0,0 +1,10 @@
|
||||
|
||||
// Combinational 8x8 -> 16-bit unsigned multiplier.
// clk and rst are present on the port list but unused by the logic:
// y follows a * b immediately (no sequential elements in this module).
module example(
    input clk,
    input rst,
    input [7:0] a,
    input [7:0] b,
    output [15:0] y
);
    assign y = a * b;  // max product 255*255 = 65025 fits in 16 bits
endmodule
|
||||
82
output/example_mul/example_mul/final_TB.py
Normal file
82
output/example_mul/example_mul/final_TB.py
Normal file
@@ -0,0 +1,82 @@
|
||||
class GoldenDUT:
    """Reference (golden) model of the 8x8 -> 16-bit multiplier DUT."""

    BYTE_MASK = 0xFF    # operands are 8 bits wide
    WORD_MASK = 0xFFFF  # product is 16 bits wide

    def __init__(self):
        # Internal state registers; power-on state mirrors an asserted reset.
        self.a_reg = 0
        self.b_reg = 0
        self.y_reg = 0
        self.rst_reg = 1

    def load(self, signal_vector):
        """Advance one clock cycle: latch the inputs and update the state.

        A call to this method represents one rising clock edge; there is
        no explicit clock signal in the vector.
        """
        self.rst_reg = signal_vector['rst']
        self.a_reg = signal_vector['a'] & self.BYTE_MASK
        self.b_reg = signal_vector['b'] & self.BYTE_MASK
        if self.rst_reg == 1:
            # Reset overrides the latched operands and clears the product.
            self.a_reg = 0
            self.b_reg = 0
            self.y_reg = 0
        else:
            self.y_reg = self.multiply_8bit(self.a_reg, self.b_reg)

    def check(self, signal_vector):
        """Compare the DUT's observed 'y' against the expected product.

        Returns True on a match; otherwise prints a diagnostic line and
        returns False.
        """
        observed_y = signal_vector['y'] & self.WORD_MASK
        expected_y = self.multiply_8bit(self.a_reg, self.b_reg)
        if expected_y == observed_y:
            return True
        print(f"Scenario: {signal_vector['scenario']}, expected: y=0x{expected_y:04X}, observed: y=0x{observed_y:04X}")
        return False

    def multiply_8bit(self, a, b):
        """Return (a & 0xFF) * (b & 0xFF), truncated to 16 bits."""
        return ((a & self.BYTE_MASK) * (b & self.BYTE_MASK)) & self.WORD_MASK
|
||||
|
||||
def check_dut(vectors_in):
    """Replay all signal vectors through the golden model.

    Vectors flagged with check_en are compared against the golden model's
    prediction BEFORE the vector is loaded as the next cycle's stimulus
    (check() therefore sees the state latched on the previous cycle).
    Returns the list of failing scenario identifiers.
    """
    model = GoldenDUT()
    failures = []
    for vec in vectors_in:
        if vec["check_en"]:
            if model.check(vec):
                print(f"Passed; vector: {vec}")
            else:
                print(f"Failed; vector: {vec}")
                failures.append(vec["scenario"])
        # Every vector advances the model by one cycle, checked or not.
        model.load(vec)
    return failures
|
||||
|
||||
def SignalTxt_to_dictlist(txt: str):
    """Parse testbench log text into a list of signal-vector dicts.

    Lines starting with "[check]" become vectors with check_en=True;
    lines starting with "scenario" become check_en=False; anything else
    is skipped. The scenario id is kept as a string; every other field
    is converted to int, with unknown values (x/X/z) mapped to 0.
    """
    signals = []
    for raw_line in txt.strip().split("\n"):
        if raw_line.startswith("[check]"):
            entry = {"check_en": True}
            raw_line = raw_line[len("[check]"):]
        elif raw_line.startswith("scenario"):
            entry = {"check_en": False}
        else:
            continue  # ignore unrelated log lines
        for field in raw_line.strip().split(", "):
            if "scenario" in field:
                # "scenario:   N" -> keep N as a compact string id.
                entry["scenario"] = field.split(": ")[1].replace(" ", "")
            else:
                parts = field.split(" = ")
                key, value = parts[0], parts[1]
                has_unknown = ("x" in value) or ("X" in value) or ("z" in value)
                entry[key] = 0 if has_unknown else int(value)
        signals.append(entry)
    return signals
|
||||
# Entry point: read the simulator dump, replay it through the golden
# model, and print the list of failing scenarios ([] means all passed).
with open("TBout.txt", "r") as f:
    txt = f.read()

vectors_in = SignalTxt_to_dictlist(txt)
tb_pass = check_dut(vectors_in)
print(tb_pass)
|
||||
138
output/example_mul/example_mul/final_TB.v
Normal file
138
output/example_mul/example_mul/final_TB.v
Normal file
@@ -0,0 +1,138 @@
|
||||
`timescale 1ns / 1ps
// Testbench for the 8x8 multiplier DUT (module "example").
// Drives 15 stimulus scenarios and writes every sample to "TBout.txt".
// Lines tagged "[check]" are parsed and verified by the Python
// golden-model checker; the untagged line of each pair records the
// pre-edge state for debugging.
module testbench;
    reg clk;
    reg rst;
    reg [7:0] a;     // multiplicand
    reg [7:0] b;     // multiplier
    wire [15:0] y;   // product observed from the DUT

    integer file, scenario;
    // DUT instantiation
    example DUT (
        .clk(clk),
        .rst(rst),
        .a(a),
        .b(b),
        .y(y)
    );
    // Clock generation (10 ns period)
    initial begin
        clk = 0;
        forever #5 clk = ~clk;
    end

    // Open the log consumed by the Python checker.
    initial begin
        file = $fopen("TBout.txt", "w");
    end
    // Scenario Based Test
    initial begin
        // scenario 1: reset asserted, zero operands
        scenario = 1;
        rst = 1; a = 8'b0000_0000; b = 8'b0000_0000;
        $fdisplay(file, "scenario: %d, clk = %d, rst = %d, a = %d, b = %d, y = %d", scenario, clk, rst, a, b, y);
        #10;
        $fdisplay(file, "[check]scenario: %d, clk = %d, rst = %d, a = %d, b = %d, y = %d", scenario, clk, rst, a, b, y); #10;

        // scenario 2: reset released, 1 x 1
        scenario = 2;
        rst = 0; a = 8'b0000_0001; b = 8'b0000_0001;
        $fdisplay(file, "scenario: %d, clk = %d, rst = %d, a = %d, b = %d, y = %d", scenario, clk, rst, a, b, y);
        #10;
        $fdisplay(file, "[check]scenario: %d, clk = %d, rst = %d, a = %d, b = %d, y = %d", scenario, clk, rst, a, b, y); #10;

        // scenario 3: 0 x 255
        scenario = 3;
        a = 8'b0000_0000; b = 8'b1111_1111;
        $fdisplay(file, "scenario: %d, clk = %d, rst = %d, a = %d, b = %d, y = %d", scenario, clk, rst, a, b, y);
        #10;
        $fdisplay(file, "[check]scenario: %d, clk = %d, rst = %d, a = %d, b = %d, y = %d", scenario, clk, rst, a, b, y); #10;

        // scenario 4: 255 x 0
        scenario = 4;
        a = 8'b1111_1111; b = 8'b0000_0000;
        $fdisplay(file, "scenario: %d, clk = %d, rst = %d, a = %d, b = %d, y = %d", scenario, clk, rst, a, b, y);
        #10;
        $fdisplay(file, "[check]scenario: %d, clk = %d, rst = %d, a = %d, b = %d, y = %d", scenario, clk, rst, a, b, y); #10;

        // scenario 5: 255 x 255 (maximum product, 65025)
        scenario = 5;
        a = 8'b1111_1111; b = 8'b1111_1111;
        $fdisplay(file, "scenario: %d, clk = %d, rst = %d, a = %d, b = %d, y = %d", scenario, clk, rst, a, b, y);
        #10;
        $fdisplay(file, "[check]scenario: %d, clk = %d, rst = %d, a = %d, b = %d, y = %d", scenario, clk, rst, a, b, y); #10;

        // scenario 6: 128 x 128 (MSB-only operands)
        scenario = 6;
        a = 8'b1000_0000; b = 8'b1000_0000;
        $fdisplay(file, "scenario: %d, clk = %d, rst = %d, a = %d, b = %d, y = %d", scenario, clk, rst, a, b, y);
        #10;
        $fdisplay(file, "[check]scenario: %d, clk = %d, rst = %d, a = %d, b = %d, y = %d", scenario, clk, rst, a, b, y); #10;

        // scenario 7: 1 x 128
        scenario = 7;
        a = 8'b0000_0001; b = 8'b1000_0000;
        $fdisplay(file, "scenario: %d, clk = %d, rst = %d, a = %d, b = %d, y = %d", scenario, clk, rst, a, b, y);
        #10;
        $fdisplay(file, "[check]scenario: %d, clk = %d, rst = %d, a = %d, b = %d, y = %d", scenario, clk, rst, a, b, y); #10;

        // scenario 8: 128 x 1 (commutativity spot-check of scenario 7)
        scenario = 8;
        a = 8'b1000_0000; b = 8'b0000_0001;
        $fdisplay(file, "scenario: %d, clk = %d, rst = %d, a = %d, b = %d, y = %d", scenario, clk, rst, a, b, y);
        #10;
        $fdisplay(file, "[check]scenario: %d, clk = %d, rst = %d, a = %d, b = %d, y = %d", scenario, clk, rst, a, b, y); #10;

        // scenario 9: 10 x 11 (typical mid-range values)
        scenario = 9;
        a = 8'b0000_1010; b = 8'b0000_1011;
        $fdisplay(file, "scenario: %d, clk = %d, rst = %d, a = %d, b = %d, y = %d", scenario, clk, rst, a, b, y);
        #10;
        $fdisplay(file, "[check]scenario: %d, clk = %d, rst = %d, a = %d, b = %d, y = %d", scenario, clk, rst, a, b, y); #10;

        // scenario 10: 127 x 127
        scenario = 10;
        a = 8'b0111_1111; b = 8'b0111_1111;
        $fdisplay(file, "scenario: %d, clk = %d, rst = %d, a = %d, b = %d, y = %d", scenario, clk, rst, a, b, y);
        #10;
        $fdisplay(file, "[check]scenario: %d, clk = %d, rst = %d, a = %d, b = %d, y = %d", scenario, clk, rst, a, b, y); #10;

        // scenario 11: 15 x 240 (low nibble x high nibble)
        scenario = 11;
        a = 8'b0000_1111; b = 8'b1111_0000;
        $fdisplay(file, "scenario: %d, clk = %d, rst = %d, a = %d, b = %d, y = %d", scenario, clk, rst, a, b, y);
        #10;
        $fdisplay(file, "[check]scenario: %d, clk = %d, rst = %d, a = %d, b = %d, y = %d", scenario, clk, rst, a, b, y); #10;

        // scenario 12: 0 x 0 with reset deasserted
        scenario = 12;
        a = 8'b0000_0000; b = 8'b0000_0000;
        $fdisplay(file, "scenario: %d, clk = %d, rst = %d, a = %d, b = %d, y = %d", scenario, clk, rst, a, b, y);
        #10;
        $fdisplay(file, "[check]scenario: %d, clk = %d, rst = %d, a = %d, b = %d, y = %d", scenario, clk, rst, a, b, y); #10;

        // scenario 13: 15 x 15
        scenario = 13;
        a = 8'b0000_1111; b = 8'b0000_1111;
        $fdisplay(file, "scenario: %d, clk = %d, rst = %d, a = %d, b = %d, y = %d", scenario, clk, rst, a, b, y);
        #10;
        $fdisplay(file, "[check]scenario: %d, clk = %d, rst = %d, a = %d, b = %d, y = %d", scenario, clk, rst, a, b, y); #10;

        // scenario 14: 14 x 14
        scenario = 14;
        a = 8'b0000_1110; b = 8'b0000_1110;
        $fdisplay(file, "scenario: %d, clk = %d, rst = %d, a = %d, b = %d, y = %d", scenario, clk, rst, a, b, y);
        #10;
        $fdisplay(file, "[check]scenario: %d, clk = %d, rst = %d, a = %d, b = %d, y = %d", scenario, clk, rst, a, b, y); #10;

        // scenario 15: 13 x 13 (final sample; no trailing delay)
        scenario = 15;
        a = 8'b0000_1101; b = 8'b0000_1101;
        $fdisplay(file, "scenario: %d, clk = %d, rst = %d, a = %d, b = %d, y = %d", scenario, clk, rst, a, b, y);
        #10;
        $fdisplay(file, "[check]scenario: %d, clk = %d, rst = %d, a = %d, b = %d, y = %d", scenario, clk, rst, a, b, y);

        $fclose(file);
        $finish;
    end

endmodule
|
||||
29
output/example_mul/example_mul/run_info.json
Normal file
29
output/example_mul/example_mul/run_info.json
Normal file
@@ -0,0 +1,29 @@
|
||||
{
|
||||
"task_id": "example_mul",
|
||||
"task_number": 1,
|
||||
"time": 372.62,
|
||||
"prompt_tokens": 8593,
|
||||
"completion_tokens": 6050,
|
||||
"token_cost": 0.5348599999999999,
|
||||
"ERROR(incomplete)": false,
|
||||
"op_record": [
|
||||
"gen",
|
||||
"syncheck",
|
||||
"eval"
|
||||
],
|
||||
"reboot_times": 0,
|
||||
"max_iter": 10,
|
||||
"coverage": 0.0,
|
||||
"circuit_type": "SEQ",
|
||||
"checklist_worked": false,
|
||||
"scenario_num": 15,
|
||||
"Eval0_pass": true,
|
||||
"Eval0_iv_pass": true,
|
||||
"debug_iter_iv": 0,
|
||||
"iv_runing_time": 0.03,
|
||||
"Eval0_py_pass": true,
|
||||
"debug_iter_py": 0,
|
||||
"py_runing_time": 0.02,
|
||||
"Eval1_pass": true,
|
||||
"full_pass": null
|
||||
}
|
||||
8
output/example_mul/example_mul/run_info_short.json
Normal file
8
output/example_mul/example_mul/run_info_short.json
Normal file
@@ -0,0 +1,8 @@
|
||||
{
|
||||
"task_id": "example_mul",
|
||||
"eval_progress": "Eval1 - passed",
|
||||
"TB_corrected": null,
|
||||
"reboot_times": 0,
|
||||
"time": 372.62,
|
||||
"cost": 0.5348599999999999
|
||||
}
|
||||
16
output/example_mul/example_mul/task_log.log
Normal file
16
output/example_mul/example_mul/task_log.log
Normal file
@@ -0,0 +1,16 @@
|
||||
2026-03-30 15:50:24 | INFO | [example_mul] [TBgen] stage_0 ends (5.68s used)
|
||||
2026-03-30 15:50:48 | INFO | [example_mul] [TBgen] stage_1 ends (24.62s used)
|
||||
2026-03-30 15:51:23 | INFO | [example_mul] [TBgen] stage_2 ends (34.70s used)
|
||||
2026-03-30 15:51:53 | INFO | [example_mul] [TBgen] stage_3 ends (29.57s used)
|
||||
2026-03-30 15:53:48 | INFO | [example_mul] [TBgen] stage_4 ends (115.37s used)
|
||||
2026-03-30 15:53:48 | INFO | [example_mul] [TBgen] stage_checklist ends (0.00s used)
|
||||
2026-03-30 15:56:14 | INFO | [example_mul] [TBgen] stage_4b ends (146.02s used)
|
||||
2026-03-30 15:56:31 | INFO | [example_mul] [TBgen] stage_5 ends (16.58s used)
|
||||
2026-03-30 15:56:31 | INFO | [example_mul]
|
||||
2026-03-30 15:56:31 | INFO | [example_mul] [TBsim] iverilog compilation : passed!
|
||||
2026-03-30 15:56:31 | INFO | [example_mul] [TBsim] python simulation : passed!
|
||||
2026-03-30 15:56:31 | INFO | [example_mul] [TBsim] TBsim finished : True!
|
||||
2026-03-30 15:56:31 | INFO | [example_mul]
|
||||
2026-03-30 15:56:31 | INFO | [example_mul] [TBeval] Eval 1: Golden RTL checking begins
|
||||
2026-03-30 15:56:31 | POSITIVE | [example_mul] [TBeval] Eval 1: Golden RTL checking passed!
|
||||
2026-03-30 15:56:31 | INFO | [example_mul]
|
||||
138
output/example_mul_tb.v
Normal file
138
output/example_mul_tb.v
Normal file
@@ -0,0 +1,138 @@
|
||||
`timescale 1ns / 1ps
// Testbench for the 8x8 multiplier DUT (module "example").
// Drives 15 stimulus scenarios and writes every sample to "TBout.txt".
// Lines tagged "[check]" are parsed and verified by the Python
// golden-model checker; the untagged line of each pair records the
// pre-edge state for debugging.
module testbench;
    reg clk;
    reg rst;
    reg [7:0] a;     // multiplicand
    reg [7:0] b;     // multiplier
    wire [15:0] y;   // product observed from the DUT

    integer file, scenario;
    // DUT instantiation
    example DUT (
        .clk(clk),
        .rst(rst),
        .a(a),
        .b(b),
        .y(y)
    );
    // Clock generation (10 ns period)
    initial begin
        clk = 0;
        forever #5 clk = ~clk;
    end

    // Open the log consumed by the Python checker.
    initial begin
        file = $fopen("TBout.txt", "w");
    end
    // Scenario Based Test
    initial begin
        // scenario 1: reset asserted, zero operands
        scenario = 1;
        rst = 1; a = 8'b0000_0000; b = 8'b0000_0000;
        $fdisplay(file, "scenario: %d, clk = %d, rst = %d, a = %d, b = %d, y = %d", scenario, clk, rst, a, b, y);
        #10;
        $fdisplay(file, "[check]scenario: %d, clk = %d, rst = %d, a = %d, b = %d, y = %d", scenario, clk, rst, a, b, y); #10;

        // scenario 2: reset released, 1 x 1
        scenario = 2;
        rst = 0; a = 8'b0000_0001; b = 8'b0000_0001;
        $fdisplay(file, "scenario: %d, clk = %d, rst = %d, a = %d, b = %d, y = %d", scenario, clk, rst, a, b, y);
        #10;
        $fdisplay(file, "[check]scenario: %d, clk = %d, rst = %d, a = %d, b = %d, y = %d", scenario, clk, rst, a, b, y); #10;

        // scenario 3: 0 x 255
        scenario = 3;
        a = 8'b0000_0000; b = 8'b1111_1111;
        $fdisplay(file, "scenario: %d, clk = %d, rst = %d, a = %d, b = %d, y = %d", scenario, clk, rst, a, b, y);
        #10;
        $fdisplay(file, "[check]scenario: %d, clk = %d, rst = %d, a = %d, b = %d, y = %d", scenario, clk, rst, a, b, y); #10;

        // scenario 4: 255 x 0
        scenario = 4;
        a = 8'b1111_1111; b = 8'b0000_0000;
        $fdisplay(file, "scenario: %d, clk = %d, rst = %d, a = %d, b = %d, y = %d", scenario, clk, rst, a, b, y);
        #10;
        $fdisplay(file, "[check]scenario: %d, clk = %d, rst = %d, a = %d, b = %d, y = %d", scenario, clk, rst, a, b, y); #10;

        // scenario 5: 255 x 255 (maximum product, 65025)
        scenario = 5;
        a = 8'b1111_1111; b = 8'b1111_1111;
        $fdisplay(file, "scenario: %d, clk = %d, rst = %d, a = %d, b = %d, y = %d", scenario, clk, rst, a, b, y);
        #10;
        $fdisplay(file, "[check]scenario: %d, clk = %d, rst = %d, a = %d, b = %d, y = %d", scenario, clk, rst, a, b, y); #10;

        // scenario 6: 128 x 128 (MSB-only operands)
        scenario = 6;
        a = 8'b1000_0000; b = 8'b1000_0000;
        $fdisplay(file, "scenario: %d, clk = %d, rst = %d, a = %d, b = %d, y = %d", scenario, clk, rst, a, b, y);
        #10;
        $fdisplay(file, "[check]scenario: %d, clk = %d, rst = %d, a = %d, b = %d, y = %d", scenario, clk, rst, a, b, y); #10;

        // scenario 7: 1 x 128
        scenario = 7;
        a = 8'b0000_0001; b = 8'b1000_0000;
        $fdisplay(file, "scenario: %d, clk = %d, rst = %d, a = %d, b = %d, y = %d", scenario, clk, rst, a, b, y);
        #10;
        $fdisplay(file, "[check]scenario: %d, clk = %d, rst = %d, a = %d, b = %d, y = %d", scenario, clk, rst, a, b, y); #10;

        // scenario 8: 128 x 1 (commutativity spot-check of scenario 7)
        scenario = 8;
        a = 8'b1000_0000; b = 8'b0000_0001;
        $fdisplay(file, "scenario: %d, clk = %d, rst = %d, a = %d, b = %d, y = %d", scenario, clk, rst, a, b, y);
        #10;
        $fdisplay(file, "[check]scenario: %d, clk = %d, rst = %d, a = %d, b = %d, y = %d", scenario, clk, rst, a, b, y); #10;

        // scenario 9: 10 x 11 (typical mid-range values)
        scenario = 9;
        a = 8'b0000_1010; b = 8'b0000_1011;
        $fdisplay(file, "scenario: %d, clk = %d, rst = %d, a = %d, b = %d, y = %d", scenario, clk, rst, a, b, y);
        #10;
        $fdisplay(file, "[check]scenario: %d, clk = %d, rst = %d, a = %d, b = %d, y = %d", scenario, clk, rst, a, b, y); #10;

        // scenario 10: 127 x 127
        scenario = 10;
        a = 8'b0111_1111; b = 8'b0111_1111;
        $fdisplay(file, "scenario: %d, clk = %d, rst = %d, a = %d, b = %d, y = %d", scenario, clk, rst, a, b, y);
        #10;
        $fdisplay(file, "[check]scenario: %d, clk = %d, rst = %d, a = %d, b = %d, y = %d", scenario, clk, rst, a, b, y); #10;

        // scenario 11: 15 x 240 (low nibble x high nibble)
        scenario = 11;
        a = 8'b0000_1111; b = 8'b1111_0000;
        $fdisplay(file, "scenario: %d, clk = %d, rst = %d, a = %d, b = %d, y = %d", scenario, clk, rst, a, b, y);
        #10;
        $fdisplay(file, "[check]scenario: %d, clk = %d, rst = %d, a = %d, b = %d, y = %d", scenario, clk, rst, a, b, y); #10;

        // scenario 12: 0 x 0 with reset deasserted
        scenario = 12;
        a = 8'b0000_0000; b = 8'b0000_0000;
        $fdisplay(file, "scenario: %d, clk = %d, rst = %d, a = %d, b = %d, y = %d", scenario, clk, rst, a, b, y);
        #10;
        $fdisplay(file, "[check]scenario: %d, clk = %d, rst = %d, a = %d, b = %d, y = %d", scenario, clk, rst, a, b, y); #10;

        // scenario 13: 15 x 15
        scenario = 13;
        a = 8'b0000_1111; b = 8'b0000_1111;
        $fdisplay(file, "scenario: %d, clk = %d, rst = %d, a = %d, b = %d, y = %d", scenario, clk, rst, a, b, y);
        #10;
        $fdisplay(file, "[check]scenario: %d, clk = %d, rst = %d, a = %d, b = %d, y = %d", scenario, clk, rst, a, b, y); #10;

        // scenario 14: 14 x 14
        scenario = 14;
        a = 8'b0000_1110; b = 8'b0000_1110;
        $fdisplay(file, "scenario: %d, clk = %d, rst = %d, a = %d, b = %d, y = %d", scenario, clk, rst, a, b, y);
        #10;
        $fdisplay(file, "[check]scenario: %d, clk = %d, rst = %d, a = %d, b = %d, y = %d", scenario, clk, rst, a, b, y); #10;

        // scenario 15: 13 x 13 (final sample; no trailing delay)
        scenario = 15;
        a = 8'b0000_1101; b = 8'b0000_1101;
        $fdisplay(file, "scenario: %d, clk = %d, rst = %d, a = %d, b = %d, y = %d", scenario, clk, rst, a, b, y);
        #10;
        $fdisplay(file, "[check]scenario: %d, clk = %d, rst = %d, a = %d, b = %d, y = %d", scenario, clk, rst, a, b, y);

        $fclose(file);
        $finish;
    end

endmodule
|
||||
21
prompt_scripts/__init__.py
Normal file
21
prompt_scripts/__init__.py
Normal file
@@ -0,0 +1,21 @@
|
||||
"""
|
||||
Description : description
|
||||
Author : Ruidi Qiu (r.qiu@tum.de)
|
||||
Time : 2024/3/23 11:30:00
|
||||
LastEdited : 2024/7/24 11:43:13
|
||||
"""
|
||||
|
||||
from .base_script import BaseScript, BaseScriptStage
|
||||
from .script_pychecker import WF_pychecker
|
||||
from .script_directgen import WF_directgen
|
||||
|
||||
# Registry mapping a workflow name (as used in config files) to its
# script class; looked up by get_script() below.
SCRIPTS_SELECTER = {
    "pychecker": WF_pychecker,
    "directgen": WF_directgen
}
|
||||
|
||||
def get_script(script_name:str) -> BaseScript:
    """Resolve *script_name* to its workflow script class.

    Raises ValueError when the name is not registered in SCRIPTS_SELECTER.
    """
    if script_name not in SCRIPTS_SELECTER:
        raise ValueError(f"script name {script_name} is not supported")
    return SCRIPTS_SELECTER[script_name]
|
||||
BIN
prompt_scripts/__pycache__/__init__.cpython-312.pyc
Normal file
BIN
prompt_scripts/__pycache__/__init__.cpython-312.pyc
Normal file
Binary file not shown.
BIN
prompt_scripts/__pycache__/base_script.cpython-312.pyc
Normal file
BIN
prompt_scripts/__pycache__/base_script.cpython-312.pyc
Normal file
Binary file not shown.
BIN
prompt_scripts/__pycache__/script_directgen.cpython-312.pyc
Normal file
BIN
prompt_scripts/__pycache__/script_directgen.cpython-312.pyc
Normal file
Binary file not shown.
BIN
prompt_scripts/__pycache__/script_pychecker.cpython-312.pyc
Normal file
BIN
prompt_scripts/__pycache__/script_pychecker.cpython-312.pyc
Normal file
Binary file not shown.
Binary file not shown.
BIN
prompt_scripts/__pycache__/script_pychecker_SEQ.cpython-312.pyc
Normal file
BIN
prompt_scripts/__pycache__/script_pychecker_SEQ.cpython-312.pyc
Normal file
Binary file not shown.
BIN
prompt_scripts/__pycache__/utils.cpython-312.pyc
Normal file
BIN
prompt_scripts/__pycache__/utils.cpython-312.pyc
Normal file
Binary file not shown.
319
prompt_scripts/base_script.py
Normal file
319
prompt_scripts/base_script.py
Normal file
@@ -0,0 +1,319 @@
|
||||
"""
|
||||
Description : the base script for prompt scripts
|
||||
Author : Ruidi Qiu (r.qiu@tum.de)
|
||||
Time : 2024/3/22 10:59:34
|
||||
LastEdited : 2024/8/12 23:34:35
|
||||
"""
|
||||
|
||||
from LLM_call import llm_call, extract_code, message_to_conversation
|
||||
from utils.utils import Timer, get_time
|
||||
from loader_saver import autologger
|
||||
import os
|
||||
import loader_saver as ls
|
||||
import copy
|
||||
|
||||
# System prompt sent with every llm_call unless a stage overrides it.
# NOTE(review): the typos "alraedy"/"fullfill" are part of the live prompt
# text; fixing them would change the bytes sent to the model, so they are
# left untouched here.
DEFAULT_SYSMESSAGE = "You are the strongest AI in the world. You alraedy have the knowledge of verilog, python and hardware designing. Do not save words by discarding information. I will tip you 200$ if you can fullfill the tasks I give you."

# Markdown fence markers used to delimit generated Verilog in LLM responses.
IDENTIFIER = {
    "tb_start" : "```verilog",
    "tb_end" : "```"
}

# Skeleton shown to the LLM illustrating the expected fenced-testbench format.
TESTBENCH_TEMPLATE = """%s
`timescale 1ns / 1ps
(more verilog testbench code here...)
endmodule
%s""" % (IDENTIFIER["tb_start"], IDENTIFIER["tb_end"])

__all__ = ["BaseScriptStage", "BaseScript"]
|
||||
|
||||
class BaseScriptStage:
    """
    - the base stage for prompt scripts
    - the functions that triggered when running:
        - make_prompt: make the prompt for gpt (must be implemented)
        - call_gpt: call gpt
        - postprocessing: postprocessing the response (default is empty)
    - gptkwargs: the kwargs for llm_call
        - gpt_model: the model name
        - api_key_path: the path of gpt key
        - sysmessage: (can be ignored) the system message
        - json_mode: (can be ignored) the json mode
        - temperature: (can be ignored) the temperature
    """
    def __init__(self, stage_name, **gptkwargs) -> None:
        self.stage_name = stage_name
        # required LLM settings (KeyError here means the caller forgot them)
        self.gpt_model = gptkwargs["gpt_model"]
        self.api_key_path = gptkwargs["api_key_path"]
        # optional LLM settings
        self.system_message = gptkwargs.get("system_message", DEFAULT_SYSMESSAGE)
        self.json_mode = gptkwargs.get("json_mode", None)
        self.temperature = gptkwargs.get("temperature", None)
        # bookkeeping filled in while the stage runs
        self.time = 0.0              # wall-clock seconds of the last run (set in __call__)
        self.prompt = ""             # built by make_prompt
        self.response = ""           # raw LLM text, set by call_gpt
        self.print_message = ""      # log summary line, built by record()
        self.gptinfo = {}            # metadata dict returned by llm_call (messages, usage, ...)
        self.conversation_message = ""
        self.conversation_file_suffix = ".txt"
        self.reboot = False
        self.circuit_type = None # "CMB" or "SEQ"; pychecker will use this; you should set it in make_and_run_stages;

    @property
    def will_gen_TB(self):
        # True when a subclass produced a testbench (i.e. set TB_code_out)
        if hasattr(self, "TB_code_out"):
            return True
        else:
            return False

    @property
    def will_gen_Pychecker(self):
        # True when a subclass produced a Python checker (set Pychecker_code_out)
        if hasattr(self, "Pychecker_code_out"):
            return True
        else:
            return False

    def __call__(self, *args, **kwargs):
        """Run the stage under a timer, then build the log summary line."""
        with Timer(print_en=False) as t:
            self.run(*args, **kwargs)
        self.time = t.interval
        self.record()
        pass

    def run(self):
        """Default pipeline: build prompt -> query LLM -> postprocess."""
        self.make_prompt()
        self.call_gpt()
        self.postprocessing()

    def make_prompt(self):
        # Subclasses must build self.prompt here.
        raise NotImplementedError

    def call_gpt(self):
        """
        actually it should be call_llm, but I dont want to modify the old code
        """
        gpt_messages = [{"role": "user", "content": self.prompt}]
        # forward only explicitly-set optional parameters to llm_call
        other_kwargs = {}
        if self.temperature is not None:
            other_kwargs["temperature"] = self.temperature
        if self.json_mode is not None:
            other_kwargs["json_mode"] = self.json_mode
        self.response, self.gptinfo = llm_call(input_messages=gpt_messages, model=self.gpt_model, api_key_path=self.api_key_path, system_message=self.system_message, **other_kwargs)
        # accumulate a human-readable transcript for save_conversation
        self.conversation_message += message_to_conversation(self.gptinfo["messages"])

    def postprocessing(self):
        """empty function"""
        pass

    def record(self):
        # e.g. "stage_1 ends (24.62s used)"
        self.print_message = "%s ends (%.2fs used)" % (self.stage_name, self.time)

    # tools
    def save_log(self, config:object):
        """Append this stage's summary line to the task log."""
        ls.save_log_line(self.print_message, config)

    def save_conversation(self, save_dir:str):
        """This function will save the conversation to a file in save_dir. It will be called in stage_operation of BaseScript"""
        if save_dir.endswith("/"):
            save_dir = save_dir[:-1]
        file_name = self.stage_name + self.conversation_file_suffix
        path = os.path.join(save_dir, file_name)
        with open(path, "w") as f:
            f.write(self.conversation_message)

    # def reboot(self):
    #     self.reboot_en = True
    #     self.conversation_message = ""
    #     self.time = 0.0
    #     self.prompt = ""
    #     self.response = ""
    #     self.print_message = ""
    #     self.gptinfo = {}
    #     self.conversation_message = ""
    #     self.conversation_file_suffix = ".txt"
    #     # TODO in your script, you should also reset the custom attributes

    def extract_code(self, text, code_type):
        """
        #### function:
        - extract code from text
        #### input:
        - text: str, gpt's response
        - code_type: str, like "verilog"
        #### output:
        - list of found code blocks
        """
        return extract_code(text=text, code_type=code_type)

    def update_tokens(self, tokens):
        # Add this stage's token usage into the caller's running totals.
        tokens["prompt"] += self.gptinfo.get("usage", {}).get("prompt_tokens", 0) # in case gpt has not been called
        tokens["completion"] += self.gptinfo.get("usage", {}).get("completion_tokens", 0)
        return tokens

    def add_prompt_line(self, prompt):
        """Append one line (with trailing newline) to the prompt being built."""
        self.prompt += prompt + "\n"
|
||||
|
||||
class BaseScript:
    """
    the base class for prompt scripts
    - the functions that triggered when running:
        - make_and_run_stages: make and run stages (must be implemented)
        - postprocessing: postprocessing the response (default is empty)
        - save_codes: save the generated codes
    """
    def __init__(self, prob_data:dict, task_dir:str, config:object) -> None:
        self.stages = []  # stages executed in the normal (non-reboot) flow
        self.task_dir = task_dir if not task_dir.endswith("/") else task_dir[:-1]  # normalized: no trailing slash
        self.config = config
        # kwargs forwarded to every stage's llm_call
        self.gptkwargs = {
            "gpt_model": self.config.gpt.model,
            "api_key_path": self.config.gpt.key_path
        }
        self.prob_data = prob_data
        self.TB_code = ""
        self.TB_code_dir = os.path.join(self.task_dir, "TBgen_codes") # pychecker codes will be saved in the same directory
        self.TB_code_name = prob_data["task_id"] + "_tb.v"
        self.Pychecker_code = "" # only for pychecker scripts
        self.Pychecker_code_name = prob_data["task_id"] + "_tb.py"
        self.empty_DUT_name = prob_data["task_id"] + ".v"
        # stub DUT: just the provided module header closed with endmodule
        self.empty_DUT = prob_data["header"] + "\n\nendmodule\n"
        self.stages_gencode = [] # includes all the stages that generate code, will be used in rebooting generation; see more in stage operation
        self.tokens = {"prompt": 0, "completion": 0}
        self.time = 0.0
        self.reboot_idx = -1 # start from 0; -1 means no reboot. the reboot index will be increased by 1 after each reboot
        self.reboot_stages = [] #[reboot_stages_iter_0, reboot_stages_iter_1, ...]
        self.reboot_mode = "TB" # "TB" or "PY"; modified by run_reboot; checked in make_and_run_reboot_stages if needed
        self.py_debug_focus = False # if True, when debug python, will only send the upper part (no check_dut); specific for pychecker
        self.checklist_worked = False # if True, the scenario checklist did help our work. This is a flag for further analysis
        os.makedirs(self.TB_code_dir, exist_ok=True)
        self.scenario_num = None
        self.scenario_dict = None

    @property
    def Pychecker_en(self):
        # True once a stage has produced Python-checker code
        if self.Pychecker_code != "":
            return True
        else:
            return False

    @property
    def Pychecker_code_dir(self):
        # Python checker is saved alongside the generated testbench
        return self.TB_code_dir

    # NOTE(review): __call__ is defined twice in this class; this first
    # definition is shadowed by the later one (both are behaviorally
    # identical, so nothing breaks).
    def __call__(self, *args, **kwargs):
        self.run(*args, **kwargs)

    def run(self):
        """Full workflow: run stages, tally time/tokens, postprocess, save."""
        self.make_and_run_stages()
        self.compute_time_tokens()
        self.postprocessing()
        self.save_codes()

    def make_and_run_stages(self):
        """
        - in this function, you should make stages and run them
        - for example:
        ::

            stage1 = Stage1(**kwargs)
            self.stage_operation(stage1)
        """
        raise NotImplementedError("No make_and_run_stages: You should implement this function in your own script")

    def make_and_run_reboot_stages(self, debug_dir):
        # Subclasses that support rebooting must implement this.
        raise NotImplementedError("No reboot settings: You should implement this function in your own script")

    def postprocessing(self):
        """empty function"""
        pass

    def save_codes(self, codes_dir:str=None):
        """Write the generated testbench, stub DUT, and (if any) Python checker to codes_dir (defaults to self.TB_code_dir)."""
        if codes_dir is None:
            codes_dir = self.TB_code_dir
        os.makedirs(codes_dir, exist_ok=True)
        TB_code_path = os.path.join(codes_dir, self.TB_code_name)
        with open(TB_code_path, "w") as f:
            f.write(self.TB_code)
        empty_DUT_path = os.path.join(codes_dir, self.empty_DUT_name)
        with open(empty_DUT_path, "w") as f:
            f.write(self.empty_DUT)
        # save pychecker code if have
        if self.Pychecker_en:
            Pychecker_code_path = os.path.join(codes_dir, self.Pychecker_code_name)
            with open(Pychecker_code_path, "w") as f:
                f.write(self.Pychecker_code)

    def stage_operation(self, stage:BaseScriptStage, conversation_dir:str=None, reboot_en:bool=False):
        """
        - what to do on a stage after making it; will be called in make_and_run_stages
        - run, save stages and renew the generated codes of current wf
        """
        if conversation_dir is None:
            conversation_dir = self.task_dir
        stage()
        # reboot stages are tracked per reboot iteration, normal stages in self.stages
        if reboot_en:
            self.reboot_stages[self.reboot_idx].append(stage)
        else:
            self.stages.append(stage)
        stage.save_conversation(conversation_dir)
        stage.save_log(self.config)
        # pull any code the stage produced into the workflow's current state
        if stage.will_gen_TB:
            self.TB_code = stage.TB_code_out
        if stage.will_gen_Pychecker:
            self.Pychecker_code = stage.Pychecker_code_out
        # for checklist:
        if hasattr(stage, "TB_modified"): # this attr is in checklist_stage
            self.checklist_worked = stage.TB_modified
        # will automaticly add stages to self.stages_gencode
        # the rule is: this stage will generate TB or its previous stage is in self.stages_gencode; also, this stage is not reboot stage
        # if (stage.will_gen_TB or (len(self.stages_gencode) > 0 and self.stages_gencode[-1] in self.stages)) and not stage.reboot_en:
        #     self.stages_gencode.append(stage)

    def run_reboot(self, debug_dir, reboot_mode="TB"):
        """
        - regenerate the TB code
        """
        self.reboot_idx += 1
        self.reboot_stages.append([])
        self.TB_code = ""  # discard the previous testbench before regenerating
        self.reboot_mode = reboot_mode # this will be checked in make_and_run_reboot_stages if needed
        debug_dir = debug_dir[:-1] if debug_dir.endswith("/") else debug_dir
        # will be discarded by 28/04/2024
        # make and run stages
        # for stage in self.stages_gencode:
        #     new_stage = copy.deepcopy(stage)
        #     new_stage.stage_name = stage.stage_name + "_reboot" + str(self.reboot_idx)
        #     # new_stage.reboot()
        #     new_stage.reboot_en = True
        #     self.stage_operation(new_stage, debug_dir)
        #     reboot_stages_in_this_iter.append(new_stage)
        self.make_and_run_reboot_stages(debug_dir)
        # postprocessing
        self.compute_time_tokens(self.reboot_stages[self.reboot_idx])
        self.postprocessing()
        # save codes
        self.save_codes(debug_dir)

    def clear_time_tokens(self):
        """Reset the accumulated runtime and token counters."""
        self.time = 0.0
        self.tokens = {"prompt": 0, "completion": 0}

    def compute_time_tokens(self, stages=None):
        # Accumulates (does not reset) over the given stages; defaults to the
        # normal-flow stages. Call clear_time_tokens first for a fresh tally.
        if stages is None:
            stages = self.stages
        for stage in stages:
            self.time += stage.time
            self.tokens = stage.update_tokens(self.tokens)

    def save_log(self, line):
        # ls.save_log_line(line, self.config)
        autologger.info(line)

    # NOTE(review): duplicate of the earlier __call__; this definition wins.
    def __call__(self, *args, **kwargs):
        self.run(*args, **kwargs)
        pass

    def get_attr(self, attr_name:str):
        """Return the named attribute, or None when it does not exist."""
        if hasattr(self, attr_name):
            return getattr(self, attr_name)
        else:
            return None
|
||||
Binary file not shown.
288
prompt_scripts/legacy/script_RTLchecker0306.py
Normal file
288
prompt_scripts/legacy/script_RTLchecker0306.py
Normal file
@@ -0,0 +1,288 @@
|
||||
"""
|
||||
Description : original txt script: config/templates/script_template/DUT_stage_template_0306.txt
|
||||
Author : Ruidi Qiu (r.qiu@tum.de)
|
||||
Time : 2024/3/22 13:02:22
|
||||
LastEdited : 2024/7/24 19:53:29
|
||||
"""
|
||||
|
||||
from ..base_script import BaseScript, BaseScriptStage
|
||||
import json
|
||||
|
||||
class WF_RTLchecker0306(BaseScript):
    """
    stages: stage1, stage2, stage3, stage3b, stage4
    check: check "scenario list"(stage2) in stage 4
    """
    def __init__(self, prob_data:dict, task_dir:str, config:object):
        super().__init__(prob_data, task_dir, config)
        # upper bound on checklist-repair iterations (from config)
        self.max_check_iter = self.config.autoline.checklist.max

    def make_and_run_stages(self):
        """Run the five generation stages in order, then the checklist pass.

        Each stage consumes the responses of earlier stages, so the order
        below must not change.
        """
        # stage1
        self.stage1 = Stage1(self.prob_data, **self.gptkwargs)
        self.stage_operation(self.stage1)
        # stage2
        self.stage2 = Stage2(self.prob_data, self.stage1.response, **self.gptkwargs)
        self.stage_operation(self.stage2)
        # stage3
        self.stage3 = Stage3(self.prob_data, self.stage1.response, self.stage2.response, **self.gptkwargs)
        self.stage_operation(self.stage3)
        # stage3b
        self.stage3b = Stage3b(self.prob_data, self.stage1.response, self.stage3.response, **self.gptkwargs)
        self.stage_operation(self.stage3b)
        # stage4
        self.stage4 = Stage4(self.prob_data, self.stage1.response, self.stage2.response, self.stage3b.response, **self.gptkwargs)
        self.stage_operation(self.stage4)
        # stagechecklist
        self.stagecheck = StageChecklist(self.TB_code, self.stage2.response, self.max_check_iter, **self.gptkwargs)
        self.stage_operation(self.stagecheck)
        # add stage3b's golden DUT to the end of the final TB code
        # self.TB_code += "\n" + stage3b.response #now in stage 4

    def make_and_run_reboot_stages(self, debug_dir):
        """Reboot flow: regenerate only the TB (stage4) and re-run the checklist, reusing the earlier stage responses."""
        # stage4
        stage4_reboot = Stage4(self.prob_data, self.stage1.response, self.stage2.response, self.stage3b.response, **self.gptkwargs)
        self.stage_operation(stage4_reboot, debug_dir, reboot_en=True)
        # stagechecklist
        stagecheck = StageChecklist(self.TB_code, self.stage2.response, self.max_check_iter, **self.gptkwargs)
        self.stage_operation(stagecheck, debug_dir, reboot_en=True)
|
||||
|
||||
|
||||
STAGE1_TXT1="""1. Your task is to write a verilog testbench for an verilog RTL module code (we call it as "DUT", device under test). The infomation we have is the problem description that guides student to write the RTL code (DUT) and the header of the "DUT". Our target is to generate the verilog testbench for the DUT. This testbench can check if the DUT in verilog satisfies all technical requirements of the problem description.
|
||||
2. You are in the first stage. In this stage, please summarize the technical details of the DUT and give me a technical specification of the testbench generation task, so we can use it to design its corresponding testbench.
|
||||
3. The core of testbench is the testcases. It usually include two parts logically: the input signals to the DUT and the expected result signals from DUT. The testbench will send the input signals to DUT and check if the result signals are the same as the expected result signals. If they are the same, this means the DUT is passed. Otherwise the DUT fails.
|
||||
4. Your technical specification should include these sections:
|
||||
- section 1: specification of the DUT, including the module header of the RTL code. If table or other detailed data is provided in the original problem description, DO repeat them in your response. They are very important!!!
|
||||
5. your response should be in the form of JSON.
|
||||
6. below is the information including the problem description and the DUT header:"""
|
||||
STAGE1_TXT2="""your response must be in JSON form. example:
|
||||
{
|
||||
"circuit type": "...", # type: string. should be "CMB" for combinational circuit or "SEQ" for sequential circuit. you should only choose one from "CMB" and "SEQ".
|
||||
"important data": "...", # type: string. If no table, state transition or other direct data, leave this with ""
|
||||
"technical specifications": ["...", "...", ...] # each element of the list is one specification string, the starting of the string is its index
|
||||
}
|
||||
"""
|
||||
class Stage1(BaseScriptStage):
|
||||
def __init__(self, prob_data, **gptkwargs):
|
||||
gptkwargs["json_mode"] = True
|
||||
super().__init__("stage_1", **gptkwargs)
|
||||
self.prob_data = prob_data
|
||||
self.txt1 = STAGE1_TXT1
|
||||
self.txt2 = STAGE1_TXT2
|
||||
|
||||
def make_prompt(self):
|
||||
self.prompt = ""
|
||||
self.add_prompt_line(self.txt1)
|
||||
# problem description
|
||||
self.add_prompt_line("RTL circuit problem description:")
|
||||
self.add_prompt_line(self.prob_data["description"])
|
||||
# DUT header
|
||||
self.add_prompt_line("DUT header:")
|
||||
self.add_prompt_line(self.prob_data["header"])
|
||||
# template
|
||||
self.add_prompt_line(self.txt2)
|
||||
|
||||
# def postprocessing(self):
|
||||
# self.spec_dict = json.loads(self.response)
|
||||
|
||||
|
||||
STAGE2_TXT1="""1. Your task is to write a verilog testbench for an verilog RTL module code (we call it as "DUT", device under test). The infomation we have is the problem description that guides student to write the RTL code (DUT) and the header of the "DUT". Our target is to generate the verilog testbench for the DUT. This testbench can check if the DUT in verilog satisfies all technical requirements of the problem description.
|
||||
2. you are in section 2. in this section, please give me the test scenarios. you only need to describe the stimulus in each test scenarios. If time is important, please inform the clock cycle information. we will use the stimulus description to generate the test vectors and send them to DUT. you must not tell the expected results even though you know that.
|
||||
3. your information is:"""
|
||||
STAGE2_TXT2="""
|
||||
you only need to describe the stimulus in each test scenarios. If time is important, please inform the clock cycle information. we will use the stimulus description to generate the test vectors and send them to DUT. you must not tell the expected results even though you know that.
|
||||
|
||||
your response must be in JSON form. example:
|
||||
{
|
||||
"scenario 1": "...", # each content is a string
|
||||
"scenario 2": "...",
|
||||
"scenario 3": "...",
|
||||
...
|
||||
}"""
|
||||
class Stage2(BaseScriptStage):
|
||||
def __init__(self, prob_data, response_stage1, **gptkwargs) -> None:
|
||||
gptkwargs["json_mode"] = True
|
||||
super().__init__("stage_2", **gptkwargs)
|
||||
self.prob_data = prob_data
|
||||
self.response_stage1 = response_stage1
|
||||
self.txt1 = STAGE2_TXT1
|
||||
self.txt2 = STAGE2_TXT2
|
||||
|
||||
def make_prompt(self):
|
||||
self.prompt = ""
|
||||
self.add_prompt_line(self.txt1)
|
||||
# problem description
|
||||
self.add_prompt_line("RTL circuit problem description:")
|
||||
self.add_prompt_line(self.prob_data["description"])
|
||||
# specification
|
||||
self.add_prompt_line("RTL testbench specification:")
|
||||
self.add_prompt_line(self.response_stage1)
|
||||
# DUT header
|
||||
self.add_prompt_line("DUT header:")
|
||||
self.add_prompt_line(self.prob_data["header"])
|
||||
# template
|
||||
self.add_prompt_line(self.txt2)
|
||||
|
||||
|
||||
STAGE3_TXT1="""1. Your task is to write a verilog testbench for an verilog RTL module code (we call it as "DUT", device under test). The information we have is the problem description that guides student to write the RTL code (DUT) and the header of the "DUT". Our target is to generate the verilog testbench for the DUT. This testbench can check if the DUT in verilog satisfies all technical requirements of the problem description.
|
||||
2. you are in section 3; in this section, please give me the rules of an ideal DUT. you should give these rules in python. (For convenience, you can use binary or hexadecimal format in python, i.e. 0b0010 and 0x1a). Later we will use these ideal rules to generate expected values in each test scenario. currently you must only generate the rules. the input of these rules should be related to the test vectors from test scenario. the rule should give the expected values under test vectors.
|
||||
3. your information is:"""
|
||||
|
||||
class Stage3(BaseScriptStage):
|
||||
def __init__(self, prob_data, response_stage1, response_stage2, **gptkwargs) -> None:
|
||||
super().__init__("stage_3", **gptkwargs)
|
||||
self.prob_data = prob_data
|
||||
self.response_stage1 = response_stage1
|
||||
self.response_stage2 = response_stage2
|
||||
self.txt1 = STAGE3_TXT1
|
||||
|
||||
def make_prompt(self):
|
||||
self.prompt = ""
|
||||
self.add_prompt_line(self.txt1)
|
||||
# problem description
|
||||
self.add_prompt_line("RTL circuit problem description:")
|
||||
self.add_prompt_line(self.prob_data["description"])
|
||||
# specification
|
||||
self.add_prompt_line("RTL testbench specification:")
|
||||
self.add_prompt_line(self.response_stage1)
|
||||
# DUT header
|
||||
self.add_prompt_line("DUT header:")
|
||||
self.add_prompt_line(self.prob_data["header"])
|
||||
# test scenarios
|
||||
self.add_prompt_line("test scenario: (please note the test vectors below, it will help you determine the input parameters of the rules)")
|
||||
self.add_prompt_line(self.response_stage2)
|
||||
# end
|
||||
self.add_prompt_line("your response should only contain python code. For convenience, you can use binary or hexadecimal format in python. For example: 0b0010 and 0x1a")
|
||||
|
||||
def postprocessing(self):
|
||||
# extract python codes; codes may be more than one
|
||||
python_codes = self.extract_code(self.response, "python")
|
||||
response = ""
|
||||
for python_code in python_codes:
|
||||
response += python_code + "\n"
|
||||
self.response = response
|
||||
|
||||
STAGE3B_TXT1="""1. background: Your task is to write a verilog testbench for an verilog RTL module code (we call it as "DUT", device under test). The infomation we have is the problem description that guides student to write the RTL code (DUT) and the header of the "DUT". Our target is to generate the verilog testbench for the DUT. This testbench can check if the DUT in verilog satisfies all technical requirements of the problem description.
|
||||
2. Task: you are in section 3. in this section, please give me the golden RTL code that fullfill the description. This golden RTL code should have the same input and output ports as module header. The name of the module is "golden_DUT". The module will be the reference module in the final testbench. The final testbench will compare the golden RTL's output signals with DUT's output signals. If the same in all cases, the test passes. Your current task is to generate the golden RTL module.
|
||||
3. Prior Knowledge: We already have the core rules expressed in python. You can use this infomation to help you design your golden RTL. You can use high level syntax and unsynthesizable syntax. Your golden module name is "golden_DUT" and ports are the same as DUT's ports.
|
||||
4. your information is:"""
|
||||
class Stage3b(BaseScriptStage):
|
||||
def __init__(self, prob_data, response_stage1, response_stage3, **gptkwargs) -> None:
|
||||
super().__init__("stage_3b", **gptkwargs)
|
||||
self.prob_data = prob_data
|
||||
self.response_stage1 = response_stage1
|
||||
self.response_stage3 = response_stage3
|
||||
self.txt1 = STAGE3B_TXT1
|
||||
|
||||
def make_prompt(self):
|
||||
self.prompt = ""
|
||||
self.add_prompt_line(self.txt1)
|
||||
# problem description
|
||||
self.add_prompt_line("RTL circuit problem description:")
|
||||
self.add_prompt_line(self.prob_data["description"])
|
||||
# specification
|
||||
self.add_prompt_line("RTL testbench specification:")
|
||||
self.add_prompt_line(self.response_stage1)
|
||||
# DUT header
|
||||
self.add_prompt_line("DUT header:")
|
||||
self.add_prompt_line(self.prob_data["header"])
|
||||
# rules
|
||||
self.add_prompt_line("IMPORTANT: THE RULES OF IDEAL DUT:")
|
||||
self.add_prompt_line(self.response_stage3)
|
||||
# end
|
||||
self.add_prompt_line("please generate the golden module code. please only generate the verilog codes, no other words.")
|
||||
|
||||
def postprocessing(self):
|
||||
# verilog codes
|
||||
self.response = self.extract_code(self.response, "verilog")[-1]
|
||||
|
||||
STAGE4_TXT1="""1. Your task is to write a verilog testbench for an verilog RTL module code (we call it as "DUT", device under test). The infomation we have is
|
||||
- 1.1. the problem description that guides student to write the RTL code (DUT) and the header of the "DUT".
|
||||
- 1.2. the module header.
|
||||
- 1.3. the technical specification of testbench
|
||||
- 1.4. test scenarios which determines value and sequential information of test vectors
|
||||
- 1.5. the golden RTL codes in verilog. In testbench you should compare the signals from golden RTL and DUT. If not the same, then this DUT fails in the test.
|
||||
Our target is to generate the verilog testbench for the DUT. This testbench can check if the DUT in verilog satisfies all technical requirements from the problem description.
|
||||
2. you are in section 4. in this section, you will be provided with test scenarios and golden DUT. please highly based on these information to generate the testbench.
|
||||
3. There should be a reg "error". It is "0" at the beginning. In each scenario, if test fails, the error should become "1" permanently and testbench should print like "scenario ... failed, got ..., expected ...". At the end of the test, if the "error" is still "0", testbench should print "All test cases passed!". This is very important!
|
||||
4. In the scenarios testing part, do not directly write the value of expected value, but generate expected value from golden RTL.
|
||||
5. your information is:"""
|
||||
class Stage4(BaseScriptStage):
|
||||
def __init__(self, prob_data, response_stage1, response_stage2, response_stage3b, **gptkwargs) -> None:
|
||||
super().__init__("stage_4", **gptkwargs)
|
||||
self.prob_data = prob_data
|
||||
self.response_stage1 = response_stage1
|
||||
self.response_stage2 = response_stage2
|
||||
self.response_stage3b = response_stage3b
|
||||
self.txt1 = STAGE4_TXT1
|
||||
self.TB_code_out = ""
|
||||
|
||||
def make_prompt(self):
|
||||
self.prompt = ""
|
||||
self.add_prompt_line(self.txt1)
|
||||
# problem description
|
||||
self.add_prompt_line("RTL circuit problem description:")
|
||||
self.add_prompt_line(self.prob_data["description"])
|
||||
# specification
|
||||
self.add_prompt_line("RTL testbench specification:")
|
||||
self.add_prompt_line(self.response_stage1)
|
||||
# DUT header
|
||||
self.add_prompt_line("DUT header:")
|
||||
self.add_prompt_line(self.prob_data["header"])
|
||||
# rules
|
||||
self.add_prompt_line("IMPORTANT - test scenario:")
|
||||
self.add_prompt_line(self.response_stage2)
|
||||
# rules
|
||||
self.add_prompt_line("IMPORTANT - golden RTL: (please instantiate it in your testbench. Your code should not contain the full code of golden RTL)")
|
||||
self.add_prompt_line(self.response_stage3b)
|
||||
# end
|
||||
self.add_prompt_line("please generate the golden module code. please only generate the verilog codes, no other words.")
|
||||
|
||||
def postprocessing(self):
|
||||
# verilog codes
|
||||
self.response = self.extract_code(self.response, "verilog")[-1]
|
||||
self.TB_code_out = self.response + "\n" + self.response_stage3b
|
||||
|
||||
|
||||
class StageChecklist(BaseScriptStage):
|
||||
def __init__(self, TB_code:str, checklist_str:str, max_iter:int, **gptkwargs) -> None:
|
||||
super().__init__("stage_checklist", **gptkwargs)
|
||||
self.checklist = checklist_str
|
||||
self.max_iter = max_iter
|
||||
self.TB_code_out = TB_code
|
||||
self.exit = False
|
||||
self.iter = 0
|
||||
self.TB_modified = False
|
||||
|
||||
def make_prompt(self):
|
||||
self.prompt = ""
|
||||
self.add_prompt_line("please check the if the testbench code contains all the items in the checklist:")
|
||||
self.add_prompt_line("testbench code here...\n")
|
||||
self.add_prompt_line(self.TB_code_out + "\n")
|
||||
self.add_prompt_line("please check the if the testbench code above contains all the scenarios in the checklist:")
|
||||
self.add_prompt_line(self.checklist)
|
||||
self.add_prompt_line("please reply 'YES' if all the items are included. If some of the items are missed in testbench, please add the missing items and reply the modified testbench code (full code).")
|
||||
self.add_prompt_line("VERY IMPORTANT: please ONLY reply 'YES' or the full code modified. NEVER remove other irrelevant codes!!!")
|
||||
|
||||
def postprocessing(self):
|
||||
self.iter += 1
|
||||
if "YES" in self.response:
|
||||
self.exit = True
|
||||
else:
|
||||
self.TB_modified = True
|
||||
self.TB_code_out = self.extract_code(self.response, "verilog")[-1]
|
||||
|
||||
def run(self):
|
||||
self.TB_modified = False
|
||||
while (not self.exit) and (self.iter < self.max_iter):
|
||||
self.make_prompt()
|
||||
self.call_gpt()
|
||||
self.postprocessing()
|
||||
|
||||
|
||||
########################################################################
|
||||
def test():
|
||||
test_stage1 = Stage1(model = "gpt-3.5-turbo", gptkeypath = "gpt_key/gpt_key_0306.json")
|
||||
test_stage1.make_prompt()
|
||||
print(test_stage1.prompt)
|
||||
|
||||
10
prompt_scripts/public_stages.py
Normal file
10
prompt_scripts/public_stages.py
Normal file
@@ -0,0 +1,10 @@
|
||||
"""
|
||||
Description : the public stages that may be used by other scripts
|
||||
Author : Ruidi Qiu (r.qiu@tum.de)
|
||||
Time : 2024/3/30 23:58:28
|
||||
LastEdited : 2024/3/31 00:00:30
|
||||
"""
|
||||
|
||||
from .base_script import BaseScriptStage
|
||||
|
||||
# not implemented yet
|
||||
60
prompt_scripts/script_directgen.py
Normal file
60
prompt_scripts/script_directgen.py
Normal file
@@ -0,0 +1,60 @@
|
||||
"""
|
||||
Description : "directgen" script for prompt scripts
|
||||
Author : Ruidi Qiu (r.qiu@tum.de)
|
||||
Time : 2024/3/30 17:40:38
|
||||
LastEdited : 2024/5/1 17:44:05
|
||||
"""
|
||||
|
||||
from .base_script import BaseScript, BaseScriptStage, TESTBENCH_TEMPLATE
|
||||
|
||||
class WF_directgen(BaseScript):
|
||||
"""
|
||||
stages: stage1
|
||||
"""
|
||||
def __init__(self, prob_data:dict, task_dir:str, config:object):
|
||||
super().__init__(prob_data, task_dir, config)
|
||||
|
||||
def make_and_run_stages(self):
|
||||
# stage1
|
||||
stage1 = Stage1(self.prob_data, **self.gptkwargs)
|
||||
self.stage_operation(stage1)
|
||||
|
||||
def make_and_run_reboot_stages(self, debug_dir):
|
||||
# stage1
|
||||
stage1 = Stage1(self.prob_data, **self.gptkwargs)
|
||||
self.stage_operation(stage1, debug_dir, reboot_en=True)
|
||||
|
||||
STAGE1_TXT1 = """
|
||||
Your task is to write a verilog testbench for an verilog RTL module code (we call it as "DUT", device under test). The infomation we have is the problem description that guides student to write the RTL code (DUT) and the header of the "DUT".
|
||||
"""
|
||||
STAGE1_TXT2 = """
|
||||
very very IMPORTANT: If all the test cases pass, the testbench should display "all test cases passed". If any one of the test cases fails, testbench should not display "all test caess passed". DO NOT generate any .vcd file.
|
||||
please don't reply other words except the testbench codes.
|
||||
"""
|
||||
class Stage1(BaseScriptStage):
|
||||
def __init__(self, prob_data, **gptkwargs) -> None:
|
||||
super().__init__("stage_1", **gptkwargs)
|
||||
self.prob_data = prob_data
|
||||
self.txt1 = STAGE1_TXT1
|
||||
self.txt2 = STAGE1_TXT2
|
||||
self.TB_code_out = ""
|
||||
|
||||
def make_prompt(self):
|
||||
self.prompt = ""
|
||||
self.add_prompt_line(self.txt1)
|
||||
# testbench template
|
||||
self.add_prompt_line("your testbench template is:")
|
||||
self.add_prompt_line(TESTBENCH_TEMPLATE)
|
||||
# problem description
|
||||
self.add_prompt_line("problem description:")
|
||||
self.add_prompt_line(self.prob_data["description"])
|
||||
# DUT header
|
||||
self.add_prompt_line("DUT header:")
|
||||
self.add_prompt_line(self.prob_data["header"])
|
||||
# end
|
||||
self.add_prompt_line(self.txt2)
|
||||
|
||||
def postprocessing(self):
|
||||
# verilog codes
|
||||
self.response = self.extract_code(self.response, "verilog")[-1]
|
||||
self.TB_code_out = self.response
|
||||
311
prompt_scripts/script_pychecker.py
Normal file
311
prompt_scripts/script_pychecker.py
Normal file
@@ -0,0 +1,311 @@
|
||||
"""
|
||||
Description : The prompt script for pychecker workflow
|
||||
Author : Ruidi Qiu (r.qiu@tum.de)
|
||||
Time : 2024/3/22 10:40:43
|
||||
LastEdited : 2024/8/25 00:05:24
|
||||
"""
|
||||
|
||||
import json
|
||||
from . import utils
|
||||
from .base_script import BaseScript, BaseScriptStage
|
||||
from .script_pychecker_CMB_new import Stage4 as Stage4_CMB, Stage5 as Stage5_CMB
|
||||
from .script_pychecker_SEQ import Stage4_SEQ, Stage4b_SEQ, Stage5_SEQ
|
||||
|
||||
class WF_pychecker(BaseScript):
|
||||
"""
|
||||
stages: stage1, stage2, stage3, stage3b, stage4
|
||||
check: check "scenario list"(stage2) in stage 4
|
||||
"""
|
||||
def __init__(self, prob_data:dict, task_dir:str, config:object):
|
||||
super().__init__(prob_data, task_dir, config)
|
||||
self.max_check_iter = self.config.autoline.checklist.max
|
||||
self.py_debug_focus = True
|
||||
|
||||
def make_and_run_stages(self):
|
||||
# stage0
|
||||
self.stage0 = Stage0(self.prob_data, **self.gptkwargs)
|
||||
self.stage_operation(self.stage0)
|
||||
self.circuit_type = self.stage0.circuit_type
|
||||
# stage1
|
||||
self.stage1 = Stage1(self.prob_data, **self.gptkwargs)
|
||||
self.stage_operation(self.stage1)
|
||||
# stage2
|
||||
self.stage2 = Stage2(self.prob_data, self.stage1.response, **self.gptkwargs)
|
||||
self.stage_operation(self.stage2)
|
||||
self.scenario_num = self.stage2.scenario_num
|
||||
self.scenario_dict = self.stage2.scenario_dict
|
||||
# stage3
|
||||
self.stage3 = Stage3(self.prob_data, self.stage1.response, self.stage2.response, **self.gptkwargs)
|
||||
self.stage_operation(self.stage3)
|
||||
# split into CMB and SEQ
|
||||
if self.circuit_type == "CMB":
|
||||
self.make_and_run_stages_CMB()
|
||||
else:
|
||||
self.make_and_run_stages_SEQ()
|
||||
|
||||
def make_and_run_stages_CMB(self):
|
||||
# stage4
|
||||
self.stage4 = Stage4_CMB(self.prob_data, self.stage1.response, self.stage2.response, **self.gptkwargs)
|
||||
self.stage_operation(self.stage4)
|
||||
# stagechecklist
|
||||
self.stagecheck = StageChecklist(self.TB_code, self.stage2.response, self.max_check_iter, **self.gptkwargs)
|
||||
self.stage_operation(self.stagecheck)
|
||||
# we perform pychecker_CMB_TB_standardization after stagechecklist because there is no stage 4b
|
||||
# self.TB_code = utils.pychecker_CMB_TB_standardization(self.TB_code, self.prob_data["header"])
|
||||
# stage5
|
||||
self.stage5 = Stage5_CMB(self.prob_data, self.stage1.response, self.stage3.response, **self.gptkwargs)
|
||||
self.stage_operation(self.stage5)
|
||||
|
||||
def make_and_run_stages_SEQ(self):
|
||||
# stage4
|
||||
self.stage4 = Stage4_SEQ(self.prob_data, self.stage1.response, self.stage2.response, **self.gptkwargs)
|
||||
self.stage_operation(self.stage4)
|
||||
# stagechecklist
|
||||
self.stagecheck = StageChecklist(self.TB_code, self.stage2.response, self.max_check_iter, **self.gptkwargs)
|
||||
self.stage_operation(self.stagecheck)
|
||||
# stage4b
|
||||
self.stage4b = Stage4b_SEQ(self.prob_data, self.TB_code, **self.gptkwargs)
|
||||
self.stage_operation(self.stage4b)
|
||||
# stage5
|
||||
self.stage5 = Stage5_SEQ(self.prob_data, self.stage1.response, self.stage3.response, **self.gptkwargs)
|
||||
self.stage_operation(self.stage5)
|
||||
|
||||
def make_and_run_reboot_stages(self, debug_dir):
|
||||
if self.circuit_type == "CMB":
|
||||
self.make_and_run_reboot_stages_CMB(debug_dir)
|
||||
else:
|
||||
self.make_and_run_reboot_stages_SEQ(debug_dir)
|
||||
|
||||
def make_and_run_reboot_stages_CMB(self, debug_dir):
|
||||
if self.reboot_mode == "TB":
|
||||
# stage4
|
||||
self.stage4 = Stage4_CMB(self.prob_data, self.stage1.response, self.stage2.response, **self.gptkwargs)
|
||||
self.stage_operation(self.stage4, debug_dir, reboot_en=True)
|
||||
# stagechecklist
|
||||
self.stagecheck = StageChecklist(self.TB_code, self.stage2.response, self.max_check_iter, **self.gptkwargs)
|
||||
self.stage_operation(self.stagecheck, debug_dir, reboot_en=True)
|
||||
# pychecker_CMB_TB_standardization
|
||||
# self.TB_code = utils.pychecker_CMB_TB_standardization(self.TB_code, self.prob_data["header"])
|
||||
elif self.reboot_mode == "PY":
|
||||
# stage5
|
||||
self.stage5 = Stage5_CMB(self.prob_data, self.stage1.response, self.stage3.response, **self.gptkwargs)
|
||||
self.stage_operation(self.stage5, debug_dir, reboot_en=True)
|
||||
else:
|
||||
raise ValueError("invalid reboot_mode in WF_pychecker script (circuit type: CMB)")
|
||||
|
||||
def make_and_run_reboot_stages_SEQ(self, debug_dir):
|
||||
if self.reboot_mode == "TB":
|
||||
# stage4
|
||||
self.stage4 = Stage4_SEQ(self.prob_data, self.stage1.response, self.stage2.response, **self.gptkwargs)
|
||||
self.stage_operation(self.stage4, debug_dir, reboot_en=True)
|
||||
# stagechecklist
|
||||
self.stagecheck = StageChecklist(self.TB_code, self.stage2.response, self.max_check_iter, **self.gptkwargs)
|
||||
self.stage_operation(self.stagecheck, debug_dir, reboot_en=True)
|
||||
# stage4b
|
||||
self.stage4b = Stage4b_SEQ(self.prob_data, self.TB_code, **self.gptkwargs)
|
||||
self.stage_operation(self.stage4b, debug_dir, reboot_en=True)
|
||||
elif self.reboot_mode == "PY":
|
||||
# stage5
|
||||
self.stage5 = Stage5_SEQ(self.prob_data, self.stage1.response, self.stage3.response, **self.gptkwargs)
|
||||
self.stage_operation(self.stage5, debug_dir, reboot_en=True)
|
||||
else:
|
||||
raise ValueError("invalid reboot_mode in WF_pychecker script (circuit type: SEQ)")
|
||||
|
||||
|
||||
|
||||
SIGNALTEMP_PLACEHOLDER_1 = "/* SIGNAL TEMPLATE 1 */"
|
||||
SIGNALTEMP_PLACEHOLDER_1A = "/* SIGNAL TEMPLATE 1A */"
|
||||
SIGNALTEMP_PLACEHOLDER_1B = "/* SIGNAL TEMPLATE 1B */"
|
||||
|
||||
|
||||
class Stage0(BaseScriptStage):
|
||||
def __init__(self, prob_data, **gptkwargs) -> None:
|
||||
super().__init__("stage_0", **gptkwargs)
|
||||
self.prob_data = prob_data
|
||||
self.circuit_type = None
|
||||
|
||||
def make_prompt(self):
|
||||
self.add_prompt_line("Please generate the verilog RTL code according to the following description and header information:")
|
||||
self.add_prompt_line("problem description:")
|
||||
self.add_prompt_line(self.prob_data["description"])
|
||||
self.add_prompt_line("RTL header:")
|
||||
self.add_prompt_line(self.prob_data["header"])
|
||||
self.add_prompt_line("please only reply verilog codes. reply_format:\n```verilog\nyour_code_here...\n```")
|
||||
|
||||
def postprocessing(self):
|
||||
self.response = self.extract_code(self.response, "verilog")[-1]
|
||||
self.circuit_type = utils.circuit_type_by_code(self.response)
|
||||
|
||||
STAGE1_TXT1="""1. Your task is to write a verilog testbench for an verilog RTL module code (we call it as "DUT", device under test). The infomation we have is the problem description that guides student to write the RTL code (DUT) and the header of the "DUT". Our target is to generate the verilog testbench for the DUT. This testbench can check if the DUT in verilog satisfies all technical requirements of the problem description.
|
||||
2. You are in the first stage. In this stage, please summarize the technical details of the DUT and give me a technical specification of the testbench generation task, so we can use it to design its corresponding testbench.
|
||||
3. The core of testbench is the testcases. It usually include two parts logically: the input signals to the DUT and the expected result signals from DUT. The testbench will send the input signals to DUT and check if the result signals are the same as the expected result signals. If they are the same, this means the DUT is passed. Otherwise the DUT fails.
|
||||
4. Your technical specification should include these sections:
|
||||
- section 1: specification of the DUT, including the module header of the RTL code. If table or other detailed data is provided in the original problem description, DO repeat them in your response. They are very important!!!
|
||||
5. your response should be in the form of JSON.
|
||||
6. below is the information including the problem description and the DUT header:"""
|
||||
STAGE1_TXT2="""your response must be in JSON form. example:
|
||||
{
|
||||
"important data": "...", # type: string. If no table, state transition or other direct data, leave this with ""
|
||||
"technical specifications": ["...", "...", ...] # each element of the list is one specification string, the starting of the string is its index
|
||||
}
|
||||
"""
|
||||
class Stage1(BaseScriptStage):
|
||||
def __init__(self, prob_data, **gptkwargs):
|
||||
gptkwargs["json_mode"] = True
|
||||
super().__init__("stage_1", **gptkwargs)
|
||||
self.prob_data = prob_data
|
||||
self.txt1 = STAGE1_TXT1
|
||||
self.txt2 = STAGE1_TXT2
|
||||
|
||||
def make_prompt(self):
|
||||
self.prompt = ""
|
||||
self.add_prompt_line(self.txt1)
|
||||
# problem description
|
||||
self.add_prompt_line("RTL circuit problem description:")
|
||||
self.add_prompt_line(self.prob_data["description"])
|
||||
# DUT header
|
||||
self.add_prompt_line("DUT header:")
|
||||
self.add_prompt_line(self.prob_data["header"])
|
||||
# template
|
||||
self.add_prompt_line(self.txt2)
|
||||
|
||||
# def postprocessing(self):
|
||||
# self.spec_dict = json.loads(self.response)
|
||||
|
||||
|
||||
STAGE2_TXT1="""1. Your task is to write a verilog testbench for an verilog RTL module code (we call it as "DUT", device under test). The infomation we have is the problem description that guides student to write the RTL code (DUT) and the header of the "DUT". Our target is to generate the verilog testbench for the DUT. This testbench can check if the DUT in verilog satisfies all technical requirements of the problem description.
|
||||
2. you are in section 2. in this section, please give me the test scenarios. you only need to describe the stimulus in each test scenarios. If time is important, please inform the clock cycle information. we will use the stimulus description to generate the test vectors and send them to DUT. you must not tell the expected results even though you know that.
|
||||
3. your information is:"""
|
||||
STAGE2_TXT2="""
|
||||
you only need to describe the stimulus in each test scenarios. If time is important, please inform the clock cycle information. we will use the stimulus description to generate the test vectors and send them to DUT. you must not tell the expected results even though you know that.
|
||||
|
||||
your response must be in JSON form. example:
|
||||
{
|
||||
"scenario 1": "...", # each content is a string
|
||||
"scenario 2": "...",
|
||||
"scenario 3": "...",
|
||||
...
|
||||
}"""
|
||||
class Stage2(BaseScriptStage):
|
||||
def __init__(self, prob_data, response_stage1, **gptkwargs) -> None:
|
||||
gptkwargs["json_mode"] = True
|
||||
super().__init__("stage_2", **gptkwargs)
|
||||
self.prob_data = prob_data
|
||||
self.response_stage1 = response_stage1
|
||||
self.txt1 = STAGE2_TXT1
|
||||
self.txt2 = STAGE2_TXT2
|
||||
|
||||
def make_prompt(self):
|
||||
self.prompt = ""
|
||||
self.add_prompt_line(self.txt1)
|
||||
# problem description
|
||||
self.add_prompt_line("RTL circuit problem description:")
|
||||
self.add_prompt_line(self.prob_data["description"])
|
||||
# specification
|
||||
self.add_prompt_line("RTL testbench specification:")
|
||||
self.add_prompt_line(self.response_stage1)
|
||||
# DUT header
|
||||
self.add_prompt_line("DUT header:")
|
||||
self.add_prompt_line(self.prob_data["header"])
|
||||
# template
|
||||
self.add_prompt_line(self.txt2)
|
||||
|
||||
def postprocessing(self):
|
||||
if "```json" in self.response:
|
||||
self.response = self.extract_code(self.response, "json")[-1]
|
||||
try:
|
||||
self.scenario_dict = json.loads(self.response)
|
||||
self.scenario_num = len(self.scenario_dict.keys())
|
||||
except:
|
||||
self.scenario_dict = None
|
||||
self.scenario_num = None
|
||||
|
||||
STAGE3_TXT1="""1. Your task is to write a verilog testbench for an verilog RTL module code (we call it as "DUT", device under test). The information we have is the problem description that guides student to write the RTL code (DUT) and the header of the "DUT". Our target is to generate the verilog testbench for the DUT. This testbench can check if the DUT in verilog satisfies all technical requirements of the problem description.
|
||||
2. you are in stage 3; in this stage, please give me the core rules of an ideal DUT. you should give these rules in python. (For convenience, you can use binary or hexadecimal format in python, i.e. 0b0010 and 0x1a). Later we will use these ideal rules to generate expected values in each test scenario. currently you must only generate the core part of the rules. the input of these rules should be related to the test vectors from test scenario. the rule should give the expected values under test vectors. You don't need to consider the control signals like clk or reset, unless the core rules of this task are about these signals. You can use numpy, scipy or other third party python libraries to help you write the rules. Please import them if you need.
|
||||
3. your information is:"""
|
||||
|
||||
class Stage3(BaseScriptStage):
    """Stage 3: ask the LLM for the core rules of an ideal DUT as python code."""

    def __init__(self, prob_data, response_stage1, response_stage2, **gptkwargs) -> None:
        super().__init__("stage_3", **gptkwargs)
        self.prob_data = prob_data
        self.response_stage1 = response_stage1
        self.response_stage2 = response_stage2
        self.txt1 = STAGE3_TXT1

    def make_prompt(self):
        """Assemble the stage-3 prompt from the task data and earlier responses."""
        sections = (
            self.txt1,
            # problem description
            "RTL circuit problem description:",
            self.prob_data["description"],
            # specification
            "RTL testbench specification:",
            self.response_stage1,
            # DUT header
            "DUT header:",
            self.prob_data["header"],
            # test scenarios
            "test scenario: (please note the test vectors below, it will help you determine the input parameters of the rules)",
            self.response_stage2,
            # end
            "your response should only contain python code. For convenience, you can use binary or hexadecimal format in python. For example: 0b0010 and 0x1a",
        )
        self.prompt = ""
        for section in sections:
            self.add_prompt_line(section)

    def postprocessing(self):
        """Concatenate every fenced python block from the response, each newline-terminated."""
        snippets = self.extract_code(self.response, "python")
        self.response = "".join(f"{snippet}\n" for snippet in snippets)
|
||||
class StageChecklist(BaseScriptStage):
    """Checklist repair stage: ensure every scenario from stage 2 appears in the
    generated testbench, asking the LLM to patch the TB when some are missing.

    After ``run()``:
    - ``TB_code_out`` holds the (possibly repaired) testbench code.
    - ``TB_modified`` is True when the LLM actually changed the testbench.
    """

    def __init__(self, TB_code:str, checklist_str:str, max_iter:int, **gptkwargs) -> None:
        super().__init__("stage_checklist", **gptkwargs)
        # raw checklist text (shown verbatim in the prompt) and its parsed
        # form, e.g. {"scenario 1": "xxx", "scenario 2": "xxx", ...}
        self.checklist = checklist_str
        self.checklist_dict = json.loads(checklist_str)
        self.missing_scenarios = []
        self.max_iter = max_iter      # hard cap on LLM repair rounds
        self.TB_code_out = TB_code    # current best testbench code
        self.exit = False
        self.iter = 0
        self.TB_modified = False

    def make_prompt(self):
        """Build the repair prompt: current TB + checklist + the precheck's hint."""
        self.prompt = ""
        self.add_prompt_line("please check the if the testbench code contains all the items in the checklist:")
        self.add_prompt_line("testbench code here...\n")
        self.add_prompt_line(self.TB_code_out + "\n")
        self.add_prompt_line("please check the if the testbench code above contains all the scenarios in the checklist:")
        self.add_prompt_line(self.checklist)
        self.add_prompt_line("please reply 'YES' if all the items are included. If some of the items are missed in testbench, please add the missing items and reply the modified testbench code (full code).")
        self.add_prompt_line("HINT: the missing scenarios may be: " + str(self.missing_scenarios))
        self.add_prompt_line("VERY IMPORTANT: please ONLY reply 'YES' or the full code modified. NEVER remove other irrelevant codes!!!")

    def postprocessing(self):
        """Interpret the LLM reply: acceptance ('YES') or a repaired testbench."""
        self.iter += 1
        # case-insensitive acceptance check; previously only the exact spellings
        # YES/Yes/yes matched, so e.g. "yES" fell through to code extraction
        if "YES" in self.response.upper():
            self.exit = True
        else:
            codes = self.extract_code(self.response, "verilog")
            if codes:
                self.TB_modified = True
                self.TB_code_out = codes[-1]
            # else: no fenced verilog block in the reply — keep the previous
            # testbench instead of crashing (was: [-1] on an empty list)

    def pre_check(self):
        """this function is called at the beginning of run() so that the stage can be skipped if needed"""
        # a checklist key like "scenario 1" counts as present when the TB
        # contains "scenario = 1" (every space rewritten to " = ")
        self.missing_scenarios = []
        for key in self.checklist_dict.keys():
            if key.replace(" ", " = ") not in self.TB_code_out:
                self.missing_scenarios.append(key)

    def run(self):
        """Loop pre_check -> prompt -> LLM -> postprocess until all scenarios are
        covered, the LLM answers YES, or max_iter rounds are spent."""
        self.TB_modified = False
        while (not self.exit) and (self.iter < self.max_iter):
            self.pre_check()
            if self.missing_scenarios == []:
                self.exit = True
                self.conversation_message += "\n[SYSTEM PRECHECK] All scenarios are included in the testbench code. You can continue to the next stage."
            else:
                self.make_prompt()
                self.call_gpt()
                self.postprocessing()
||||
# more stages see script_pychecker_CMB and script_pychecker_SEQ
|
||||
# ==== new file in this commit: prompt_scripts/script_pychecker_CMB_new.py (313 lines) ====
|
||||
"""
|
||||
Description : The prompt script for pychecker workflow
|
||||
Author : Ruidi Qiu (r.qiu@tum.de)
|
||||
Time : 2024/3/22 10:40:43
|
||||
LastEdited : 2024/8/25 11:55:28
|
||||
"""
|
||||
|
||||
from .base_script import BaseScript, BaseScriptStage
|
||||
from .legacy import script_RTLchecker0306
|
||||
from .legacy.script_RTLchecker0306 import StageChecklist
|
||||
from . import utils
|
||||
|
||||
class WF_pychecker_CMB(BaseScript):
    """
    Combinational (CMB) pychecker workflow.

    stages actually run: stage1 (TB spec) -> stage2 (test scenarios)
        -> stage3 (ideal-DUT rules in python) -> stage4 (signal-exporting TB)
        -> checklist repair -> stage5 (python checker)
    check: the "scenario list" from stage2 is re-checked against the stage4 TB.
    NOTE(review): the original docstring mentioned a "stage3b" that is not run
    here — presumably a leftover from an earlier revision; confirm.
    """
    def __init__(self, prob_data:dict, task_dir:str, config:object):
        super().__init__(prob_data, task_dir, config)
        # upper bound on checklist repair iterations, taken from the config tree
        self.max_check_iter = self.config.autoline.checklist.max
        # filled elsewhere in the workflow — TODO confirm against BaseScript users
        self.py_code = ""

    def make_and_run_stages(self):
        # Run the full CMB pipeline; each stage consumes earlier responses,
        # so the order below must not change.
        # stage1: testbench specification
        self.stage1 = Stage1(self.prob_data, **self.gptkwargs)
        self.stage_operation(self.stage1)
        # stage2: test scenarios, conditioned on the stage1 spec
        self.stage2 = Stage2(self.prob_data, self.stage1.response, **self.gptkwargs)
        self.stage_operation(self.stage2)
        # stage3: ideal-DUT rules in python (reference input for stage5)
        self.stage3 = Stage3(self.prob_data, self.stage1.response, self.stage2.response, **self.gptkwargs)
        self.stage_operation(self.stage3)
        # stage4: verilog TB that dumps DUT signals to TBout.txt
        self.stage4 = Stage4(self.prob_data, self.stage1.response, self.stage2.response, **self.gptkwargs)
        self.stage_operation(self.stage4)
        # stagechecklist: verify every stage2 scenario appears in the TB, repair if not
        self.stagecheck = StageChecklist(self.TB_code, self.stage2.response, self.max_check_iter, **self.gptkwargs)
        self.stage_operation(self.stagecheck)
        # stage5: python checker built from the spec and the stage3 rules
        self.stage5 = Stage5(self.prob_data, self.stage1.response, self.stage3.response, **self.gptkwargs)
        self.stage_operation(self.stage5)
        # self.TB_code += "\n" + stage3b.response

    def make_and_run_reboot_stages(self, debug_dir):
        # Debug-reboot path: re-run only the testbench side ("TB") or only the
        # python-checker side ("PY"), reusing the earlier stages' responses.
        if self.reboot_mode == "TB":
            # stage4
            self.stage4 = Stage4(self.prob_data, self.stage1.response, self.stage2.response, **self.gptkwargs)
            self.stage_operation(self.stage4, debug_dir, reboot_en=True)
            # stagechecklist
            self.stagecheck = StageChecklist(self.TB_code, self.stage2.response, self.max_check_iter, **self.gptkwargs)
            self.stage_operation(self.stagecheck, debug_dir, reboot_en=True)
        elif self.reboot_mode == "PY":
            # stage5
            self.stage5 = Stage5(self.prob_data, self.stage1.response, self.stage3.response, **self.gptkwargs)
            self.stage_operation(self.stage5, debug_dir, reboot_en=True)
        else:
            raise ValueError("invalid reboot_mode in WF_pychecker script (circuit type: CMB)")
|
||||
class Stage1(script_RTLchecker0306.Stage1):
    """Pychecker stage 1 — identical to the legacy RTLchecker0306 implementation.

    The subclass exists only to give this workflow module its own name for the
    stage; the pass-through ``__init__`` was redundant and is simply inherited.
    """
|
||||
class Stage2(script_RTLchecker0306.Stage2):
    """Pychecker stage 2 — identical to the legacy RTLchecker0306 implementation.

    Thin alias subclass; constructor behavior is inherited unchanged.
    """
|
||||
class Stage3(script_RTLchecker0306.Stage3):
    """Pychecker stage 3 — identical to the legacy RTLchecker0306 implementation.

    Thin alias subclass; constructor behavior is inherited unchanged.
    """
|
||||
# Placeholders spliced into prompt templates via str.replace(); the C-comment
# spelling makes accidental collisions with real prompt text unlikely.
SIGNALTEMP_PLACEHOLDER_1 = "/* SIGNAL TEMPLATE 1 */"
SIGNALTEMP_PLACEHOLDER_1A = "/* SIGNAL TEMPLATE 1A */"
SIGNALTEMP_PLACEHOLDER_1B = "/* SIGNAL TEMPLATE 1B */"

# Stage-4 prompt preamble (CMB): the %s below is %-substituted with the
# placeholder, which Stage4.__init__ later replaces with the concrete
# $fdisplay template for this DUT. ("infomation" typo fixed.)
STAGE4_TXT1 = """
1. Your task is to write a verilog testbench for an verilog RTL module code (we call it as "DUT", device under test). The information we have is
- 1.1. the problem description that guides student to write the RTL code (DUT) and the header of the "DUT".
- 1.2. the module header.
- 1.3. the technical specification of testbench
- 1.4. test scenarios which determines value and sequential information of test vectors

2. you are in section 4. in this section, our target is to generate the verilog testbench for the DUT. This testbench can export the input and output signals of DUT at the important time points. The exported data will be send to a python script to check the correctness of DUT.
ATTENTION: The testbench does not need to check the DUT's output but only export the signals of DUT.
Instruction of saving signals to file:
(1) you should use $fopen and $fdisplay to export the important signals in testbench. the file name is "TBout.txt".
(2) When running testbench, for one time point, you should export 1 line. the example of the printed line is "%s". There could be multiple $fdisplay statements under one scenario, which means multiple test stimuli in one scenario.
(3) Attention: before $fdisplay, you should always have a delay statement to make sure the signals are stable.
(4) the signals you save is the input and output of DUT, you should determine the signals according to DUT's header:
"""%(SIGNALTEMP_PLACEHOLDER_1)

# Stage-4 prompt tail (CMB): restates the export-only contract and asks for
# verilog code only.
STAGE4_TXT2 = """
The testbench does not need to check the DUT's output but only export the signals of DUT.
Instruction of saving signals to file:
(1) you should use $fopen and $fdisplay to export the important signals in testbench. the file name is "TBout.txt".
(2) When running testbench, for one time point, you should export 1 line. the example of the printed line is "%s"; There could be multiple $fdisplay statements under one scenario, which means multiple test stimuli in one scenario.
(3) Attention: before $fdisplay, you should always have a delay statement (#10) to make sure the signals are stable.
(4) the signals you save is the input and output of DUT, you should determine the signals according to DUT's header.
please only generate the verilog codes, no other words.
"""%(SIGNALTEMP_PLACEHOLDER_1)
|
||||
class Stage4(BaseScriptStage):
    """Stage 4 (CMB): generate the verilog testbench that dumps DUT signals to
    TBout.txt; the TB only exports signals, checking happens later in python."""

    def __init__(self, prob_data, response_stage1, response_stage2, **gptkwargs) -> None:
        super().__init__("stage_4", **gptkwargs)
        self.prob_data = prob_data
        self.response_stage1 = response_stage1
        self.response_stage2 = response_stage2
        # splice this DUT's concrete $fdisplay line into both prompt templates
        signal_template = header_to_SignalTxt_template(prob_data["header"])
        self.txt1 = STAGE4_TXT1.replace(SIGNALTEMP_PLACEHOLDER_1, signal_template)
        self.txt2 = STAGE4_TXT2.replace(SIGNALTEMP_PLACEHOLDER_1, signal_template)

    def make_prompt(self):
        """Assemble the TB-generation prompt (preamble, header, context, tail)."""
        sections = (
            self.txt1,
            # DUT header
            self.prob_data["header"],
            # other information:
            "Your other information:",
            # problem description
            "RTL circuit problem description:",
            self.prob_data["description"],
            # specification
            "RTL testbench specification:",
            self.response_stage1,
            # scenarios
            "IMPORTANT - test scenario:",
            self.response_stage2,
            # end
            self.txt2,
        )
        self.prompt = ""
        for section in sections:
            self.add_prompt_line(section)

    def postprocessing(self):
        """Keep the last fenced verilog block and standardize it for the CMB flow."""
        self.response = self.extract_code(self.response, "verilog")[-1]
        self.TB_code_out = utils.pychecker_CMB_TB_standardization(self.response, self.prob_data["header"])
|
||||
# Contract the LLM must follow when writing the GoldenDUT checker class.
# Fix: the "binary" example previously read 0x1101 (a hex literal); it now
# shows the binary literal 0b1101, consistent with the hex example 0x1a.
STAGEPYGEN_PYFORMAT = """Your current task is: write a python class "GoldenDUT". This python class can represent the golden DUT (the ideal one). In your "GoldenDUT", you should do the following things:
- a. Write a method "def __init__(self)". Set the inner states/values of the golden DUT. The "__init__" method has no input parameters except "self".
- b. Write a method "def load(self, signal_vector)". This method is to load the important input signals and get the expected output signals. it should return the expected output values. It can call other methods to help computing the expected output. It will be called by other inner methods later.
- c. Write a method "def check(self, signal_vector)". This method is to call "load" to get the expected output values, and compare them with output signals from DUT. It should return True or False only. It can call other methods to help checking.
- d. write other methods you need, they can be called by "__init__", "load" or "check".
- e. the input of "load" and "check" is the signal vector. The signal vector is a dictionary, the key is the signal name, the value is the signal value.
You can use binary (like 0b1101), hexadecimal (like 0x1a) or normal number format in python. But the signal vector input to GoldenDUT is always in decimal format""" # TODO: later this function will also show the failed scenario idx
|
||||
# STAGEPYGEN_TXT1 = """
|
||||
# 1. background: Your task is to verify the functional correctness of a verilog RTL module code (we call it as "DUT", device under test). Our plan is to first export the signals (input and output) of the DUT under test scenarios. Then, we will use a python script to check the correctness of DUT.
|
||||
# 2. You are in the last stage. In this stage, we already export the signals of DUT. Your task is to write a python script. The python script contains one main function "check_dut" and other functions to be called by "check_dut" (this is optional). The input of "check_dut" is the signals of DUT in the format below: (the signal names are real, but the values are just for example)
|
||||
# %s
|
||||
# The main function "check_dut" should check the correctness according to the input signals. The input signals are all in decimal format. It will be called by other codes later.
|
||||
# 3. %s
|
||||
# 4. You have the information below to help you check the correctness of DUT:
|
||||
# """%(SIGNALTEMP_PLACEHOLDER_1, STAGEPYGEN_PYFORMAT)
|
||||
|
||||
# Stage-5 prompt preamble: the two %s slots receive the signal-vector
# placeholder and the GoldenDUT contract text.
STAGEPYGEN_TXT1 = """
1. background: Your task is to verify the functional correctness of a verilog RTL module code (we call it as "DUT", device under test). Our plan is to first export the signals (input and output) of the DUT under test scenarios. Then, we will use a python script to check the correctness of DUT.
2. You are in the last stage. In this stage, we already export the signals of DUT. The signals of DUT are in the format below: (the signal names are real, but the values are just for example)
%s
The input signals are all in decimal format. The "scenario" is not DUT's signal but to tell you the current scenario index.
3. %s
4. You have the information below to help you check the correctness of DUT:
"""%(SIGNALTEMP_PLACEHOLDER_1, STAGEPYGEN_PYFORMAT)

# Stage-5 prompt tail: restates the contract and asks for python code only.
# Fix: "hexadeciaml" typo corrected to "hexadecimal".
STAGEPYGEN_TXT2 = """
[IMPORTANT] %s
Optional: You can also use functions from numpy and scipy to help you check the correctness of DUT.
you can use binary (like 0b1011), hexadecimal (like 0x1a) or normal number format in python for convenience.
please only generate the python codes, no other words.
"""%(STAGEPYGEN_PYFORMAT)
|
||||
# Fixed python driver code appended verbatim to the LLM-generated GoldenDUT
# class (Stage5.postprocessing). TAIL1 runs every vector through
# GoldenDUT.check and collects the scenario ids that failed.
STAGEPYGEN_TAIL1 = """
def check_dut(vectors_in):
    golden_dut = GoldenDUT()
    failed_scenarios = []
    for vector in vectors_in:
        check_pass = golden_dut.check(vector)
        if check_pass:
            print(f"Passed; vector: {vector}")
        else:
            print(f"Failed; vector: {vector}")
            failed_scenarios.append(vector["scenario"])
    return failed_scenarios
"""

# TAIL2 parses TBout.txt ("scenario: x, a = x, b = x, ...") into a list of
# dicts and feeds it to check_dut. Values containing verilog 'x'/'z' are kept
# as strings; everything else is converted to int. Note the "\\n" below is an
# escaped backslash so the *emitted* script splits on a real newline.
STAGEPYGEN_TAIL2 = """
def SignalTxt_to_dictlist(txt:str):
    lines = txt.strip().split("\\n")
    signals = []
    for line in lines:
        signal = {}
        line = line.strip().split(", ")
        for item in line:
            if "scenario" in item:
                item = item.split(": ")
                signal["scenario"] = item[1]
            else:
                item = item.split(" = ")
                key = item[0]
                value = item[1]
                if "x" not in value and "z" not in value:
                    signal[key] = int(value)
                else:
                    signal[key] = value
        signals.append(signal)
    return signals
with open("TBout.txt", "r") as f:
    txt = f.read()
vectors_in = SignalTxt_to_dictlist(txt)
tb_pass = check_dut(vectors_in)
print(tb_pass)
"""
||||
class Stage5(BaseScriptStage):
    """Stage 5 (CMB): generate the python checker that consumes the testbench's
    signal dump and decides whether the DUT behaved correctly."""

    def __init__(self, prob_data, response_stage1, response_stage3, **gptkwargs) -> None:
        super().__init__("stage_5", **gptkwargs)
        self.prob_data = prob_data
        self.response_stage1 = response_stage1
        self.response_stage3 = response_stage3  # currently not used
        # fill the signal-vector example into the preamble template
        signal_template = utils.signal_dictlist_template(prob_data["header"], use_check_en=False)
        self.txt1 = STAGEPYGEN_TXT1.replace(SIGNALTEMP_PLACEHOLDER_1, signal_template)
        self.txt2 = STAGEPYGEN_TXT2
        # fixed driver code appended to whatever the LLM produces
        self.pycode_tail = STAGEPYGEN_TAIL1 + STAGEPYGEN_TAIL2

    def make_prompt(self):
        """Assemble the checker-generation prompt."""
        sections = (
            self.txt1,
            # problem description
            "RTL circuit problem description:",
            self.prob_data["description"],
            # specification
            "Checker specification:",
            self.response_stage1,
            # python rules (optional reference)
            "Here is the basic rules in python for the module. It is generated in previous stage. You can use it as a reference, but you should write your own python script. This is just for your better understanding:",
            self.response_stage3,
            # end
            self.txt2,
        )
        self.prompt = ""
        for section in sections:
            self.add_prompt_line(section)

    def postprocessing(self):
        """Keep the last fenced python block and append the fixed driver tail."""
        self.response = self.extract_code(self.response, "python")[-1]
        self.Pychecker_code_out = self.response + self.pycode_tail
|
||||
# @staticmethod
|
||||
# def signal_dictlist_template(header:str) -> str:
|
||||
# """
|
||||
# for the automatic generation of signals in testbench
|
||||
# target: given the DUT header, generate the signal output template
|
||||
# eg: if we have a DUT header like "module DUT(input a, b, c, output d, e);", the signal output template should be like "[{"scenario": "1", "a": 1, "b": 0, "c":1, "d": 0, "e": 0}, {"scenario": "2", "a": 0, "b": 0, "c":1, "d": 0, "e": 0}]"
|
||||
# """
|
||||
# signals1 = header_to_SignalTxt_template(header, "1")
|
||||
# signals2 = header_to_SignalTxt_template(header, "2")
|
||||
# signals_dictlist1 = SignalTxt_to_dictlist(signals1)
|
||||
# signals_dictlist2 = SignalTxt_to_dictlist(signals2)
|
||||
# signals_dictlist = signals_dictlist1 + signals_dictlist2
|
||||
# return str(signals_dictlist)
|
||||
|
||||
def header_to_SignalTxt_template(header:str):
    """Derive the $fdisplay statement a testbench should use to dump DUT signals.

    Given a DUT header such as "module DUT(input clk, load, data, output q);",
    returns:
        $fdisplay(file, "scenario: %d, clk = %d, load = %d, data = %d, q = %d", scenario, clk, load, data, q);
    Signal names come from utils.extract_signals(header).
    """
    names = [signal["name"] for signal in utils.extract_signals(header)]
    # format half: ", clk = %d, load = %d, ..." / argument half: ", clk, load, ..."
    fmt_part = "".join(f", {name} = %d" for name in names)
    arg_part = "".join(f", {name}" for name in names)
    return r'$fdisplay(file, "scenario: %d' + fmt_part + r'", scenario' + arg_part + r');'
||||
|
||||
# def SignalTxt_to_dictlist(txt:str) -> list:
|
||||
# """
|
||||
# - from txt to list of dicts
|
||||
# - this function is used to extract signals and scenario information from a out.txt file.
|
||||
# - the TBout.txt file is generated by testbench, which is in the pychecker workflow
|
||||
# - the format of each line in TBout.txt is like:
|
||||
# - "scenario: x, a = x, b = x, c = x, d = x, e = x"
|
||||
# - we want: [{"scenario": x, "a": x, ...}, {...}]
|
||||
# """
|
||||
# lines = txt.strip().split("\n")
|
||||
# signals = []
|
||||
# for line in lines:
|
||||
# signal = {}
|
||||
# line = line.strip().split(", ")
|
||||
# for item in line:
|
||||
# if "scenario" in item:
|
||||
# item = item.split(": ")
|
||||
# signal["scenario"] = item[1]
|
||||
# else:
|
||||
# item = item.split(" = ")
|
||||
# key = item[0]
|
||||
# value = item[1]
|
||||
# if "x" not in value and "z" not in value:
|
||||
# signal[key] = int(value)
|
||||
# else:
|
||||
# signal[key] = value
|
||||
# signals.append(signal)
|
||||
# return signals
|
||||
# ==== new file in this commit: prompt_scripts/script_pychecker_SEQ.py (527 lines) ====
|
||||
"""
|
||||
Description : The prompt script for pychecker workflow
|
||||
Author : Ruidi Qiu (r.qiu@tum.de)
|
||||
Time : 2024/3/22 10:40:43
|
||||
LastEdited : 2024/9/3 17:01:56
|
||||
"""
|
||||
|
||||
from .base_script import BaseScript, BaseScriptStage
from .legacy import script_RTLchecker0306
# fix: StageChecklist is instantiated below (make_and_run_stages and the TB
# reboot path) but its import was commented out — and the commented path
# ".script_RTLchecker0306" was stale. Import it from the legacy package,
# matching script_pychecker_CMB_new.py.
from .legacy.script_RTLchecker0306 import StageChecklist
from . import utils
from .utils import given_TB
import json
|
||||
class WF_pychecker_SEQ(BaseScript):
    """
    Sequential (SEQ) pychecker workflow.

    stages run: stage0 (reference RTL, for circuit-type detection)
        -> stage1 (TB spec, legacy) -> stage2 (scenarios, legacy)
        -> stage3 (python rules) -> stage4 (signal-exporting TB, SEQ flavor)
        -> checklist repair -> stage4b (SEQ TB post-pass) -> stage5 (py checker)
    """
    def __init__(self, prob_data:dict, task_dir:str, config:object):
        super().__init__(prob_data, task_dir, config)
        # upper bound on checklist repair iterations, from the config tree
        self.max_check_iter = self.config.autoline.checklist.max
        # filled elsewhere in the workflow — TODO confirm against BaseScript users
        self.py_code = ""
        self.py_debug_focus = True # only for SEQ

    def make_and_run_stages(self):
        # Each stage consumes earlier responses; the order below must not change.
        # stage0: reference RTL implementation (also classifies the circuit)
        self.stage0 = Stage0(self.prob_data, **self.gptkwargs)
        self.stage_operation(self.stage0)
        # stage1: testbench specification (legacy implementation)
        self.stage1 = script_RTLchecker0306.Stage1(self.prob_data, **self.gptkwargs)
        self.stage_operation(self.stage1)
        # stage2: test scenarios (legacy implementation)
        self.stage2 = script_RTLchecker0306.Stage2(self.prob_data, self.stage1.response, **self.gptkwargs)
        self.stage_operation(self.stage2)
        # stage3: ideal-DUT rules in python
        self.stage3 = Stage3(self.prob_data, self.stage1.response, self.stage2.response, **self.gptkwargs)
        self.stage_operation(self.stage3)
        # stage4: SEQ-flavored signal-exporting testbench
        self.stage4 = Stage4_SEQ(self.prob_data, self.stage1.response, self.stage2.response, **self.gptkwargs)
        self.stage_operation(self.stage4)
        # stagechecklist: verify every stage2 scenario appears in the TB
        self.stagecheck = StageChecklist(self.TB_code, self.stage2.response, self.max_check_iter, **self.gptkwargs)
        self.stage_operation(self.stagecheck)
        # stage4b: SEQ-only post-pass over the (possibly repaired) testbench
        self.stage4b = Stage4b_SEQ(self.prob_data, self.TB_code, **self.gptkwargs)
        self.stage_operation(self.stage4b)
        # stage5: python checker built from the spec and stage3 rules
        self.stage5 = Stage5_SEQ(self.prob_data, self.stage1.response, self.stage3.response, **self.gptkwargs)
        self.stage_operation(self.stage5)


    def make_and_run_reboot_stages(self, debug_dir):
        # Debug-reboot path: re-run only the testbench side ("TB") or only the
        # python-checker side ("PY"), reusing earlier responses.
        if self.reboot_mode == "TB":
            # stage4
            self.stage4 = Stage4_SEQ(self.prob_data, self.stage1.response, self.stage2.response, **self.gptkwargs)
            self.stage_operation(self.stage4, debug_dir, reboot_en=True)
            # stagechecklist
            self.stagecheck = StageChecklist(self.TB_code, self.stage2.response, self.max_check_iter, **self.gptkwargs)
            self.stage_operation(self.stagecheck, debug_dir, reboot_en=True)
            # stage4b
            self.stage4b = Stage4b_SEQ(self.prob_data, self.TB_code, **self.gptkwargs)
            self.stage_operation(self.stage4b, debug_dir, reboot_en=True)
        elif self.reboot_mode == "PY":
            # stage5
            self.stage5 = Stage5_SEQ(self.prob_data, self.stage1.response, self.stage3.response, **self.gptkwargs)
            self.stage_operation(self.stage5, debug_dir, reboot_en=True)
        else:
            raise ValueError("invalid reboot_mode in WF_pychecker script (circuit type: SEQ)")
||||
|
||||
class Stage0(BaseScriptStage):
    """Stage 0 (SEQ): ask the LLM for a reference RTL implementation of the
    problem, used to classify the circuit (``circuit_type``)."""

    def __init__(self, prob_data, **gptkwargs) -> None:
        super().__init__("stage_0", **gptkwargs)
        self.prob_data = prob_data
        # set by postprocessing via utils.circuit_type_by_code; None until then
        self.circuit_type = None

    def make_prompt(self):
        """Build the reference-RTL prompt from the problem description and header."""
        # reset the buffer first, like every other stage's make_prompt does
        # (this stage previously only appended, relying on the buffer being empty)
        self.prompt = ""
        self.add_prompt_line("Please generate the verilog RTL code according to the following description and header information:")
        self.add_prompt_line("problem description:")
        self.add_prompt_line(self.prob_data["description"])
        self.add_prompt_line("RTL header:")
        self.add_prompt_line(self.prob_data["header"])
        self.add_prompt_line("please only reply verilog codes, no other words.")

    def postprocessing(self):
        """Extract the generated RTL and classify the circuit type."""
        codes = self.extract_code(self.response, "verilog")
        if codes:
            self.response = codes[-1]
        # else: no fenced block — classify the raw response instead of crashing
        self.circuit_type = utils.circuit_type_by_code(self.response)
|
||||
# Placeholders spliced into prompt templates via str.replace(); the C-comment
# spelling makes accidental collisions with real prompt text unlikely.
SIGNALTEMP_PLACEHOLDER_1 = "/* SIGNAL TEMPLATE 1 */"
SIGNALTEMP_PLACEHOLDER_1A = "/* SIGNAL TEMPLATE 1A */"
SIGNALTEMP_PLACEHOLDER_1B = "/* SIGNAL TEMPLATE 1B */"

# NOTE(review): this duplicates STAGE3_TXT1 in legacy/script_RTLchecker0306.py
# line for line — keep the two in sync (or import the legacy constant).
STAGE3_TXT1="""1. Your task is to write a verilog testbench for an verilog RTL module code (we call it as "DUT", device under test). The information we have is the problem description that guides student to write the RTL code (DUT) and the header of the "DUT". Our target is to generate the verilog testbench for the DUT. This testbench can check if the DUT in verilog satisfies all technical requirements of the problem description.
2. you are in stage 3; in this stage, please give me the core rules of an ideal DUT. you should give these rules in python. (For convenience, you can use binary or hexadecimal format in python, i.e. 0b0010 and 0x1a). Later we will use these ideal rules to generate expected values in each test scenario. currently you must only generate the core part of the rules. the input of these rules should be related to the test vectors from test scenario. the rule should give the expected values under test vectors. You don't need to consider the control signals like clk or reset, unless the core rules of this task are about these signals. You can use numpy, scipy or other third party python libraries to help you write the rules. Please import them if you need.
3. your information is:"""
|
||||
class Stage3(script_RTLchecker0306.Stage3):
    """Stage 3 (SEQ): ask the LLM for the core rules of an ideal DUT as python.

    This class previously duplicated ``script_RTLchecker0306.Stage3`` line for
    line (same __init__, make_prompt, and postprocessing, with an identical
    copy of STAGE3_TXT1). It now inherits from the legacy implementation —
    mirroring how script_pychecker_CMB_new.py defines its Stage3 — so the
    prompt text and postprocessing live in a single place.
    """
||||
|
||||
def header_to_SignalTxt_template(header:str, template_scenario_idx:str="1", signal_value:str="0", check_en:bool=False):
    """Build an example signal-dump line from a DUT header.

    - header: the DUT module header, e.g. "module DUT(input a, b, output d);"
    - template_scenario_idx: the scenario index shown in the template
    - signal_value: the value every signal shows in the template
    - check_en: when True, prefix the line with "[check]" — the SEQ workflow's
      marker for lines where the python checker must compare DUT outputs
      (matching the call style ``header_to_SignalTxt_template(header, check_en=True)``)
    - returns e.g. "scenario: 1, a = 0, b = 0, d = 0"

    (The docstring previously documented an ``only`` parameter that never
    existed; it has been removed.)
    """
    # text between the parentheses, split on commas: ["input a", " b", ...]
    signals = header.split("(")[1].split(")")[0].split(",")
    # the signal name is the last whitespace-separated token of each entry,
    # which drops "input"/"output" (and width/keyword prefixes)
    signals = [signal.strip().split(" ")[-1] for signal in signals]
    signal_out = "scenario: " + template_scenario_idx
    for signal in signals:
        signal_out += f", {signal} = {signal_value}"
    if check_en:
        signal_out = "[check]" + signal_out
    return signal_out
||||
|
||||
# Stage-4 prompt preamble (SEQ): completion task over a given TB template.
# Fix: "infomation" typo corrected to "information".
STAGE4_SEQ_TXT1 = """
1. Your task is to complete a given verilog testbench code. This testbench is for a verilog RTL module code (we call it as "DUT", device under test). This circuit is a sequential circuit. The information we have is
- 1.1. the problem description that guides student to write the RTL code (DUT) and the header of the "DUT".
- 1.2. the module header.
- 1.3. test scenarios which determines values and sequential information of test vectors
- 1.4. the testbench structure
- 1.5. the instruction of writing our testbench
"""
||||
|
||||
# STAGE4_SEQ_INSTR = """
|
||||
# The design Instruction is:
|
||||
# you should display the signals every clock cycle (#10). When it is time to check the output value of DUT, add [check] at the beginning of the output line
|
||||
# There is a example code (partial) for a DFF circuit:
|
||||
# exmaple (1):
|
||||
# ```
|
||||
# // the input of DFF is "d", the output of DFF is "q", the clock signal is "clk"
|
||||
# // scenario 1: test the function of DUT:
|
||||
# scenario = 1;
|
||||
# d = 1; $fdisplay(file, "scenario: 1, clk = %%d, d = %%d, q = %%d", clk, d, q); // set the input signal, display
|
||||
# #10;
|
||||
# $fdisplay(file, "[check]scenario: 1, clk = %%d, d = %%d, q = %%d", clk, d, q); // check the output signal, display
|
||||
# #10;
|
||||
# // scenario 2
|
||||
# scenario = 2;
|
||||
# d = 0; $fdisplay(file, "scenario: 2, clk = %%d, d = %%d, q = %%d", clk, d, q);
|
||||
# #10;
|
||||
# $fdisplay(file, "[check]scenario: 2, clk = %%d, d = %%d, q = %%d", clk, d, q);
|
||||
# #10;
|
||||
# ...
|
||||
# ```
|
||||
# example (2):
|
||||
# for a scenario that needs multiple clock cycles before checking, the example code is like this:
|
||||
# ```
|
||||
# // scenario 3: multiple clock cycles before checking
|
||||
# scenario = 3
|
||||
# d = 1; $fdisplay(file, "scenario: 3, clk = %%d, d = %%d, q = %%d", clk, d, q);
|
||||
# #10;
|
||||
# repeat(5) begin
|
||||
# $fdisplay(file, "scenario: 3, clk = %%d, d = %%d, q = %%d", clk, d, q);
|
||||
# #10;
|
||||
# end
|
||||
# $fdisplay(file, "[check]scenario: 3, clk = %%d, d = %%d, q = %%d", clk, d, q);
|
||||
# #10;
|
||||
# ```
|
||||
# for a scenario that has many checking time points, the example code is like this:
|
||||
# ```
|
||||
# // scenario 4: multi checking points
|
||||
# scenario = 4;
|
||||
# d = 1; $fdisplay(file, "scenario: 4, clk = %%d, d = %%d, q = %%d", clk, d, q);
|
||||
# #10;
|
||||
# repeat(5) begin
|
||||
# $fdisplay(file, "[check]scenario: 4, clk = %%d, d = %%d, q = %%d", clk, d, q);
|
||||
# #10;
|
||||
# end
|
||||
# ```
|
||||
# (3) the signals you save is the input and output of DUT, you should determine the signals according to DUT's header
|
||||
# """ # not used currently
|
||||
|
||||
|
||||
# STAGE4_SEQ_TXT2 = r"""
|
||||
# The testbench does not need to check the DUT's output but only export the signals of DUT. Please determine the input signal's exact values according to given test scenarios. please only complement the last initial code part. your code should begin from the "initial begin..." part to "end". You must use %d when exporting values.
|
||||
# """
|
||||
# Stage-4 prompt tail (SEQ): both %s slots receive the placeholder (later
# replaced with the concrete signal-dump line); %%d renders as a literal %d.
# Fix: "exmaple" typo corrected to "example".
STAGE4_SEQ_TXT2 = """
The testbench does not need to check the DUT's output but only export the signals of DUT. Please export the signals of DUT to a file named "TBout.txt" at the end of each scenario. The template is given below:
%s
The variables are already declared. The clock signal is already prepared. This output will be used to check the correctness of the DUT's output later.
please only use "#10" as the delay when you need. If you need longer delay, you can use multiple "#10", such as "#10; #10; #10;". Avoid meaningless long delay in your code.
If you need a loop in a scenario to check multiple time points, use "repeat" loop. for example:
```
// scenario x
scenario = x;
signal_1 = 1;
repeat(5) begin
%s
#10;
end
```
Please determine the input signal's exact values according to given test scenarios.
Note: please complete the last initial code part (marked in the given testbench template). You should give me the completed full code. The testbench template above is to help you generate the code. You must use %%d when exporting values.
please generate the full testbench code. please only reply verilog codes, no other words.
"""%(SIGNALTEMP_PLACEHOLDER_1, SIGNALTEMP_PLACEHOLDER_1)
||||
class Stage4_SEQ(BaseScriptStage):
    """stage 4 (SEQ): generate the testbench that export the signals of DUT to a file

    Builds the prompt from the DUT header, the problem description, the test
    scenarios produced in stage 2 and a testbench skeleton (given_TB); the LLM
    reply is post-processed into ``self.TB_code_out``.
    """
    def __init__(self, prob_data, response_stage1, response_stage2, **gptkwargs) -> None:
        super().__init__("stage_4", **gptkwargs)
        self.prob_data = prob_data
        self.response_stage1 = response_stage1
        self.response_stage2 = response_stage2
        # $fdisplay template carrying the "[check]" tag; substituted into the
        # prompt text in place of SIGNALTEMP_PLACEHOLDER_1 below.
        signals_output_template = self.header_to_SignalTxt_template(prob_data["header"], check_en=True)
        # self.txt_instruction = STAGE4_SEQ_INSTR.replace(SIGNALTEMP_PLACEHOLDER_1, signals_input_template).replace(SIGNALTEMP_PLACEHOLDER_1A, signals_output_template)
        self.txt1 = STAGE4_SEQ_TXT1
        self.txt2 = STAGE4_SEQ_TXT2.replace(SIGNALTEMP_PLACEHOLDER_1, signals_output_template)
        # testbench skeleton object built from the DUT header
        self.TB_code_object = given_TB(prob_data["header"])
        # signal_template_scenario = signals_input_template + "\n" + signals_input_template + "\n" + signals_input_template + "\n" + signals_output_template

    def make_prompt(self):
        # Assemble the stage-4 prompt in a fixed order: instruction text, DUT
        # header, problem description, stage-2 scenarios, skeleton, closing rules.
        self.prompt = ""
        self.add_prompt_line(self.txt1)
        # DUT header
        self.add_prompt_line("DUT header:")
        self.add_prompt_line(self.prob_data["header"])
        # other information:
        self.add_prompt_line("Your other information:")
        # problem description
        self.add_prompt_line("RTL circuit problem description:")
        self.add_prompt_line(self.prob_data["description"])
        # scenarios
        self.add_prompt_line("IMPORTANT - test scenario (Please determine the values of input signals according to these test scenarios.):")
        self.add_prompt_line(self.response_stage2)
        # given codes
        self.add_prompt_line("below is the given testbench codes:")
        self.add_prompt_line(self.TB_code_object.gen_template())
        # end
        self.add_prompt_line(self.txt2)

    def postprocessing(self):
        # verilog codes: keep only the last verilog code block of the LLM reply
        self.response = self.extract_code(self.response, "verilog")[-1]
        # self.TB_code_object.TB_code_test = self.response
        # self.TB_code_out = self.TB_code_object.gen_template()
        self.TB_code_out = self.response
        # patch known LLM verilog quirks (e.g. "}" written instead of "end")
        self.TB_code_out = utils.verilog_patch(self.TB_code_out)

    @staticmethod
    def header_to_SignalTxt_template(header:str, check_en = False):
        """
        - header: the header of DUT
        - check_en: when True, prefix the exported line with "[check]" so the
          downstream python checker treats this row as a checking point
        - from header to signals in txt
        - for the automatic generation of signals in testbench
        - target: given the DUT header, generate the signal output template
        - eg: if we have a DUT header like "module DUT(input clk, load, data, output q);", the signal output template should be like "$fdisplay(file, "scenario: %d, clk = %d, load = %d, data = %d, q = %d", scenario, clk, load, data, q);"
        """
        signals = utils.extract_signals(header)
        # generate ", clk = %d, load = %d, data = %d, q = %d"
        signal_form1 = ""
        signal_form2 = ""
        for signal in signals:
            signal_form1 += f", {signal['name']} = %d"
            signal_form2 += f", {signal['name']}"
        if check_en:
            txt = r'$fdisplay(file, "[check]scenario: %d' + signal_form1 + r'", scenario' + signal_form2 + r');'
        else:
            txt = r'$fdisplay(file, "scenario: %d' + signal_form1 + r'", scenario' + signal_form2 + r');'
        return txt
|
||||
|
||||
|
||||
|
||||
# Stage 4b (SEQ) prompt texts: ask the LLM to insert non-checking $fdisplay
# exports after every input change. The %s slot is SIGNALTEMP_PLACEHOLDER_1,
# replaced per-problem in Stage4b_SEQ.__init__; %%d renders as a literal "%d".
Stage4b_SEQ_TXT1 = """given the scenario based verilog testbench code below:"""
Stage4b_SEQ_TXT2 = """
please help me to export the input of DUT module by using code below:

[IMPORTANT]:
%s

you should insert the code above into scenario checking part. In each scenario, you should insert the code above after the input of DUT module changed. Don't delete the existing $display codes.

For example, for a circuit that has two input signals changed at different times in one scenario, the original code is like this:
- original code:
// scenario 1 begins
scenario = 1;
signal_1 = 1;
// insert $fdisplay here
#10;
signal_2 = 1;
// insert $fdisplay here
#10;
$fdisplay(file, "[check]scenario: %%d, signal_1 = %%d, signal_2 = %%d", scenario, signal_1, signal_2); // this should be reserved. Never change the existing codes.
#10;
// scenario 1 ends

- after insertion:
// scenario 1 begins
scenario = 1;
signal_1 = 1;
$fdisplay(file, "scenario: %%d, signal_1 = %%d, signal_2 = %%d", scenario, signal_1, signal_2);
#10;
signal_2 = 1;
$fdisplay(file, "scenario: %%d, signal_1 = %%d, signal_2 = %%d", scenario, signal_1, signal_2);
#10;
$fdisplay(file, "[check]scenario: %%d, signal_1 = %%d, signal_2 = %%d", scenario, signal_1, signal_2);
#10;
// scenario 1 ends

please insert codes according to the rules above. DO NOT modify other codes! please reply the modified full codes. please only reply verilog codes, no other words."""%(SIGNALTEMP_PLACEHOLDER_1)
|
||||
class Stage4b_SEQ(BaseScriptStage):
    """stage 4b (SEQ): insert untagged $fdisplay exports after every DUT input
    change in the stage-4 testbench, then standardize the resulting code."""
    def __init__(self, prob_data, TB_code, **gptkwargs) -> None:
        super().__init__("stage_4b", **gptkwargs)
        self.header = prob_data["header"]
        # $fdisplay template WITHOUT "[check]": these rows only log input changes.
        signals_input_template = Stage4_SEQ.header_to_SignalTxt_template(prob_data["header"], check_en=False)
        self.TB_code = TB_code
        self.txt1 = Stage4b_SEQ_TXT1
        self.txt2 = Stage4b_SEQ_TXT2.replace(SIGNALTEMP_PLACEHOLDER_1, signals_input_template)
        # default output is the unmodified input testbench (until postprocessing)
        self.TB_code_out = self.TB_code

    def make_prompt(self):
        # instruction, current testbench, insertion rules — in that order
        self.prompt = ""
        self.add_prompt_line(self.txt1)
        self.add_prompt_line(self.TB_code)
        self.add_prompt_line(self.txt2)

    def postprocessing(self):
        # keep only the last verilog block, then normalize delays/$fdisplay layout
        self.TB_code_out = self.extract_code(self.response, "verilog")[-1]
        self.TB_code_out = utils.pychecker_SEQ_TB_standardization(self.TB_code_out, self.header)
|
||||
|
||||
|
||||
# Stage 5 (SEQ) prompt texts and fixed driver code. TXT1/TXT2 instruct the LLM
# to write a "GoldenDUT" reference-model class; the %s slot in TXT1 is
# SIGNALTEMP_PLACEHOLDER_1, replaced in Stage5_SEQ.__init__ with an example
# signal-vector dict for the concrete DUT header.
STAGE5_SEQ_TXT1 = """
1. background: Your task is to verify the functional correctness of a verilog RTL module code (we call it as "DUT", device under test). This module is a sequential circuit. Our plan is to first export the signals (input and output) of the DUT under test scenarios. Then, we will use a python script to check the correctness of DUT.
2. You are in stage 5. In this stage, we already exported the signals of DUT. The signals are like below: (the signal names are real, but the values are just for example, clock signals are not included, each vector represents a new clock cycle)
%s
Here's the explanation of some special signals in signal vectors:
- "scenario": The "scenario" is not DUT's signal but to tell you the current scenario index.
- "check_en": The "check_en" signal is not from the DUT. "Check_en" is a bool value to tell you this is the time to check the output of DUT. It is related to the class method "check" (we will explain it later). After checking the output, a new scenario will start.
3. Your current task is: write a python class "GoldenDUT". This python class can represent the golden DUT (the ideal one). In your "GoldenDUT", you should do the following things:
- 3.1. write a method "def __init__(self)". Set the inner states/values of the golden DUT. These values have suffix "_reg". The initial value of these inner values is "x", but later will be digits. The "__init__" method has no input parameters except "self".
- 3.2. write a method "def load(self, signal_vector)". This method is to load the important input signals and the inner values of "GoldenDUT" shall change according to the input signals. There is no clock signal in the input signal vector, every time the "load" method is called, it means a new clock cycle. The initial values "x" should be changed according to the input signals. This method has no return value.
- 3.3. write a method "def check(self, signal_vector)". This method is to determine the expected output values and compare them with output signals from DUT. It should return True or False only. If return false, please print the error message. Hint: you can use code like "print(f"Scenario: {signal_vector['scenario']}, expected: a={a_reg}, observed a={a_observed}")" to print, suppose "a" is the output signal's name.
- 3.4. write other methods you need, they can be called by "load" or "check".
- 3.5. the input of "load" and "check" is the signal vector. The signal vector is a dictionary, the key is the signal name, the value is the signal value.
4. Other information:
- You can use binary (like 0x1101), hexadecimal (like 0x1a) or normal number format in python.
- if the bit width of one variable is limited, use bit mask to assure the correctness of the value.
- you can import numpy, math, scipy or other python libraries to help you write the python class.
5. You have the information below to help you check the correctness of DUT:
"""%(SIGNALTEMP_PLACEHOLDER_1)

# Closing text that repeats the task specification (prompt emphasis only).
STAGE5_SEQ_TXT2 = """
[IMPORTANT]
I will repeat the important information:
3. Your current task is: write a python class "GoldenDUT". This python class can represent the golden DUT (the ideal one). In your "GoldenDUT", you should do the following things:
- 3.1. write a method "def __init__(self)". Set the inner states/values of the golden DUT. These values have suffix "_reg". The initial value of these inner values should be digits. You can set the initial values according to information or just "0"s. The "__init__" method has no input parameters except "self".
- 3.2. write a method "def load(self, signal_vector)". This method is to load the important input signals and the inner values of "GoldenDUT" shall change according to the input signals. There is no clock signal in the input signal vector, every time the "load" method is called, it means a new clock cycle. The initial values "x" should be changed according to the input signals. This method has no return value.
- 3.3. write a method "def check(self, signal_vector)". This method is to determine the expected output values and compare them with output signals from DUT. It should return True or False only. If return false, please print the error message. Hint: you can use code like "print(f"Scenario: {signal_vector['scenario']}, expected: a={a_reg}, observed a={a_observed}")" to print, suppose "a" is the output signal's name.
- 3.4. write other methods you need, they can be called by "load" or "check".
- 3.5. the input of "load" and "check" is the signal vector. The signal vector is a dictionary, the key is the signal name, the value is the signal value.
4. Other information:
- You can use binary (like 0x1101), hexadecimal (like 0x1a) or normal number format in python.
- if the bit width of one variable is limited, use bit mask to assure the correctness of the value.
- you can import numpy, math, scipy or other python libraries to help you write the python class.

please only reply the python codes of the python class. no other words.
"""

# Fixed python driver appended verbatim after the LLM-generated GoldenDUT:
# replays every vector through load(), calls check() on check_en rows, and
# collects failing scenario ids.
STAGE5_SEQ_CODE1 = """
def check_dut(vectors_in):
    golden_dut = GoldenDUT()
    failed_scenarios = []
    for vector in vectors_in:
        if vector["check_en"]:
            check_pass = golden_dut.check(vector)
            if check_pass:
                print(f"Passed; vector: {vector}")
            else:
                print(f"Failed; vector: {vector}")
                failed_scenarios.append(vector["scenario"])
        golden_dut.load(vector)
    return failed_scenarios
"""

# Fixed parser appended after CODE1: turns each "scenario: ..." / "[check]..."
# line of TBout.txt into a signal dict ("x"/"z" values are coerced to 0),
# then runs check_dut on the parsed vectors. Note "\\n" below is escaped so the
# GENERATED script splits on newlines.
STAGE5_SEQ_CODE2 = """
def SignalTxt_to_dictlist(txt:str):
    signals = []
    lines = txt.strip().split("\\n")
    for line in lines:
        signal = {}
        if line.startswith("[check]"):
            signal["check_en"] = True
            line = line[7:]
        elif line.startswith("scenario"):
            signal["check_en"] = False
        else:
            continue
        line = line.strip().split(", ")
        for item in line:
            if "scenario" in item:
                item = item.split(": ")
                signal["scenario"] = item[1].replace(" ", "")
            else:
                item = item.split(" = ")
                key = item[0]
                value = item[1]
                if ("x" not in value) and ("X" not in value) and ("z" not in value):
                    signal[key] = int(value)
                else:
                    if ("x" in value) or ("X" in value):
                        signal[key] = 0 # used to be "x"
                    else:
                        signal[key] = 0 # used to be "z"
        signals.append(signal)
    return signals
with open("TBout.txt", "r") as f:
    txt = f.read()
vectors_in = SignalTxt_to_dictlist(txt)
tb_pass = check_dut(vectors_in)
print(tb_pass)
"""
|
||||
class Stage5_SEQ(BaseScriptStage):
    """stage 5 (SEQ): generate the pychecker that receive the signals from testbench and check the correctness of DUT"""
    def __init__(self, prob_data, response_stage1, response_stage3, **gptkwargs) -> None:
        super().__init__("stage_5", **gptkwargs)
        self.prob_data = prob_data
        self.response_stage1 = response_stage1 # currently not used
        self.response_stage3 = response_stage3
        # Show the LLM an example signal-vector dict for this DUT (clock excluded).
        self.txt1 = STAGE5_SEQ_TXT1.replace(SIGNALTEMP_PLACEHOLDER_1, utils.signal_dictlist_template(prob_data["header"], exclude_clk=True))
        self.txt2 = STAGE5_SEQ_TXT2
        # Fixed driver + parser code appended after the generated GoldenDUT class.
        self.code_tail = STAGE5_SEQ_CODE1 + STAGE5_SEQ_CODE2

    def make_prompt(self):
        self.prompt = ""
        # introduction
        self.add_prompt_line(self.txt1)
        # problem description
        self.add_prompt_line("DUT circuit problem description:")
        self.add_prompt_line(self.prob_data["description"])
        # DUT header
        self.add_prompt_line("The header of DUT (note the input and output signals):")
        self.add_prompt_line(self.prob_data["header"])
        # python rules (generated in stage 3, reference material only)
        self.add_prompt_line("Here is the basic rules in python for the module. It was generated in previous stage. You can use it as a reference, but you should write your own python script. This is just for your better understanding. You can use them or not in your python class")
        self.add_prompt_line(self.response_stage3)
        # end
        self.add_prompt_line(self.txt2)

    def postprocessing(self):
        # python codes: last python block of the reply is the GoldenDUT class
        self.response = self.extract_code(self.response, "python")[-1]
        # GoldenDUT + fixed check_dut/parsing driver = a runnable checker script
        self.Pychecker_code_out = self.response + self.code_tail
|
||||
|
||||
|
||||
class StageChecklist(BaseScriptStage):
    """Iteratively verify that the generated testbench covers every scenario of
    the checklist, asking the LLM to patch in missing ones (up to max_iter)."""
    def __init__(self, TB_code:str, checklist_str:str, max_iter:int, **gptkwargs) -> None:
        super().__init__("stage_checklist", **gptkwargs)
        self.checklist = checklist_str # {"scenario 1": "xxx", "scenario 2": "xxx", ...}
        self.checklist_dict = json.loads(checklist_str)
        # checklist keys whose "scenario = N" assignment is absent from the TB
        self.missing_scenarios = []
        self.max_iter = max_iter
        self.TB_code_out = TB_code
        # loop-control flags for run()
        self.exit = False
        self.iter = 0
        self.TB_modified = False

    def make_prompt(self):
        self.prompt = ""
        self.add_prompt_line("please check the if the testbench code contains all the items in the checklist:")
        self.add_prompt_line("testbench code here...\n")
        self.add_prompt_line(self.TB_code_out + "\n")
        self.add_prompt_line("please check the if the testbench code above contains all the scenarios in the checklist:")
        self.add_prompt_line(self.checklist)
        self.add_prompt_line("please reply 'YES' if all the items are included. If some of the items are missed in testbench, please add the missing items and reply the modified testbench code (full code).")
        self.add_prompt_line("HINT: the missing scenarios may be: " + str(self.missing_scenarios))
        self.add_prompt_line("VERY IMPORTANT: please ONLY reply 'YES' or the full code modified. NEVER remove other irrelevant codes!!!")

    def postprocessing(self):
        self.iter += 1
        # "YES" anywhere in the reply means the checklist is satisfied
        if "YES" in self.response:
            self.exit = True
        else:
            self.TB_modified = True
            self.TB_code_out = self.extract_code(self.response, "verilog")[-1]

    def pre_check(self):
        """this function is called at the beginning of run() so that the stage can be skipped if needed"""
        self.missing_scenarios = []
        for key in self.checklist_dict.keys():
            # checklist key "scenario 1" maps to the TB assignment "scenario = 1"
            if key.replace(" ", " = ") not in self.TB_code_out:
                self.missing_scenarios.append(key)

    def run(self):
        self.TB_modified = False
        # repeat until all scenarios appear in the TB or max_iter is reached
        while (not self.exit) and (self.iter < self.max_iter):
            self.pre_check()
            if self.missing_scenarios == []:
                self.exit = True
                self.conversation_message += "\n[SYSTEM PRECHECK] All scenarios are included in the testbench code. You can continue to the next stage."
            else:
                self.make_prompt()
                self.call_gpt()
                self.postprocessing()
|
||||
803
prompt_scripts/utils.py
Normal file
803
prompt_scripts/utils.py
Normal file
@@ -0,0 +1,803 @@
|
||||
"""
|
||||
Description : some tool functions for prompt scripts and their stages
|
||||
Author : Ruidi Qiu (r.qiu@tum.de)
|
||||
Time : 2024/4/25 13:26:06
|
||||
LastEdited : 2024/9/3 20:55:11
|
||||
"""
|
||||
|
||||
import math
|
||||
from utils.utils import run_with_timeout
|
||||
####################################
|
||||
# used by pychecker_SEQ
|
||||
def extract_signals(header):
    """
    Parse a module header into a list of signal descriptors.

    - input: the header of a module, e.g. "module top(input clk, output reg [3:0] q);"
    - output format: [{"name": "signal_name", "width": "[x:x]", "type": "input/output"}, ...]
      ("width" is "" when the declaration carries no [x:x] part; "type" is the
      first token of the declaration, so a bare continuation name like "load"
      in "input clk, load" reports itself as its own type)
    """
    def _width_of(tokens):
        # A width token such as "[3:0]" sits right before the name when present.
        if len(tokens) > 2:
            candidate = tokens[-2]
            if "[" in candidate and "]" in candidate:
                inner = candidate.split("[")[1].split("]")[0]
                return "[" + inner + "]"
        return ""

    inside_parens = header.split("(")[1].split(")")[0]
    result = []
    for declaration in inside_parens.split(","):
        tokens = declaration.strip().split(" ")
        result.append({"name": tokens[-1], "width": _width_of(tokens), "type": tokens[0]})
    return result
|
||||
|
||||
def fdisplay_code_gen(header, ckeck_en=True):
    """
    Build the canonical $fdisplay export line for a DUT header.

    - input: head, like:
        module top_module(
        input clk,
        input reset,
        output reg [3:0] q);
    - return:
        - no check: $fdisplay(file, "scenario: %d, clk = %d, reset = %d, q = %d", scenario, clk, reset, q);
        - check: $fdisplay(file, "[check]scenario: %d, clk = %d, reset = %d, q = %d", scenario, clk, reset, q);
    (the parameter name "ckeck_en" is a historic typo kept so existing keyword
    callers keep working)
    """
    names = [signal["name"] for signal in extract_signals(header)]
    tag = "[check]" if ckeck_en else ""
    # format half of the sentence, closed by the string's final double quote
    fmt_part = tag + "scenario: %d" + "".join(", %s = %%d" % name for name in names) + '"'
    # argument half: scenario index first, then every DUT signal
    args_part = ", scenario" + "".join(", %s" % name for name in names)
    return '$fdisplay(file, "' + fmt_part + args_part + ");"
|
||||
|
||||
@run_with_timeout(timeout=30)
def pychecker_SEQ_TB_standardization(code, header):
    """
    Normalize an LLM-generated SEQ testbench so the python checker can parse it.

    Passes, in order:
    1. patch the weird bug of gpt generated verilog code
    2. add $fdisplay in the repeat block if not exist
    3. split long delays into multiple #10
    4. rewrite all $fdisplay sentences in the standard format
    5. keep a #10 between consecutive $fdisplay lines / scenario starts
    6. rebalance repeat blocks (repeat-#10-$fdisplay-#10 -> #10-repeat-$fdisplay-#10)
    7. add a $fdisplay between two #10 delays that have none
    """
    transforms = (
        verilog_patch,
        lambda c: add_fdisplay_to_repeat(c, header),
        split_delay_to_delays,
        lambda c: find_and_rewrite_fdisplay(c, header),
        add_delay_into_2displays_or_scenarios,
        refine_repeat_fdisplay,
        lambda c: add_display_into_2delays(c, header),
    )
    for transform in transforms:
        code = transform(code)
    return code
|
||||
|
||||
@run_with_timeout(timeout=30)
def pychecker_CMB_TB_standardization(code, header):
    """
    CMB variant of the testbench standardization: combinational circuits have
    no timing to repair (unlike pychecker_SEQ_TB_standardization), so only the
    verilog patch and the $fdisplay rewrite are applied.
    """
    return find_and_rewrite_fdisplay(verilog_patch(code), header)
|
||||
|
||||
def find_and_rewrite_fdisplay(code:str, header:str):
    """
    Find every $fdisplay sentence in *code* and rewrite it in the standard
    format derived from *header* (see fdisplay_code_gen). A sentence containing
    "[check]" keeps the [check] tag, any other sentence gets the plain form.

    Fix vs. the previous version: removed the dead ``current_location`` offset
    (it was never advanced, so ``find`` silently depended on it staying 0) and
    the unused ``start``/``end`` pre-initializations; behavior is unchanged.
    """
    fdisplay_check = fdisplay_code_gen(header, ckeck_en=True)
    fdisplay_nocheck = fdisplay_code_gen(header, ckeck_en=False)
    code_processed = ""
    code_todo = code
    while True:
        start = code_todo.find("$fdisplay")
        if start == -1:
            break
        # index of the ';' that terminates the sentence's ");"
        end = code_todo[start:].find(");") + start + 1
        display_sentence = code_todo[start:end+1]
        code_processed += code_todo[:start]
        code_todo = code_todo[end+1:]
        # preserve the original sentence's [check] tagging
        if "[check]" in display_sentence:
            code_processed += fdisplay_check
        else:
            code_processed += fdisplay_nocheck
    return code_processed + code_todo
|
||||
|
||||
def add_fdisplay_to_repeat(code:str, header:str):
    """Ensure every ``repeat`` loop in *code* contains a $fdisplay export.

    For each repeat loop that has no $fdisplay inside, one is generated from
    *header* and inserted right before the loop's first delay. The inserted
    line is tagged "[check]" when no "[check]" display occurs between this
    repeat and the next $fdisplay in the code, otherwise untagged. Single-line
    repeats are wrapped in begin/end first.
    """
    code_done = ""
    code_todo = code
    while True:
        repeat_start = code_todo.find("repeat")
        if repeat_start == -1:
            break
        # check if no display until the next scenario
        # (despite the name, this is the position of the next $fdisplay;
        #  -1 is fine because the slice below then covers everything up to
        #  the second-to-last character)
        next_scenario = code_todo[repeat_start:].find("$fdisplay") # it is ok even if it is -1
        if "[check]" not in code_todo[repeat_start:repeat_start+next_scenario]:
            fdisplay_code = fdisplay_code_gen(header, ckeck_en=True) + " "
        else:
            fdisplay_code = fdisplay_code_gen(header, ckeck_en=False) + " "
        # check if this repeat is single-line or multi-line
        new_line = min_no_minusone(code_todo[repeat_start:].find("\n"), code_todo[repeat_start:].find("//"))
        if "begin" not in code_todo[repeat_start:repeat_start+new_line]:
            # single-line repeat, add begin end
            repeat_end = new_line + repeat_start
            after_repeat = code_todo[repeat_start:repeat_start+new_line].find(")") + 2 + repeat_start
            repeat_block = code_todo[repeat_start:after_repeat] + "begin " + code_todo[after_repeat:repeat_end] + " end"
        else:
            # NOTE(review): find("end") also matches the first "end" of longer
            # words (e.g. "endmodule") if it precedes the block's own "end" —
            # confirm inputs never hit that case.
            repeat_end = code_todo[repeat_start:].find("end") + repeat_start
            repeat_block = code_todo[repeat_start:repeat_end]
        # check if there is a $fdisplay in the repeat block
        if "$fdisplay" not in repeat_block:
            # no fdisplay, add one in front of the first delay
            delay_start = repeat_block.find("#")
            # add the fdisplay before the first delay
            code_done += code_todo[:repeat_start] + repeat_block[:delay_start] + fdisplay_code + repeat_block[delay_start:]
        else:
            code_done += code_todo[:repeat_start] + repeat_block
        code_todo = code_todo[repeat_end:]
    code_done += code_todo
    return code_done
|
||||
|
||||
# def add_delay_into_2displays(code):
|
||||
# """
|
||||
# - is there are two $display and there is no delay between them, add #10 at the front of the second $display
|
||||
# - two kinds of action: insert #10 into two displays; insert #10 into one display and
|
||||
# """
|
||||
# code_todo = code
|
||||
# code_done = ""
|
||||
# while True:
|
||||
# if "$fdisplay" in code_todo:
|
||||
# # find the next $fdisplay
|
||||
# start_first = code_todo.find("$fdisplay")
|
||||
# end_first = code_todo[start_first:].find(");") + start_first + 2
|
||||
# start_second = code_todo[end_first:].find("$fdisplay") + end_first
|
||||
# if start_second == -1:
|
||||
# break
|
||||
# # check if there is a delay between them
|
||||
# subcode = code_todo[end_first:start_second]
|
||||
# delay_exist = ("#" in subcode)
|
||||
# if (not delay_exist):
|
||||
# code_done += code_todo[:end_first] + " #10; "
|
||||
# else:
|
||||
# code_done += code_todo[:end_first]
|
||||
# code_todo = code_todo[end_first:]
|
||||
# else:
|
||||
# code_done += code_todo
|
||||
# break
|
||||
# return code_done
|
||||
|
||||
def add_delay_into_2displays_or_scenarios(code):
    """
    If two consecutive $fdisplay sentences have no delay between them, insert
    " #10; " after the first one.
    - three cases:
        - two displays: if no delay, insert
        - display then scenario start: if no delay, insert
        - scenario start then display: leave as is (a delay already exists
          behind the previous scenario's last statement)
    Walks the code as a small state machine: ``new_scenario_next`` tracks
    whether the next boundary to consume is a "scenario =" assignment or a
    $fdisplay sentence.
    """
    code_todo = code
    code_done = ""
    new_scenario_next = True
    while True:
        if "$fdisplay" in code_todo:
            # find the next $fdisplay or scenario boundary and its end index
            if new_scenario_next:
                start_first = code_todo.find("scenario =")
                end_first = code_todo[start_first:].find(";") + start_first + 1
                new_scenario_next = False
                new_scenario_now = True
            else:
                start_first = code_todo.find("$fdisplay")
                end_first = code_todo[start_first:].find(");") + start_first + 2
                new_scenario_now = False
            # check scenario: which comes next, another display or a new scenario?
            start_scenario = code_todo[end_first:].find("scenario =" )
            start_second = code_todo[end_first:].find("$fdisplay")
            if start_second == -1:
                # no further display: flush the remainder unchanged
                code_done += code_todo
                break
            if not (start_scenario == -1) and (start_scenario < start_second):
                # next is a new scenario
                start_second = start_scenario
                new_scenario_next = True
            start_second += end_first
            # check and insert delay between the two boundaries
            subcode = code_todo[end_first:start_second]
            if new_scenario_now:
                # it is ok if there is no delay between scenario and display because delay already exists behind the last scenario
                code_done += code_todo[:end_first]
            else:
                if (not ("#" in subcode)):
                    code_done += code_todo[:end_first] + " #10; "
                else:
                    code_done += code_todo[:end_first]
            code_todo = code_todo[end_first:]
        else:
            code_done += code_todo
            break
    return code_done
|
||||
|
||||
def refine_repeat_fdisplay(code:str):
    """
    Rebalance #10 delays around ``repeat`` blocks so each $fdisplay pairs with
    exactly one delay.
    - good repeat block: $display->#10->repeat{$display->#10}n->$display->#10
    - bad repeat block: $display->repeat{#10->$display->#10}n->$display->#10
    - this code standardization is newly added in AutoBench2
    A surplus delay inside the repeat is moved out in front of it; a missing
    one is moved in (and the last #10 before the repeat removed).
    """
    # capture the repeat block
    code_todo = code
    code_done = ""
    while "repeat" in code_todo:
        repeat_start = code_todo.find("repeat")
        # NOTE(review): matches the first "end" after "repeat" — assumes no
        # nested begin/end inside the loop body; confirm for generated TBs.
        repeat_end = code_todo[repeat_start:].find("end") + repeat_start
        repeat_block = code_todo[repeat_start:repeat_end]
        # check if the repeat block satisfies the condition
        delay_amount_in_repeat = repeat_block.count("#10")
        display_amount_in_repeat = repeat_block.count("$fdisplay")
        if delay_amount_in_repeat == display_amount_in_repeat + 1:
            # then we need to move one delay out: drop everything from the
            # first #10 up to the first following $fdisplay...
            first_delay_pos = repeat_block.find("#10")
            first_display_pos = repeat_block[first_delay_pos:].find("$fdisplay") + first_delay_pos
            repeat_block = repeat_block[:first_delay_pos] + repeat_block[first_display_pos:]
            before_repeat = code_todo[:repeat_start]
            before_repeat_last_newline = before_repeat.rfind("\n")
            # ...and replace the last "\n" before the repeat with " #10;\n"
            before_repeat = before_repeat[:before_repeat_last_newline] + " #10;\n" + before_repeat[before_repeat_last_newline+1:]
            code_done += before_repeat + repeat_block
            # code_done += code_todo[:repeat_start] + repeat_block
            code_todo = code_todo[repeat_end:]
        elif delay_amount_in_repeat + 1 == display_amount_in_repeat:
            # then we need to move one #10 in
            # we add a #10 before the first $fdisplay
            first_display_pos = repeat_block.find("$fdisplay")
            repeat_block = repeat_block[:first_display_pos] + "#10; " + repeat_block[first_display_pos:]
            # then we delete the last #10 before the repeat block
            # (the +4 skips "#10" plus one following character, e.g. ';')
            before_repeat = code_todo[:repeat_start]
            before_repeat_last_delay = before_repeat.rfind("#10")
            before_repeat = before_repeat[:before_repeat_last_delay] + before_repeat[before_repeat_last_delay+4:]
            code_done += before_repeat + repeat_block
            code_todo = code_todo[repeat_end:]
        else:
            # already balanced (or too far off to fix safely): keep as-is
            code_done += code_todo[:repeat_start] + repeat_block
            code_todo = code_todo[repeat_end:]
    code_done += code_todo
    return code_done
|
||||
|
||||
def add_display_into_2delays(code:str, header:str=None):
    """
    If two consecutive "#10" delays have no $fdisplay between them, append a
    plain (non-[check]) $fdisplay before the second one.

    When *header* is given, the inserted line is generated from it; otherwise
    the first $fdisplay already present in *code* is reused (with any
    "[check]" tag stripped). The last two "#10"-separated segments are always
    left untouched.
    """
    def _templates_from_code(src:str):
        # Reuse the first display sentence found in the code itself.
        begin = src.find("$fdisplay")
        stop = src[begin:].find(")") + begin
        sentence = src[begin:stop+1] + ";"
        if "[check]" in sentence:
            checked = sentence
            plain = checked.replace("[check]", "")
        else:
            plain = sentence
            checked = plain.replace('"scenario: ', '"[check]scenario: ')
        return plain, checked

    if header is None:
        load, check = _templates_from_code(code)
    else:
        load = fdisplay_code_gen(header, ckeck_en=False)
        check = fdisplay_code_gen(header, ckeck_en=True)
    segments = code.split("#10")
    # every segment except the final two gets a display if it lacks one
    for idx in range(max(len(segments) - 2, 0)):
        if "$fdisplay" not in segments[idx]:
            segments[idx] += load + " "
    return "#10".join(segments)
|
||||
|
||||
def split_delay_to_delays(code:str):
    """
    Rewrite every delay "#N" after the first scenario marker as a run of
    ceil(N/10) "#10;" delays so downstream passes can reason in fixed
    10-time-unit steps. Text before the first "scenario"/"Scenario" (e.g. the
    clock generator, which legitimately uses other delays) is left untouched.

    Fix vs. the previous version: when neither spelling occurs, the function
    now returns *code* explicitly instead of relying on the accidental no-op
    of slicing with ``start == -1`` (``code[:-1]`` + ``code[-1:]``).
    """
    start = max(code.find("scenario"), code.find("Scenario"))
    if start == -1:
        # no scenario section: nothing to normalize
        return code
    code_before = code[:start]
    code = code[start:]
    code = code.split("#")
    for idx, subcode in enumerate(code):
        if idx != 0:
            # collect the delay's leading digits, e.g. "20 foo" -> "20"
            digit = ""
            for char in subcode:
                if char.isdigit():
                    digit += char
                else:
                    break
            if digit and (digit != "10"):
                delay_time = int(digit)
                delay10_num = math.ceil(delay_time / 10.0)
                # replace the original delay with multiple #10;
                new_delay = "#10; " * delay10_num
                # drop the leading '#' (re-added by the join below) and the
                # trailing "; " (the original delay's own terminator follows)
                new_delay = new_delay[1:-2]
                code[idx] = new_delay + subcode[len(digit):]
    return code_before + "#".join(code)
|
||||
|
||||
def verilog_patch(vcode:str):
    """
    Patch two quirks of GPT-generated verilog:
    - a closing brace used instead of "end": a whole line consisting of "}"
      becomes "end", and the escaped-newline form (the two characters '{' and
      a literal backslash-n) becomes "begin" + backslash-n;
    - duplicate "endmodule": every occurrence after the first is removed.
    NOTE(review): r"{\n" matches a literal backslash-n, NOT a real newline
    after '{' — presumably intended for code carrying escaped newlines;
    confirm before changing.
    """
    if r"{\n" in vcode:
        vcode = vcode.replace(r"{\n", r"begin\n")
    patched = []
    seen_endmodule = False
    for line in vcode.split("\n"):
        compact = line.replace(" ", "")
        if compact == "}":
            line = line.replace("}", "end")
        if "endmodule" in compact:
            if seen_endmodule:
                line = line.replace("endmodule", "")
            else:
                seen_endmodule = True
        patched.append(line)
    return "\n".join(patched)
|
||||
|
||||
@run_with_timeout(timeout=30)
def circuit_type_by_code(code: str) -> str:
    """Classify a Verilog module as combinational or sequential by scanning its text.

    - input: code — Verilog source text
    - output: "CMB" (combinational) or "SEQ" (sequential)

    Heuristic: walk every ``always`` block; the circuit is "SEQ" if any block is
    ``always_ff``/``always_latch``, has ``posedge``/``negedge`` in its sensitivity
    list, or contains a non-blocking assignment (``<=``).  Purely textual — it
    does not parse Verilog, so unusual formatting may fool it (hence the timeout
    decorator guarding against pathological inputs).
    """
    def string_to_words(string: str):
        # Split on spaces and drop empty fragments (i.e., collapse runs of spaces).
        words = string.split(" ")
        words = [word for word in words if word != ""]
        return words
    # _SEQ_exit_pos = 0 # for debug
    circuit_type = "CMB" # will be changed to "SEQ" if sequential
    if "always" in code:
        while True:
            always_start = code.find("always")
            if always_start == -1:
                break
            # Reject matches that are part of a longer identifier (e.g. "my_always").
            # NOTE(review): when always_start == 0 this reads code[-1] — the LAST
            # character — which can wrongly skip an "always" at position 0; confirm.
            if code[always_start-1] not in [" ", "\n", "\t", ";"]:
                code = code[always_start+6:]
                continue
            elif code[always_start+6] not in [" ", "@"]:
                # check always_ff, _comb and _latch
                if code[always_start+6] == "_":
                    always_word = code[always_start:code[always_start:].find(" ")+always_start]
                    if always_word == "always_ff" or always_word == "always_latch":
                        circuit_type = "SEQ"
                        break
                # always_comb (or a longer identifier) falls through here and is skipped.
                code = code[always_start+6:]
                continue
            # check if there is a begin till next ";"
            next_semicolon = code[always_start:].find(";")
            if "begin" in code[always_start:always_start+next_semicolon]:
                has_begin = True
                # NOTE(review): finds the FIRST "end" after "always" — nested
                # begin/end blocks will truncate the analyzed region early.
                always_end = code[always_start:].find("end") + always_start
            else:
                has_begin = False
                always_end = next_semicolon + always_start
            always_block = code[always_start:always_end]
            # currently we use a naive way to check if the always block is sequential or not; will be improved in the future
            # check if () exist for the sensitivity list
            at_pos = always_block.find("@")
            # check the first not-" " character after "@"
            char_pos = at_pos
            for char in always_block[at_pos+1:]:
                char_pos += 1
                if char != " ":
                    break
            # NOTE(review): if "@" is absent or is the last character, `char` keeps
            # its previous value (or is unbound) — assumes well-formed "always @ ...".
            has_bracket = True if char == "(" else False
            signal_list = []
            if has_bracket:
                # Parenthesized sensitivity list: "(posedge clk, negedge rst)" etc.
                sensitivity_list = always_block[always_block.find("(")+1:always_block.find(")")]
                sensitivity_list = sensitivity_list.split(",")
                for signal in sensitivity_list:
                    # get none-space words:
                    signal_seg = string_to_words(signal)
                    if len(signal_seg) > 1 and ("posedge" in signal_seg or "negedge" in signal_seg):
                        circuit_type = "SEQ"
                        # _SEQ_exit_pos = 1
                        break
                    # Last word is the signal name (edge keyword, if any, precedes it).
                    signal_list.append(signal_seg[-1])
            else: # no bracket, always @ a begin xxx = xxx end;
                sensitivity_list_end = always_block[char_pos:].find(" ")
                sensitivity_signal = always_block[char_pos:char_pos+sensitivity_list_end]
                signal_list.append(sensitivity_signal)
            if "*" in signal_list:
                # "always @(*)" is combinational by construction — skip this block.
                code = code[always_end:]
                continue
            if circuit_type == "SEQ":
                # _SEQ_exit_pos = 2
                break
            else:
                # No edge trigger found — fall back to checking for non-blocking
                # assignments ("<=" as a standalone word) inside the block body.
                break_always_block = string_to_words(always_block)
                if "<=" in break_always_block:
                    circuit_type = "SEQ"
                # currently we use a naive way. Following codes are skipped
                # check_next_signal = False
                # for seg in break_always_block:
                #     if check_next_signal:
                #         if seg not in signal_list:
                #             circuit_type = "SEQ"
                #             break
                #     if "=" in seg:
                #         check_next_signal = True
                #     else:
                #         check_next_signal = False
            if circuit_type == "SEQ":
                # _SEQ_exit_pos = 3
                break
            # Advance past this always block and look for the next one.
            code = code[always_end:]
    return circuit_type
|
||||
|
||||
class given_TB:
    """Testbench scaffold generator for a DUT described by its module header.

    The generated template consists of (in order):
    1. timescale + ``module testbench;`` + signal declarations
    2. ``integer file, scenario;`` bookkeeping variables
    3. DUT instantiation
    4. clock generation (only when a ``clk``/``clock`` signal exists)
    5. a scenario-based test skeleton to be filled in by the caller/LLM
    6. ``endmodule``
    """

    def __init__(self, header) -> None:
        """Build the fixed head/test/tail sections from the DUT header.

        ``extract_signals`` (project helper) is expected to return a list of
        dicts of the form ``{"name": ..., "width": ..., "type": ...}``.
        """
        self.header = header
        self.signals = extract_signals(self.header)
        self.TB_code_head = ""
        self.TB_code_head += "`timescale 1ns / 1ps\nmodule testbench;\n"
        self.TB_code_head += self.initial_signals(self.signals) + "\n"
        self.TB_code_head += "integer file, scenario;\n"
        self.TB_code_head += "// DUT instantiation\n"
        self.TB_code_head += self.instantiate_module_by_signals("top_module", "DUT", self.signals) + "\n"
        self.TB_code_head += self.clock_generation()
        self.TB_code_head += '\ninitial begin\n file = $fopen("TBout.txt", "w");\nend\n'
        # self.TB_code_test = '// Test scenarios\ninitial begin\n file = $fopen("TBout.txt", "w");\n\n // write your codes here\n\n $fclose(file);\n $finish;\nend\n'
        self.TB_code_test = '// Scenario Based Test\ninitial begin\n\n // write your scenario checking codes here, according to scenario information\n\n $fclose(file);\n $finish;\nend\n'
        self.TB_code_tail = "\nendmodule\n"

    def gen_template(self):
        """Return the full testbench template (head + scenario skeleton + tail)."""
        return self.TB_code_head + self.TB_code_test + self.TB_code_tail

    def clock_generation(self):
        """Emit a free-running clock block (period 10) if the DUT has a
        ``clk``/``clock`` signal; return "" otherwise."""
        clk_en = False
        for signal in self.signals:
            if signal["name"] in ["clk", "clock"]:
                clk_en = True
                clk = signal["name"]
                break
        if not clk_en:
            return ""
        return "// Clock generation\ninitial begin\n [clk] = 0;\n forever #5 [clk] = ~[clk];\nend\n".replace("[clk]", clk)

    @staticmethod
    def initial_signals(signals):
        """Declare each DUT port: inputs become ``reg``, everything else ``wire``."""
        initial_str = ""
        for signal in signals:
            if signal["type"] == "input":
                initial_str += f"reg {signal['width']} {signal['name']};\n"
            else:
                initial_str += f"wire {signal['width']} {signal['name']};\n"
        return initial_str

    @staticmethod
    def instantiate_module_by_signals(module_name, instantiate_name, signals):
        """Instantiate ``module_name`` as ``instantiate_name`` with named port
        connections ``.sig(sig)``.

        ``signals`` is a list like ``[{"name": "a", "width": "[3:0]", "type": "input"}, ...]``.
        FIX: the original branched on ``signal["width"]`` with two byte-identical
        branches; the port-connection syntax is the same for vectors and scalars,
        so the dead duplicate branch is removed.
        """
        instantiate_str = f"{module_name} {instantiate_name} (\n"
        for signal in signals:
            instantiate_str += f"\t.{signal['name']}({signal['name']}),\n"
        # Drop the trailing ",\n" of the last port and close the instantiation.
        instantiate_str = instantiate_str[:-2] + "\n);"
        return instantiate_str
|
||||
|
||||
# used by stage 5
|
||||
def signal_dictlist_template(header: str, exclude_clk: bool = False, use_check_en: bool = True) -> str:
    """Render a three-entry signal-dict template string from a DUT header.

    Used for the automatic generation of signals in the testbench (stage 5).
    Two entries with ``check_en`` at its default (False) are followed by one with
    ``check_en=True``, e.g. for "module DUT(input a, output d);":
    ``[{'check_en': False, 'scenario': 1, 'a': 1, 'd': 1}, ...]``.
    """
    entries = []
    # First two template entries use the default check_en, the third enables it.
    for with_check in (False, False, True):
        entries += header_to_dictlist(header, check_en=with_check,
                                      exclude_clk=exclude_clk, use_check_en=use_check_en)
    return str(entries)
|
||||
|
||||
def header_to_dictlist(header: str, value=1, scenario_idx=1, check_en=False, exclude_clk: bool = False, use_check_en: bool = True) -> str:
    """Build a one-element list holding a dict template for all DUT signals.

    - header: the DUT module header (parsed by the project helper ``extract_signals``)
    - value: value assigned to every signal in the template
    - scenario_idx: value of the "scenario" key
    - check_en: value of the "check_en" key (only emitted when ``use_check_en``)
    - exclude_clk: drop ``clk``/``clock`` signals from the template

    NOTE(review): the annotation says ``-> str`` but a list of dicts is returned;
    callers (e.g. ``signal_dictlist_template``) rely on the list.
    """
    signals = extract_signals(header)
    if exclude_clk:
        signals = [s for s in signals if s["name"] not in ("clk", "clock")]
    # Key order matters for str() rendering: check_en, scenario, then signals.
    entry = {"check_en": check_en} if use_check_en else {}
    entry["scenario"] = scenario_idx
    entry.update((s["name"], value) for s in signals)
    return [entry]
|
||||
|
||||
|
||||
def signal_dictlist_template_CMB(header: str, exclude_clk: bool = False) -> str:
    """Render a single-entry signal-dict template string (combinational variant).

    NOTE(review): this delegates to ``header_to_dictlist`` (not the ``_CMB``
    variant below), so the entry still carries "check_en"/"scenario" keys —
    confirm that is intended for combinational templates.
    """
    return str(header_to_dictlist(header, exclude_clk=exclude_clk))
|
||||
|
||||
def header_to_dictlist_CMB(header: str, value=1, scenario_idx=1, exclude_clk: bool = False) -> str:
    """Build a one-element list holding a signal-value dict (combinational variant).

    Unlike ``header_to_dictlist``, no "check_en"/"scenario" keys are emitted
    (the "scenario" line is deliberately disabled); ``scenario_idx`` is accepted
    for signature compatibility but unused.

    NOTE(review): the annotation says ``-> str`` but a list of dicts is returned.
    """
    signals = extract_signals(header)
    if exclude_clk:
        signals = [s for s in signals if s["name"] not in ("clk", "clock")]
    # dict_out["scenario"] = scenario_idx   (intentionally not emitted)
    return [{s["name"]: value for s in signals}]
|
||||
|
||||
def min_no_minusone(a, b):
    """Return the smaller of ``a`` and ``b``, treating -1 as "not found".

    Mirrors ``str.find`` semantics: a -1 operand is ignored in favor of the
    other value; if both are -1, -1 is returned.
    """
    candidates = [v for v in (a, b) if v != -1]
    if not candidates:
        return -1
    return min(candidates)
|
||||
|
||||
if __name__ == "__main__":
|
||||
code = """
|
||||
`timescale 1ns / 1ps
|
||||
module testbench;
|
||||
reg clk;
|
||||
reg areset;
|
||||
reg x;
|
||||
wire z;
|
||||
|
||||
integer file, scenario;
|
||||
// DUT instantiation
|
||||
top_module DUT (
|
||||
.clk(clk),
|
||||
.areset(areset),
|
||||
.x(x),
|
||||
.z(z)
|
||||
);
|
||||
// Clock generation
|
||||
initial begin
|
||||
clk = 0;
|
||||
forever #5 clk = ~clk;
|
||||
end
|
||||
|
||||
initial begin
|
||||
file = $fopen("TBout.txt", "w");
|
||||
end
|
||||
// Scenario Based Test
|
||||
initial begin
|
||||
// Scenario 1
|
||||
scenario = 1;
|
||||
areset = 1;
|
||||
x = 0;
|
||||
repeat(2) begin
|
||||
$fdisplay(file, "[check]scenario: %d, clk = %d, areset = %d, x = %d, z = %d", scenario, clk, areset, x, z);
|
||||
#10;
|
||||
end
|
||||
areset = 0;
|
||||
repeat(4) begin
|
||||
x = scenario % 2;
|
||||
scenario = scenario / 2;
|
||||
$fdisplay(file, "[check]scenario: %d, clk = %d, areset = %d, x = %d, z = %d", 1, clk, areset, x, z);
|
||||
#10;
|
||||
end
|
||||
|
||||
// Scenario 2
|
||||
scenario = 2;
|
||||
areset = 1;
|
||||
x = 0;
|
||||
repeat(3) begin
|
||||
$fdisplay(file, "[check]scenario: %d, clk = %d, areset = %d, x = %d, z = %d", scenario, clk, areset, x, z);
|
||||
#10;
|
||||
end
|
||||
areset = 0;
|
||||
repeat(8) begin
|
||||
x = scenario % 2;
|
||||
scenario = scenario / 2;
|
||||
$fdisplay(file, "[check]scenario: %d, clk = %d, areset = %d, x = %d, z = %d", 2, clk, areset, x, z);
|
||||
#10;
|
||||
end
|
||||
areset = 1;
|
||||
$fdisplay(file, "[check]scenario: %d, clk = %d, areset = %d, x = %d, z = %d", 2, clk, areset, x, z);
|
||||
#10;
|
||||
areset = 0;
|
||||
repeat(4) begin
|
||||
x = 1;
|
||||
$fdisplay(file, "[check]scenario: %d, clk = %d, areset = %d, x = %d, z = %d", 2, clk, areset, x, z);
|
||||
#10;
|
||||
end
|
||||
|
||||
// Scenario 3
|
||||
scenario = 3;
|
||||
areset = 0;
|
||||
repeat(3) begin
|
||||
x = scenario % 2;
|
||||
scenario = scenario / 2;
|
||||
$fdisplay(file, "[check]scenario: %d, clk = %d, areset = %d, x = %d, z = %d", 3, clk, areset, x, z);
|
||||
#10;
|
||||
end
|
||||
areset = 1;
|
||||
$fdisplay(file, "[check]scenario: %d, clk = %d, areset = %d, x = %d, z = %d", 3, clk, areset, x, z);
|
||||
#10;
|
||||
areset = 0;
|
||||
repeat(3) begin
|
||||
x = scenario % 2;
|
||||
scenario = scenario / 2;
|
||||
$fdisplay(file, "[check]scenario: %d, clk = %d, areset = %d, x = %d, z = %d", 3, clk, areset, x, z);
|
||||
#10;
|
||||
end
|
||||
|
||||
// Scenario 4
|
||||
scenario = 4;
|
||||
areset = 1;
|
||||
x = 0;
|
||||
$fdisplay(file, "[check]scenario: %d, clk = %d, areset = %d, x = %d, z = %d", scenario, clk, areset, x, z);
|
||||
#10;
|
||||
areset = 0;
|
||||
repeat(3) begin
|
||||
x = scenario % 2;
|
||||
scenario = scenario / 2;
|
||||
$fdisplay(file, "[check]scenario: %d, clk = %d, areset = %d, x = %d, z = %d", 4, clk, areset, x, z);
|
||||
#10;
|
||||
end
|
||||
x = 0;
|
||||
repeat(2) begin
|
||||
$fdisplay(file, "[check]scenario: %d, clk = %d, areset = %d, x = %d, z = %d", 4, clk, areset, x, z);
|
||||
#10;
|
||||
end
|
||||
scenario = 25; // 11001 in binary
|
||||
repeat(5) begin
|
||||
x = scenario % 2;
|
||||
scenario = scenario / 2;
|
||||
$fdisplay(file, "[check]scenario: %d, clk = %d, areset = %d, x = %d, z = %d", 4, clk, areset, x, z);
|
||||
#10;
|
||||
end
|
||||
|
||||
// Scenario 5
|
||||
scenario = 5;
|
||||
areset = 0;
|
||||
repeat(8) begin
|
||||
x = scenario % 2;
|
||||
scenario = scenario / 2;
|
||||
$fdisplay(file, "[check]scenario: %d, clk = %d, areset = %d, x = %d, z = %d", 5, clk, areset, x, z);
|
||||
#10;
|
||||
end
|
||||
areset = 1;
|
||||
$fdisplay(file, "[check]scenario: %d, clk = %d, areset = %d, x = %d, z = %d", 5, clk, areset, x, z);
|
||||
#10;
|
||||
areset = 0;
|
||||
scenario = 170; // 10101010 in binary
|
||||
repeat(8) begin
|
||||
x = scenario % 2;
|
||||
scenario = scenario / 2;
|
||||
$fdisplay(file, "[check]scenario: %d, clk = %d, areset = %d, x = %d, z = %d", 5, clk, areset, x, z);
|
||||
#10;
|
||||
end
|
||||
|
||||
// Scenario 6
|
||||
scenario = 6;
|
||||
areset = 1;
|
||||
x = 0;
|
||||
repeat(4) begin
|
||||
$fdisplay(file, "[check]scenario: %d, clk = %d, areset = %d, x = %d, z = %d", scenario, clk, areset, x, z);
|
||||
#10;
|
||||
end
|
||||
areset = 0;
|
||||
repeat(8) begin
|
||||
x = scenario % 2;
|
||||
scenario = scenario / 2;
|
||||
$fdisplay(file, "[check]scenario: %d, clk = %d, areset = %d, x = %d, z = %d", 6, clk, areset, x, z);
|
||||
#10;
|
||||
end
|
||||
x = 1;
|
||||
repeat(5) begin
|
||||
$fdisplay(file, "[check]scenario: %d, clk = %d, areset = %d, x = %d, z = %d", 6, clk, areset, x, z);
|
||||
#10;
|
||||
end
|
||||
|
||||
// Scenario 7
|
||||
scenario = 7;
|
||||
areset = 0;
|
||||
x = 0;
|
||||
repeat(5) begin
|
||||
$fdisplay(file, "[check]scenario: %d, clk = %d, areset = %d, x = %d, z = %d", scenario, clk, areset, x, z);
|
||||
#10;
|
||||
end
|
||||
x = 1;
|
||||
repeat(5) begin
|
||||
$fdisplay(file, "[check]scenario: %d, clk = %d, areset = %d, x = %d, z = %d", scenario, clk, areset, x, z);
|
||||
#10;
|
||||
end
|
||||
areset = 1;
|
||||
repeat(2) begin
|
||||
$fdisplay(file, "[check]scenario: %d, clk = %d, areset = %d, x = %d, z = %d", scenario, clk, areset, x, z);
|
||||
#10;
|
||||
end
|
||||
areset = 0;
|
||||
scenario = 10; // 01010 in binary
|
||||
repeat(5) begin
|
||||
x = scenario % 2;
|
||||
scenario = scenario / 2;
|
||||
$fdisplay(file, "[check]scenario: %d, clk = %d, areset = %d, x = %d, z = %d", 7, clk, areset, x, z);
|
||||
#10;
|
||||
end
|
||||
|
||||
// Scenario 8
|
||||
scenario = 8;
|
||||
areset = 1;
|
||||
x = 0;
|
||||
$fdisplay(file, "[check]scenario: %d, clk = %d, areset = %d, x = %d, z = %d", scenario, clk, areset, x, z);
|
||||
#10;
|
||||
areset = 0;
|
||||
scenario = 455; // 111000111 in binary
|
||||
repeat(9) begin
|
||||
x = scenario % 2;
|
||||
scenario = scenario / 2;
|
||||
$fdisplay(file, "[check]scenario: %d, clk = %d, areset = %d, x = %d, z = %d", 8, clk, areset, x, z);
|
||||
#10;
|
||||
end
|
||||
areset = 1;
|
||||
$fdisplay(file, "[check]scenario: %d, clk = %d, areset = %d, x = %d, z = %d", 8, clk, areset, x, z);
|
||||
#10;
|
||||
areset = 0;
|
||||
scenario = 56; // 000111000 in binary
|
||||
repeat(9) begin
|
||||
x = scenario % 2;
|
||||
scenario = scenario / 2;
|
||||
$fdisplay(file, "[check]scenario: %d, clk = %d, areset = %d, x = %d, z = %d", 8, clk, areset, x, z);
|
||||
#10;
|
||||
end
|
||||
|
||||
$fclose(file);
|
||||
$finish;
|
||||
end
|
||||
|
||||
endmodule"""
|
||||
|
||||
header = """module top_module (
|
||||
input clk,
|
||||
input areset,
|
||||
input x,
|
||||
output z
|
||||
);"""
|
||||
|
||||
code = pychecker_SEQ_TB_standardization(code, header)
|
||||
print(code)
|
||||
6
requirements.txt
Normal file
6
requirements.txt
Normal file
@@ -0,0 +1,6 @@
|
||||
openai>=1.0.0
|
||||
anthropic>=0.0.8
|
||||
httpx>=0.25.0
|
||||
loguru>=0.7.0
|
||||
pyyaml>=6.0
|
||||
tiktoken>=0.5.0
|
||||
9
results/example_mul.json
Normal file
9
results/example_mul.json
Normal file
@@ -0,0 +1,9 @@
|
||||
{
|
||||
"task_id": "example_mul",
|
||||
"stage": "TBeval",
|
||||
"pass": null,
|
||||
"coverage": 0.0,
|
||||
"cga_enabled": true,
|
||||
"Eval1_pass": true,
|
||||
"full_pass": null
|
||||
}
|
||||
209
run_tbgen.py
Normal file
209
run_tbgen.py
Normal file
@@ -0,0 +1,209 @@
|
||||
"""
|
||||
TB Generator - 根据DUT和项目要求生成Testbench(完整版)
|
||||
支持多阶段流程: TBgen → TBsim → TBcheck → CGA → TBeval
|
||||
|
||||
用法:
|
||||
from run_tbgen import generate_tb
|
||||
tb_path, result = generate_tb(
|
||||
dut_code="module example(...); endmodule",
|
||||
description="项目描述",
|
||||
header="module example(input clk, ...);",
|
||||
task_id="my_task",
|
||||
model="qwen-max"
|
||||
)
|
||||
"""
|
||||
import os
|
||||
import sys
|
||||
from pathlib import Path
|
||||
|
||||
# 添加项目路径
|
||||
PROJECT_ROOT = Path(__file__).parent
|
||||
sys.path.insert(0, str(PROJECT_ROOT))
|
||||
|
||||
|
||||
# 延迟导入和初始化,避免单例顺序问题
|
||||
_config_instance = None
|
||||
_auto_logger_instance = None
|
||||
|
||||
def _ensure_init():
    """Ensure the Config and AutoLogger singletons are initialized exactly once.

    Writes a fixed bootstrap config to config/custom.yaml, then constructs the
    project ``Config`` and ``AutoLogger`` singletons from it.  Imports are done
    lazily inside the function because ``Config`` must exist before ``autoline``
    (and friends) are imported anywhere — see the module-level note on
    singleton ordering.

    Returns the cached ``Config`` instance.
    """
    global _config_instance, _auto_logger_instance

    if _config_instance is None:
        # Create the temporary bootstrap configuration file.
        _temp_config_path = os.path.join(PROJECT_ROOT, "config", "custom.yaml")
        _config_content = """
run:
  mode: 'autoline'
gpt:
  model: "qwen-max"
  key_path: "config/key_API.json"
save:
  en: True
  root: "./output/"
autoline:
  cga:
    enabled: True
    max_iter: 10
  promptscript: "pychecker"
  onlyrun: "TBgensimeval"
"""
        with open(_temp_config_path, "w") as f:
            f.write(_config_content)

        # Config MUST be created before importing autoline.
        from config import Config
        _config_instance = Config(_temp_config_path)

        # Initialize the AutoLogger singleton.
        from loader_saver import AutoLogger
        _auto_logger_instance = AutoLogger()

    return _config_instance
|
||||
|
||||
|
||||
def _create_config_for_task(task_id, model, enable_cga, cga_iter):
    """Write a per-task config to config/custom.yaml and return a Config for it.

    Overwrites any config previously written by ``_ensure_init`` or an earlier
    task.  ``enable_cga`` / ``cga_iter`` are interpolated directly into the YAML
    (Python ``True``/``False`` render as valid YAML booleans).  Output is routed
    to ``<PROJECT_ROOT>/output/<task_id>/``.
    """
    config_path = os.path.join(PROJECT_ROOT, "config", "custom.yaml")

    config_content = f"""
run:
  mode: 'autoline'
gpt:
  model: "{model}"
  key_path: "config/key_API.json"
save:
  en: True
  root: "{os.path.join(PROJECT_ROOT, 'output', task_id)}/"
autoline:
  cga:
    enabled: {enable_cga}
    max_iter: {cga_iter}
  promptscript: "pychecker"
  onlyrun: "TBgensimeval"
"""
    with open(config_path, "w") as f:
        f.write(config_content)

    from config import Config
    return Config(config_path)
|
||||
|
||||
|
||||
class TBGenerator:
    """Full testbench generator driving the multi-stage pipeline
    (TBgen → TBsim → TBcheck → CGA → TBeval)."""

    def __init__(self, api_key_path="config/key_API.json", model="qwen-max"):
        # NOTE(review): api_key_path is stored but not forwarded to
        # _create_config_for_task (which hardcodes key_path) — confirm intent.
        self.api_key_path = api_key_path
        self.model = model

    def generate(self, dut_code, description, header, task_id="test",
                 enable_cga=True, cga_iter=10):
        """Generate a testbench for the given DUT (full pipeline).

        Args:
            dut_code: str, Verilog source of the DUT
            description: str, project description / requirements
            header: str, the DUT's module header
            task_id: str, task identifier (also used for output paths)
            enable_cga: bool, whether to enable CGA optimization
            cga_iter: int, maximum number of CGA iterations

        Returns:
            dict with the generated TB code (Verilog and Python), run info,
            CGA coverage and the full-pass flag, all taken from the finished task.
        """
        # Ensure the Config/AutoLogger singletons exist before importing autoline.
        _ensure_init()

        # Import autoline only now (safe after singleton initialization).
        from autoline.TB_autoline import AutoLine_Task

        # Build prob_data in the HDLBitsProbset format expected by AutoLine_Task.
        prob_data = {
            "task_id": task_id,
            "task_number": 1,
            "description": description,
            "header": header,
            "module_code": dut_code,
            "testbench": None,
            "mutants": [],
            "llmgen_RTL": []
        }

        # Create a task-specific configuration (overwrites config/custom.yaml).
        config = _create_config_for_task(task_id, self.model, enable_cga, cga_iter)

        # Create the task and run the whole pipeline synchronously.
        task = AutoLine_Task(prob_data, config)
        task.run()

        return {
            "TB_code_v": task.TB_code_v,
            "TB_code_py": task.TB_code_py,
            "run_info": task.run_info,
            "cga_coverage": task.cga_coverage,
            "full_pass": task.full_pass
        }
|
||||
|
||||
|
||||
def generate_tb(dut_code, description, header, task_id,
                api_key_path="config/key_API.json",
                model="qwen-max",
                enable_cga=True,
                output_dir="./output"):
    """Convenience wrapper: generate a testbench and save the Verilog to disk.

    Args:
        dut_code: str, Verilog source of the DUT
        description: str, project description / requirements
        header: str, the DUT's module header
        task_id: str, task identifier
        api_key_path: str, path to the API key file (passed to TBGenerator)
        model: str, LLM model name to use
        enable_cga: bool, whether to enable CGA optimization
        output_dir: str, directory the generated TB file is written to

    Returns:
        tuple: (path of the saved TB .v file, result dict from TBGenerator.generate)
    """
    generator = TBGenerator(api_key_path, model)
    result = generator.generate(dut_code, description, header, task_id, enable_cga)

    # Persist the generated Verilog testbench as <output_dir>/<task_id>_tb.v.
    os.makedirs(output_dir, exist_ok=True)
    tb_path = os.path.join(output_dir, f"{task_id}_tb.v")
    from loader_saver import save_code
    save_code(result["TB_code_v"], tb_path)

    return tb_path, result
|
||||
|
||||
|
||||
if __name__ == "__main__":
    # Example usage: generate a testbench for a simple 8-bit multiplier DUT.
    example_dut = """
module example(
    input clk,
    input rst,
    input [7:0] a,
    input [7:0] b,
    output [15:0] y
);
    assign y = a * b;
endmodule
"""

    # Description is deliberately kept in Chinese — it is runtime input to the LLM.
    example_desc = "一个8位乘法器,输入两个8位无符号数,输出16位乘积"

    example_header = "module example(input clk, input rst, input [7:0] a, input [7:0] b, output [15:0] y);"

    print("Generating TB for example multiplier...")
    tb_path, result = generate_tb(
        dut_code=example_dut,
        description=example_desc,
        header=example_header,
        task_id="example_mul",
        model="qwen-max"
    )
    print(f"TB saved to: {tb_path}")
    print(f"Coverage: {result.get('cga_coverage', 0)}")
    print(f"Full Pass: {result.get('full_pass', False)}")
|
||||
255
saves/0330~0405/20260330_154844/20260330_154844.log
Normal file
255
saves/0330~0405/20260330_154844/20260330_154844.log
Normal file
@@ -0,0 +1,255 @@
|
||||
|
||||
---------------custom config--------------
|
||||
run:
|
||||
version: 2.0
|
||||
author: Ruidi Qiu - Technical University of Munich
|
||||
time: None
|
||||
custom_path: None
|
||||
mode: qwen-max
|
||||
save:
|
||||
en: True
|
||||
root: None
|
||||
pub:
|
||||
prefix: None
|
||||
dir: saves/$weekrange$/
|
||||
subdir:
|
||||
log:
|
||||
en: True
|
||||
dir: logs/
|
||||
notes: None
|
||||
cfg_pmode: iwantall
|
||||
debug_en: False
|
||||
level: TRACE
|
||||
message:
|
||||
en: True
|
||||
dir: messages/
|
||||
format: json
|
||||
iverilog:
|
||||
en: True
|
||||
subdir: ivcode_nodebug
|
||||
load:
|
||||
prompt:
|
||||
path: config/initial_prompts/prompt1.txt
|
||||
pick_idx: []
|
||||
stage_template:
|
||||
path: config/templates/stage_template0301.txt
|
||||
gpt:
|
||||
model: 4o
|
||||
key_path: config/key_API.json
|
||||
temperature: None
|
||||
json_mode: False
|
||||
chatgpt:
|
||||
start_form: chat
|
||||
one_time_talk: False
|
||||
rtlgen_model: None
|
||||
iverilog:
|
||||
dir:
|
||||
task_id:
|
||||
autoline:
|
||||
result_path: results
|
||||
cga:
|
||||
enabled: True
|
||||
max_iter: 10
|
||||
target_coverage: 100.0
|
||||
probset:
|
||||
path: None
|
||||
mutant_path: None
|
||||
gptgenRTL_path: None
|
||||
more_info_paths: []
|
||||
only: ['lemmings3', 'lemmings4', 'ece241_2013_q8', '2014_q3fsm', 'm2014_q6', 'review2015_fsm', 'rule110', 'fsm_ps2']
|
||||
exclude: []
|
||||
exclude_json: None
|
||||
filter: [{}]
|
||||
checklist:
|
||||
max: 3
|
||||
debug:
|
||||
max: 5
|
||||
reboot: 1
|
||||
py_rollback: 2
|
||||
onlyrun: None
|
||||
promptscript: None
|
||||
timeout: 300
|
||||
TBcheck:
|
||||
rtl_num: 20
|
||||
correct_max: 3
|
||||
discrim_mode: col_full_wrong
|
||||
correct_mode: naive
|
||||
rtl_compens_en: True
|
||||
rtl_compens_max_iter: 3
|
||||
itermax: 10
|
||||
update_desc: False
|
||||
save_compile: True
|
||||
save_finalcodes: True
|
||||
error_interruption: False
|
||||
------------------------------------------
|
||||
------config info (custom + default)------
|
||||
run:
|
||||
version: 2.0
|
||||
author: Ruidi Qiu - Technical University of Munich
|
||||
time: 20260330_154844
|
||||
custom_path: /home/zhang/CorrectBench/TBgen_App/config/default.yaml
|
||||
mode: qwen-max
|
||||
hostname: localhost
|
||||
pid: 919939
|
||||
pyversion: 3.12.3 (main, Mar 3 2026, 12:15:18) [GCC 13.3.0]
|
||||
save:
|
||||
en: True
|
||||
root: None
|
||||
pub:
|
||||
prefix: None
|
||||
dir: saves/0330~0405/
|
||||
subdir:
|
||||
log:
|
||||
en: True
|
||||
dir: logs/
|
||||
notes: None
|
||||
cfg_pmode: iwantall
|
||||
debug_en: False
|
||||
level: TRACE
|
||||
message:
|
||||
en: True
|
||||
dir: messages/
|
||||
format: json
|
||||
iverilog:
|
||||
en: True
|
||||
subdir: ivcode_nodebug
|
||||
load:
|
||||
prompt:
|
||||
path: config/initial_prompts/prompt1.txt
|
||||
pick_idx: []
|
||||
stage_template:
|
||||
path: config/templates/stage_template0301.txt
|
||||
gpt:
|
||||
model: gpt-4o-2024-08-06
|
||||
key_path: config/key_API.json
|
||||
temperature: None
|
||||
json_mode: False
|
||||
chatgpt:
|
||||
start_form: chat
|
||||
one_time_talk: False
|
||||
rtlgen_model: gpt-4o-2024-08-06
|
||||
iverilog:
|
||||
dir:
|
||||
task_id:
|
||||
autoline:
|
||||
result_path: results
|
||||
cga:
|
||||
enabled: True
|
||||
max_iter: 10
|
||||
target_coverage: 100.0
|
||||
probset:
|
||||
path: None
|
||||
mutant_path: None
|
||||
gptgenRTL_path: None
|
||||
more_info_paths: []
|
||||
only: ['lemmings3', 'lemmings4', 'ece241_2013_q8', '2014_q3fsm', 'm2014_q6', 'review2015_fsm', 'rule110', 'fsm_ps2']
|
||||
exclude: []
|
||||
exclude_json: None
|
||||
filter: [{}]
|
||||
checklist:
|
||||
max: 3
|
||||
debug:
|
||||
max: 5
|
||||
reboot: 1
|
||||
py_rollback: 2
|
||||
onlyrun: None
|
||||
promptscript: None
|
||||
timeout: 300
|
||||
TBcheck:
|
||||
rtl_num: 20
|
||||
correct_max: 3
|
||||
discrim_mode: col_full_wrong
|
||||
correct_mode: naive
|
||||
rtl_compens_en: True
|
||||
rtl_compens_max_iter: 3
|
||||
itermax: 10
|
||||
update_desc: False
|
||||
save_compile: True
|
||||
save_finalcodes: True
|
||||
error_interruption: False
|
||||
_initialized: True
|
||||
------------------------------------------
|
||||
|
||||
--------------default config--------------
|
||||
run:
|
||||
version: 2.0
|
||||
author: Ruidi Qiu - Technical University of Munich
|
||||
time: None
|
||||
custom_path: None
|
||||
mode: qwen-max
|
||||
save:
|
||||
en: True
|
||||
root: None
|
||||
pub:
|
||||
prefix: None
|
||||
dir: saves/$weekrange$/
|
||||
subdir:
|
||||
log:
|
||||
en: True
|
||||
dir: logs/
|
||||
notes: None
|
||||
cfg_pmode: iwantall
|
||||
debug_en: False
|
||||
level: TRACE
|
||||
message:
|
||||
en: True
|
||||
dir: messages/
|
||||
format: json
|
||||
iverilog:
|
||||
en: True
|
||||
subdir: ivcode_nodebug
|
||||
load:
|
||||
prompt:
|
||||
path: config/initial_prompts/prompt1.txt
|
||||
pick_idx: []
|
||||
stage_template:
|
||||
path: config/templates/stage_template0301.txt
|
||||
gpt:
|
||||
model: 4o
|
||||
key_path: config/key_API.json
|
||||
temperature: None
|
||||
json_mode: False
|
||||
chatgpt:
|
||||
start_form: chat
|
||||
one_time_talk: False
|
||||
rtlgen_model: None
|
||||
iverilog:
|
||||
dir:
|
||||
task_id:
|
||||
autoline:
|
||||
result_path: results
|
||||
cga:
|
||||
enabled: True
|
||||
max_iter: 10
|
||||
target_coverage: 100.0
|
||||
probset:
|
||||
path: None
|
||||
mutant_path: None
|
||||
gptgenRTL_path: None
|
||||
more_info_paths: []
|
||||
only: ['lemmings3', 'lemmings4', 'ece241_2013_q8', '2014_q3fsm', 'm2014_q6', 'review2015_fsm', 'rule110', 'fsm_ps2']
|
||||
exclude: []
|
||||
exclude_json: None
|
||||
filter: [{}]
|
||||
checklist:
|
||||
max: 3
|
||||
debug:
|
||||
max: 5
|
||||
reboot: 1
|
||||
py_rollback: 2
|
||||
onlyrun: None
|
||||
promptscript: None
|
||||
timeout: 300
|
||||
TBcheck:
|
||||
rtl_num: 20
|
||||
correct_max: 3
|
||||
discrim_mode: col_full_wrong
|
||||
correct_mode: naive
|
||||
rtl_compens_en: True
|
||||
rtl_compens_max_iter: 3
|
||||
itermax: 10
|
||||
update_desc: False
|
||||
save_compile: True
|
||||
save_finalcodes: True
|
||||
error_interruption: False
|
||||
------------------------------------------
|
||||
204
saves/0330~0405/20260330_155017/20260330_155017.log
Normal file
204
saves/0330~0405/20260330_155017/20260330_155017.log
Normal file
@@ -0,0 +1,204 @@
|
||||
|
||||
---------------custom config--------------
|
||||
run:
|
||||
mode: autoline
|
||||
gpt:
|
||||
model: qwen-max
|
||||
key_path: config/key_API.json
|
||||
save:
|
||||
en: True
|
||||
root: ./output/
|
||||
autoline:
|
||||
cga:
|
||||
enabled: True
|
||||
max_iter: 10
|
||||
promptscript: pychecker
|
||||
onlyrun: TBgensimeval
|
||||
------------------------------------------
|
||||
------config info (custom + default)------
|
||||
run:
|
||||
version: 2.0
|
||||
author: Ruidi Qiu - Technical University of Munich
|
||||
time: 20260330_155017
|
||||
custom_path: /home/zhang/CorrectBench/TBgen_App/config/custom.yaml
|
||||
mode: autoline
|
||||
hostname: localhost
|
||||
pid: 928959
|
||||
pyversion: 3.12.3 (main, Mar 3 2026, 12:15:18) [GCC 13.3.0]
|
||||
save:
|
||||
en: True
|
||||
root: ./output/
|
||||
pub:
|
||||
prefix: None
|
||||
dir: saves/0330~0405/
|
||||
subdir:
|
||||
log:
|
||||
en: True
|
||||
dir: logs/
|
||||
notes: None
|
||||
cfg_pmode: iwantall
|
||||
debug_en: False
|
||||
level: TRACE
|
||||
message:
|
||||
en: True
|
||||
dir: messages/
|
||||
format: json
|
||||
iverilog:
|
||||
en: True
|
||||
subdir: ivcode_nodebug
|
||||
load:
|
||||
prompt:
|
||||
path: config/initial_prompts/prompt1.txt
|
||||
pick_idx: []
|
||||
stage_template:
|
||||
path: config/templates/stage_template0301.txt
|
||||
gpt:
|
||||
model: qwen-max
|
||||
key_path: config/key_API.json
|
||||
temperature: None
|
||||
json_mode: False
|
||||
chatgpt:
|
||||
start_form: chat
|
||||
one_time_talk: False
|
||||
rtlgen_model: qwen-max
|
||||
iverilog:
|
||||
dir:
|
||||
task_id:
|
||||
autoline:
|
||||
result_path: results
|
||||
cga:
|
||||
enabled: True
|
||||
max_iter: 10
|
||||
target_coverage: 100.0
|
||||
probset:
|
||||
path: None
|
||||
mutant_path: None
|
||||
gptgenRTL_path: None
|
||||
more_info_paths: []
|
||||
only: ['lemmings3', 'lemmings4', 'ece241_2013_q8', '2014_q3fsm', 'm2014_q6', 'review2015_fsm', 'rule110', 'fsm_ps2']
|
||||
exclude: []
|
||||
exclude_json: None
|
||||
filter: [{}]
|
||||
checklist:
|
||||
max: 3
|
||||
debug:
|
||||
max: 5
|
||||
reboot: 1
|
||||
py_rollback: 2
|
||||
onlyrun: TBgensimeval
|
||||
promptscript: pychecker
|
||||
timeout: 300
|
||||
TBcheck:
|
||||
rtl_num: 20
|
||||
correct_max: 3
|
||||
discrim_mode: col_full_wrong
|
||||
correct_mode: naive
|
||||
rtl_compens_en: True
|
||||
rtl_compens_max_iter: 3
|
||||
itermax: 10
|
||||
update_desc: False
|
||||
save_compile: True
|
||||
save_finalcodes: True
|
||||
error_interruption: False
|
||||
_initialized: True
|
||||
------------------------------------------
|
||||
|
||||
--------------default config--------------
|
||||
run:
|
||||
version: 2.0
|
||||
author: Ruidi Qiu - Technical University of Munich
|
||||
time: None
|
||||
custom_path: None
|
||||
mode: qwen-max
|
||||
save:
|
||||
en: True
|
||||
root: None
|
||||
pub:
|
||||
prefix: None
|
||||
dir: saves/$weekrange$/
|
||||
subdir:
|
||||
log:
|
||||
en: True
|
||||
dir: logs/
|
||||
notes: None
|
||||
cfg_pmode: iwantall
|
||||
debug_en: False
|
||||
level: TRACE
|
||||
message:
|
||||
en: True
|
||||
dir: messages/
|
||||
format: json
|
||||
iverilog:
|
||||
en: True
|
||||
subdir: ivcode_nodebug
|
||||
load:
|
||||
prompt:
|
||||
path: config/initial_prompts/prompt1.txt
|
||||
pick_idx: []
|
||||
stage_template:
|
||||
path: config/templates/stage_template0301.txt
|
||||
gpt:
|
||||
model: 4o
|
||||
key_path: config/key_API.json
|
||||
temperature: None
|
||||
json_mode: False
|
||||
chatgpt:
|
||||
start_form: chat
|
||||
one_time_talk: False
|
||||
rtlgen_model: None
|
||||
iverilog:
|
||||
dir:
|
||||
task_id:
|
||||
autoline:
|
||||
result_path: results
|
||||
cga:
|
||||
enabled: True
|
||||
max_iter: 10
|
||||
target_coverage: 100.0
|
||||
probset:
|
||||
path: None
|
||||
mutant_path: None
|
||||
gptgenRTL_path: None
|
||||
more_info_paths: []
|
||||
only: ['lemmings3', 'lemmings4', 'ece241_2013_q8', '2014_q3fsm', 'm2014_q6', 'review2015_fsm', 'rule110', 'fsm_ps2']
|
||||
exclude: []
|
||||
exclude_json: None
|
||||
filter: [{}]
|
||||
checklist:
|
||||
max: 3
|
||||
debug:
|
||||
max: 5
|
||||
reboot: 1
|
||||
py_rollback: 2
|
||||
onlyrun: None
|
||||
promptscript: None
|
||||
timeout: 300
|
||||
TBcheck:
|
||||
rtl_num: 20
|
||||
correct_max: 3
|
||||
discrim_mode: col_full_wrong
|
||||
correct_mode: naive
|
||||
rtl_compens_en: True
|
||||
rtl_compens_max_iter: 3
|
||||
itermax: 10
|
||||
update_desc: False
|
||||
save_compile: True
|
||||
save_finalcodes: True
|
||||
error_interruption: False
|
||||
------------------------------------------
|
||||
2026-03-30 15:50:24 | INFO | [example_mul] [TBgen] stage_0 ends (5.68s used)
|
||||
2026-03-30 15:50:48 | INFO | [example_mul] [TBgen] stage_1 ends (24.62s used)
|
||||
2026-03-30 15:51:23 | INFO | [example_mul] [TBgen] stage_2 ends (34.70s used)
|
||||
2026-03-30 15:51:53 | INFO | [example_mul] [TBgen] stage_3 ends (29.57s used)
|
||||
2026-03-30 15:53:48 | INFO | [example_mul] [TBgen] stage_4 ends (115.37s used)
|
||||
2026-03-30 15:53:48 | INFO | [example_mul] [TBgen] stage_checklist ends (0.00s used)
|
||||
2026-03-30 15:56:14 | INFO | [example_mul] [TBgen] stage_4b ends (146.02s used)
|
||||
2026-03-30 15:56:31 | INFO | [example_mul] [TBgen] stage_5 ends (16.58s used)
|
||||
2026-03-30 15:56:31 | INFO | [example_mul]
|
||||
2026-03-30 15:56:31 | INFO | [example_mul] [TBsim] iverilog compilation : passed!
|
||||
2026-03-30 15:56:31 | INFO | [example_mul] [TBsim] python simulation : passed!
|
||||
2026-03-30 15:56:31 | INFO | [example_mul] [TBsim] TBsim finished : True!
|
||||
2026-03-30 15:56:31 | INFO | [example_mul]
|
||||
2026-03-30 15:56:31 | INFO | [example_mul] [TBeval] Eval 1: Golden RTL checking begins
|
||||
2026-03-30 15:56:31 | POSITIVE | [example_mul] [TBeval] Eval 1: Golden RTL checking passed!
|
||||
2026-03-30 15:56:31 | INFO | [example_mul]
|
||||
Some files were not shown because too many files have changed in this diff Show More
Reference in New Issue
Block a user