za_eHAyhG
以下に対応した
openaiライブラリ1.0.0以上の書き方に準拠
gpt-4.5-previewは2025/07/14で廃止されるので、例から省いた
code:py
import argparse
import openai
import os
import concurrent.futures
# Shared client; reads OPENAI_API_KEY from the environment by default.
client = openai.OpenAI()
# Predefined list of models to query in parallel.
MODELS = [
"gpt-4o",
"gpt-4.1",
"o1",
"o3-mini",
"o4-mini",
]
def read_prompt(filename):
    """Return the entire UTF-8 text content of *filename*."""
    with open(filename, encoding="utf-8") as fh:
        return fh.read()
def request_to_model(model_name, prompt, timeout=130):
    """Send *prompt* to *model_name* via the Chat Completions API.

    Returns the reply text on success, or an error string on failure.
    Errors are returned rather than raised so that one failing model
    does not abort the parallel fan-out in main().
    """
    try:
        response = client.chat.completions.create(
            model=model_name,
            messages=[
                {'role': 'user', 'content': prompt},
            ],
            timeout=timeout
        )
        # BUG FIX: original had `response.choices0` (invalid syntax);
        # choices[0] is the single completion returned with default n=1.
        return response.choices[0].message.content
    except Exception as e:
        # Broad catch is deliberate: API, auth, and timeout errors from
        # any single model must not kill the other parallel requests.
        return f"Error from {model_name}: {e}"
def save_response(base_filename, model_name, content):
    """Write *content* to "<base_filename stem>-<model_name>.md" and log the path."""
    stem, _ = os.path.splitext(base_filename)
    out_filename = f"{stem}-{model_name}.md"
    with open(out_filename, "w", encoding="utf-8") as out:
        out.write(content)
        # Trailing newline so the saved file ends with a blank line.
        out.write('\n')
    print(f"Saved: {out_filename}")
def main():
    """Read a prompt file and fan it out to every model in MODELS in parallel.

    Each model's response is saved next to the input file as
    "<input stem>-<model>.md".
    """
    parser = argparse.ArgumentParser(description="Prompt multi LLMs and save responses.")
    parser.add_argument("-i", "--input", required=True, help="Input markdown file as prompt.")
    args = parser.parse_args()
    prompt = read_prompt(args.input)
    # Request each model in parallel; the work is I/O-bound, so threads suffice.
    with concurrent.futures.ThreadPoolExecutor() as executor:
        future_to_model = {
            executor.submit(request_to_model, model, prompt): model
            for model in MODELS
        }
        for future in concurrent.futures.as_completed(future_to_model):
            # BUG FIX: the original referenced an undefined `model` here;
            # look up the model that produced THIS future.
            model = future_to_model[future]
            content = future.result()
            save_response(args.input, model, content)


if __name__ == "__main__":
    main()