I made a command that runs multiple LLM models at the same time and dumps their output to text files
pgpt = Parallel GPT
https://gyazo.com/51eb18349f425c3982cb93fda6a3bd9b
Seems useful for knowledge work; it feels like asking six people at once
Implemented it as a command like python pgpt.py -i prompt.md, and hooked it seamlessly into my beloved Hidemaru editor
Are you planning to support anything other than OpenAI? bsahd.icon
Not planning to sta.icon
I see, the APIs are different, so they would have to be wrapped sta.icon
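For reference, a rough sketch of what wrapping a second provider could look like: map each model name to a provider-specific request function. This is only a sketch; the Anthropic parts (the claude model name, the max_tokens value, the request_anthropic helper) are illustrative assumptions and not part of pgpt.py.
code:provider_wrapper_sketch.py
from openai import OpenAI
from anthropic import Anthropic

openai_client = OpenAI()
anthropic_client = Anthropic()

def request_openai(model_name, prompt):
    # OpenAI Chat Completions API
    response = openai_client.chat.completions.create(
        model=model_name,
        messages=[{"role": "user", "content": prompt}],
    )
    return response.choices[0].message.content

def request_anthropic(model_name, prompt):
    # Anthropic Messages API (max_tokens is required there)
    response = anthropic_client.messages.create(
        model=model_name,
        max_tokens=1024,
        messages=[{"role": "user", "content": prompt}],
    )
    return response.content[0].text

# One entry per model; the wrapper hides which provider sits behind it
PROVIDERS = {
    "gpt-4o": request_openai,
    "claude-3-5-sonnet-latest": request_anthropic,  # illustrative model name
}

def request_to_model(model_name, prompt):
    return PROVIDERS[model_name](model_name, prompt)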
Extracted the code from the GitHub Copilot share link (delete it if that's a problem) bsahd.icon
No problem sta.icon
code:pgpt.py
import argparse
import openai
import os
import concurrent.futures

# Predefined list of models to query (uses the legacy openai<1.0 API below)
MODELS = [
    "gpt-4o",
    "gpt-4.1",
    "o3-mini",
    "o1",
]

def read_prompt(filename):
    with open(filename, "r", encoding="utf-8") as f:
        return f.read()

def request_to_model(model_name, prompt, timeout=130):
    # Ask a single model and return the response text (or the error message on failure)
    try:
        response = openai.ChatCompletion.create(
            model=model_name,
            messages=[
                {"role": "user", "content": prompt},
            ],
            request_timeout=timeout,
        )
        return response.choices[0].message.content
    except Exception as e:
        return f"Error from {model_name}: {e}"

def save_response(base_filename, model_name, content):
    name, ext = os.path.splitext(base_filename)
    out_filename = f"{name}-{model_name}.md"
    with open(out_filename, "w", encoding="utf-8") as f:
        f.write(content)
        f.write("\n")  # add a trailing newline at the end of the file
    print(f"Saved: {out_filename}")

def main():
    parser = argparse.ArgumentParser(description="Prompt multi LLMs and save responses.")
    parser.add_argument("-i", "--input", required=True, help="Input markdown file as prompt.")
    args = parser.parse_args()
    prompt = read_prompt(args.input)
    # Send the prompt to every model in parallel
    with concurrent.futures.ThreadPoolExecutor() as executor:
        future_to_model = {
            executor.submit(request_to_model, model, prompt): model
            for model in MODELS
        }
        for future in concurrent.futures.as_completed(future_to_model):
            model = future_to_model[future]  # look up which model this future belongs to
            content = future.result()
            save_response(args.input, model, content)

if __name__ == "__main__":
    main()
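Running it like python pgpt.py -i prompt.md writes one file per model next to the input, e.g. prompt-gpt-4o.md, prompt-gpt-4.1.md, and so on; the API key is picked up from the OPENAI_API_KEY environment variable.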
For this kind of use, asyncio seems better than threads bsahd.icon
I see, so it can be non-blocking; this might be better sta.icon
code:asyncpgpt.py
import argparse
from openai import AsyncOpenAI
import os
import asyncio

# Predefined list of models to query
MODELS = [
    "gpt-4o",
    "gpt-4.1",
    "o3-mini",
    "o1",
]

client = AsyncOpenAI()

def read_prompt(filename):
    with open(filename, "r", encoding="utf-8") as f:
        return f.read()

async def request_to_model(model_name, prompt, filename):
    # Ask a single model and write its answer (or the error) to a file as soon as it arrives
    try:
        response = await client.chat.completions.create(
            model=model_name,
            messages=[
                {"role": "user", "content": prompt},
            ],
        )
        save_response(filename, model_name, response.choices[0].message.content)
        return response.choices[0].message.content
    except Exception as e:
        save_response(filename, model_name, f"Error from {model_name}: {e}")

def save_response(base_filename, model_name, content):
    name, ext = os.path.splitext(base_filename)
    out_filename = f"{name}-{model_name}.md"
    with open(out_filename, "w", encoding="utf-8") as f:
        f.write(content)
        f.write("\n")  # add a trailing newline at the end of the file
    print(f"Saved: {out_filename}")

async def main():
    parser = argparse.ArgumentParser(description="Prompt multi LLMs and save responses.")
    parser.add_argument("-i", "--input", required=True, help="Input markdown file as prompt.")
    args = parser.parse_args()
    prompt = read_prompt(args.input)
    # Create one task per model and let them all run concurrently on the event loop
    tasks = []
    for name in MODELS:
        task = asyncio.create_task(request_to_model(name, prompt, args.input))
        tasks.append(task)
    await asyncio.gather(*tasks)

if __name__ == "__main__":
    asyncio.run(main())
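One design note on the asyncio version: asyncio.gather fires all the requests concurrently on a single event loop, and each request_to_model task calls save_response as soon as its own model answers, so fast models show up on disk without waiting for the slow ones; total wall time is roughly that of the slowest model.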