Cellpose-SAM
Overview
Note that the code differs somewhat from earlier Cellpose versions.
Even on Mac, you no longer need to specify the GPU device explicitly (see the quick check below).
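For reference, a quick way to see which GPU backend PyTorch can reach (a minimal sketch, not part of the original notes; torch is already a Cellpose dependency, and Cellpose selects its device internally):
code:jupyter notebook
import torch
# Assumption: checking the two common backends is enough for a sanity check
print(torch.backends.mps.is_available())  # Apple-silicon (MPS) backend
print(torch.cuda.is_available())          # NVIDIA CUDA backend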
Setup
code:Anaconda Prompt
pip install --upgrade cellpose
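Optionally, confirm which version was installed (a generic standard-library check; Cellpose-SAM should correspond to a 4.x release):
code:jupyter notebook
from importlib.metadata import version
print(version("cellpose"))  # expect 4.x for Cellpose-SAM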
Sample code
code:jupyter notebook
## Import modules
import os
import numpy as np
import matplotlib.pyplot as plt
from tqdm.notebook import tqdm
from cellpose import models, io, utils, plot, core
import pandas as pd
import torch
print(core.use_gpu())  # check whether a GPU backend is available
code:jupyter notebook
images = io.imread('test.tif')  # multi-page TIFF, single grayscale channel
print(f"Image shape: {images.shape}")
print(f"Timepoints: {images.shape[0]}")
plt.imshow(images[0], cmap='gray')
plt.title('First timepoint')
plt.colorbar()
plt.show()
code:jupyter notebook
%%time
model = models.CellposeModel(gpu=False)
masks, flows, styles = model.eval(images[0], flow_threshold=0.4, cellprob_threshold=0.0)
fig = plt.figure(figsize=(16, 10))
plot.show_segmentation(fig, images[0], masks, flows[0])
plt.tight_layout()
plt.show()
code:jupyter notebook
%%time
model = models.CellposeModel(gpu=True)
masks, flows, styles = model.eval(images[0], flow_threshold=0.4, cellprob_threshold=0.0)
fig = plt.figure(figsize=(16, 10))
plot.show_segmentation(fig, images[0], masks, flows[0])
plt.tight_layout()
plt.show()
code:jupyter notebook
%%time
batch_size = 10
num_batches = (len(images) + batch_size - 1) // batch_size
model = models.CellposeModel(gpu=True)
masks = []
os.makedirs('segmentation', exist_ok=True)  # make sure the output folder exists before saving
for b in tqdm(range(num_batches), desc="Processing batches"):
    start_idx = b * batch_size
    end_idx = min((b + 1) * batch_size, len(images))
    batch_images = images[start_idx:end_idx]  # slice out the frames for this batch
    for i, img in enumerate(tqdm(batch_images, desc=f"Batch {b+1}/{num_batches}", leave=False)):
        mask, flow, style = model.eval(img, flow_threshold=0.4, cellprob_threshold=0.0)
        masks.append(mask)
        io.save_to_png(img, mask, flow, 'segmentation/' + str(start_idx + i) + '.png')  # global frame index so files are not overwritten
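After the loop, the per-frame masks can be summarized with pandas (imported above). A minimal sketch, not part of the original notes; the column names and output file name are arbitrary:
code:jupyter notebook
# Cellpose masks label cells 1..N (0 = background), so max() gives the cell count per frame
counts = [int(m.max()) for m in masks]
df = pd.DataFrame({'frame': range(len(counts)), 'n_cells': counts})
df.to_csv('cell_counts.csv', index=False)
df.head()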
created by y-goto.icon
2025/5/20