Mod of Zach Mueller's WWF (Walk With Fastai) 01_Custom.ipynb.
Here we'll take cropped images of antinodes and try to count the rings, by fashioning a regression model out of a classification model with a single output and scaling the output sigmoid (via fastai's y_range parameter) so that our fitted values stay within the (nearly) linear regime of the sigmoid.
We also want to "clamp" our outputs between a minimum of about 0.2 rings and a maximum of 11 rings, because that's how the dataset was created, so the sigmoid is a good choice for this "clamping" too.
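Under the hood, fastai's y_range wraps the final activation in a scaled sigmoid (sigmoid_range, in fastai.layers); here's a minimal sketch of the idea, with example activations made up for illustration:
import torch

def sigmoid_range(x, lo, hi):
    # scale a sigmoid so outputs land strictly between lo and hi -- the trick behind fastai's y_range
    return torch.sigmoid(x) * (hi - lo) + lo

# We'll use y_range=(0.2, 13) below: a bit wider than the data's max of 11 rings,
# so that real targets stay out of the sigmoid's saturated upper tail.
x = torch.tensor([-4.0, 0.0, 4.0])
print(sigmoid_range(x, 0.2, 13))  # ≈ tensor([ 0.4302,  6.6000, 12.7698])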
import espiownage
from espiownage.core import *
sysinfo()
print(f"espiownage version {espiownage.__version__}")
And import our libraries
from fastai.vision.all import *
from espiownage.core import *
Below you will find the exact imports for everything we use today
from fastcore.foundation import L
from fastcore.xtras import Path # @patch'd properties to the Pathlib module
from fastai.callback.fp16 import to_fp16
from fastai.callback.schedule import fit_one_cycle, lr_find
from fastai.data.external import untar_data, URLs
from fastai.data.block import RegressionBlock, DataBlock
from fastai.data.transforms import get_image_files, Normalize, RandomSplitter, parent_label
from fastai.interpret import ClassificationInterpretation
from fastai.learner import Learner # imports @patch'd properties to Learner including `save`, `load`, `freeze`, and `unfreeze`
from fastai.losses import MSELossFlat
from fastai.metrics import mae
from fastai.optimizer import ranger
from fastai.vision.augment import aug_transforms, RandomResizedCrop, Resize, ResizeMethod
from fastai.vision.core import imagenet_stats
from fastai.vision.data import ImageBlock
from fastai.vision.learner import cnn_learner
from fastai.vision.utils import download_images, verify_images
from torchvision.models import resnet34
import os
import pandas as pd
import matplotlib.pyplot as plt
dataset_name = 'spnet' # choose from:
# - cleaner (*real* data that's clean-er than "preclean"),
# - preclean (unedited aggregates of 15-or-more volunteers)
# - spnet, (original SPNet Real dataset)
# - cyclegan (original SPNet CGSmall dataset)
# - fake (newer than SPNet fake, this includes non-int ring #s)
use_wandb = False # WandB.ai logging
project = 'count_in_crops' # project name for wandb
if use_wandb:
    !pip install wandb -qqq
    import wandb
    from fastai.callback.wandb import *
    from fastai.callback.tracker import SaveModelCallback
    wandb.login()
path = get_data(dataset_name) / 'crops'; path
fnames = get_image_files(path)
print(f"{len(fnames)} total cropped images")
ind = 1 # pick one cropped image
fnames[ind]
For labels, we want the ring count, which is the number between the last '_' and the '.png' in the filename:
def label_func(x):
    return round(float(x.stem.split('_')[-1]), 2)
print(label_func(fnames[ind]))
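For instance, with a made-up filename that follows the same convention (the real names come from the crops directory):
print(label_func(Path('example_antinode_crop_4.5.png')))  # -> 4.5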
cropsize = (300,300) # pixels
croppedrings = DataBlock(blocks=(ImageBlock, RegressionBlock(n_out=1)),
                         get_items=get_image_files,
                         splitter=RandomSplitter(),
                         get_y=label_func,
                         item_tfms=Resize(cropsize, ResizeMethod.Squish),
                         batch_tfms=[*aug_transforms(size=cropsize, flip_vert=True, max_rotate=360.0),
                                     Normalize.from_stats(*imagenet_stats)])
dls = croppedrings.dataloaders(path, bs=32)
dls.show_batch(max_n=9)
opt = ranger
y_range = (0.2, 13)  # balance between "clamping" to the range of real data vs. too much "compression" from the sigmoid nonlinearity
learn = cnn_learner(dls, resnet34, n_out=1, y_range=y_range,
                    metrics=[mae, acc_reg05, acc_reg1, acc_reg15, acc_reg2],
                    loss_func=MSELossFlat(), opt_func=opt)
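The acc_reg* metrics come from espiownage.core; as I understand them, they report the fraction of predictions that land within a fixed tolerance of the target (0.5, 1, 1.5, and 2 rings, respectively). A rough sketch of that idea, under that assumption:
def acc_reg(preds, targs, tol=0.5):
    # fraction of predictions within `tol` rings of the target (sketch only; see espiownage.core for the real metrics)
    return ((preds.flatten() - targs.flatten()).abs() <= tol).float().mean()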
learn.lr_find()
lr = 5e-3
learn.fine_tune(30, lr, freeze_epochs=2) # accidentally ran this twice in a row :-O
learn.save(f'crop-rings-{dataset_name}')
learn.load(f'crop-rings-{dataset_name}');
preds, targs, losses = learn.get_preds(with_loss=True) # validation set only
len(preds)
I'll define a function to show a single prediction:
def showpred(ind, preds, targs, losses, dls): # show prediction at this index
    print(f"preds[{ind}] = {preds[ind]}, targs[{ind}] = {targs[ind]}, loss = {losses[ind]}")
    print(f"file = {os.path.basename(dls.valid.items[ind])}")
    print("Image:")
    dls.valid.dataset[ind][0].show()
showpred(0, preds, targs, losses, dls)
And now we'll run through predictions for the whole validation set:
results = []
for i in range(len(preds)):
    line_list = [dls.valid.items[i].stem,
                 round(targs[i].cpu().numpy().item(), 2),
                 round(preds[i][0].cpu().numpy().item(), 2),
                 losses[i].cpu().numpy(), i]
    results.append(line_list)
# store as pandas dataframe
res_df = pd.DataFrame(results, columns=['filename', 'target', 'prediction', 'loss','i'])
We can do our own version of printing top_losses:
res_df = res_df.sort_values('loss', ascending=False)
res_df.head()
def show_top_losses(res_df, preds, targs, losses, dls, n=5):
    for j in range(n):
        showpred(res_df.iloc[j]['i'], preds, targs, losses, dls)
show_top_losses(res_df, preds, targs, losses, dls)
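For comparison, fastai has a built-in equivalent; a sketch, noting that for this regression task we'd use the base Interpretation class rather than the ClassificationInterpretation imported above:
interp = Interpretation.from_learner(learn)
interp.plot_top_losses(5)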
So then we can output these results to a CSV file and use it to direct our data-cleaning efforts, i.e., look at the top-loss images first!
res_df.to_csv(f'ring_count_top_losses_{dataset_name}.csv', index=False)
df2 = res_df.reset_index(drop=True)
plt.plot(df2["target"],'o',label='target')
plt.plot(df2["prediction"],'s', label='prediction')
plt.xlabel('Top-loss order (left=worse, right=better)')
plt.legend(loc='lower right')
plt.ylabel('Ring count')
plt.plot(df2["target"],df2["prediction"],'o')
plt.xlabel('Target ring count')
plt.ylabel('Predicted ring count')
plt.axis('square')
print(f"Target ring count range: ({df2['target'].min()}, {df2['target'].max()})")
print(f"Predicted ring count range: ({df2['prediction'].min()}, {df2['prediction'].max()})")