Mod of Zach Mueller's WWF 01_Custom.ipynb.
Here we'll take cropped images of antinodes and try to count the rings, by fashioning a regression model out of a single-output classification model and scaling the output sigmoid (via fastai's y_range parameter) so that our fitted values stay within the nearly-linear regime of the sigmoid.
We also want to "clamp" our output between a minimum of about 0.2 rings and a maximum of 11 rings, because that's how the dataset was created, so the sigmoid makes a good choice for this "clamping" too.
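For reference, fastai implements y_range via its sigmoid_range function (in fastai.layers), which rescales a sigmoid onto the interval you give it. Here's a minimal sketch of the idea (the name sigmoid_range_sketch is ours, just for illustration):
import torch

def sigmoid_range_sketch(x, lo, hi):  # sketch of what fastai's sigmoid_range does
    return torch.sigmoid(x) * (hi - lo) + lo

# raw outputs near 0 land mid-range (the near-linear regime); large-magnitude
# outputs saturate at the ends, which is the "clamping" behavior we want
x = torch.tensor([-10., -1., 0., 1., 10.])
print(sigmoid_range_sketch(x, 0.2, 13.0))  # ~[0.2, 3.6, 6.6, 9.6, 13.0]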
!pip install espiownage --upgrade -q
import espiownage
from espiownage.core import *
sysinfo()
print(f"espiownage version {espiownage.__version__}")
And import our libraries
from fastai.vision.all import *
Below you will find the exact imports for everything we use today
from fastcore.foundation import L
from fastcore.xtras import Path # @patch'd properties to the Pathlib module
from fastai.callback.fp16 import to_fp16
from fastai.callback.schedule import fit_one_cycle, lr_find
from fastai.data.external import untar_data, URLs
from fastai.data.block import RegressionBlock, DataBlock
from fastai.data.transforms import get_image_files, Normalize, RandomSplitter, parent_label
from fastai.interpret import ClassificationInterpretation
from fastai.learner import Learner # imports @patch'd properties to Learner including `save`, `load`, `freeze`, and `unfreeze`
from fastai.optimizer import ranger
from fastai.vision.augment import aug_transforms, RandomResizedCrop, Resize, ResizeMethod
from fastai.vision.core import imagenet_stats
from fastai.vision.data import ImageBlock
from fastai.vision.learner import cnn_learner
from fastai.vision.utils import download_images, verify_images
import os
dataset_name = 'fake' # choose from:
# - cleaner (*real* data that's clean-er than "preclean"),
# - preclean (unedited aggregates of 15-or-more volunteers)
# - spnet, (original SPNet Real dataset)
# - cyclegan (original SPNet CGSmall dataset)
# - fake (newer than SPNet fake, this includes non-int ring #s)
use_wandb = False # WandB.ai logging
project = 'count_in_crops' # project name for wandb
if use_wandb:
    !pip install wandb -qqq
    import wandb
    from fastai.callback.wandb import *
    from fastai.callback.tracker import SaveModelCallback
    wandb.login()
path = get_data(dataset_name) / 'crops'; path
fnames = get_image_files(path) # image filenames
print(f"{len(fnames)} total cropped images")
ind = 1 # pick one cropped image
fnames[ind]
For labels, we want the ring count, which we extract from the filename: it's the number between the last '_' and the '.png'.
def label_func(x):
    return round(float(x.stem.split('_')[-1]), 2)
print(label_func(fnames[ind]))
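For example, on a hypothetical filename following this convention (made up here purely for illustration):
print(label_func(Path('someimage_proc_00123_3.5.png')))  # hypothetical filename -> 3.5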
cropsize = (300,300) # we will resize/reshape all input images to squares of this size
croppedrings = DataBlock(blocks=(ImageBlock, RegressionBlock(n_out=1)),
    get_items=get_image_files,
    splitter=RandomSplitter(),  # Note the random splitting. K-fold is another notebook
    get_y=label_func,
    item_tfms=Resize(cropsize, ResizeMethod.Squish),
    batch_tfms=[*aug_transforms(size=cropsize, flip_vert=True, max_rotate=360.0),
                Normalize.from_stats(*imagenet_stats)])
# define dataloaders
dls = croppedrings.dataloaders(path, bs=32)
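(Optional sanity check, not in the original notebook: grab one batch and confirm the shapes look right.)
xb, yb = dls.one_batch()
print(xb.shape, yb.shape)  # expect something like [32, 3, 300, 300] for images, with one float target per image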
Take a look at sample target data. Notice how circular the rings are! That's how we 'got away with' arbitrary (360 degree) rotations in the DataBlock's batch_tfms above.
dls.show_batch(max_n=9)
opt = ranger # optimizer the kids love these days
y_range=(0.2,13) # balance between "clamping" to range of real data vs too much "compression" from sigmoid nonlinearity
if use_wandb:
    wandb.init(project=project, name=f'{dataset_name}')
    cbs = [WandbCallback()]
else:
    cbs = []
learn = cnn_learner(dls, resnet34, n_out=1, y_range=y_range,
                    metrics=[mae, acc_reg05, acc_reg07, acc_reg1, acc_reg15, acc_reg2],
                    loss_func=MSELossFlat(), opt_func=opt, cbs=cbs)
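The acc_reg* metrics come from espiownage; judging by their names, they report the fraction of predictions falling within a tolerance (0.5, 0.7, 1, 1.5, or 2 rings) of the target. If you wanted to roll such a metric yourself, a sketch of that idea (our guess at the concept, not espiownage's actual code):
def acc_within(inp, targ, tol=0.5):  # fraction of predictions within ±tol rings of the target
    return ((inp.flatten() - targ.flatten()).abs() <= tol).float().mean()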
learn.lr_find() # we're just going to use 5e-3 though
lr = 5e-3
epochs = 30 # 10-11 epochs is fine for lr=1e-2; here we do 30 w/ lower lr to see if we can "do better"
learn.fine_tune(epochs, lr, freeze_epochs=2)
^ we could go back up and cut this off at 10, 15 or 20 epochs. In this case I just wanted to explore how low the val_loss would go!
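If you'd rather not eyeball the cutoff, fastai's EarlyStoppingCallback (from fastai.callback.tracker) can stop training once valid_loss plateaus. A sketch, with illustrative threshold values:
from fastai.callback.tracker import EarlyStoppingCallback
# stop if valid_loss hasn't improved by at least 0.001 for 5 straight epochs (values are assumptions)
learn.fine_tune(epochs, lr, freeze_epochs=2,
                cbs=[EarlyStoppingCallback(monitor='valid_loss', min_delta=0.001, patience=5)])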
if use_wandb: wandb.finish()
learn.save(f'crop-rings-{dataset_name}') # save a checkpoint so we can restart from here later
learn.load(f'crop-rings-{dataset_name}'); # can start from here assuming learn, dls, etc are defined
preds, targs, losses = learn.get_preds(with_loss=True) # validation set only
print(f"We have {len(preds)} predictions.")
Let's define a function to show a single prediction:
def showpred(ind, preds, targs, losses, dls):  # show prediction at this index
    print(f"preds[{ind}] = {preds[ind]}, targs[{ind}] = {targs[ind]}, loss = {losses[ind]}")
    print(f"file = {os.path.basename(dls.valid.items[ind])}")
    print("Image:")
    dls.valid.dataset[ind][0].show()
showpred(0, preds, targs, losses, dls)
And now we'll run through predictions for the whole validation set:
results = []
for i in range(len(preds)):
    line_list = [dls.valid.items[i].stem] + [round(targs[i].cpu().numpy().item(), 2),
                 round(preds[i][0].cpu().numpy().item(), 2), losses[i].cpu().numpy(), i]
    results.append(line_list)
# store ring counts as a Pandas DataFrame
res_df = pd.DataFrame(results, columns=['filename', 'target', 'prediction', 'loss','i'])
There is no fastai top_losses defined for this type, but we can do our own version of printing top losses:
res_df = res_df.sort_values('loss', ascending=False)
res_df.head()
def show_top_losses(res_df, preds, targs, losses, dls, n=5):  # assumes res_df is already sorted by loss, descending
    for j in range(n):
        showpred(res_df.iloc[j]['i'], preds, targs, losses, dls)
show_top_losses(res_df, preds, targs, losses, dls)
So then we can write these results out to a CSV file, and use it to direct our data-cleaning efforts, i.e. look at the top-loss images first!
res_df.to_csv(f'ring_count_top_losses_{dataset_name}.csv', index=False)
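As a hypothetical follow-up, you could read that CSV back and hand the worst offenders to whoever is doing the cleaning:
worst = pd.read_csv(f'ring_count_top_losses_{dataset_name}.csv').head(20)  # rows are already sorted worst-first
print(worst['filename'].tolist())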
df2 = res_df.reset_index(drop=True)
plt.plot(df2["target"],'o',label='target')
plt.plot(df2["prediction"],'s', label='prediction')
plt.xlabel('Top-loss order (left=worse, right=better)')
plt.legend(loc='lower right')
plt.ylabel('Ring count')
plt.plot(df2["target"],df2["prediction"],'o')
plt.xlabel('Target ring count')
plt.ylabel('Predicted ring count')
plt.axis('square')
print(f"Target ring count range: ({df2['target'].min()}, {df2['target'].max()})")
print(f"Predicted ring count range: ({df2['prediction'].min()}, {df2['prediction'].max()})")