Skip to content
Snippets Groups Projects
Commit 3b6917c3 authored by Unknown's avatar Unknown
Browse files

added binary localization model

parent 7724e578
Branches
No related merge requests found
%% Cell type:code id: tags:
``` python
import imageio
import os
import cv2
from datetime import datetime
from tqdm import tqdm_notebook as tqdm
```
%% Output
---------------------------------------------------------------------------
ModuleNotFoundError Traceback (most recent call last)
<ipython-input-1-b928f8d648e2> in <module>()
3 import cv2
4 from datetime import datetime
----> 5 from tqdm import tqdm_notebook as tqdm
ModuleNotFoundError: No module named 'tqdm'
%% Cell type:code id: tags:
``` python
# Input frames directory and output video path.
# NOTE(review): the first in_dir/out_loc pair is immediately overwritten by the
# second pair below -- kept for quickly switching between experiments.
# BUGFIX: the first out_loc previously read r'out_loc = rC:\...' -- the
# assignment text had been pasted inside the string and the raw path was lost.
in_dir = r'C:\Users\Wheeler\Desktop\LCL_software\Experiments\green_lysis2'
out_loc = r'C:\Users\Wheeler\Desktop\LCL_software\Experiments\green_lysis2.avi'
in_dir = r'C:\Users\Wheeler\Desktop\LCL_software\Experiments\experiment_18_07_2018___12.20.17.722253'
out_loc = r'C:\Users\Wheeler\Desktop\LCL_software\Experiments\experiment_18_07_2018___12.20.17.avi'
images = []
# CLAHE (contrast-limited adaptive histogram equalization) for optional contrast boost.
clahe = cv2.createCLAHE(clipLimit=2.0, tileGridSize=(8, 8))
# Collect every .tif frame in the experiment directory.
files = [os.path.join(in_dir, f) for f in os.listdir(in_dir) if '.tif' in f]
def get_time(file_name):
    """Return the capture time of *file_name* as seconds since midnight.

    The filename is expected to end with a ``_HH.MM.SS.ffffff.tif`` suffix,
    e.g. ``frame_12.30.45.500000.tif``.
    """
    stamp = file_name.split('_')[-1].split('.tif')[0]
    parsed = datetime.strptime(stamp, '%H.%M.%S.%f')
    return (parsed.hour * 3600
            + parsed.minute * 60
            + parsed.second
            + parsed.microsecond / 10**6)
# Sort frames chronologically by the timestamp embedded in each filename.
files.sort(key=get_time,reverse = False)
# Earlier GIF-export approach via imageio.mimsave, kept for reference:
# for file in files:
# print('processing...',file.split('/')[-1])
# img = cv2.imread(file,0)
# img = cv2.resize(img,(int(img.shape[1]/3),int(img.shape[0]/3)),interpolation = cv2.INTER_CUBIC)
# img = clahe.apply(img)
# images.append(img)
# images = [images[0]] * 2 + images
# imageio.mimsave(out_loc, images,duration=.2)
# print('done')
```
%% Cell type:code id: tags:
``` python
# Probe the first frame to establish the output video dimensions.
img = cv2.imread(files[0], 1)
width = img.shape[1]
height = img.shape[0]
print(img.shape)
# Toggle CLAHE per-channel contrast enhancement for every frame.
# BUGFIX: previously set to True and immediately overwritten with False (dead store).
use_clahe = False
fourcc = cv2.VideoWriter_fourcc(*'MJPG')  # Be sure to use lower case
# BUGFIX: previously two VideoWriters were created back to back (6.0 fps then
# 14.0 fps); the first was never released and leaked its handle. Create only
# the one that is actually used. VideoWriter expects (width, height).
out = cv2.VideoWriter(out_loc, fourcc, 14.0, (width, height))
clahe = cv2.createCLAHE(clipLimit=2.0, tileGridSize=(8, 8))
for file in tqdm(files):
    img = cv2.imread(file, 1)
    img = cv2.resize(img, (width, height))
    if use_clahe:
        # Equalize each colour channel independently. (OpenCV loads BGR, so
        # the original r/g/b names were misleading -- channels are processed
        # identically anyway.)
        for c in range(3):
            img[:, :, c] = clahe.apply(img[:, :, c])
    out.write(img)
out.release()
print('done')
```
%% Output
(822, 1024, 3)
done
%% Cell type:code id: tags:
``` python
```
......
......@@ -235,7 +235,7 @@ class main_window(QMainWindow):
'100x']
self.ui.magnification_combobox.addItems(magnifications)
self.ui.magnification_combobox.currentIndexChanged.connect(stage.change_magnification)
self.ui.cell_type_to_lyse_comboBox.addItems(['red','green'])
self.ui.cell_type_to_lyse_comboBox.addItems(['red','green','green hope'])
self.ui.cell_type_to_lyse_comboBox.currentIndexChanged.connect(self.localizer.change_type_to_lyse)
self.ui.lysis_mode_comboBox.addItems(['direct','excision'])
self.ui.lysis_mode_comboBox.currentIndexChanged.connect(self.localizer.change_lysis_mode)
......
No preview for this file type
%% Cell type:code id: tags:
``` python
import imageio
import os
import cv2
from datetime import datetime
from tqdm import tqdm_notebook as tqdm
```
%% Cell type:code id: tags:
``` python
# Input frames directory and output video path.
# NOTE(review): the first in_dir/out_loc pair is immediately overwritten by the
# second pair below -- kept for quickly switching between experiments.
in_dir = r'C:\Users\Wheeler\Desktop\LCL_software\Experiments\green_lysis2'
out_loc = r'C:\Users\Wheeler\Desktop\LCL_software\Experiments\green_lysis2.avi'
in_dir = r'C:\Users\Wheeler\Desktop\LCL_software\Experiments\experiment_18_07_2018___12.20.17.722253'
out_loc = r'C:\Users\Wheeler\Desktop\LCL_software\Experiments\experiment_18_07_2018___12.20.17.avi'
images = []
# CLAHE (contrast-limited adaptive histogram equalization) for optional contrast boost.
clahe = cv2.createCLAHE(clipLimit=2.0, tileGridSize=(8,8))
# Collect every .tif frame in the experiment directory.
files = [os.path.join(in_dir, f) for f in os.listdir(in_dir) if '.tif' in f]
def get_time(file_name):
    """Return the capture time of *file_name* as seconds since midnight.

    Expects filenames ending in ``_HH.MM.SS.ffffff.tif``.
    """
    raw_stamp = file_name.rsplit('_', 1)[-1].split('.tif')[0]
    moment = datetime.strptime(raw_stamp, '%H.%M.%S.%f')
    seconds = moment.hour * 3600 + moment.minute * 60 + moment.second
    return seconds + moment.microsecond / 10**6
# Sort frames chronologically by the timestamp embedded in each filename.
files.sort(key=get_time,reverse = False)
# Earlier GIF-export approach via imageio.mimsave, kept for reference:
# for file in files:
# print('processing...',file.split('/')[-1])
# img = cv2.imread(file,0)
# img = cv2.resize(img,(int(img.shape[1]/3),int(img.shape[0]/3)),interpolation = cv2.INTER_CUBIC)
# img = clahe.apply(img)
# images.append(img)
# images = [images[0]] * 2 + images
# imageio.mimsave(out_loc, images,duration=.2)
# print('done')
```
%% Cell type:code id: tags:
``` python
# Probe the first frame to establish the output video dimensions.
img = cv2.imread(files[0], 1)
width = img.shape[1]
height = img.shape[0]
print(img.shape)
# Toggle CLAHE per-channel contrast enhancement for every frame.
use_clahe = False
fourcc = cv2.VideoWriter_fourcc(*'MJPG')  # Be sure to use lower case
# BUGFIX: previously two VideoWriters were created back to back (6.0 fps then
# 14.0 fps); the first was never released and leaked its handle. Create only
# the one that is actually used. VideoWriter expects (width, height).
out = cv2.VideoWriter(out_loc, fourcc, 14.0, (width, height))
clahe = cv2.createCLAHE(clipLimit=2.0, tileGridSize=(8, 8))
for file in tqdm(files):
    img = cv2.imread(file, 1)
    img = cv2.resize(img, (width, height))
    if use_clahe:
        # Equalize each colour channel independently. (OpenCV loads BGR, so
        # the original r/g/b names were misleading -- channels are processed
        # identically anyway.)
        for c in range(3):
            img[:, :, c] = clahe.apply(img[:, :, c])
    out.write(img)
out.release()
print('done')
```
%% Output
(822, 1024, 3)
done
%% Cell type:code id: tags:
``` python
```
......
......@@ -31,7 +31,8 @@ class Localizer(QtCore.QObject):
def __init__(self, parent = None):
super(Localizer, self).__init__(parent)
self.localizer_model = load_model(os.path.join(experiment_folder_location,'multiclass_localizer18_2.hdf5'),custom_objects={'mean_iou': mean_iou})
# self.localizer_model = load_model(os.path.join(experiment_folder_location,'multiclass_localizer18_2.hdf5'),custom_objects={'mean_iou': mean_iou})
self.localizer_model = load_model(os.path.join(experiment_folder_location,'binary_green_hope_localizer3.hdf5'))
# self.localizer_model = load_model(os.path.join(experiment_folder_location,'multiclass_localizer14.hdf5'))
# self.localizer_model = load_model(os.path.join(experiment_folder_location,'binary_localizer6.hdf5'))
self.norm = StandardScaler()
......@@ -47,8 +48,9 @@ class Localizer(QtCore.QObject):
self.lysis_mode = 'direct'
self.auto_lysis = False
# cv2.imshow('img',self.get_network_output(self.hallucination_img,'multi'))
img = self.get_network_output(self.hallucination_img,'binary')
print('IMG SHAPE:',img.shape)
cv2.imshow('img',self.get_network_output(self.hallucination_img,'binary'))
def stop_auto_lysis(self):
self.auto_lysis = False
......@@ -56,7 +58,8 @@ class Localizer(QtCore.QObject):
def change_type_to_lyse(self,index):
map_dict = {
0:'red',
1:'green'
1:'green',
2:'green hope'
}
self.cell_type_to_lyse = map_dict[index]
comment('changed cell type to:'+str(self.cell_type_to_lyse))
......@@ -93,7 +96,7 @@ class Localizer(QtCore.QObject):
#green cell
return_img[:,:,1] = segmented_image[0,:,:,2]
elif mode == 'binary':
return_img = segmented_image
return_img = segmented_image[0,:,:,0]
return return_img
@QtCore.pyqtSlot('PyQt_PyObject')
......@@ -111,7 +114,7 @@ class Localizer(QtCore.QObject):
return self.position
def move_frame(self,direction,relative=True):
distance = 80
distance = 95
frame_dir_dict = {
'u': np.array([0,-distance]),
'd': np.array([0,distance]),
......@@ -182,11 +185,15 @@ class Localizer(QtCore.QObject):
view_center = self.get_stage_position()
print('lysing all in view...')
self.delay()
segmented_image = self.get_network_output(self.image,'multi')
# segmented_image = self.get_network_output(self.hallucination_img,'multi')
# cv2.imshow('Cell Outlines and Centers',segmented_image)
# lyse all cells in view
self.lyse_cells(segmented_image,self.cell_type_to_lyse,self.lysis_mode)
if self.cell_type_to_lyse == 'green hope':
segmented_image = self.get_network_output(self.image,'binary')
self.lyse_cells(segmented_image,self.cell_type_to_lyse,self.lysis_mode)
else:
segmented_image = self.get_network_output(self.image,'multi')
# segmented_image = self.get_network_output(self.hallucination_img,'multi')
# cv2.imshow('Cell Outlines and Centers',segmented_image)
# lyse all cells in view
self.lyse_cells(segmented_image,self.cell_type_to_lyse,self.lysis_mode)
if self.auto_lysis == False:
self.stop_laser_flash_signal.emit()
return
......@@ -326,9 +333,9 @@ class Localizer(QtCore.QObject):
_,confidence_image = cv2.threshold(segmented_image[:,:,1],.5,1,cv2.THRESH_BINARY)
elif cell_type == 'red':
_,confidence_image = cv2.threshold(segmented_image[:,:,2],.5,1,cv2.THRESH_BINARY)
elif cell_type == 'any':
elif cell_type == 'green hope':
# assumes a binary image!
_,confidence_image = cv2.threshold(segmented_image,.5,1,cv2.THRESH_BINARY)
_,confidence_image = cv2.threshold(segmented_image,.3,1,cv2.THRESH_BINARY)
return confidence_image
def move_to_target(self,center,goto_reticle = False):
......
File added
0% or .
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment