Commit efab24b3 authored by Unknown

added tiling functionality: a new wellStitcher class stitches frames captured along a spiral well scan into a single image, driven by a new tile_well slot on Localizer

parent 7ee284b7
@@ -34,8 +34,8 @@ class ShowVideo(QtCore.QObject):
         self.camera.set(4,822)
         self.center_x = int(1024/2)
         self.center_y = int(822/2)
-        self.reticle_x = int(self.center_x+28)
-        self.reticle_y = int(self.center_y+118)
+        self.reticle_x = int(self.center_x+6)
+        self.reticle_y = int(self.center_y+115)

     def draw_reticle(self,image):
         cv2.circle(image,(self.reticle_x,self.reticle_y),
@@ -129,7 +129,7 @@ class main_window(QMainWindow):
         self.vid = ShowVideo(self.ui.verticalLayoutWidget.size())
         self.screen_shooter = screen_shooter()
         self.image_viewer = ImageViewer()
-        # self.autofocuser = autofocuser()
+        self.autofocuser = autofocuser()
         self.localizer = Localizer()
         # add the viewer to our ui
@@ -140,9 +140,9 @@ class main_window(QMainWindow):
         self.screenshooter_thread.start()
         self.screen_shooter.moveToThread(self.screenshooter_thread)
-        # self.autofocuser_thread = QThread()
-        # self.autofocuser_thread.start()
-        # self.autofocuser.moveToThread(self.autofocuser_thread)
+        self.autofocuser_thread = QThread()
+        self.autofocuser_thread.start()
+        self.autofocuser.moveToThread(self.autofocuser_thread)
         self.localizer_thread = QThread()
         self.localizer_thread.start()
@@ -155,13 +155,13 @@ class main_window(QMainWindow):
         # connect the outputs to our signals
         self.vid.VideoSignal.connect(self.image_viewer.setImage)
         self.vid.vid_process_signal.connect(self.screen_shooter.screenshot_slot)
-        # self.vid.vid_process_signal.connect(self.autofocuser.vid_process_slot)
+        self.vid.vid_process_signal.connect(self.autofocuser.vid_process_slot)
         self.vid.vid_process_signal.connect(self.localizer.vid_process_slot)
         self.qswitch_screenshot_signal.connect(self.screen_shooter.save_qswitch_fire_slot)
         self.localizer.qswitch_screenshot_signal.connect(self.screen_shooter.save_qswitch_fire_slot)
-        # self.start_focus_signal.connect(self.autofocuser.autofocus)
+        self.start_focus_signal.connect(self.autofocuser.autofocus)
         self.start_localization_signal.connect(self.localizer.localize)
-        # self.autofocuser.position_and_variance_signal.connect(self.plot_variance_and_position)
+        self.autofocuser.position_and_variance_signal.connect(self.plot_variance_and_position)
         self.image_viewer.click_move_signal.connect(stage.click_move_slot)
         self.localizer.localizer_move_signal.connect(stage.localizer_move_slot)
         self.localizer.ai_fire_qswitch_signal.connect(self.ai_fire_qswitch_slot)
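The hunks above re-enable the autofocuser and move it onto its own QThread, the same worker-object pattern already used for the screen shooter and localizer. For context, a minimal, self-contained PyQt5 sketch of that pattern (all names here are illustrative, not from this repo):

``` python
from PyQt5 import QtCore

class Worker(QtCore.QObject):
    # worker object whose slots run in whatever thread it lives in
    result_signal = QtCore.pyqtSignal(int)

    @QtCore.pyqtSlot(int)
    def process(self, value):
        # executes in the worker thread once moveToThread() has been called
        self.result_signal.emit(value * 2)

app = QtCore.QCoreApplication([])
worker = Worker()
worker_thread = QtCore.QThread()
worker_thread.start()
worker.moveToThread(worker_thread)
# signals connected to worker.process are now queued into worker_thread,
# keeping long-running work (autofocus, localization) off the GUI thread
```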
@@ -290,8 +290,8 @@ class main_window(QMainWindow):
             16777249:laser.fire_auto,
             70:self.qswitch_screenshot_slot,
             81:laser.qswitch_auto,
-            # 73:self.autofocuser.roll_forward,
-            # 75:self.autofocuser.roll_backward,
+            73:self.autofocuser.roll_forward,
+            75:self.autofocuser.roll_backward,
             79:self.start_autofocus,
             # 71:self.toggle_dmf_or_lysis,
             84:stage.move_left_one_well_slot,
@@ -307,8 +307,8 @@ class main_window(QMainWindow):
         # print('key released: {}'.format(event.key()))
         key_control_dict = {
             16777249:laser.stop_flash,
-            # 73:self.autofocuser.stop_roll,
-            # 75:self.autofocuser.stop_roll
+            73:self.autofocuser.stop_roll,
+            75:self.autofocuser.stop_roll
         }
         if event.key() in key_control_dict.keys():
             key_control_dict[event.key()]()
No preview for this file type
No preview for this file type
No preview for this file type
@@ -58,7 +58,7 @@ class autofocuser(QtCore.QObject):
         self.ch.setOnVelocityChangeHandler(self.velocity_change_handler)
         self.ch.setOnPositionChangeHandler(self.position_change_handler)
         self.image_title = 0
-        self.focus_model = load_model(os.path.join(experiment_folder_location,'VGG_model.hdf5'))
+        self.focus_model = load_model(os.path.join(experiment_folder_location,'VGG_model_5.hdf5'))
         self.focus_model._make_predict_function()
         self.belt_slip_offset = 120
         # self.step_to_position(self.full_scale)
%% Cell type:code id: tags:
``` python
import imageio
import os
import cv2
from datetime import datetime
from tqdm import tqdm_notebook as tqdm
```
%% Cell type:code id: tags:
``` python
# earlier experiment paths, overridden by the assignments below
in_dir = r'C:\Users\Wheeler\Desktop\LCL_software\Experiments\experiment_24_07_2018___14.41.25.882424'
out_loc = r'C:\Users\Wheeler\Desktop\LCL_software\Experiments\experiment_24_07_2018___14.41.25.avi'
in_dir = r'C:\Users\Wheeler\Desktop\LCL_software\Experiments\experiment_29_08_2018___10.38.18.959357'
out_loc = r'C:\Users\Wheeler\Desktop\LCL_software\Experiments\experiment_29_08_2018___10.38.18.avi'

images = []
clahe = cv2.createCLAHE(clipLimit=2.0, tileGridSize=(8,8))
files = [os.path.join(in_dir, f) for f in os.listdir(in_dir) if '.tif' in f]

def get_time(file_name):
    # parse the HH.MM.SS.microseconds timestamp out of the file name
    # t = file_name.split('_')[-2].split('_')[0]
    t = file_name.split('_')[-1].split('.tif')[0]
    t = datetime.strptime(t, '%H.%M.%S.%f')
    t = t.hour*3600 + t.minute*60 + t.second + t.microsecond/10**6
    return t

files.sort(key=get_time, reverse=False)

# for file in files:
#     print('processing...', file.split('/')[-1])
#     img = cv2.imread(file, 0)
#     img = cv2.resize(img, (int(img.shape[1]/3), int(img.shape[0]/3)), interpolation=cv2.INTER_CUBIC)
#     img = clahe.apply(img)
#     images.append(img)
# images = [images[0]] * 2 + images
# imageio.mimsave(out_loc, images, duration=.2)
# print('done')
```
%% Cell type:code id: tags:
``` python
img = cv2.imread(files[0], 1)
width = img.shape[1]
height = img.shape[0]
print(img.shape)

use_clahe = False
fourcc = cv2.VideoWriter_fourcc(*'MJPG')  # be sure to use lower case
out = cv2.VideoWriter(out_loc, fourcc, 14.0, (int(width/1), int(height/1)))  # /1 keeps full resolution; raise the divisor to downscale
clahe = cv2.createCLAHE(clipLimit=2.0, tileGridSize=(8,8))
for file in tqdm(files):
    img = cv2.imread(file, 1)
    img = cv2.resize(img, (int(width/1), int(height/1)))
    if use_clahe:
        # cv2 loads frames as BGR, so these are really the b,g,r planes;
        # the same CLAHE is applied to each channel, so the result is unaffected
        r,g,b = img[:,:,0], img[:,:,1], img[:,:,2]
        r = clahe.apply(r)
        g = clahe.apply(g)
        b = clahe.apply(b)
        img[:,:,0] = r
        img[:,:,1] = g
        img[:,:,2] = b
    out.write(img)
out.release()
print('done')
```
%% Output
(822, 1024, 3)
done
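A side note on the disabled CLAHE branch above: applying CLAHE to each color channel independently can shift hues. If contrast enhancement is wanted on these color frames, a common alternative is to equalize only the lightness channel in LAB space (a sketch, not code from this notebook):

``` python
import cv2

def clahe_color(img_bgr):
    # equalize contrast on the lightness channel only, preserving color
    lab = cv2.cvtColor(img_bgr, cv2.COLOR_BGR2LAB)
    l, a, b = cv2.split(lab)
    clahe = cv2.createCLAHE(clipLimit=2.0, tileGridSize=(8,8))
    lab = cv2.merge((clahe.apply(l), a, b))
    return cv2.cvtColor(lab, cv2.COLOR_LAB2BGR)
```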
%% Cell type:code id: tags:
``` python
```
 import time
-from utils import comment
+from utils import comment,now
 from keras.models import load_model
 import os
 from PyQt5 import QtCore
@@ -20,6 +20,40 @@ mean_iou = miou_metric.mean_iou

 experiment_folder_location = os.path.join(os.path.dirname(os.path.abspath(__file__)),'models')

+class wellStitcher():
+
+    def __init__(self,box_size,initial_img):
+        # get our initial coordinates
+        self.box_size = int(box_size*2 + 1)
+        self.center = int(np.ceil(box_size/2)+2)
+        self.curr_x = self.center
+        self.curr_y = self.center
+        # initialize the image and show it
+        self.img_x,self.img_y = int(1024),int(822)
+        self.well_img = np.zeros((self.img_y*self.box_size,self.img_x*self.box_size,3))
+        self.stitch_img(initial_img)
+
+    def stitch_img(self,img):
+        # paste the frame into the pixel block addressed by (curr_x,curr_y)
+        self.well_img[self.curr_y*self.img_y:(self.curr_y+1)*self.img_y, self.curr_x*self.img_x:(self.curr_x+1)*self.img_x,:] = img
+        self.resized_img = cv2.resize(self.well_img,(1024,822))
+        cv2.imshow('Stitch',self.resized_img)
+
+    def add_img(self,let,img):
+        # update the tile coordinates from the direction letter, then stitch
+        if let == 'u': self.curr_y -= 1
+        if let == 'd': self.curr_y += 1
+        if let == 'l': self.curr_x -= 1
+        if let == 'r': self.curr_x += 1
+        print(self.curr_y,self.curr_x)
+        self.stitch_img(img)
+
+    def write_well_img(self):
+        experiment_folder_location = os.path.join(os.path.dirname(os.path.abspath(__file__)),'well_images')
+        cv2.imwrite(os.path.join(experiment_folder_location,
+            '{}___{}.tif'.format('well_image',now())),self.well_img)
+
 class Localizer(QtCore.QObject):

     localizer_move_signal = QtCore.pyqtSignal('PyQt_PyObject','PyQt_PyObject','PyQt_PyObject','PyQt_PyObject')
     get_position_signal = QtCore.pyqtSignal()
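The slice arithmetic in stitch_img is the whole stitching scheme: tile coordinates index fixed-size pixel blocks on a preallocated canvas. A toy standalone version of that placement, with sizes shrunk for illustration:

``` python
import numpy as np

img_x, img_y, box = 4, 3, 3                  # toy frame size and grid width
canvas = np.zeros((img_y * box, img_x * box))
frame = np.ones((img_y, img_x))              # stand-in for a camera frame
curr_x, curr_y = 1, 2                        # tile column 1, tile row 2
canvas[curr_y*img_y:(curr_y+1)*img_y,
       curr_x*img_x:(curr_x+1)*img_x] = frame
print(canvas)                                # the frame lands in block (2,1)
```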
@@ -56,7 +90,7 @@ class Localizer(QtCore.QObject):
         map_dict = {
             0:('red','multiclass_localizer18_2.hdf5'),
             1:('green','multiclass_localizer18_2.hdf5'),
-            2:('green hope','binary_green_hope_localizer3.hdf5')
+            2:('green hope','second_binary_green_hope_localizer_16_0.28892_1_54_7_12.hdf5')
         }
         self.cell_type_to_lyse = map_dict[index][0]
         comment('loading cell localizer model...{}'.format(map_dict[index][1]))
@@ -120,18 +154,34 @@ class Localizer(QtCore.QObject):
         return self.position

     def move_frame(self,direction,relative=True):
-        distance = 95
+        y_distance = 95
+        x_distance = 120
         frame_dir_dict = {
-            'u': np.array([0,-distance]),
-            'd': np.array([0,distance]),
-            'l': np.array([-distance,0]),
-            'r': np.array([distance,0])
+            'u': np.array([0,-y_distance]),
+            'd': np.array([0,y_distance]),
+            'l': np.array([-x_distance,0]),
+            'r': np.array([x_distance,0])
         }
         self.localizer_move_signal.emit(frame_dir_dict[direction],False,True,False)

     def return_to_original_position(self,position):
         self.localizer_move_signal.emit(position,False,False,False)

+    @QtCore.pyqtSlot()
+    def tile_well(self):
+        self.well_center = self.get_stage_position()
+        box_size = 5  # spiral half-width; undefined in the committed code, value taken from localize()
+        stitcher = wellStitcher(box_size,self.image)
+        directions = self.get_spiral_directions(box_size)
+        for num,let in directions:
+            self.delay()
+            self.move_frame(let)
+            self.delay()
+            QApplication.processEvents()
+            stitcher.add_img(let,self.image)
+        comment('writing well tile file...')
+        stitcher.write_well_img()
+        comment('tiling completed!')
+
     @QtCore.pyqtSlot()
     def localize(self):
         '''
@@ -139,13 +189,14 @@ class Localizer(QtCore.QObject):
         using the method of lysis that the user selects, then returns to the original
         position (the center of the well)
         '''
         # first get our well center position
         self.lysed_cell_count = 0
         self.auto_lysis = True
         self.well_center = self.get_stage_position()
         # now start moving and lysing all in view
         self.lyse_all_in_view()
         box_size = 5
+        # stitcher = wellStitcher(box_size,self.image)
         directions = self.get_spiral_directions(box_size)
         self.get_well_center = False
         for num,let in directions:
@@ -163,8 +214,11 @@ class Localizer(QtCore.QObject):
             QApplication.processEvents()
             self.delay()
             QApplication.processEvents()
+            # stitcher.add_img(let,self.image)
             self.lyse_all_in_view()
         self.return_to_original_position(self.well_center)
         comment('lysis completed!')
+        # stitcher.write_well_img()
+        # self.return_to_original_position(self.well_center)

     def get_spiral_directions(self,box_size):
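The body of get_spiral_directions is not part of this diff. For readers following the tiling logic, here is a hypothetical sketch consistent with how tile_well and localize consume it, assuming it yields one (run_index, direction_letter) pair per single-frame move and walks a square spiral outward from the center of a (2*box_size+1) by (2*box_size+1) grid:

``` python
def get_spiral_directions(box_size):
    # square-spiral run lengths 1,1,2,2,...,n-1,n-1 plus one final n-1 run
    # visit every cell of the n x n grid exactly once (n*n - 1 moves total)
    n = 2 * box_size + 1
    letters = ['u', 'l', 'd', 'r']        # direction cycle: up, left, down, right
    runs = []
    for length in range(1, n):
        runs += [length, length]
    runs.append(n - 1)
    pairs = []
    for i, length in enumerate(runs):
        pairs += [(i, letters[i % 4])] * length   # one pair per single-frame move
    return pairs
```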
@@ -288,8 +342,9 @@ class Localizer(QtCore.QObject):
             self.move_to_target(old_center-window_center,True)
             self.delay()
             self.qswitch_screenshot_signal.emit(10)
-            self.ai_fire_qswitch_signal.emit(False)
-            self.delay()
+            for i in range(3):
+                self.ai_fire_qswitch_signal.emit(False)
+                self.delay()
             self.lysed_cell_count += 1
             if self.lysed_cell_count >= self.cells_to_lyse:
                 self.return_to_original_position(self.well_center)
@@ -301,7 +356,9 @@ class Localizer(QtCore.QObject):
                 self.move_to_target(-old_center + cell_centers[i],False)
                 old_center = cell_centers[i]
                 self.delay()
-                self.ai_fire_qswitch_signal.emit(False)
+                for i in range(3):  # note: i shadows the enclosing loop variable here
+                    self.ai_fire_qswitch_signal.emit(False)
+                    self.delay()
                 self.delay()
                 self.lysed_cell_count += 1
                 if self.auto_lysis == False:
@@ -339,7 +396,7 @@ class Localizer(QtCore.QObject):
         elif cell_type == 'green hope':
             # assumes a binary image!
             # TODO: find the optimal location on the AUC curve for the threshold
-            _,confidence_image = cv2.threshold(segmented_image,.3,1,cv2.THRESH_BINARY)
+            _,confidence_image = cv2.threshold(segmented_image,.9,1,cv2.THRESH_BINARY)
         return confidence_image

     def move_to_target(self,center,goto_reticle = False):
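The only change in this hunk is a much stricter confidence cutoff (0.3 to 0.9) on the segmentation output. cv2.THRESH_BINARY sets pixels above the threshold to the maxval (1 here) and zeroes the rest, so on a float32 probability map:

``` python
import numpy as np
import cv2

seg = np.array([[0.2, 0.5], [0.85, 0.95]], dtype=np.float32)
_, conf = cv2.threshold(seg, 0.9, 1, cv2.THRESH_BINARY)
print(conf)  # [[0. 0.] [0. 1.]] -- only the 0.95 pixel passes the 0.9 cutoff
```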
File added
File added
@@ -82,6 +82,7 @@ class screen_shooter(QtCore.QObject):
         self.image_title = 'during_qswitch_fire'
         self.requested_frames += num_frames

 class MeanIoU(object):
+    # taken from http://www.davidtvs.com/keras-custom-metrics/
     def __init__(self, num_classes):
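For context on what this metric computes: mean IoU averages, over classes, the ratio of intersection to union between the predicted and true label masks. A plain NumPy sketch of the quantity (illustrative only; the linked post wraps this into a Keras-compatible metric):

``` python
import numpy as np

def mean_iou(y_true, y_pred, num_classes):
    # average intersection-over-union across the classes that appear
    ious = []
    for c in range(num_classes):
        inter = np.logical_and(y_true == c, y_pred == c).sum()
        union = np.logical_or(y_true == c, y_pred == c).sum()
        if union:                        # skip classes absent from both masks
            ious.append(inter / union)
    return float(np.mean(ious))
```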