Skip to content
Snippets Groups Projects
Commit d029efc6 authored by Harrison Edwards's avatar Harrison Edwards
Browse files

added a directory organizer and improved autofocus

parent 2fd59c56
No related merge requests found
......@@ -57,7 +57,11 @@ class ShowVideo(QtCore.QObject):
# print(cv2.Laplacian(image, cv2.CV_64F).var())
self.draw_reticle(image)
if self.noise_removal == True:
print('denoising...')
self.camera.set(3,1024)
self.camera.set(4,822)
image = cv2.fastNlMeansDenoisingColored(image,None,3,7,7)
print('done denoising')
color_swapped_image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)
height, width, _ = color_swapped_image.shape
qt_image = QtGui.QImage(color_swapped_image.data,
......@@ -192,7 +196,7 @@ class main_window(QMainWindow):
self.vid.noise_removal = True
else:
print('unchecked!')
self.vid.noise_removal = True
self.vid.noise_removal = False
def setup_combobox(self):
magnifications = [
......@@ -252,6 +256,8 @@ class main_window(QMainWindow):
positions = ituple[0]
variances = ituple[1]
plt.plot(variances)
plt.xlabel('position')
plt.ylabel('variance of laplacian')
plt.show()
# def on_key_press(symbol, modifiers):
......
No preview for this file type
......@@ -26,10 +26,12 @@ class autofocuser(QtCore.QObject):
self.ch.openWaitForAttachment(5000)
self.ch.setEngaged(True)
self.full_scale = 27300
self.position = self.ch.getTargetPosition()
self.image_count = 0
self.track_position = False
self.pool = ThreadPool(processes=3)
self.velocity = 0
self.ch.setDataInterval(100)
self.position = 0
self.status_dict = {'current limit':self.ch.getCurrentLimit,
'control mode': self.ch.getControlMode,
# 'setting min position: ': self.ch.setMinPosition(0),
......@@ -40,13 +42,25 @@ class autofocuser(QtCore.QObject):
'target position': self.ch.getTargetPosition,
'acceleration': self.ch.getAcceleration,
'engaged?': self.ch.getEngaged,
'max velocity:': self.ch.getMaxVelocityLimit}
'max velocity:': self.ch.getMaxVelocityLimit,
'data interval': self.ch.getDataInterval,
'min data interval': self.ch.getMinDataInterval}
for k,v in self.status_dict.items():
comment('{}: {}'.format(k,v()))
self.ch.setOnVelocityChangeHandler(self.velocity_change_handler)
self.ch.setOnPositionChangeHandler(self.position_change_handler)
# self.step_to_position(self.full_scale)
# self.autofocus()
def velocity_change_handler(self, channel, velocity):
    """Phidget event callback: cache the most recently reported velocity.

    `channel` is the Phidget channel object that fired the event; only the
    reported `velocity` value is kept, for the rest of the class to read.
    """
    self.velocity = velocity
def position_change_handler(self, channel, position):
    """Phidget event callback: cache the most recently reported position.

    `channel` is the Phidget channel object that fired the event; only the
    reported `position` value is kept, for the rest of the class to read.
    """
    self.position = position
@QtCore.pyqtSlot('PyQt_PyObject')
def vid_process_slot(self,image):
# Qt slot: store the newest camera frame so the focus routines can
# sample it (they slice self.image and score it with cv2.Laplacian).
self.image = image
......@@ -57,152 +71,80 @@ class autofocuser(QtCore.QObject):
self.position = self.ch.getPosition()
return self.position
# NOTE(review): this span is scraped diff output — the rename from
# step_to_position to step_to_relative_position left both def lines and
# both setTargetPosition variants visible; only the *_relative_* form
# with the non-blocking move is the new code.
def step_to_position(self,position):
# Move the focus stage by `position` steps relative to where it is now.
def step_to_relative_position(self,position):
self.ch.setAcceleration(2000)
self.ch.setVelocityLimit(2000)
# TODO check if the given position will make us exceed the range
# Old (removed) form: offset from the last *commanded* target, blocking.
self.ch.setTargetPosition(self.ch.getTargetPosition() + position)
while self.ch.getIsMoving() == True:
time.sleep(.1)
# New form: offset from the last *reported* position (self.position is
# kept fresh by position_change_handler); returns without waiting.
self.ch.setTargetPosition(self.position + position)
# while self.ch.getIsMoving() == True:
# time.sleep(.1)
# print(self.ch.getTargetPosition())
def roll_forward(self):
# Start rolling the focus motor "forward" (negative velocity) in
# velocity-control mode while a pool task dead-reckons the position.
self.track_position = True
self.ch.setControlMode(1)
# NOTE(review): diff residue — acceleration was changed 750 -> 1500;
# only one of the next two lines exists in the real file.
self.ch.setAcceleration(750)
self.ch.setAcceleration(1500)
self.ch.setVelocityLimit(-5000)
# Returns the AsyncResult so the caller can collect tracked positions.
res = self.pool.apply_async(self.position_tracker, (5000,750,-1))
return res
def roll_backward(self):
# Mirror of roll_forward: positive velocity direction.
self.track_position = True
self.ch.setControlMode(1)
# NOTE(review): diff residue — acceleration was changed 750 -> 1500;
# only one of the next two lines exists in the real file.
self.ch.setAcceleration(750)
self.ch.setAcceleration(1500)
self.ch.setVelocityLimit(5000)
# Returns the AsyncResult so the caller can collect tracked positions.
res = self.pool.apply_async(self.position_tracker, (5000,750,1))
return res
def stop_roll(self):
# Halt a velocity-mode roll and report where the stage stopped.
self.ch.setVelocityLimit(0)
self.track_position = False
# NOTE(review): diff residue — the control mode argument was changed
# 1 -> 0 in this commit; only setControlMode(0) is the new code.
self.ch.setControlMode(1)
self.ch.setControlMode(0)
self.ch.setAcceleration(10000)
comment('focus at {} steps'.format(self.get_position()))
def swing_range(self):
    """Sweep the focus stage toward (nearly) the far end of its travel.

    Commands a move to full_scale - 2500 steps under a moderate velocity
    limit and returns immediately, without waiting for the move to finish.
    """
    sweep_target = self.full_scale - 2500
    self.ch.setVelocityLimit(2000)
    self.ch.setTargetPosition(sweep_target)
def get_velocity(self):
    """Return the stepper's current velocity, or None if it cannot be read.

    The Phidget channel can raise while detached or being reconfigured;
    report None in that case instead of crashing the caller.
    """
    try:
        return self.ch.getVelocity()
    except Exception:
        # Was a bare `except:`, which also swallowed KeyboardInterrupt and
        # SystemExit; `Exception` keeps the best-effort behaviour safely.
        return None
def position_tracker(self,max_velocity,a,direction,x):
# Dead-reckon the stage position by integrating the commanded motion
# profile (constant acceleration up to a velocity ceiling) while the
# motor reports that it is moving.
#   max_velocity: velocity ceiling for the integration (steps/s)
#   a: acceleration magnitude (steps/s^2); negated when direction == -1
#   direction: +1 or -1 travel direction
#   x: starting position estimate (steps)
# Returns the list of estimated positions, sampled roughly every 0.1 s.
# NOTE(review): roll_forward/roll_backward call this with only three
# arguments (no x) — likely diff residue; confirm the real signature.
# always assume that initial velocity is 0
print('tracking')
v = 0
t0 = time.time()
if direction == -1: a = -a
i = 1
positions = []
self.track_position = True
while self.ch.getIsMoving() == True:
dt = (time.time()-t0)
# check if we are at max velocity
if v >= max_velocity:
x += v*dt
else:
x += v*dt + 1/2*a*dt**2
v += a*dt
if v > max_velocity: v = max_velocity
t0 += dt
i += 1
comment('position tracking x: {} v: {} a: {}'.format(
x,v,a))
positions.append(x)
time.sleep(.1)
return positions
def change_magnification(self,index):
# Re-position the focus stage when the objective magnification combobox
# changes; only index 4 currently triggers a move.
print('changing mag')
if index == 4:
# NOTE(review): diff residue — the hard-coded 16718 target was replaced
# by -full_scale + 2500; only one of the next two lines is real code.
self.ch.setTargetPosition(16718)
self.ch.setTargetPosition(-self.full_scale+2500)
@QtCore.pyqtSlot()
def autofocus(self):
# Coarse autofocus: sweep +range then -range around the current
# position, scoring frames by variance of the Laplacian via
# focus_over_range, then settle on whichever sweep was sharper.
# NOTE(review): the print/position_tracker lines below are removed old
# code left in the scraped diff; the new routine starts at range = 4000.
print(self.ch.getMaxVelocityLimit(),
self.ch.getAcceleration(),
1,0)
# we want to roll the focus forward steadily and capture
# laplacian variances as we do so, until we find a max
position_result = self.pool.apply_async(self.position_tracker,(2000,
self.ch.getAcceleration(),
1,0))
self.pool.apply_async(self.swing_range)
# NOTE(review): shadows the builtin `range` inside this method.
range = 4000
variance1, location1, variances1 = self.focus_over_range(range)
self.step_to_relative_position(-range)
variance2, location2, variances2 = self.focus_over_range(-range)
# Reverse the second sweep so the concatenated curve is monotonic in
# position before emitting it for plotting.
variances2.reverse()
total_variances = variances2 + variances1
self.position_and_variance_signal.emit(([],total_variances))
if variance1 > variance2:
self.ch.setTargetPosition(location1)
elif variance2 > variance1:
self.ch.setTargetPosition(location2)
# Block until the stage reaches the chosen focus position.
while self.ch.getIsMoving() == True:
time.sleep(.1)
def focus_over_range(self,range):
# Sweep the stage by `range` steps while sampling frames, score each
# frame by variance of the Laplacian, then drive to the position of the
# sharpest frame. Intended to return (max variance, best position,
# variances) — see the residue-tail note in relative_focus.
# NOTE(review): apply_async is given the *result* of calling
# step_to_relative_position (i.e. the move runs synchronously here);
# a callable plus args was presumably intended.
self.pool.apply_async(self.step_to_relative_position(range))
variances = []
positions = []
old_position = 0
while self.ch.getIsMoving() == True:
time.sleep(.01)
# Keep the Qt event loop alive so new frames keep arriving in
# self.image while we poll the hardware.
QApplication.processEvents()
new_position = self.position
if old_position != new_position: positions.append(self.position)
old_position = new_position
# Fixed central crop of the frame; avoids vignetted edges.
img = self.image[512:1536,411:1233]
variance = cv2.Laplacian(img, cv2.CV_64F).var()
comment('variance calculated {}'.format(variance))
variances.append(variance)
# NOTE(review): `position_result` is not defined in this scope — this
# line is removed old code left in the scraped diff.
positions = position_result.get()
# positions = [p*.94 for p in positions]
# now we want to find the point which corresponded to the highest
# VoL
# Map the index of the best variance sample onto the recorded
# positions list (the two lists are sampled at different rates).
unit_scaled_location_of_highest_variance = variances.index(max(variances))/len(variances)
print('location of highest variance: {}'.format(unit_scaled_location_of_highest_variance))
closest_position = int(np.rint(len(positions)*unit_scaled_location_of_highest_variance))
print('closest_position',closest_position)
print('max variance of {} occurred at location {}'.format(
max(variances),positions[closest_position]))
self.ch.setTargetPosition(positions[closest_position])
self.position_and_variance_signal.emit((positions,variances))
# comment('focus at {} steps'.format(self.get_position()))
# self.ch.setTargetPosition(positions[closest_position])
# self.position_and_variance_signal.emit((positions,variances))
while self.ch.getIsMoving() == True:
time.sleep(.1)
self.relative_focus()
def relative_focus(self):
# Fine autofocus by local hill-climbing: compare sharpness (variance of
# Laplacian) at the current position and +/-250 steps, step toward the
# sharper side, and recurse until the middle sample is the maximum.
# NOTE(review): this calls step_to_position, the method's *old* name
# (renamed to step_to_relative_position in this commit) — diff residue;
# confirm which name the real file uses.
# first get our initial variance
img = self.image[512:1536,411:1233]
variance = cv2.Laplacian(img, cv2.CV_64F).var()
QApplication.processEvents()
# now move in each direction
self.step_to_position(250)
img = self.image[512:1536,411:1233]
variance_backward = cv2.Laplacian(img, cv2.CV_64F).var()
QApplication.processEvents()
self.step_to_position(-500)
img = self.image[512:1536,411:1233]
variance_forward = cv2.Laplacian(img, cv2.CV_64F).var()
print('initial vairance: {} backward variance {} forward variance {}'.format(
variance,variance_backward,variance_forward))
if variance_backward > variance and variance_backward > variance_forward:
print('moving backward')
self.step_to_position(250)
elif variance_forward > variance and variance_forward > variance_backward:
print('moving forward')
self.step_to_position(-250)
elif variance > variance_forward and variance > variance_backward:
print('max found, staying put')
self.step_to_position(250)
self.relative_focus()
# NOTE(review): the two lines below reference variances/positions/
# closest_position, which are not defined in this method — they are the
# tail of the old focus_over_range left in the scraped diff.
time.sleep(.1)
return max(variances),positions[closest_position],variances
if __name__ == '__main__':
# Manual hardware smoke test: build the focuser and run one autofocus
# cycle with position tracking enabled. Requires the Phidget stage and a
# camera feed, so it cannot run headless.
a = autofocuser()
a.track_position = True
a.autofocus()
# time.sleep(4)
# a.stop_roll()
a.track_position = False
# print(b.get())
import os
import shutil
# Directory organizer: maintenance script for the Experiments folder that
# lives next to this file.
experiment_folder_location = os.path.join(os.path.dirname(os.path.abspath(__file__)),'Experiments')
# first lets delete all experiments with no pictures in them
for name in os.listdir(experiment_folder_location):
# Concatenate every entry name so a substring test can detect whether
# any .tif image exists in this experiment directory.
files = ''.join(os.listdir(os.path.join(experiment_folder_location,name)))
if '.tif' not in files:
# Interactive confirmation before destructive removal.
response = input('WARNING directory {} found not to have files. Type Y to Delete\n'.format(name))
if response == 'Y':
shutil.rmtree(os.path.join(experiment_folder_location,name))
print('deleting:',name)
def get_480mb_worth_of_files(directory, limit_mb=480):
    """Return the newest entries in *directory* up to roughly *limit_mb* MB.

    Entries are ordered newest-first by modification time. The running
    size total is accumulated until it first exceeds *limit_mb* megabytes;
    the entry that pushes it over the limit is included (matching the
    original behaviour). If the directory's whole content is under the
    limit, every entry is returned.

    Args:
        directory: path whose direct children are considered.
        limit_mb: size budget in megabytes (default 480, kept for
            backward compatibility with the original hard-coded value).

    Returns:
        List of full paths, newest first.
    """
    paths = [os.path.join(directory, name) for name in os.listdir(directory)]
    paths.sort(key=os.path.getmtime, reverse=True)
    selected = []
    running_total_mb = 0.0
    for path in paths:
        # `path` is already joined with `directory`; the original re-joined
        # it (harmless for absolute paths, but redundant).
        running_total_mb += os.path.getsize(path) / 10**6
        selected.append(path)
        if running_total_mb > limit_mb:
            break
    return selected
# next lets sort the pictures into groups of < 500mb so they can be uploaded to owncloud
# For each experiment directory, drain its files into numbered batch
# subdirectories (sd_<name>/0, sd_<name>/1, ...) of at most ~480 MB each.
directories = os.listdir(experiment_folder_location)
directoryies_out = [os.path.join(experiment_folder_location,'sd_' + name) for name in directories]
for name in directoryies_out:
    os.makedirs(name)
print(os.getcwd())
for i, directory in enumerate(directories):
    subdirectory = os.path.join(experiment_folder_location, directory)
    print('fixing directory:', subdirectory)
    batch_index = 0
    # Keep pulling batches until the source directory is empty.
    while len(os.listdir(subdirectory)) > 0:
        new_dir = os.path.join(directoryies_out[i], str(batch_index))
        os.makedirs(new_dir)
        for path in get_480mb_worth_of_files(subdirectory):
            # os.path.basename is portable; the original split on '\\',
            # which only works for Windows-style paths.
            shutil.move(path, os.path.join(new_dir, os.path.basename(path)))
        batch_index += 1
\ No newline at end of file
0% or .
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment