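"""Image similarity search over a local image dataset using OpenCV.

The query image (INPUT_FILE) is compared against every image in
DATASET_PATH using a user-selected set of feature detectors (BRIEF,
BRISK, SIFT, SURF, KAZE, AKAZE, FREAK, ORB), matched with FLANN and
Lowe's ratio test. Per-detector match counts are normalized, summed,
and combined with a global color-histogram similarity; results are
printed from least to most similar.
"""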
import os
import io

import numpy as np
import cv2

DATASET_PATH = '/home/icaro/Workspace/classifier/generator-data/perfume/'
INPUT_DIR = '/home/icaro/Workspace/classifier/generator-data/test-inputs/'
INPUT_FILE = INPUT_DIR + 'box10.jpg'
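
# Histogram comparison methods exposed by cv2.compareHist; only the
# Bhattacharyya entry is used below (see CVCalcHist.compare).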
OPENCV_METHODS = {
    'Correlation': cv2.HISTCMP_CORREL,
    'Chi-Squared': cv2.HISTCMP_CHISQR,
    'Intersection': cv2.HISTCMP_INTERSECT,
    'Bhattacharyya': cv2.HISTCMP_BHATTACHARYYA,
}
class CVCalcHist:
    """Global BGR color histogram: 8x8x8 bins, normalized and flattened."""

    @staticmethod
    def calc(image):
        hist = cv2.calcHist([image], [0, 1, 2], None, [8, 8, 8],
                            [0, 256, 0, 256, 0, 256])
        cv2.normalize(hist, hist)
        return hist.flatten()

    @staticmethod
    def compare(hist1, hist2):
        # Bhattacharyya distance is 0 for identical histograms,
        # so 1 - distance acts as a similarity score.
        return 1. - cv2.compareHist(hist1, hist2, OPENCV_METHODS['Bhattacharyya'])
class ColorDescriptor:
    """Regional HSV histograms: four corner segments plus a center ellipse."""

    def __init__(self, bins):
        self.bins = bins

    def histogram(self, image, mask):
        # HSV ranges in OpenCV: hue is 0-180, saturation and value are 0-256.
        hist = cv2.calcHist([image], [0, 1, 2], mask, self.bins,
                            [0, 180, 0, 256, 0, 256])
        cv2.normalize(hist, hist)
        return hist.flatten()

    def describe(self, image):
        image = cv2.cvtColor(image, cv2.COLOR_BGR2HSV)
        features = []
        (h, w) = image.shape[:2]
        (cX, cY) = (int(w * 0.5), int(h * 0.5))
        # Top-left, top-right, bottom-right, bottom-left quadrants.
        segments = [(0, cX, 0, cY), (cX, w, 0, cY), (cX, w, cY, h),
                    (0, cX, cY, h)]
        # Elliptical mask covering the central 75% of the image.
        (axesX, axesY) = (int(int(w * 0.75) / 2), int(int(h * 0.75) / 2))
        ellipMask = np.zeros(image.shape[:2], dtype="uint8")
        cv2.ellipse(ellipMask, (cX, cY), (axesX, axesY), 0, 0, 360, 255, -1)
        for (startX, endX, startY, endY) in segments:
            # Histogram of each corner region, excluding the center ellipse.
            cornerMask = np.zeros(image.shape[:2], dtype="uint8")
            cv2.rectangle(cornerMask, (startX, startY), (endX, endY), 255, -1)
            cornerMask = cv2.subtract(cornerMask, ellipMask)
            hist = self.histogram(image, cornerMask)
            features.extend(hist)
        # Histogram of the central ellipse itself.
        hist = self.histogram(image, ellipMask)
        features.extend(hist)
        return features
class CVCornerHarris:
    def draw(self, image):
        gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
        dst = cv2.cornerHarris(gray, 2, 3, 0.04)
        # Result is dilated for marking the corners, not important.
        dst = cv2.dilate(dst, None)
        # Threshold for an optimal value; it may vary depending on the image.
        image[dst > 0.01 * dst.max()] = [0, 0, 255]
        return image
class CVGoodFeaturesToTrack:
    def draw(self, image):
        gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
        corners = cv2.goodFeaturesToTrack(gray, 25, 0.01, 10)
        corners = np.int0(corners)
        for i in corners:
            x, y = i.ravel()
            cv2.circle(image, (x, y), 3, 255, -1)
        return image
class CVSIFT:
    sift = cv2.xfeatures2d.SIFT_create()

    def describe(self, image):
        gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
        return self.sift.detectAndCompute(gray, None)


class CVSURF:
    surf = cv2.xfeatures2d.SURF_create()

    def describe(self, image):
        gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
        return self.surf.detectAndCompute(gray, None)
class CVBRIEF:
    star = cv2.xfeatures2d.StarDetector_create()
    brief = cv2.xfeatures2d.BriefDescriptorExtractor_create()

    def describe(self, image):
        gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
        # Find the keypoints with STAR.
        kp = self.star.detect(gray, None)
        # Compute the descriptors with BRIEF.
        kp, des = self.brief.compute(gray, kp)
        # FLANN's KD-tree matcher expects float32 descriptors.
        des = des.astype(np.float32)
        return kp, des
# Fast, with very good results.
class CVAKAZE:
    akaze = cv2.AKAZE_create()

    def describe(self, image):
        gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
        kp, des = self.akaze.detectAndCompute(gray, None)
        # Binary descriptors are cast to float32 for the FLANN matcher.
        des = des.astype(np.float32)
        return kp, des


# Slow, with poor results.
class CVKAZE:
    kaze = cv2.KAZE_create()

    def describe(self, image):
        gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
        return self.kaze.detectAndCompute(gray, None)
# Too slow, but very good results.
class CVBRISK:
    brisk = cv2.BRISK_create()

    def describe(self, image):
        gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
        kp, des = self.brisk.detectAndCompute(gray, None)
        des = des.astype(np.float32)
        return kp, des


# Poor results.
class CVFREAK:
    freak = cv2.xfeatures2d.FREAK_create()
    star = cv2.xfeatures2d.StarDetector_create()

    def describe(self, image):
        gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
        kp = self.star.detect(gray, None)
        kp, des = self.freak.compute(gray, kp)
        des = des.astype(np.float32)
        return kp, des
# Fast but poor; only works well on centered subjects.
class CVORB:
    orb = cv2.ORB_create()
    star = cv2.xfeatures2d.StarDetector_create()

    def describe(self, image):
        gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
        kp = self.star.detect(gray, None)
        kp, des = self.orb.compute(gray, kp)
        des = des.astype(np.float32)
        return kp, des
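
# Helpers that round-trip numpy arrays through raw bytes (for example, to
# cache descriptors in a database). They are not called in main() below.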
def adapt_array(arr):
    out = io.BytesIO()
    np.save(out, arr)
    out.seek(0)
    return out.read()


def convert_array(text):
    out = io.BytesIO(text)
    out.seek(0)
    return np.load(out)
# DATASET BUILDERS
def build_dataset(detector, images):
    # Describe the query image and every dataset image with the given detector.
    input_image = cv2.imread(INPUT_FILE, 1)
    _, input_des = detector.describe(input_image)
    dataset = []
    for current_image_name in os.listdir(DATASET_PATH):
        print('.', end='', flush=True)
        _, current_image_description = detector.describe(
            images[current_image_name])
        dataset.append((current_image_name, current_image_description))
    return (dataset, input_des)
def build_empty_results_table():
    results = {}
    for current_image_name in os.listdir(DATASET_PATH):
        results[current_image_name] = []
    return results


def load_images_in_memory():
    images = {}
    for current_image_name in os.listdir(DATASET_PATH):
        images[current_image_name] = cv2.imread(
            DATASET_PATH + current_image_name, 1)
    return images
def build_results(dataset, input_des, results):
    # Match query descriptors against each dataset image with FLANN,
    # then count the matches that pass Lowe's ratio test.
    FLANN_INDEX_KDTREE = 0
    index_params = dict(algorithm=FLANN_INDEX_KDTREE, trees=5)
    search_params = dict(checks=100)
    flann = cv2.FlannBasedMatcher(index_params, search_params)
    for image_name, current_image_des in dataset:
        matches = flann.knnMatch(input_des, current_image_des, k=2)
        qt = 0
        for pair in matches:
            if len(pair) < 2:
                continue
            m, n = pair
            if m.distance < 0.8 * n.distance:
                qt += 1
        results[image_name].append(qt)
    return results
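
# Each image now holds one raw match count per selected detector.
# Normalize each detector's counts by its maximum across the dataset,
# then sum them into a single similarity score per image.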
def build_similarity_table(results, detector_qt):
    max_res = [0] * detector_qt
    for item in results:
        for i in range(detector_qt):
            max_res[i] = max(max_res[i], results[item][i])
    for item in results:
        for i in range(detector_qt):
            if max_res[i]:
                results[item][i] /= max_res[i]
    for item in results:
        results[item] = sum(results[item])
    return results
def add_histogram_to_results(results, images):
    # Add a global color-histogram similarity on top of the detector scores.
    image = cv2.imread(INPUT_FILE)
    input_hist = CVCalcHist.calc(image)
    for current_image_name in os.listdir(DATASET_PATH):
        current_hist = CVCalcHist.calc(images[current_image_name])
        current_distance = CVCalcHist.compare(input_hist, current_hist)
        # TODO: weight the histogram term relative to the detector scores.
        results[current_image_name] += current_distance
    return results
def main():
    detectors = [
        CVBRIEF(),
        # CVSIFT(),
        # CVSURF(),
        # CVKAZE(),
        # CVAKAZE(),
        CVBRISK(),
        # CVFREAK(),
        # CVORB(),
    ]
    print('Loading images into memory and building results table...')
    results = build_empty_results_table()
    images = load_images_in_memory()
    selected_detectors_counter = 0
    for detector in detectors:
        detector_class_name = detector.__class__.__name__
        choice = input('\nCompute with ' + detector_class_name +
                       '? [Press ENTER to confirm | Press D to deny]: ')
        if choice.lower() == 'd':
            print('DENIED::' + detector_class_name)
        else:
            print('COMPUTING::' + detector_class_name)
            selected_detectors_counter += 1
            dataset, input_des = build_dataset(detector, images)
            results = build_results(dataset, input_des, results)
    results = build_similarity_table(results, selected_detectors_counter)
    input('\nPress any key to compute histograms... ')
    results = add_histogram_to_results(results, images)
    # Higher score means more similar, so the best matches are printed last.
    sorted_results = sorted(results, key=results.get)
    input('\nPress any key to show results... ')
    for i, res in enumerate(sorted_results):
        print(i, format(results[res], '.2f'), res)


if __name__ == '__main__':
    main()