Fix a few minor errors

Branch: master
Author: wufayuan, 2 years ago
parent 6dd1be543c
commit 4ab20abf82

@@ -8,21 +8,20 @@ License : MIT License
 """
-from config import POSITIVE_SAMPLE
-from config import NEGATIVE_SAMPLE
-from config import TRAINING_IMG_HEIGHT
-from config import TRAINING_IMG_WIDTH
-from config import FEATURE_FILE_TRAINING
-from config import FEATURE_NUM
-from config import ADABOOST_LIMIT
-from config import ADABOOST_CACHE_FILE
-from config import DEBUG_MODEL
+from config import POSITIVE_SAMPLE
+from config import NEGATIVE_SAMPLE
+from config import TRAINING_IMG_HEIGHT
+from config import TRAINING_IMG_WIDTH
+from config import FEATURE_FILE_TRAINING
+from config import FEATURE_NUM
+from config import ADABOOST_LIMIT
+from config import ADABOOST_CACHE_FILE
+from config import DEBUG_MODEL
 from haarFeature import Feature
-from image import ImageSet
-from adaboost import AdaBoost
-from adaboost import getCachedAdaBoost
+from image import ImageSet
+from adaboost import AdaBoost
+from adaboost import getCachedAdaBoost
 import os
 import numpy
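Most of the changes in this commit replace Python 2's xrange with range and tighten the spacing around keyword arguments. If the code ever had to run under both interpreters, a small guard such as the following would keep the lazy-iteration behaviour on Python 2 while remaining valid Python 3; this shim is only an illustration and is not part of the repository:

import sys

if sys.version_info[0] == 2:
    # Python 2 only: rebind range to the lazy xrange so later loops behave
    # the same way as Python 3's range.
    range = xrange  # noqa: F821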
@@ -30,17 +29,17 @@ import numpy
 class Cascade:
-    def __init__(self, face_dir = "", nonface_dir = "", train = True, limit = 30):
-        #tot_samples = 0
+    def __init__(self, face_dir="", nonface_dir="", train=True, limit=30):
+        # tot_samples = 0
-        self.Face = ImageSet(face_dir, sampleNum = POSITIVE_SAMPLE)
-        self.nonFace = ImageSet(nonface_dir, sampleNum = NEGATIVE_SAMPLE)
+        self.Face = ImageSet(face_dir, sampleNum=POSITIVE_SAMPLE)
+        self.nonFace = ImageSet(nonface_dir, sampleNum=NEGATIVE_SAMPLE)
         tot_samples = self.Face.sampleNum + self.nonFace.sampleNum
         self.classifier = AdaBoost
-        self.haar = Feature(TRAINING_IMG_WIDTH, TRAINING_IMG_HEIGHT)
+        self.haar = Feature(TRAINING_IMG_WIDTH, TRAINING_IMG_HEIGHT)
         if os.path.isfile(FEATURE_FILE_TRAINING + ".npy"):
@@ -50,14 +49,14 @@ class Cascade:
         if DEBUG_MODEL is True:
             self._mat = numpy.zeros((self.haar.featuresNum, tot_samples))
-            for i in xrange(self.Face.sampleNum):
+            for i in range(self.Face.sampleNum):
                 featureVec = self.haar.calFeatureForImg(self.Face.images[i])
-                for j in xrange(self.haar.featuresNum):
-                    self._mat[j][i ] = featureVec[j]
+                for j in range(self.haar.featuresNum):
+                    self._mat[j][i] = featureVec[j]
-            for i in xrange(self.nonFace.sampleNum):
+            for i in range(self.nonFace.sampleNum):
                 featureVec = self.haar.calFeatureForImg(self.nonFace.images[i])
-                for j in xrange(self.haar.featuresNum):
+                for j in range(self.haar.featuresNum):
                     self._mat[j][i + self.Face.sampleNum] = featureVec[j]
             numpy.save(FEATURE_FILE_TRAINING, self._mat)
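For readers skimming the diff: the loops above fill the training matrix with one Haar feature per row and one sample per column, faces in the first POSITIVE_SAMPLE columns and non-faces after them. A more compact sketch of the same construction, assuming calFeatureForImg returns a 1-D array of length haar.featuresNum (the helper name build_feature_matrix is hypothetical, not from this repository):

import numpy

def build_feature_matrix(haar, face_images, nonface_images):
    samples = list(face_images) + list(nonface_images)  # faces first, then non-faces
    mat = numpy.zeros((haar.featuresNum, len(samples)))
    for col, img in enumerate(samples):
        mat[:, col] = haar.calFeatureForImg(img)         # one feature vector per column
    return mat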
@@ -70,17 +69,16 @@ class Cascade:
         featureNum, sampleNum = self._mat.shape
-        assert sampleNum == (POSITIVE_SAMPLE + NEGATIVE_SAMPLE)
+        assert sampleNum == (POSITIVE_SAMPLE + NEGATIVE_SAMPLE)
         assert featureNum == FEATURE_NUM
-        Label_Face = [+1 for i in xrange(POSITIVE_SAMPLE)]
-        Label_NonFace = [-1 for i in xrange(NEGATIVE_SAMPLE)]
+        Label_Face = [+1 for i in range(POSITIVE_SAMPLE)]
+        Label_NonFace = [-1 for i in range(NEGATIVE_SAMPLE)]
         self._label = numpy.array(Label_Face + Label_NonFace)
-        self.limit = limit
-        self.classifierNum = 0
-        self.strong_classifier = [None for i in xrange(limit)]
+        self.limit = limit
+        self.classifierNum = 0
+        self.strong_classifier = [None for i in range(limit)]
     def train(self):
@@ -92,10 +90,10 @@ class Cascade:
         from config import LABEL_NEGATIVE
         cur_fpr = 1.0
-        mat = self._mat
+        mat = self._mat
         label = self._label
-        for i in xrange(self.limit):
+        for i in range(self.limit):
             if cur_fpr < EXPECTED_FPR:
                 break
@@ -103,12 +101,12 @@ class Cascade:
             cache_filename = ADABOOST_CACHE_FILE + str(i)
             if os.path.isfile(cache_filename):
-                self.strong_classifier[i] = getCachedAdaBoost(mat = self._mat,
-                                                              label = self._label,
-                                                              filename= cache_filename,
-                                                              limit = ADABOOST_LIMIT)
+                self.strong_classifier[i] = getCachedAdaBoost(mat=self._mat,
+                                                              label=self._label,
+                                                              filename=cache_filename,
+                                                              limit=ADABOOST_LIMIT)
             else:
-                self.strong_classifier[i] = AdaBoost(mat, label, limit = ADABOOST_LIMIT)
+                self.strong_classifier[i] = AdaBoost(mat, label, limit=ADABOOST_LIMIT)
                 output, fpr = self.strong_classifier[i].train()
                 cur_fpr *= fpr
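The loop being edited here trains one AdaBoost strong classifier per cascade stage (reusing a cached one from ADABOOST_CACHE_FILE when it exists) and multiplies the per-stage false-positive rates into cur_fpr until it drops below EXPECTED_FPR. A minimal sketch of that stopping rule, using an illustrative constant per-stage rate:

def stages_needed(per_stage_fpr, expected_fpr, limit):
    # How many stages the product rule needs before the cascade's overall
    # false-positive rate falls below the target.
    cur_fpr, stages = 1.0, 0
    for _ in range(limit):
        if cur_fpr < expected_fpr:
            break
        cur_fpr *= per_stage_fpr
        stages += 1
    return stages, cur_fpr

print(stages_needed(0.5, 1e-3, 30))  # -> (10, 0.0009765625)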
@@ -120,7 +118,6 @@ class Cascade:
                 self.classifierNum += 1
     def updateTrainingDate(self, mat, output, fp_num):
         fp_num = int(fp_num)
@@ -131,35 +128,33 @@ class Cascade:
         _mat[:, :POSITIVE_SAMPLE] = mat[:, :POSITIVE_SAMPLE]
         """
-        for i in xrange(POSITIVE_SAMPLE):
-            for j in xrange(FEATURE_NUM):
+        for i in range(POSITIVE_SAMPLE):
+            for j in range(FEATURE_NUM):
                 mat[j][i] = self._mat[j][i]
         """
         counter = 0
         # only reserve negative samples which are classified wrong
-        for i in xrange(POSITIVE_SAMPLE, self._label.size):
+        for i in range(POSITIVE_SAMPLE, self._label.size):
             if output[i] != self._label[i]:
-                for j in xrange(FEATURE_NUM):
+                for j in range(FEATURE_NUM):
                     _mat[j][POSITIVE_SAMPLE + counter] = mat[j][i]
                 counter += 1
         assert counter == fp_num
-        Label_Face = [+1 for i in xrange(POSITIVE_SAMPLE)]
-        Label_NonFace = [-1 for i in xrange(fp_num)]
+        Label_Face = [+1 for i in range(POSITIVE_SAMPLE)]
+        Label_NonFace = [-1 for i in range(fp_num)]
         _label = numpy.array(Label_Face + Label_NonFace)
         return _mat, _label
     def predict(self):
-        output = numpy.zeros(POSITIVE_SAMPLE + NEGATIVE_SAMPLE, dtype= numpy.float16)
-        for i in xrange(self.classifierNum):
-            self.strong_classifier[i].prediction(mat, th = 0)
+        output = numpy.zeros(POSITIVE_SAMPLE + NEGATIVE_SAMPLE, dtype=numpy.float16)
+        for i in range(self.classifierNum):
+            self.strong_classifier[i].prediction(output, th=0)
         """unfinished"""
@@ -168,4 +163,3 @@ class Cascade:
     def is_goodenough(self):
         pass

@@ -53,15 +53,15 @@ class Image:
         iImg = numpy.zeros((row, col))
         """
-        for i in xrange(0, row):
-            for j in xrange(0, col):
+        for i in range(0, row):
+            for j in range(0, col):
                 if j == 0:
                     iImg[i][j] = image[i][j]
                 else:
                     iImg[i][j] = iImg[i][j - 1] + image[i][j]
-        for j in xrange(0, col):
-            for i in xrange(1, row):
+        for j in range(0, col):
+            for i in range(1, row):
                 iImg[i][j] += iImg[i - 1][j]
         """
@@ -81,8 +81,8 @@ class Image:
         What image.sum() do is the same as the following code
         but more faster than this.
-        for i in xrange(self.Row):
-            for j in xrange(self.Col):
+        for i in range(self.Row):
+            for j in range(self.Col):
                 sigma += image[i][j]
         """
         # sigma = image.sum()

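The docstring above makes a simple point: image.sum() (numpy's ndarray.sum) produces the same pixel total as the commented-out double loop, just in compiled code. A tiny self-contained check of that equivalence:

import numpy

image = numpy.arange(12, dtype=numpy.float32).reshape(3, 4)
sigma = 0.0
for i in range(image.shape[0]):
    for j in range(image.shape[1]):
        sigma += image[i][j]
assert sigma == image.sum()  # the loop and ndarray.sum() agree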
@@ -36,7 +36,7 @@ def map(Face, NonFace):
     images_num= len(images)
     processes = []
-    for i in xrange(PROCESS_NUM):
+    for i in range(PROCESS_NUM):
         start = int((i *1./PROCESS_NUM) * images_num)
         end = int(((i+1)*1./PROCESS_NUM) * images_num )
         sub_imgs = images[start:end]
@@ -46,10 +46,10 @@ def map(Face, NonFace):
                                           FEATURE_FILE_SUBSET + str(i) + ".cache"))
         processes.append(process)
-    for i in xrange(PROCESS_NUM):
+    for i in range(PROCESS_NUM):
         processes[i].start()
-    for i in xrange(PROCESS_NUM):
+    for i in range(PROCESS_NUM):
         processes[i].join()
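map() splits the image list into PROCESS_NUM contiguous chunks, hands each chunk to its own worker process, then starts and joins them all. The start/end arithmetic can be sanity-checked in isolation; the helper name chunk_bounds is illustrative only:

def chunk_bounds(images_num, process_num):
    bounds = []
    for i in range(process_num):
        start = int((i * 1. / process_num) * images_num)
        end = int(((i + 1) * 1. / process_num) * images_num)
        bounds.append((start, end))  # worker i processes images[start:end]
    return bounds

print(chunk_bounds(10, 4))  # -> [(0, 2), (2, 5), (5, 7), (7, 10)]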
@@ -60,7 +60,7 @@ def reduce():
     mats = []
     tot_samples = 0
-    for i in xrange(PROCESS_NUM):
+    for i in range(PROCESS_NUM):
         sub_mat = numpy.load(FEATURE_FILE_SUBSET + str(i) + ".cache" + ".npy")
         mats.append(sub_mat)
         tot_samples += sub_mat.shape[1]
@@ -69,9 +69,9 @@ def reduce():
     mat = numpy.zeros((haar.featuresNum, tot_samples), numpy.float32)
     sample_readed = 0
-    for i in xrange(PROCESS_NUM):
-        for m in xrange(mats[i].shape[0]): # feature number
-            for n in xrange(mats[i].shape[1]): # sample number
+    for i in range(PROCESS_NUM):
+        for m in range(mats[i].shape[0]): # feature number
+            for n in range(mats[i].shape[1]): # sample number
                 mat[m][n + sample_readed] = mats[i][m][n]
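reduce() reloads each worker's cached sub-matrix and copies it element by element into one features-by-samples matrix. Since every sub-matrix has the same number of feature rows, the triple loop amounts to a column-wise concatenation, which numpy can do directly; a sketch (merge_feature_matrices is a hypothetical name):

import numpy

def merge_feature_matrices(sub_mats):
    # stack the per-process matrices side by side: (featuresNum, total_samples)
    return numpy.hstack(sub_mats).astype(numpy.float32)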

@@ -33,9 +33,9 @@ def routine(images, filename):
     mat = numpy.zeros((haar.featuresNum, tot_samples), dtype = numpy.float32)
-    for i in xrange(tot_samples):
+    for i in range(tot_samples):
         featureVec = haar.calFeatureForImg(images[i])
-        for j in xrange(haar.featuresNum):
+        for j in range(haar.featuresNum):
             mat[j][i] = featureVec[j]
     numpy.save(filename, mat)

@@ -23,14 +23,14 @@ haar = Feature(TRAINING_IMG_WIDTH, TRAINING_IMG_HEIGHT)
 mat = numpy.zeros((haar.featuresNum, tot_samples))
-for i in xrange(face.sampleNum):
+for i in range(face.sampleNum):
     featureVec = haar.calFeatureForImg(face.images[i])
-    for j in xrange(haar.featuresNum):
+    for j in range(haar.featuresNum):
         mat[j][i ] = featureVec[j]
-for i in xrange(nonFace.sampleNum):
+for i in range(nonFace.sampleNum):
     featureVec = haar.calFeatureForImg(nonFace.images[i])
-    for j in xrange(haar.featuresNum):
+    for j in range(haar.featuresNum):
         mat[j][i + face.sampleNum] = featureVec[j]
