Modify frontend

master
kefei 6 years ago
commit 8f18834ae5

Binary image added (not shown; 39 KiB).

BIN gzy/.DS_Store (vendored): binary file not shown.

@ -0,0 +1,10 @@
<?xml version="1.0" encoding="UTF-8"?>
<project version="4">
<component name="JavaScriptSettings">
<option name="languageLevel" value="ES6" />
</component>
<component name="ProjectRootManager" version="2" project-jdk-name="Python 3.6" project-jdk-type="Python SDK" />
<component name="PyPackaging">
<option name="earlyReleasesAsUpgrades" value="true" />
</component>
</project>

@ -0,0 +1,8 @@
<?xml version="1.0" encoding="UTF-8"?>
<project version="4">
<component name="ProjectModuleManager">
<modules>
<module fileurl="file://$PROJECT_DIR$/.idea/untitled.iml" filepath="$PROJECT_DIR$/.idea/untitled.iml" />
</modules>
</component>
</project>

@ -0,0 +1,11 @@
<?xml version="1.0" encoding="UTF-8"?>
<module type="PYTHON_MODULE" version="4">
<component name="NewModuleRootManager">
<content url="file://$MODULE_DIR$" />
<orderEntry type="jdk" jdkName="Python 3.6" jdkType="Python SDK" />
<orderEntry type="sourceFolder" forTests="false" />
</component>
<component name="TestRunnerService">
<option name="PROJECT_TEST_RUNNER" value="Unittests" />
</component>
</module>

@ -0,0 +1,248 @@
<?xml version="1.0" encoding="UTF-8"?>
<project version="4">
<component name="ChangeListManager">
<list default="true" id="43677961-aad9-4b46-92e4-61fb4ee90e93" name="Default Changelist" comment="" />
<option name="EXCLUDED_CONVERTED_TO_IGNORED" value="true" />
<option name="SHOW_DIALOG" value="false" />
<option name="HIGHLIGHT_CONFLICTS" value="true" />
<option name="HIGHLIGHT_NON_ACTIVE_CHANGELIST" value="false" />
<option name="LAST_RESOLUTION" value="IGNORE" />
</component>
<component name="CoverageDataManager">
<SUITE FILE_PATH="coverage/untitled$PyPy.coverage" NAME="PyPy Coverage Results" MODIFIED="1544345586233" SOURCE_PROVIDER="com.intellij.coverage.DefaultCoverageFileProvider" RUNNER="coverage.py" COVERAGE_BY_TEST_ENABLED="true" COVERAGE_TRACING_ENABLED="false" WORKING_DIRECTORY="$PROJECT_DIR$" />
<SUITE FILE_PATH="coverage/untitled$PyPy__1_.coverage" NAME="PyPy (1) Coverage Results" MODIFIED="1543408497222" SOURCE_PROVIDER="com.intellij.coverage.DefaultCoverageFileProvider" RUNNER="coverage.py" COVERAGE_BY_TEST_ENABLED="true" COVERAGE_TRACING_ENABLED="false" WORKING_DIRECTORY="$PROJECT_DIR$" />
</component>
<component name="DatabaseView">
<option name="SHOW_INTERMEDIATE" value="true" />
<option name="GROUP_DATA_SOURCES" value="true" />
<option name="GROUP_SCHEMA" value="true" />
<option name="GROUP_CONTENTS" value="false" />
<option name="SORT_POSITIONED" value="false" />
<option name="SHOW_EMPTY_GROUPS" value="false" />
<option name="AUTO_SCROLL_FROM_SOURCE" value="false" />
<option name="HIDDEN_KINDS">
<set />
</option>
<expand />
<select />
</component>
<component name="FUSProjectUsageTrigger">
<session id="507699379">
<usages-collector id="statistics.lifecycle.project">
<counts>
<entry key="project.closed" value="23" />
<entry key="project.open.time.3" value="5" />
<entry key="project.open.time.4" value="13" />
<entry key="project.open.time.421" value="1" />
<entry key="project.open.time.5" value="4" />
<entry key="project.opened" value="23" />
</counts>
</usages-collector>
<usages-collector id="statistics.file.extensions.edit">
<counts>
<entry key="py" value="2769" />
</counts>
</usages-collector>
<usages-collector id="statistics.file.types.edit">
<counts>
<entry key="Python" value="2769" />
</counts>
</usages-collector>
<usages-collector id="statistics.file.extensions.open">
<counts>
<entry key="py" value="3" />
</counts>
</usages-collector>
<usages-collector id="statistics.file.types.open">
<counts>
<entry key="Python" value="3" />
</counts>
</usages-collector>
</session>
</component>
<component name="FileEditorManager">
<leaf SIDE_TABS_SIZE_LIMIT_KEY="300">
<file pinned="false" current-in-tab="false">
<entry file="file://$PROJECT_DIR$/PyPy.py">
<provider selected="true" editor-type-id="text-editor">
<state relative-caret-position="-142">
<caret line="4" column="52" selection-start-line="4" selection-start-column="52" selection-end-line="4" selection-end-column="52" />
<folding>
<element signature="e#0#15#0" expanded="true" />
</folding>
</state>
</provider>
</entry>
</file>
<file pinned="false" current-in-tab="true">
<entry file="file://$PROJECT_DIR$/KMP.py">
<provider selected="true" editor-type-id="text-editor">
<state relative-caret-position="279">
<caret line="9" column="19" selection-end-line="23" />
</state>
</provider>
</entry>
</file>
</leaf>
</component>
<component name="FileTemplateManagerImpl">
<option name="RECENT_TEMPLATES">
<list>
<option value="Python Script" />
</list>
</option>
</component>
<component name="IdeDocumentHistory">
<option name="CHANGED_PATHS">
<list>
<option value="$PROJECT_DIR$/PyPy.py" />
<option value="$PROJECT_DIR$/KMP.py" />
</list>
</option>
</component>
<component name="JsBuildToolGruntFileManager" detection-done="true" sorting="DEFINITION_ORDER" />
<component name="JsBuildToolPackageJson" detection-done="true" sorting="DEFINITION_ORDER" />
<component name="JsGulpfileManager">
<detection-done>true</detection-done>
<sorting>DEFINITION_ORDER</sorting>
</component>
<component name="ProjectFrameBounds">
<option name="x" value="139" />
<option name="y" value="72" />
<option name="width" value="1115" />
<option name="height" value="617" />
</component>
<component name="ProjectView">
<navigator proportions="" version="1">
<foldersAlwaysOnTop value="true" />
</navigator>
<panes>
<pane id="Scope" />
<pane id="ProjectPane" />
</panes>
</component>
<component name="PropertiesComponent">
<property name="WebServerToolWindowFactoryState" value="false" />
<property name="settings.editor.selected.configurable" value="com.jetbrains.python.configuration.PyActiveSdkModuleConfigurable" />
</component>
<component name="RunDashboard">
<option name="ruleStates">
<list>
<RuleState>
<option name="name" value="ConfigurationTypeDashboardGroupingRule" />
</RuleState>
<RuleState>
<option name="name" value="StatusDashboardGroupingRule" />
</RuleState>
</list>
</option>
</component>
<component name="RunManager">
<configuration name="PyPy" type="PythonConfigurationType" factoryName="Python" temporary="true">
<module name="untitled" />
<option name="INTERPRETER_OPTIONS" value="" />
<option name="PARENT_ENVS" value="true" />
<envs>
<env name="PYTHONUNBUFFERED" value="1" />
</envs>
<option name="SDK_HOME" value="" />
<option name="WORKING_DIRECTORY" value="$PROJECT_DIR$" />
<option name="IS_MODULE_SDK" value="true" />
<option name="ADD_CONTENT_ROOTS" value="true" />
<option name="ADD_SOURCE_ROOTS" value="true" />
<EXTENSION ID="PythonCoverageRunConfigurationExtension" runner="coverage.py" />
<option name="SCRIPT_NAME" value="$PROJECT_DIR$/PyPy.py" />
<option name="PARAMETERS" value="" />
<option name="SHOW_COMMAND_LINE" value="false" />
<option name="EMULATE_TERMINAL" value="false" />
<option name="MODULE_MODE" value="false" />
<option name="REDIRECT_INPUT" value="false" />
<option name="INPUT_FILE" value="" />
<method v="2" />
</configuration>
<recent_temporary>
<list>
<item itemvalue="Python.PyPy" />
</list>
</recent_temporary>
</component>
<component name="SvnConfiguration">
<configuration />
</component>
<component name="TaskManager">
<task active="true" id="Default" summary="Default task">
<changelist id="43677961-aad9-4b46-92e4-61fb4ee90e93" name="Default Changelist" comment="" />
<created>1543407615198</created>
<option name="number" value="Default" />
<option name="presentableId" value="Default" />
<updated>1543407615198</updated>
</task>
<servers />
</component>
<component name="ToolWindowManager">
<frame x="139" y="72" width="1115" height="617" extended-state="0" />
<editor active="true" />
<layout>
<window_info active="true" content_ui="combo" id="Project" order="0" visible="true" weight="0.18452936" />
<window_info id="Structure" order="1" side_tool="true" weight="0.25" />
<window_info id="Favorites" order="2" side_tool="true" />
<window_info anchor="bottom" id="Message" order="0" />
<window_info anchor="bottom" id="Find" order="1" />
<window_info anchor="bottom" id="Run" order="2" weight="0.40571427" />
<window_info anchor="bottom" id="Debug" order="3" weight="0.4" />
<window_info anchor="bottom" id="Cvs" order="4" weight="0.25" />
<window_info anchor="bottom" id="Inspection" order="5" weight="0.4" />
<window_info anchor="bottom" id="TODO" order="6" />
<window_info anchor="bottom" id="Docker" order="7" show_stripe_button="false" />
<window_info anchor="bottom" id="Version Control" order="8" show_stripe_button="false" />
<window_info anchor="bottom" id="Database Changes" order="9" show_stripe_button="false" />
<window_info anchor="bottom" id="Event Log" order="10" side_tool="true" />
<window_info anchor="bottom" id="Terminal" order="11" weight="0.32978722" />
<window_info anchor="bottom" id="Python Console" order="12" />
<window_info anchor="right" id="Commander" internal_type="SLIDING" order="0" type="SLIDING" weight="0.4" />
<window_info anchor="right" id="Ant Build" order="1" weight="0.25" />
<window_info anchor="right" content_ui="combo" id="Hierarchy" order="2" weight="0.25" />
<window_info anchor="right" id="SciView" order="3" />
<window_info anchor="right" id="Database" order="4" weight="0.32952547" />
</layout>
</component>
<component name="TypeScriptGeneratedFilesManager">
<option name="version" value="1" />
</component>
<component name="VcsContentAnnotationSettings">
<option name="myLimit" value="2678400000" />
</component>
<component name="editorHistoryManager">
<entry file="file:///Library/Frameworks/Python.framework/Versions/3.6/lib/python3.6/urllib/request.py">
<provider selected="true" editor-type-id="text-editor">
<state relative-caret-position="89">
<caret line="1360" selection-start-line="1360" selection-end-line="1360" />
</state>
</provider>
</entry>
<entry file="file:///Library/Frameworks/Python.framework/Versions/3.6/lib/python3.6/site-packages/PIL/JpegImagePlugin.py">
<provider selected="true" editor-type-id="text-editor">
<state relative-caret-position="107">
<caret line="619" selection-start-line="619" selection-end-line="619" />
</state>
</provider>
</entry>
<entry file="file://$PROJECT_DIR$/PyPy.py">
<provider selected="true" editor-type-id="text-editor">
<state relative-caret-position="-142">
<caret line="4" column="52" selection-start-line="4" selection-start-column="52" selection-end-line="4" selection-end-column="52" />
<folding>
<element signature="e#0#15#0" expanded="true" />
</folding>
</state>
</provider>
</entry>
<entry file="file://$PROJECT_DIR$/KMP.py">
<provider selected="true" editor-type-id="text-editor">
<state relative-caret-position="279">
<caret line="9" column="19" selection-end-line="23" />
</state>
</provider>
</entry>
</component>
</project>

@ -0,0 +1,127 @@
# -*- coding=UTF-8 -*-
import tensorflow as tf
# Input data: the classic MNIST loader
import input_data

mnist = input_data.read_data_sets("/tmp/data/", one_hot=True)

# Network hyperparameters
learning_rate = 0.001
training_iters = 200000
batch_size = 64
display_step = 20

# Network parameters
n_input = 784    # input dimension (28*28 pixels)
n_classes = 10   # number of label classes
dropout = 0.8    # dropout keep probability

# Placeholder inputs
x = tf.placeholder(tf.float32, [None, n_input])
y = tf.placeholder(tf.float32, [None, n_classes])
keep_prob = tf.placeholder(tf.float32)

# Convolution: conv2d + bias + ReLU
def conv2d(name, l_input, w, b):
    return tf.nn.relu(
        tf.nn.bias_add(tf.nn.conv2d(l_input, w, strides=[1, 1, 1, 1], padding='SAME'), b),
        name=name)

# Max pooling with a k x k window and stride k
def max_pool(name, l_input, k):
    return tf.nn.max_pool(l_input, ksize=[1, k, k, 1],
                          strides=[1, k, k, 1], padding='SAME', name=name)

# Local response normalization
def norm(name, l_input, lsize=4):
    return tf.nn.lrn(l_input, lsize, bias=1.0, alpha=0.001 / 9.0, beta=0.75, name=name)

# Define the whole network
def alex_net(_X, _weights, _biases, _dropout):
    _X = tf.reshape(_X, shape=[-1, 28, 28, 1])  # reshape the vector into an image
    # Convolution layer
    conv1 = conv2d('conv1', _X, _weights['wc1'], _biases['bc1'])
    # Pooling layer
    pool1 = max_pool('pool1', conv1, k=2)
    # Normalization layer
    norm1 = norm('norm1', pool1, lsize=4)
    # Dropout
    norm1 = tf.nn.dropout(norm1, _dropout)
    # Convolution
    conv2 = conv2d('conv2', norm1, _weights['wc2'], _biases['bc2'])
    # Pooling
    pool2 = max_pool('pool2', conv2, k=2)
    # Normalization
    norm2 = norm('norm2', pool2, lsize=4)
    # Dropout
    norm2 = tf.nn.dropout(norm2, _dropout)
    # Convolution
    conv3 = conv2d('conv3', norm2, _weights['wc3'], _biases['bc3'])
    # Pooling
    pool3 = max_pool('pool3', conv3, k=2)
    # Normalization
    norm3 = norm('norm3', pool3, lsize=4)
    # Dropout
    norm3 = tf.nn.dropout(norm3, _dropout)
    # Fully connected layer; flatten the feature maps into a vector first
    dense1 = tf.reshape(norm3, [-1, _weights['wd1'].get_shape().as_list()[0]])
    dense1 = tf.nn.relu(tf.matmul(dense1, _weights['wd1']) + _biases['bd1'], name='fc1')
    # Second fully connected layer, ReLU activation
    dense2 = tf.nn.relu(tf.matmul(dense1, _weights['wd2']) + _biases['bd2'], name='fc2')
    # Output layer
    out = tf.matmul(dense2, _weights['out']) + _biases['out']
    return out

# Store all network parameters
weights = {
    'wc1': tf.Variable(tf.random_normal([3, 3, 1, 64])),
    'wc2': tf.Variable(tf.random_normal([3, 3, 64, 128])),
    'wc3': tf.Variable(tf.random_normal([3, 3, 128, 256])),
    'wd1': tf.Variable(tf.random_normal([4*4*256, 1024])),
    'wd2': tf.Variable(tf.random_normal([1024, 1024])),
    'out': tf.Variable(tf.random_normal([1024, 10]))
}
biases = {
    'bc1': tf.Variable(tf.random_normal([64])),
    'bc2': tf.Variable(tf.random_normal([128])),
    'bc3': tf.Variable(tf.random_normal([256])),
    'bd1': tf.Variable(tf.random_normal([1024])),
    'bd2': tf.Variable(tf.random_normal([1024])),
    'out': tf.Variable(tf.random_normal([n_classes]))
}

# Build the model
pred = alex_net(x, weights, biases, keep_prob)
# Loss and optimizer
cost = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(logits=pred, labels=y))
optimizer = tf.train.AdamOptimizer(learning_rate=learning_rate).minimize(cost)
# Evaluation ops
correct_pred = tf.equal(tf.argmax(pred, 1), tf.argmax(y, 1))
accuracy = tf.reduce_mean(tf.cast(correct_pred, tf.float32))
# Initialize all shared variables
init = tf.global_variables_initializer()

# Launch training
with tf.Session() as sess:
    sess.run(init)
    step = 1
    # Keep training until the iteration budget is reached
    while step * batch_size < training_iters:
        # Fetch a batch of data
        batch_xs, batch_ys = mnist.train.next_batch(batch_size)
        sess.run(optimizer, feed_dict={x: batch_xs, y: batch_ys, keep_prob: dropout})
        if step % display_step == 0:
            # Evaluate accuracy and loss on the current batch
            acc = sess.run(accuracy, feed_dict={x: batch_xs, y: batch_ys, keep_prob: 1.})
            loss = sess.run(cost, feed_dict={x: batch_xs, y: batch_ys, keep_prob: 1.})
            print("Iter " + str(step*batch_size) + ", Minibatch Loss= " +
                  "{:.6f}".format(loss) + ", Training Accuracy= " + "{:.5f}".format(acc))
        step += 1
    print("Optimization Finished!")
    # Test accuracy on a slice of the test set
    print("Testing Accuracy:", sess.run(accuracy, feed_dict={
        x: mnist.test.images[:256], y: mnist.test.labels[:256], keep_prob: 1.}))

@ -0,0 +1,24 @@
# KMP matcher used by the crawler: the pattern x is the <img ...> prefix that
# precedes each sticker URL on the page.
nexts = [0] * 100  # failure function; 100 comfortably covers len(x) + 1
x = '<img class="ui image bqppsearch lazy" data-original="'

def KMPinit():
    # Build the KMP failure function for pattern x
    i, j, m = 0, -1, len(x)
    nexts[0] = -1
    while i < m:
        while j != -1 and x[i] != x[j]:
            j = nexts[j]
        nexts[i + 1] = j + 1
        i, j = i + 1, j + 1

def findWord(y):
    # Scan text y for the pattern; after each hit, slice out the image URL
    # that runs up to the following '" title' attribute.
    i, j, ans, n, m = 0, 0, [], len(y), len(x)
    while i < n:
        while j != -1 and y[i] != x[j]:
            j = nexts[j]
        i, j = i + 1, j + 1
        if j == m:
            to = i + 1
            while y[to:to + 7] != '" title':
                to = to + 1
            # keep only http(s) URLs ending in .jpg
            if y[to - 3:to] == 'jpg' and y[i] == 'h':
                ans.append(y[i:to])
            j = nexts[j]
    return ans
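
# A minimal self-check for the matcher above (hypothetical input, not taken
# from the real site): the snippet embeds one sticker URL in the markup the
# pattern expects.
if __name__ == '__main__':
    KMPinit()
    html = '<div>' + x + 'http://example.com/a.jpg" title="demo"></div>'
    print(findWord(html))  # expected: ['http://example.com/a.jpg']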

@ -0,0 +1,68 @@
import tensorflow as tf
from tensorflow.examples.tutorials.mnist import input_data

def weight_variable(shape):
    initial = tf.truncated_normal(shape, stddev=0.1)
    return tf.Variable(initial)

def bias_variable(shape):
    # small constant bias of 0.1
    initial = tf.constant(0.1, shape=shape)
    return tf.Variable(initial)

def conv2d(x, W):
    return tf.nn.conv2d(x, W, strides=[1, 1, 1, 1], padding='SAME')

def max_pool_2x2(x):
    return tf.nn.max_pool(x, ksize=[1, 2, 2, 1], strides=[1, 2, 2, 1], padding='SAME')

# Preprocessing
mnist = input_data.read_data_sets("MNIST_data/", one_hot=True)
xs = tf.placeholder(tf.float32, [None, 784])
ys = tf.placeholder(tf.float32, [None, 10])
keep_prob = tf.placeholder(tf.float32)

# First convolution layer
W_conv1 = weight_variable([5, 5, 1, 32])
b_conv1 = bias_variable([32])
x_image = tf.reshape(xs, [-1, 28, 28, 1])
h_conv1 = tf.nn.relu(conv2d(x_image, W_conv1) + b_conv1)
h_pool1 = max_pool_2x2(h_conv1)

# Second convolution layer
W_conv2 = weight_variable([5, 5, 32, 64])
b_conv2 = bias_variable([64])
h_conv2 = tf.nn.relu(conv2d(h_pool1, W_conv2) + b_conv2)
h_pool2 = max_pool_2x2(h_conv2)

# First fully connected layer
W_fc1 = weight_variable([7*7*64, 1024])
b_fc1 = bias_variable([1024])
h_pool2_flat = tf.reshape(h_pool2, [-1, 7*7*64])
h_fc1 = tf.nn.relu(tf.matmul(h_pool2_flat, W_fc1) + b_fc1)
h_fc1_drop = tf.nn.dropout(h_fc1, keep_prob)

# Second fully connected layer
W_fc2 = weight_variable([1024, 10])
b_fc2 = bias_variable([10])
y_conv = tf.nn.softmax(tf.matmul(h_fc1_drop, W_fc2) + b_fc2)

# Training ops
cross_entropy = -tf.reduce_sum(ys * tf.log(y_conv))
train_step = tf.train.AdamOptimizer(1e-4).minimize(cross_entropy)

# Accuracy ops
correct_prediction = tf.equal(tf.argmax(y_conv, 1), tf.argmax(ys, 1))
accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))

init = tf.global_variables_initializer()
sess = tf.Session()
sess.run(init)
for i in range(2001):
    batch = mnist.train.next_batch(50)
    sess.run(train_step, feed_dict={xs: batch[0], ys: batch[1], keep_prob: 0.5})
    if i % 100 == 0:
        tests = mnist.test.next_batch(200)
        print(sess.run(accuracy, feed_dict={xs: tests[0], ys: tests[1], keep_prob: 1.0}))
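
# Aside (sketch, not used above): the hand-rolled -sum(ys*log(y_conv)) loss
# can hit log(0) when the softmax saturates; TF1's fused op below is the
# numerically safer equivalent, computed from the pre-softmax logits.
logits = tf.matmul(h_fc1_drop, W_fc2) + b_fc2
stable_cross_entropy = tf.reduce_mean(
    tf.nn.softmax_cross_entropy_with_logits(labels=ys, logits=logits))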

@ -0,0 +1,26 @@
import tensorflow as tf
from tensorflow.examples.tutorials.mnist import input_data

mnist = input_data.read_data_sets("MNIST_data/", one_hot=True)
xs = tf.placeholder(tf.float32, [None, 784])
ys = tf.placeholder(tf.float32, [None, 10])
Weight = tf.Variable(tf.zeros([784, 10]))
biases = tf.Variable(tf.zeros([10]))
y = tf.nn.softmax(tf.matmul(xs, Weight) + biases)
loss = -tf.reduce_sum(ys * tf.log(y))
train = tf.train.GradientDescentOptimizer(0.01).minimize(loss)
# Build the evaluation ops once, outside the loop, so the graph does not grow
correct_prediction = tf.equal(tf.argmax(ys, 1), tf.argmax(y, 1))
accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))
init = tf.global_variables_initializer()
sess = tf.Session()
sess.run(init)
for step in range(10000):
    batch = mnist.train.next_batch(100)
    sess.run(train, feed_dict={xs: batch[0], ys: batch[1]})
    if step % 50 == 0:
        print(sess.run(accuracy, feed_dict={xs: mnist.test.images, ys: mnist.test.labels}))

@ -0,0 +1,24 @@
import os
from PIL import Image

PATH = 'D:/tensorflow/未处理的训练源/'
PATH2 = 'D:/tensorflow/train/'

def resize_image(img_path, save_path):
    # Convert every image in img_path to 60x60 grayscale and save it into
    # save_path under a sequential numeric name.
    total = 0
    try:
        for pic in os.listdir(img_path):
            path = img_path + pic
            image = Image.open(path).convert('L')
            new_pic = image.resize((60, 60), Image.ANTIALIAS)
            new_path = save_path + str(total) + '.jpg'
            new_pic.save(new_path, quality=100)
            total = total + 1
    except Exception as e:
        print(e)

if __name__ == '__main__':
    for page in range(1, 6):
        print("Processing folder %d" % (page))
        resize_image(PATH + str(page) + '/', PATH2 + str(page) + '/')

@ -0,0 +1,186 @@
# -*- coding=UTF-8 -*-
import os
import tensorflow as tf
import numpy as np
import time
from PIL import Image

w = 64
h = 64
c = 3
TOTAL_TYPE = 5
path = 'D:/tensorflow/imgaes/'
model_path = 'D:/tensorflow/saver/model.ckpt'

def read_source():
    # Read every class folder under `path`; folder names are the class indices
    imgs, labels = [], []
    for dir in os.listdir(path):
        idx = int(dir)
        folder = os.path.join(path, dir)
        zeros = np.zeros(TOTAL_TYPE)
        zeros[idx] = 1  # one-hot label for this folder
        print("folder :%s" % (folder))
        total = 0
        for f in os.listdir(folder):
            file = os.path.join(folder, f)
            image = Image.open(file).convert('RGB').resize((w, h), Image.ANTIALIAS)
            arr = np.asarray(image)
            imgs.append(arr)
            labels.append(zeros)
            total += 1
        print(total)
    return np.asarray(imgs, np.float32), np.asarray(labels, np.int32)

data, label = read_source()
# Shuffle the whole dataset with one random permutation
num_example = data.shape[0]
arr = np.arange(num_example)
np.random.shuffle(arr)
data = data[arr]
label = label[arr]

# Split all data into a training set and a validation set
ratio = 0.8
s = int(num_example * ratio)
x_train = data[:s]
y_train = label[:s]
x_val = data[s:]
y_val = label[s:]

# ----------------- Build the network ----------------------
x = tf.placeholder(tf.float32, shape=[None, w, h, 3], name='x')
y_ = tf.placeholder(tf.float32, shape=[None, 5], name='y_')

def inference(input_tensor, train, regularizer):
    with tf.variable_scope('layer1-conv1'):
        conv1_weights = tf.get_variable("weight", [5, 5, 3, 64],
                                        initializer=tf.truncated_normal_initializer(stddev=0.1))
        conv1_biases = tf.get_variable("bias", [64], initializer=tf.constant_initializer(0.0))
        conv1 = tf.nn.conv2d(input_tensor, conv1_weights, strides=[1, 1, 1, 1], padding='SAME')
        relu1 = tf.nn.relu(tf.nn.bias_add(conv1, conv1_biases))
        relu1 = tf.layers.batch_normalization(relu1, training=train)
    with tf.name_scope("layer2-pool1"):
        pool1 = tf.nn.max_pool(relu1, ksize=[1, 2, 2, 1], strides=[1, 2, 2, 1], padding="SAME")
    with tf.variable_scope("layer3-conv2"):
        conv2_weights = tf.get_variable("weight", [3, 3, 64, 128],
                                        initializer=tf.truncated_normal_initializer(stddev=0.1))
        conv2_biases = tf.get_variable("bias", [128], initializer=tf.constant_initializer(0.0))
        conv2 = tf.nn.conv2d(pool1, conv2_weights, strides=[1, 1, 1, 1], padding='SAME')
        relu2 = tf.nn.relu(tf.nn.bias_add(conv2, conv2_biases))
        relu2 = tf.layers.batch_normalization(relu2, training=train)
    with tf.name_scope("layer4-pool2"):
        pool2 = tf.nn.max_pool(relu2, ksize=[1, 2, 2, 1], strides=[1, 2, 2, 1], padding='SAME')
    with tf.variable_scope("layer5-conv3"):
        conv3_weights = tf.get_variable("weight", [3, 3, 128, 256],
                                        initializer=tf.truncated_normal_initializer(stddev=0.1))
        conv3_biases = tf.get_variable("bias", [256], initializer=tf.constant_initializer(0.0))
        conv3 = tf.nn.conv2d(pool2, conv3_weights, strides=[1, 1, 1, 1], padding='SAME')
        relu3 = tf.nn.relu(tf.nn.bias_add(conv3, conv3_biases))
        relu3 = tf.layers.batch_normalization(relu3, training=train)
    with tf.name_scope("layer8-pool4"):
        pool4 = tf.nn.max_pool(relu3, ksize=[1, 2, 2, 1], strides=[1, 2, 2, 1], padding='SAME')
        norm4 = tf.nn.lrn(pool4, 4, bias=1.0, alpha=0.001 / 9.0, beta=0.75)
    nodes = 8 * 8 * 256  # 64 -> 32 -> 16 -> 8 after three 2x2 pools
    reshaped = tf.reshape(norm4, [-1, nodes])
    with tf.variable_scope('layer9-fc1'):
        fc1_weights = tf.get_variable("weight", [nodes, 1024],
                                      initializer=tf.truncated_normal_initializer(stddev=0.1))
        if regularizer is not None:
            tf.add_to_collection('losses', regularizer(fc1_weights))
        fc1_biases = tf.get_variable("bias", [1024], initializer=tf.constant_initializer(0.1))
        fc1 = tf.nn.relu(tf.matmul(reshaped, fc1_weights) + fc1_biases)
        if train:
            fc1 = tf.nn.dropout(fc1, 0.8)
    with tf.variable_scope('layer10-fc2'):
        fc2_weights = tf.get_variable("weight", [1024, 512],
                                      initializer=tf.truncated_normal_initializer(stddev=0.1))
        if regularizer is not None:
            tf.add_to_collection('losses', regularizer(fc2_weights))
        fc2_biases = tf.get_variable("bias", [512], initializer=tf.constant_initializer(0.1))
        fc2 = tf.nn.relu(tf.matmul(fc1, fc2_weights) + fc2_biases)
        if train:
            fc2 = tf.nn.dropout(fc2, 0.8)
    with tf.variable_scope('layer11-fc3'):
        fc3_weights = tf.get_variable("weight", [512, 5],
                                      initializer=tf.truncated_normal_initializer(stddev=0.1))
        if regularizer is not None:
            tf.add_to_collection('losses', regularizer(fc3_weights))
        fc3_biases = tf.get_variable("bias", [5], initializer=tf.constant_initializer(0.1))
        logit = tf.matmul(fc2, fc3_weights) + fc3_biases
    return logit

# --------------------------- End of network ---------------------------
regularizer = tf.contrib.layers.l2_regularizer(0.0001)
# Note: the graph is built with train=False, so the dropout branches above
# stay inactive and batch norm runs in inference mode during training
logits = inference(x, False, regularizer)

# (Small trick) multiply logits by 1 under a fixed name so the output tensor
# can be fetched as 'logits_eval' when the saved model is reloaded
b = tf.constant(value=1, dtype=tf.float32)
logits_eval = tf.multiply(logits, b, name='logits_eval')

# loss = tf.nn.sparse_softmax_cross_entropy_with_logits(logits=logits, labels=y_)
# loss = -tf.reduce_sum(y_*tf.log(logits))
loss = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits_v2(logits=logits, labels=y_))
update_ops = tf.get_collection(tf.GraphKeys.UPDATE_OPS)
with tf.control_dependencies(update_ops):
    train_op = tf.train.AdamOptimizer(learning_rate=0.001).minimize(loss)
correct_prediction = tf.equal(tf.argmax(logits, 1), tf.argmax(y_, 1))
acc = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))

# Helper that yields the data in batches
def minibatches(inputs=None, targets=None, batch_size=None, shuffle=False):
    assert len(inputs) == len(targets)
    if shuffle:
        indices = np.arange(len(inputs))
        np.random.shuffle(indices)
    for start_idx in range(0, len(inputs) - batch_size + 1, batch_size):
        if shuffle:
            excerpt = indices[start_idx:start_idx + batch_size]
        else:
            excerpt = slice(start_idx, start_idx + batch_size)
        yield inputs[excerpt], targets[excerpt]

# n_epoch can be set larger for training and testing
n_epoch = 200
batch_size = 64
saver = tf.train.Saver()
sess = tf.Session()
sess.run(tf.global_variables_initializer())
for epoch in range(n_epoch):
    start_time = time.time()
    # training
    train_loss, train_acc, n_batch = 0, 0, 0
    for x_train_a, y_train_a in minibatches(x_train, y_train, batch_size, shuffle=True):
        _, err, ac = sess.run([train_op, loss, acc], feed_dict={x: x_train_a, y_: y_train_a})
        train_loss += err
        train_acc += ac
        n_batch += 1
    print("   train loss: %f" % (np.sum(train_loss) / n_batch))
    print("   train acc: %f" % (np.sum(train_acc) / n_batch))
    # validation
    val_loss, val_acc, n_batch = 0, 0, 0
    for x_val_a, y_val_a in minibatches(x_val, y_val, batch_size, shuffle=False):
        err, ac = sess.run([loss, acc], feed_dict={x: x_val_a, y_: y_val_a})
        val_loss += err
        val_acc += ac
        n_batch += 1
    print("   validation loss: %f" % (np.sum(val_loss) / n_batch))
    print("   validation acc: %f" % (np.sum(val_acc) / n_batch))
saver.save(sess, model_path)
sess.close()
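
# Aside (standalone sketch): since inference() is built with train=False,
# dropout never fires and batch norm stays in inference mode during training.
# The usual TF1 pattern threads a boolean placeholder through instead; the
# name `is_training` below is hypothetical, not part of this script.
is_training = tf.placeholder(tf.bool, name='is_training')
feat = tf.placeholder(tf.float32, [None, 8, 8, 256])
feat_bn = tf.layers.batch_normalization(feat, training=is_training)
# feed {is_training: True} on training steps and False for validation,
# keeping the tf.GraphKeys.UPDATE_OPS dependency as above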

@ -0,0 +1,22 @@
import requests
import KMP

url = 'https://fabiaoqing.com/search/search/keyword/%E4%BA%8C%E6%AC%A1%E5%85%83%E5%A6%B9%E5%AD%90/type/bq/page/'
store_path = 'D://tensorflow//萌妹子//'
KMP.KMPinit()
total = 0
for page in range(1, 20):
    pages = url + str(page) + '.html'
    response = requests.get(pages)
    # Extract all matching image URLs from the page source
    Word = KMP.findWord(response.text)
    num = len(Word)
    for step in range(num):
        saver = requests.get(Word[step]).content
        handler = open(store_path + str(total) + '.jpg', 'wb')
        handler.write(saver)
        handler.close()
        total = total + 1
    print("Page %d: %d images this page, %d saved in total" % (page, num, total))

@ -0,0 +1,39 @@
import os
import time
import random
import shutil
from captcha.image import ImageCaptcha

CHAR_SET = ['0','1','2','3','4','5','6','7','8','9']
CHAR_SET_LEN = 10
CAPTCHA_LEN = 4
CAPTCHA_IMAGE_PATH = 'D:/tensorflow/captcha/images/'
TEST_IMAGE_PATH = 'D:/tensorflow/captcha/test/'
TEST_CASE = 200

def generate_captcha_image(PATH):
    # Write one captcha image per 4-digit string, 0000..9999
    image = ImageCaptcha()
    for i in range(10):
        for j in range(10):
            for k in range(10):
                for z in range(10):
                    cap_text = CHAR_SET[i] + CHAR_SET[j] + CHAR_SET[k] + CHAR_SET[z]
                    image.write(cap_text, PATH + cap_text + '.jpg')
    print("Finish Write!")

def generate_test_image():
    # Move TEST_CASE randomly chosen images into the test folder
    fileNameList = []
    for filePath in os.listdir(CAPTCHA_IMAGE_PATH):
        captcha_name = filePath.split('/')[-1]
        fileNameList.append(captcha_name)
    random.seed(time.time())
    random.shuffle(fileNameList)
    for i in range(TEST_CASE):
        name = fileNameList[i]
        shutil.move(CAPTCHA_IMAGE_PATH + name, TEST_IMAGE_PATH + name)

if __name__ == '__main__':
    generate_captcha_image(CAPTCHA_IMAGE_PATH)
    generate_test_image()

@ -0,0 +1,195 @@
import glob
import os
import tensorflow as tf
import numpy as np
import time
from PIL import Image

# Dataset location
path = 'D:/tensorflow/imgaes/'
# Where the model is saved
model_path = 'D:/tensorflow/saver/model.ckpt'

# Resize every image to 100*100
w = 100
h = 100
c = 3

# Read the images; each subfolder of `path` is one class
def read_img(path):
    cate = [path + x for x in os.listdir(path) if os.path.isdir(path + x)]
    imgs = []
    labels = []
    for idx, folder in enumerate(cate):
        print("folder :%s" % (folder))
        total = 0
        zero = np.zeros(5)
        zero[int(idx)] = 1  # one-hot label for this class
        for im in glob.glob(folder + '/*.jpg'):
            # print('reading the images:%s'%(im))
            image = Image.open(im).convert('RGB')
            img = image.resize((w, h), Image.ANTIALIAS)
            arr = np.asarray(img, dtype="float32")
            imgs.append(arr)
            labels.append(zero)
            total = total + 1
        print(total)
    return np.asarray(imgs, np.float32), np.asarray(labels, np.float32)

# Shuffle the whole dataset with one random permutation (a neat trick)
data, label = read_img(path)
num_example = data.shape[0]
arr = np.arange(num_example)
np.random.shuffle(arr)
data = data[arr]
label = label[arr]

# Split all data into a training set and a validation set
ratio = 0.8
s = int(num_example * ratio)
x_train = data[:s]
y_train = label[:s]
x_val = data[s:]
y_val = label[s:]
print(x_val.shape)
print(y_val.shape)

# ----------------- Build the network ----------------------
x = tf.placeholder(tf.float32, shape=[None, w, h, c], name='x')
y_ = tf.placeholder(tf.float32, shape=[None, 5], name='y_')

def inference(input_tensor, train, regularizer):
    with tf.variable_scope('layer1-conv1'):
        conv1_weights = tf.get_variable("weight", [5, 5, 3, 32],
                                        initializer=tf.truncated_normal_initializer(stddev=0.1))
        conv1_biases = tf.get_variable("bias", [32], initializer=tf.constant_initializer(0.0))
        conv1 = tf.nn.conv2d(input_tensor, conv1_weights, strides=[1, 1, 1, 1], padding='SAME')
        relu1 = tf.nn.relu(tf.nn.bias_add(conv1, conv1_biases))
    with tf.name_scope("layer2-pool1"):
        pool1 = tf.nn.max_pool(relu1, ksize=[1, 2, 2, 1], strides=[1, 2, 2, 1], padding="VALID")
    with tf.variable_scope("layer3-conv2"):
        conv2_weights = tf.get_variable("weight", [5, 5, 32, 64],
                                        initializer=tf.truncated_normal_initializer(stddev=0.1))
        conv2_biases = tf.get_variable("bias", [64], initializer=tf.constant_initializer(0.0))
        conv2 = tf.nn.conv2d(pool1, conv2_weights, strides=[1, 1, 1, 1], padding='SAME')
        relu2 = tf.nn.relu(tf.nn.bias_add(conv2, conv2_biases))
    with tf.name_scope("layer4-pool2"):
        pool2 = tf.nn.max_pool(relu2, ksize=[1, 2, 2, 1], strides=[1, 2, 2, 1], padding='VALID')
    with tf.variable_scope("layer5-conv3"):
        conv3_weights = tf.get_variable("weight", [3, 3, 64, 128],
                                        initializer=tf.truncated_normal_initializer(stddev=0.1))
        conv3_biases = tf.get_variable("bias", [128], initializer=tf.constant_initializer(0.0))
        conv3 = tf.nn.conv2d(pool2, conv3_weights, strides=[1, 1, 1, 1], padding='SAME')
        relu3 = tf.nn.relu(tf.nn.bias_add(conv3, conv3_biases))
    with tf.name_scope("layer6-pool3"):
        pool3 = tf.nn.max_pool(relu3, ksize=[1, 2, 2, 1], strides=[1, 2, 2, 1], padding='VALID')
    with tf.variable_scope("layer7-conv4"):
        conv4_weights = tf.get_variable("weight", [3, 3, 128, 128],
                                        initializer=tf.truncated_normal_initializer(stddev=0.1))
        conv4_biases = tf.get_variable("bias", [128], initializer=tf.constant_initializer(0.0))
        conv4 = tf.nn.conv2d(pool3, conv4_weights, strides=[1, 1, 1, 1], padding='SAME')
        relu4 = tf.nn.relu(tf.nn.bias_add(conv4, conv4_biases))
    with tf.name_scope("layer8-pool4"):
        pool4 = tf.nn.max_pool(relu4, ksize=[1, 2, 2, 1], strides=[1, 2, 2, 1], padding='VALID')
    nodes = 6 * 6 * 128  # 100 -> 50 -> 25 -> 12 -> 6 after four 2x2 VALID pools
    reshaped = tf.reshape(pool4, [-1, nodes])
    with tf.variable_scope('layer9-fc1'):
        fc1_weights = tf.get_variable("weight", [nodes, 1024],
                                      initializer=tf.truncated_normal_initializer(stddev=0.1))
        if regularizer is not None:
            tf.add_to_collection('losses', regularizer(fc1_weights))
        fc1_biases = tf.get_variable("bias", [1024], initializer=tf.constant_initializer(0.1))
        fc1 = tf.nn.relu(tf.matmul(reshaped, fc1_weights) + fc1_biases)
        if train:
            fc1 = tf.nn.dropout(fc1, 0.5)
    with tf.variable_scope('layer10-fc2'):
        fc2_weights = tf.get_variable("weight", [1024, 512],
                                      initializer=tf.truncated_normal_initializer(stddev=0.1))
        if regularizer is not None:
            tf.add_to_collection('losses', regularizer(fc2_weights))
        fc2_biases = tf.get_variable("bias", [512], initializer=tf.constant_initializer(0.1))
        fc2 = tf.nn.relu(tf.matmul(fc1, fc2_weights) + fc2_biases)
        if train:
            fc2 = tf.nn.dropout(fc2, 0.5)
    with tf.variable_scope('layer11-fc3'):
        fc3_weights = tf.get_variable("weight", [512, 5],
                                      initializer=tf.truncated_normal_initializer(stddev=0.1))
        if regularizer is not None:
            tf.add_to_collection('losses', regularizer(fc3_weights))
        fc3_biases = tf.get_variable("bias", [5], initializer=tf.constant_initializer(0.1))
        logit = tf.matmul(fc2, fc3_weights) + fc3_biases
    return logit

# --------------------------- End of network ---------------------------
regularizer = tf.contrib.layers.l2_regularizer(0.0001)
# Built with train=False, so the dropout branches above stay inactive
logits = inference(x, False, regularizer)

# (Small trick) multiply logits by 1 under a fixed name so the output tensor
# can be fetched as 'logits_eval' when the saved model is reloaded
b = tf.constant(value=1, dtype=tf.float32)
logits_eval = tf.multiply(logits, b, name='logits_eval')

# loss = tf.nn.sparse_softmax_cross_entropy_with_logits(logits=logits, labels=y_)
# loss = -tf.reduce_sum(y_*tf.log(logits))
loss = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(logits=logits, labels=y_))
train_op = tf.train.AdamOptimizer(learning_rate=0.001).minimize(loss)
correct_prediction = tf.equal(tf.argmax(logits, 1), tf.argmax(y_, 1))
acc = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))

# Helper that yields the data in batches
def minibatches(inputs=None, targets=None, batch_size=None, shuffle=False):
    assert len(inputs) == len(targets)
    if shuffle:
        indices = np.arange(len(inputs))
        np.random.shuffle(indices)
    for start_idx in range(0, len(inputs) - batch_size + 1, batch_size):
        if shuffle:
            excerpt = indices[start_idx:start_idx + batch_size]
        else:
            excerpt = slice(start_idx, start_idx + batch_size)
        yield inputs[excerpt], targets[excerpt]

# n_epoch can be set larger for training and testing
n_epoch = 30
batch_size = 64
saver = tf.train.Saver()
sess = tf.Session()
sess.run(tf.global_variables_initializer())
for epoch in range(n_epoch):
    start_time = time.time()
    # training
    train_loss, train_acc, n_batch = 0, 0, 0
    for x_train_a, y_train_a in minibatches(x_train, y_train, batch_size, shuffle=True):
        _, err, ac = sess.run([train_op, loss, acc], feed_dict={x: x_train_a, y_: y_train_a})
        train_loss += err
        train_acc += ac
        n_batch += 1
    print("   train loss: %f" % (np.sum(train_loss) / n_batch))
    print("   train acc: %f" % (np.sum(train_acc) / n_batch))
    # validation
    val_loss, val_acc, n_batch = 0, 0, 0
    for x_val_a, y_val_a in minibatches(x_val, y_val, batch_size, shuffle=False):
        err, ac = sess.run([loss, acc], feed_dict={x: x_val_a, y_: y_val_a})
        val_loss += err
        val_acc += ac
        n_batch += 1
    print("   validation loss: %f" % (np.sum(val_loss) / n_batch))
    print("   validation acc: %f" % (np.sum(val_acc) / n_batch))
saver.save(sess, model_path)
sess.close()

@ -0,0 +1,57 @@
import tensorflow as tf
import numpy as np
from PIL import Image
import os

# class index -> label name (kept in Chinese to match the training folders)
flower_dict = {0:'小熊猫', 1:'滑稽', 2:'萌妹子', 3:'小坏坏', 4:'小黄鸡'}
path = 'C:/Users/yuanshao/Desktop/test/'
w = 100
h = 100
c = 3

def read_one_image(path):
    image = Image.open(path).convert('RGB')
    img = image.resize((w, h), Image.ANTIALIAS)
    return np.asarray(img)

def preprocess():
    # Rename every file in `path` to a sequential number, then add a .jpg suffix
    count = 0
    for file in os.listdir(path):
        new_name = os.path.join(path, str(count))
        os.rename(os.path.join(path, file), new_name)
        count += 1
    for file in os.listdir(path):
        ori_name = path + file
        os.rename(ori_name, ori_name + '.jpg')

with tf.Session() as sess:
    PATH = [path + x for x in os.listdir(path)]
    data = []
    pic = []
    preprocess()
    for i in range(len(PATH)):
        picture = path + str(i) + '.jpg'
        print(picture)
        pic.append(picture)
        data.append(read_one_image(picture))
    saver = tf.train.import_meta_graph('D:/tensorflow/saver/model.ckpt.meta')
    saver.restore(sess, tf.train.latest_checkpoint('D:/tensorflow/saver/'))
    graph = tf.get_default_graph()
    x = graph.get_tensor_by_name("x:0")
    logits = graph.get_tensor_by_name("logits_eval:0")
    classification_result = sess.run(logits, feed_dict={x: data})
    # Print the prediction matrix
    print(classification_result)
    # Print the index of the max value in each row of the prediction matrix
    print(tf.argmax(classification_result, 1).eval())
    output = tf.argmax(classification_result, 1).eval()
    for i in range(len(output)):
        print("Image", i, "prediction:", flower_dict[output[i]])

@ -0,0 +1,209 @@
import tensorflow as tf
from captcha.image import ImageCaptcha
import numpy as np
import matplotlib.pyplot as plt
from PIL import Image
import random

number = ['0','1','2','3','4','5','6','7','8','9']
# alphabet = ['a','b','c','d','e','f','g','h','i','j','k','l','m','n','o','p','q','r','s','t','u','v','w','x','y','z']
# ALPHABET = ['A','B','C','D','E','F','G','H','I','J','K','L','M','N','O','P','Q','R','S','T','U','V','W','X','Y','Z']

def random_captcha_text(char_set=number, captcha_size=4):
    # Draw captcha_size random characters from the char set
    captcha_text = []
    for i in range(captcha_size):
        c = random.choice(char_set)
        captcha_text.append(c)
    return captcha_text

def gen_captcha_text_image():
    # Render a random captcha and return (text, image array)
    image = ImageCaptcha()
    captcha_text = random_captcha_text()
    captcha_text = ''.join(captcha_text)
    captcha = image.generate(captcha_text)
    captcha_image = Image.open(captcha)
    captcha_image = np.array(captcha_image)
    return captcha_text, captcha_image

def convert2gray(img):
    # Weighted RGB-to-gray conversion
    if len(img.shape) > 2:
        r, g, b = img[:, :, 0], img[:, :, 1], img[:, :, 2]
        gray = 0.2989 * r + 0.5870 * g + 0.1140 * b
        return gray
    else:
        return img

def text2vec(text):
    # Encode the captcha text as concatenated one-hot vectors
    text_len = len(text)
    if text_len > max_captcha:
        raise ValueError('captcha text is at most 4 characters')
    vector = np.zeros(max_captcha * char_set_len)

    def char2pos(c):
        if c == '_':
            k = 62
            return k
        k = ord(c) - 48
        if k > 9:
            k = ord(c) - 55
        if k > 35:
            k = ord(c) - 61
        if k > 61:
            raise ValueError('No Map')
        return k

    for i, c in enumerate(text):
        idx = i * char_set_len + char2pos(c)
        vector[idx] = 1
    return vector

def get_next_batch(batch_size=128):
    batch_x = np.zeros([batch_size, image_height*image_width])
    batch_y = np.zeros([batch_size, max_captcha*char_set_len])

    def wrap_gen_captcha_text_and_image():
        # Discard the occasional image that is not 60x160x3
        while True:
            text, image = gen_captcha_text_image()
            if image.shape == (60, 160, 3):
                return text, image

    for i in range(batch_size):
        text, image = wrap_gen_captcha_text_and_image()
        image = convert2gray(image)
        batch_x[i, :] = image.flatten() / 255
        batch_y[i, :] = text2vec(text)
    return batch_x, batch_y

def cnn_structure(w_alpha=0.01, b_alpha=0.1):
    x = tf.reshape(X, shape=[-1, image_height, image_width, 1])
    wc1 = tf.get_variable(name='wc1', shape=[3, 3, 1, 32], dtype=tf.float32,
                          initializer=tf.contrib.layers.xavier_initializer())
    # wc1 = tf.Variable(w_alpha * tf.random_normal([3, 3, 1, 32]))
    bc1 = tf.Variable(b_alpha * tf.random_normal([32]))
    conv1 = tf.nn.relu(tf.nn.bias_add(tf.nn.conv2d(x, wc1, strides=[1, 1, 1, 1], padding='SAME'), bc1))
    conv1 = tf.nn.max_pool(conv1, ksize=[1, 2, 2, 1], strides=[1, 2, 2, 1], padding='SAME')
    conv1 = tf.nn.dropout(conv1, keep_prob)

    wc2 = tf.get_variable(name='wc2', shape=[3, 3, 32, 64], dtype=tf.float32,
                          initializer=tf.contrib.layers.xavier_initializer())
    # wc2 = tf.Variable(w_alpha * tf.random_normal([3, 3, 32, 64]))
    bc2 = tf.Variable(b_alpha * tf.random_normal([64]))
    conv2 = tf.nn.relu(tf.nn.bias_add(tf.nn.conv2d(conv1, wc2, strides=[1, 1, 1, 1], padding='SAME'), bc2))
    conv2 = tf.nn.max_pool(conv2, ksize=[1, 2, 2, 1], strides=[1, 2, 2, 1], padding='SAME')
    conv2 = tf.nn.dropout(conv2, keep_prob)

    wc3 = tf.get_variable(name='wc3', shape=[3, 3, 64, 128], dtype=tf.float32,
                          initializer=tf.contrib.layers.xavier_initializer())
    # wc3 = tf.Variable(w_alpha * tf.random_normal([3, 3, 64, 128]))
    bc3 = tf.Variable(b_alpha * tf.random_normal([128]))
    conv3 = tf.nn.relu(tf.nn.bias_add(tf.nn.conv2d(conv2, wc3, strides=[1, 1, 1, 1], padding='SAME'), bc3))
    conv3 = tf.nn.max_pool(conv3, ksize=[1, 2, 2, 1], strides=[1, 2, 2, 1], padding='SAME')
    conv3 = tf.nn.dropout(conv3, keep_prob)

    # After three SAME 2x2 pools: 60x160 -> 30x80 -> 15x40 -> 8x20
    wd1 = tf.get_variable(name='wd1', shape=[8*20*128, 1024], dtype=tf.float32,
                          initializer=tf.contrib.layers.xavier_initializer())
    # wd1 = tf.Variable(w_alpha * tf.random_normal([7*20*128,1024]))
    bd1 = tf.Variable(b_alpha * tf.random_normal([1024]))
    dense = tf.reshape(conv3, [-1, wd1.get_shape().as_list()[0]])
    dense = tf.nn.relu(tf.add(tf.matmul(dense, wd1), bd1))
    dense = tf.nn.dropout(dense, keep_prob)

    wout = tf.get_variable('wout', shape=[1024, max_captcha * char_set_len], dtype=tf.float32,
                           initializer=tf.contrib.layers.xavier_initializer())
    # wout = tf.Variable(w_alpha * tf.random_normal([1024, max_captcha * char_set_len]))
    bout = tf.Variable(b_alpha * tf.random_normal([max_captcha * char_set_len]))
    out = tf.add(tf.matmul(dense, wout), bout)
    return out

def train_cnn():
    output = cnn_structure()
    cost = tf.reduce_mean(tf.nn.sigmoid_cross_entropy_with_logits(logits=output, labels=Y))
    optimizer = tf.train.AdamOptimizer(learning_rate=0.001).minimize(cost)
    predict = tf.reshape(output, [-1, max_captcha, char_set_len])
    max_idx_p = tf.argmax(predict, 2)
    max_idx_l = tf.argmax(tf.reshape(Y, [-1, max_captcha, char_set_len]), 2)
    correct_pred = tf.equal(max_idx_p, max_idx_l)
    accuracy = tf.reduce_mean(tf.cast(correct_pred, tf.float32))
    saver = tf.train.Saver()
    with tf.Session() as sess:
        init = tf.global_variables_initializer()
        sess.run(init)
        step = 0
        while True:
            batch_x, batch_y = get_next_batch(100)
            _, cost_ = sess.run([optimizer, cost], feed_dict={X: batch_x, Y: batch_y, keep_prob: 0.75})
            print(step, cost_)
            if step % 10 == 0:
                batch_x_test, batch_y_test = get_next_batch(100)
                acc = sess.run(accuracy, feed_dict={X: batch_x_test, Y: batch_y_test, keep_prob: 1.})
                print(step, acc)
                if acc > 0.99:
                    saver.save(sess, "./model/crack_capcha.model", global_step=step)
                    break
            step += 1

def crack_captcha(captcha_image):
    output = cnn_structure()
    saver = tf.train.Saver()
    with tf.Session() as sess:
        saver.restore(sess, "./model/crack_capcha.model-1200")
        predict = tf.argmax(tf.reshape(output, [-1, max_captcha, char_set_len]), 2)
        text_list = sess.run(predict, feed_dict={X: [captcha_image], keep_prob: 1.})
        text = text_list[0].tolist()
        return text

if __name__ == '__main__':
    # train flag: 0 = train the CNN, 1 = restore the checkpoint and predict
    train = 1
    if train == 0:
        text, image = gen_captcha_text_image()
        print("captcha image size:", image.shape)  # (60,160,3)
        image_height = 60
        image_width = 160
        max_captcha = len(text)
        print("max captcha text length", max_captcha)
        char_set = number
        char_set_len = len(char_set)
        X = tf.placeholder(tf.float32, [None, image_height * image_width])
        Y = tf.placeholder(tf.float32, [None, max_captcha * char_set_len])
        keep_prob = tf.placeholder(tf.float32)
        train_cnn()
    if train == 1:
        image_height = 60
        image_width = 160
        char_set = number
        char_set_len = len(char_set)
        text, image = gen_captcha_text_image()
        f = plt.figure()
        ax = f.add_subplot(111)
        ax.text(0.1, 0.9, text, ha='center', va='center', transform=ax.transAxes)
        plt.imshow(image)
        # plt.show()
        max_captcha = len(text)
        image = convert2gray(image)
        image = image.flatten() / 255
        X = tf.placeholder(tf.float32, [None, image_height * image_width])
        Y = tf.placeholder(tf.float32, [None, max_captcha * char_set_len])
        keep_prob = tf.placeholder(tf.float32)
        predict_text = crack_captcha(image)
        print("actual: {}  predicted: {}".format(text, predict_text))
        plt.show()
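
# Aside (sketch): crack_captcha returns char-set indices, one per position;
# mapping them back through `number` recovers the string. `vec2text` is a
# hypothetical helper name, not defined elsewhere in this repo.
def vec2text(indices, char_set=number):
    return ''.join(char_set[i] for i in indices)
# e.g. vec2text([1, 9, 7, 0]) == '1970'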

@ -0,0 +1,65 @@
# coding = utf-8
# Circle every face in the picture
import numpy as np
import math
import cv2
import dlib

path = '/Users/yanshao/Desktop/img/timg.jpeg'
Model = '/Users/yanshao/dlib/shape_predictor_5_face_landmarks.dat'

detector = dlib.get_frontal_face_detector()
predictor = dlib.shape_predictor(Model)

# First pad the image into a square so it can be rotated without clipping
def preprocess():
    image = cv2.imread(path)
    height, width, chl = image.shape[0], image.shape[1], image.shape[2]
    rec = int(max(height, width) * 1.5)
    pic = np.zeros((rec, rec, chl), np.uint8)
    baseh, basew = (rec - height) // 2, (rec - width) // 2
    pic[baseh:height + baseh, basew:width + basew] = image
    return pic, rec

def get_dist(point1, point2):
    return math.sqrt((point1[0]-point2[0])*(point1[0]-point2[0]) +
                     (point1[1]-point2[1])*(point1[1]-point2[1]))

# Map a point in the rotated image back to the original image by solving
# the 2x2 linear part of the affine transform M
def get_pos(point, M):
    return np.linalg.solve([[M[0][0], M[0][1]],
                            [M[1][0], M[1][1]]],
                           [point[0]-M[0][2], point[1]-M[1][2]]).astype(np.int32)

pic, rec = preprocess()
center = (rec//2, rec//2)
faces = []
# To catch every face, rotate the image by 45 degrees at a time and run the
# detector on each rotation
for ang in range(0, 8):
    M = cv2.getRotationMatrix2D(center, 45*ang, 1)
    pic2 = cv2.warpAffine(pic, M, (rec, rec))
    dets = detector(pic2, 1)
    print(len(dets))
    for face in dets:
        left, right, top, bot = face.left(), face.right(), face.top(), face.bottom()
        LT, RB = get_pos((left, top), M), get_pos((right, bot), M)
        faces.append(((LT[0]+RB[0])//2, (LT[1]+RB[1])//2, (right-left)//2))
cv2.destroyAllWindows()

# Keep a detection only if its circle does not overlap an earlier one
length = len(faces)
for i in range(0, length):
    flag = False
    for j in range(0, i):
        if get_dist(faces[i], faces[j]) < faces[i][2]+faces[j][2]:
            flag = True
    if not flag:
        cv2.circle(pic, (faces[i][0], faces[i][1]), faces[i][2], (0, 255, 0))
cv2.imshow("img", pic)
cv2.waitKey(0)
cv2.destroyAllWindows()
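
# Aside (standalone check): get_pos inverts the rotation by solving the 2x2
# linear part of M; cv2.invertAffineTransform computes the same inverse map
# and makes a handy cross-check.
M_demo = cv2.getRotationMatrix2D((50, 50), 45, 1)
M_inv = cv2.invertAffineTransform(M_demo)
p = np.array([30.0, 40.0])
print(M_inv[:, :2].dot(p) + M_inv[:, 2])  # equals get_pos((30, 40), M_demo) up to int truncation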

Binary file not shown.

Binary file not shown.

@ -0,0 +1,20 @@
https://book.qidian.com/info/1011774352 海贼之天赋系统
https://book.qidian.com/info/1009934766 火影之活久见
https://book.qidian.com/info/1009352601 海贼之雷神降临
https://book.qidian.com/info/1011987395 满级账号在异界
https://book.qidian.com/info/1004591465 寻找走丢的舰娘
https://book.qidian.com/info/1010837556 忍者招募大师
https://book.qidian.com/info/1010523354 妙木山的塔姆仙人
https://book.qidian.com/info/1004975904 海贼:厌世之歌
https://book.qidian.com/info/1005313052 请回答火影
https://book.qidian.com/info/1010731906 木叶之实力至上
https://book.qidian.com/info/1011704667 漫威世界中的幽灵
https://book.qidian.com/info/1009361813 魔法与万象卡牌系统
https://book.qidian.com/info/1004597439 我的女仆是恶魔
https://book.qidian.com/info/1011121140 捉妖奶爸
https://book.qidian.com/info/1005115417 火影之最强卡卡西
https://book.qidian.com/info/1005235019 神级剑魂系统
https://book.qidian.com/info/1010279627 变身之萝莉主播
https://book.qidian.com/info/1012043959 漫威之苍雷之影
https://book.qidian.com/info/1012055792 变身之漫威天才
https://book.qidian.com/info/1004607972 龙珠之最强神话

@ -0,0 +1,5 @@
259
变身之漫威天才
111
紫妈的故事
二次元

Binary image added (not shown; 58 KiB).

@ -0,0 +1,7 @@
新书《变身在美漫》
重生成为玛雅·汉森的女儿智商220的天才。
一,如何将万年屌丝古德里奇·基力安送进监狱。
二,如何建立超过托尼·斯塔克的科技帝国。
三,如何建立光照会。
四,如何拯救北极星。
书友群707272151验证变身之漫威天才

@ -0,0 +1,5 @@
257
变身之萝莉主播
111
青棘
二次元

Binary image added (not shown; 39 KiB).

@ -0,0 +1,7 @@
莫名其妙变成一只十岁小萝莉,莫名其妙又获得一个‘嘴强王者’系统。
她可以在直播的时候,将脑子里面的各种不可思议的骚操作实现出来!
绝地求生30杀吃鸡这真的是基本操作只要单人四排就行坐下坐下。
唱歌直播?全能天王歌喉,听过一遍就能直接唱出来,甚至唱得更好。
无数星探来挖?抱歉,我可不当什么大明星,我只想安安静静的做直播。
一只逆天小萝莉的直播之旅,无意间搅动起整个世界。然而她最大的心愿,只想静静的做一只鸽王,直播什么的,一周十个小时就行啦……
千人读者群673827995

@ -0,0 +1,5 @@
247
妙木山的塔姆仙人
111
河流之汪
二次元

Binary image added (not shown; 39 KiB).

@ -0,0 +1,7 @@
【这是火影同人】
主角人设LOL英雄河流之王塔姆。
这是一个物种不明的假蛤蟆混入忍界最大雇佣兵公司妙木山又顺利拿到千年豪门日向家族offer从此成为雏田大小姐贴身保镖、最终走向人生巅峰的职场励志故事。
好吧...这其实是一个大挂比在忍界培养各种小挂比,用外挂打败拼爹的毒鸡汤故事。
PS新人作品前期毒草请多包涵
地泽万物神农不死,欢迎各位农家弟子来此试炼。
相信我,我这毒草越嚼越甜(/ω\)

@ -0,0 +1,5 @@
245
寻找走丢的舰娘
111
海底熔岩
二次元

Binary image added (not shown; 74 KiB).

@ -0,0 +1,5 @@
当你脱坑好长时间之后你可曾想过你的姑娘们会怎样?
港区因为长官离开而分崩离析;
胡德带着声望离开去了遥远的城市,提尔比茨为了生活在画同人本子,弗莱彻为了几个妹妹的生活同时打着几份工,无敌的苏赫巴托尔大人一手抱着黑猫奥斯卡一手抱着凶猛的大老虎有些茫然不知所措;
儿童节的小提尔比茨,小宅也是幼宅为了寻找自己的长官走遍千山万水,当你感受到那份心情之后只想再重建那份曾经辉煌。
欢迎加入书友群298746959

@ -0,0 +1,5 @@
246
忍者招募大师
111
24K纯帅鸦
二次元

Binary image added (not shown; 20 KiB).

@ -0,0 +1,3 @@
携招募系统穿越忍界鸣人大哥,收集忍者碎片招募忍者,成就忍界最强弟控!
(新书综漫《万界最强吻神》已经可以宰了,欢迎大家继续支持)
本书官方qq群678253803

@ -0,0 +1,5 @@
253
我的女仆是恶魔
111
空明音
二次元

Binary image added (not shown; 37 KiB).

@ -0,0 +1,2 @@
大学生白晓树,在生日的当天突然接到父亲奇怪的电话,然后莫名其妙的被不明黑衣人追杀,白晓树疑惑的按照父亲电话的指示回到祖宅,生死之间与恶魔签订契约,醒来之后一个漂亮可爱的女孩自称是他的女仆,然而所有的诡异事件才刚刚开始……
空明音书友群434860648

@ -0,0 +1,5 @@
254
捉妖奶爸
111
火中物
二次元

Binary image added (not shown; 38 KiB).

@ -0,0 +1,9 @@
这是一个宅男蜕变为无敌奶爸的故事!
我的娃,由我来守护!
什么?
孩子妈有意见?
咱们回房,我给你正一正三观!
【原名:无敌奶爸的捉妖日常】
【火锅粉《一号桌》群号489337956进群无限制。】
【火锅粉贵宾席9897952进群需2000粉丝值验证。】
【十三年老作者倾力奉献,水平稳定,大局观血强。】

@ -0,0 +1,5 @@
250
木叶之实力至上
111
召弓
二次元

Binary image added (not shown; 82 KiB).

@ -0,0 +1,8 @@
ps新书《崛起最强战法》求支持
这是一本穿越成鸣人的魔改同人。
CP原装鸣人雏田。
主角性格:有爱心不圣母+不作死就活不下去。
主角爱好:热爱研究,算是披着鸣人皮的大蛇丸。
主角外挂:身体里有一个能够吞噬灵魂融合查克拉性质变化的怪物。
本书的缺点1.加了些原创的人物2.设定源于火影改造于无良作者召弓。
本书优点:脑洞爆炸!

@ -0,0 +1,5 @@
241
海贼之天赋系统
111
夜南听风
二次元

Binary image added (not shown; 38 KiB).

@ -0,0 +1,4 @@
他并不想被悬赏通缉,也不想当海贼,但如果没有其他选择,只能当海贼的话……
那就让这个世界,天翻地覆吧!
…………………………
书友群580391329

@ -0,0 +1,5 @@
243
海贼之雷神降临
111
焉得羽翼兮
二次元

Binary image added (not shown; 12 KiB).

@ -0,0 +1,5 @@
围绕着自由与梦想为主题而又波澜壮阔的海贼世界,忽然闯入了一个不速之客!
没有力量就一无所有的世界中,是选择甘于平淡碌碌无为,还是用自己的命为赌注搏出一条通天大道?
“疯狂与肆意,畅快与刺激!我追寻的是最精彩和无悔的人生啊!”
“只管把目标定在最高峰,别人要笑就让他去笑吧!”
交流群372599638

@ -0,0 +1,5 @@
248
海贼:厌世之歌
111
农夫一拳
二次元

Binary image added (not shown; 33 KiB).

@ -0,0 +1,8 @@
一颗恶魔果实,一身用血与汗换来的体术,一身用牵挂与疯狂换来的霸气。
王,乃是最贪婪、最高傲、最易怒之人;王的身后,留下的不是美好,而是尸骨成山。
心中所想,心中所念,在所不惜;一生的执念,愿抛弃一切,不折手段!
其实,这只是一个可怜人为了执念,而变成神经病的故事。
世人皆可抛,唯亲人不离不弃!
新书:《海贼之爆炸艺术》《妖尾之金金果实》请支持,多谢!

@ -0,0 +1,5 @@
244
满级账号在异界
111
尺间萤火
二次元

Binary image added (not shown; 23 KiB).

@ -0,0 +1,12 @@
打拼十年,终于坑死体内老爷爷,继承他的系统,穿越到剑与魔法的异世界。
村民开局,等级归零,还有女儿要养,就连壁虎也爬上床欺辱他。
丢你壁虎~
【杀死壁虎等级上升lv1、lv2、lv3……lv120】
【恭喜游戏者,已达到满级】
【请游戏者开始攻略世界剩余世界数9^999】
打工是不可能打工的,就是发展轮回者这种生物,才能维持得了生活这样子
.
(真.无敌+小世界快穿+灵气复苏,再问自杀)
.
ps:书友群——805815626问题答案是尺间萤火

@ -0,0 +1,5 @@
251
漫威世界中的幽灵
111
职业偷懒
二次元

Binary image added (not shown; 48 KiB).

@ -0,0 +1,4 @@
漫威世界之中突然发生了很多奇怪的事情,那些未来的超级英雄们开始被不断的骚扰,可是他们又抓不到丝毫的线索,这种困扰持续了好久,直到一个名为墨菲的男人出现才结束……
墨菲原名莫飞,是一个穿越者,他经历无尽的孤独才再次为人。
“穿越者是一种高危职业,也许男变女、也许变猫变狗,但是为什么我会穿越成一堆数据呢?”
偷懒书友群669561750。

@ -0,0 +1,5 @@
258
漫威之苍雷之影
111
青圭大大
二次元

Binary image added (not shown; 54 KiB).

@ -0,0 +1,9 @@
钢铁侠还没有诞生就穿越过来的方问,苟到奥创出现才开始冒头。
本以为开局无敌,却发现,对手一个比一个强大。
方问的内心是崩溃的。
“我只是想装个逼,为什么那么难???”
p.s1前期变种人写的一般请耐心看下去越后越好看。
p.s2本书电影宇宙合理原创无漫画。
p.s3时间线复联二开始不过前面的电影也会有的。
p.s4主线安排变种人——漫威电影。
新书《漫威之致命守护者》,欢迎各位入坑。

@ -0,0 +1,5 @@
255
火影之最强卡卡西
111
墨渊九砚
二次元

Binary image added (not shown; 34 KiB).

@ -0,0 +1,4 @@
九尾之夜,现世的灵魂与卡卡西融为一体。从此开启一条不同的火影之路。写轮眼,雷遁,刀法,仙术,成就最强。绝对的火影味道!
火影卡卡西结束,番外篇夏目友人帐
新书《木叶墨痕》
正版书友群555050388

@ -0,0 +1,5 @@
242
火影之活久见
111
李四羊
二次元

Binary image added (not shown; 28 KiB).

@ -0,0 +1,2 @@
说出来你不信,木叶村是我建立的,漩涡一族是我带大的。
哦不对,现在那应该叫涡之国了。

@ -0,0 +1,5 @@
256
神级剑魂系统
111
夜南听风
二次元

Binary image added (not shown; 34 KiB).

@ -0,0 +1,6 @@
森罗万象,皆为灰烬,流刃若火!
穿越到海贼世界,大海贼时代十一年的罗亚,得到了一个能够无限强化,抽取各种技能的剑魂系统。
流刃若火,镜花水月,月牙天冲,千本樱……
“我这一刀下去,你可能会被烧死。”罗亚看着眼前的赤犬,很认真的说道。
………………………………
新书《海贼之天赋系统》已发布,求支持!

@ -0,0 +1,5 @@
249
请回答火影
111
蒙着面的Sama
二次元

Binary image added (not shown; 33 KiB).

@ -0,0 +1,11 @@
一支手里剑破空飞来。
这是一个没有预料到的角度。
近在咫尺的瞬间,禹小白睁大了眼睛——不是因为那支挟带死亡的利器,而是从阴影中跳出来的敌人身后,出现了一行不该在这个世界存在的文字。
【未完待续…】
“未完待续?”
……
穿越到火影世界后,禹小白经过迷茫,开始逐渐接受现实,在终于成为了一名光荣的木叶暗部时,一场战斗让他回到了现实世界。
“2017年《周刊少年JUMP》开始连载《火影忍者》……”
“等等2017年火影
某页黑白纸中,他,出镜了……
磨砺沉淀过的忍者之心就这么受到了深深的摧残。

@ -0,0 +1,5 @@
252
魔法与万象卡牌系统
111
威馆长
二次元

Binary image added (not shown; 66 KiB).

@ -0,0 +1,9 @@
“什么?你是位战士!”
“终结者上,踢死他!让他知道没有钢铁般的意志,不配叫战士。”
“还有提莫,去把墙角装隐身的那个盗贼给我擒下,以后你就用他实验新品种的蘑菇。”
“阿尔托利亚,那个武僧就交给你了!”
“对不起,伤害手无寸铁的人,有悖于我身为骑士的誓言。”
“大姐,武僧的拳头就是武器。你揍他就是了,相信我没错的!”
电影、漫画、游戏,所有的人物和道具都将为我所用!
这是一个带着卡牌召唤系统在异界浪的故事。
书友群632291742。欢迎各位小伙伴

@ -0,0 +1,5 @@
260
龙珠之最强神话
111
枫叶缀
二次元

Binary image added (not shown; 14 KiB).

@ -0,0 +1,8 @@
夏亚重生到龙珠世界成为了一名赛亚人而这时距离贝吉塔星毁灭只剩下12年。
“多多利亚先生,听说赛亚人的贝吉塔王正忙着造反呢,你觉得我该怎么处理?”弗利萨将酒杯递给旁边的侍从,饶有兴致地问多多利亚。
“那些赛亚人真是不知好歹,要不是还有些用处,属下早就带人将他们贝吉塔星毁灭了。”
“不要急,多多利亚先生,过段时间我们就去除掉那些赛亚人,到时让你们看一下宇宙中最美丽的烟花。”
这时一个紫色宇宙人连滚带爬跑了进来:“不好了弗利萨大王,尚波大人在菲达亚行星遭遇了强敌,已经全军覆没……”
已完结《龙珠之绫叶传奇》
群号118293119
新书《龙珠之牧神传说》已经上传!

@ -0,0 +1,20 @@
https://book.qidian.com/info/107580 凡人修仙传
https://book.qidian.com/info/1735921 遮天
https://book.qidian.com/info/118447 星辰变
https://book.qidian.com/info/2248950 最强弃少
https://book.qidian.com/info/2502372 莽荒纪
https://book.qidian.com/info/1264634 仙逆
https://book.qidian.com/info/1004995075 最强神话帝皇
https://book.qidian.com/info/1003723851 天影
https://book.qidian.com/info/2227457 飞天
https://book.qidian.com/info/1010053502 反套路快穿
https://book.qidian.com/info/3347627 妙医圣手
https://book.qidian.com/info/2070910 求魔
https://book.qidian.com/info/1010804088 二青
https://book.qidian.com/info/3441641 掠天记
https://book.qidian.com/info/3195551 造化之门
https://book.qidian.com/info/1005317872 能穿越的修行者
https://book.qidian.com/info/3406500 走进修仙
https://book.qidian.com/info/3347574 从前有座灵剑山
https://book.qidian.com/info/1005395194 仙藏
https://book.qidian.com/info/3135391 大泼猴

@ -0,0 +1,5 @@
73
二青
111
来不及忧伤
仙侠

Binary image added (not shown; 22 KiB).

@ -0,0 +1,5 @@
(新书《妖灵狂潮》已发布!)
岑青穿越了,重生成一条青蛇,名叫二青。
后来,他知道这个世界有猴子,有白蛇。
游遍青山蛇未老,蓦然回首已成妖!
689615506

@ -0,0 +1,5 @@
78
从前有座灵剑山
111
国王陛下
仙侠

Binary image added (not shown; 12 KiB).

@ -0,0 +1,3 @@
灵剑派成立于九州历四二三三年,几千年来始终致力于为行业提供一流的修仙人才,如今位列万仙盟五大超品宗派之一,掌门风吟真人担任万仙盟七大常务长老,修为盖世。灵剑派坚持和平与发展的主题,门派核心价值理念是求真、求善、求种。为进一步扩充门派力量,补充新鲜血液,拟于近期召开升仙大会,诚邀各路精英前来。
还是原来的节操
还是一样的搞笑

@ -0,0 +1,5 @@
79
仙藏
111
鬼雨
仙侠

Binary image added (not shown; 59 KiB).

@ -0,0 +1,8 @@
虚度光阴年近不惑的秦笛穿越到了异界,从一个束发少年接受桃花洗礼开始走上修真之路,一路奇遇,炼气,筑基,金丹,元婴,步虚,合道,地仙,灵仙,天仙,祖仙,金仙,仙王,仙帝,道祖.....
那时那日此门中,
桃花树下初相逢。
只见仙人种桃树,
未闻仙人看花红。
这是一本与众不同、悠闲安逸、富有阳光、脑洞大开的仙侠修真小说。书里没有太多的杀伐,但有一个广阔的仙侠世界,还有不少的小故事,贯穿古今,开卷有益。
仙路漫漫,岁月悠悠,希望大家与我同行。
书友群:47733057QQ

@ -0,0 +1,5 @@
66
仙逆
111
耳根
仙侠

Binary image added (not shown; 32 KiB).

@ -0,0 +1,2 @@
顺为凡,逆则仙,只在心中一念间……
请看耳根作品《仙逆》

@ -0,0 +1,5 @@
61
凡人修仙传
111
忘语
仙侠

Binary image added (not shown; 13 KiB).

@ -0,0 +1,7 @@
一个普通山村小子,偶然下进入到当地江湖小门派,成了一名记名弟子。他以这样身份,如何在门派中立足,如何以平庸的资质进入到修仙者的行列,从而笑傲三界之中!
续篇《凡人修仙之仙界篇》已经在起点中文网上传了,欢迎大家继续支持哦!
凡人修仙,风云再起
时空穿梭,轮回逆转
金仙太乙,大罗道祖
三千大道,法则至尊
《凡人修仙传》仙界篇,一个韩立叱咤仙界的故事,一个凡人小子修仙的不灭传说。

@ -0,0 +1,5 @@
70
反套路快穿
111
良心
仙侠

Binary image added (not shown; 26 KiB).

Some files were not shown because too many files have changed in this diff.