SVM + SURF + K-means Flower Classification (MATLAB)
2022-07-27 15:10:00 【秃头嘤嘤魔】
For a detailed description of the algorithm and steps, see the Python version:
https://editor.csdn.net/md/?articleId=103732613
Training script
clc
clear all
train_path = "g:/flowers/train/";
test_path = "g:/flowers/test/";
K = 100;   %number of visual words (k-means cluster centers)
%build the visual-word dictionary from the training images
build_centers(train_path,K);
%compute a bag-of-words feature vector for every training image
[x_train,y_train] = cal_vec(train_path,K);
%feed the feature vectors and labels into the SVM classifier
Train(x_train,y_train);
%evaluate accuracy on the test set
[predicted_label, accuracy, decision_values] = Test(test_path,K);
function f1 = calcSurfFeature(img)
%convert the image to grayscale
gray = rgb2gray(img);
%detect SURF interest points
points = detectSURFFeatures(gray);
%extract the SURF descriptors
[f1, vpts1] = extractFeatures(gray, points);
end
%%build the visual vocabulary (bag of visual words)
function centers = learnVocabulary(features,K)
%cluster the SURF descriptors into K visual words with k-means
%[cluster indices, cluster centers] = kmeans(data, K, 'Replicates', restarts, 'Options', opts)
opts = statset('Display','final','MaxIter',1000);
[~,centers] = kmeans(features,K,'Replicates',10,'Options',opts);
end
%%compute an image's bag-of-words feature vector from the dictionary
function featVec = calcFeatVec(features, centers,K)
featVec = zeros(1,K);
%m is the number of feature points, n is the descriptor dimension (64)
[m,n] = size(features);
for i = 1:m
fi = features(i,:);
%each descriptor is 1*64 and the dictionary is K*64:
%find the visual word (cluster center) closest to this descriptor
res = repmat(fi,K,1);
%Euclidean distance from the descriptor to every center
distance = sqrt(sum((res - centers).^2,2));
%the nearest center gets one vote in the histogram
[~,y] = min(distance);
featVec(y) = featVec(y)+1;
end
end
%%%build the visual-word dictionary from all training images
function build_centers(path,K)
%list the sub-directories of path and store their paths in cate
sub = dir(path);
cate = [];
features = [];
for i = 1:length(sub)
if sub(i).name ~= "." && sub(i).name ~= ".." && sub(i).isdir
s = strcat(strcat(path ,sub(i).name),"/");
cate = [cate s];
end
end
for i = 1:length(cate)
imgDir = dir( strcat(cate(i),'*.jpg'));
for j = 1:length(imgDir)
img = imread(strcat(cate(i),imgDir(j).name));
%compute the SURF descriptors for this image
img_f = calcSurfFeature(img);
%append them to the overall descriptor set
features = [features;img_f];
end
end
[m,n] = size(features);
fprintf("Training-set descriptor matrix: [ %d , %d ]\n",m,n);
centers = learnVocabulary(features,K);
filename = "g:/flowers/svm/svm_centers.mat";
save(filename,'centers');
end
%%compute image feature vectors from the dictionary (centers)
function [data_vec,labels] = cal_vec(path,K)
load("g:/flowers/svm/svm_centers.mat");
%feature vectors, one row per image
data_vec = [];
%labels (class index of each image)
labels = [];
%list the sub-directories of the given path
sub = dir(path);
cate = [];
for i = 1:length(sub)
if sub(i).name ~= "." && sub(i).name ~= ".." && sub(i).isdir
s = strcat(strcat(path ,sub(i).name),"/");
cate = [cate s];
end
end
for i = 1:length(cate)
imgDir = dir( strcat(cate(i),'*.jpg'));
for j = 1:length(imgDir)
img = imread(strcat(cate(i),imgDir(j).name));
%compute the SURF descriptors
img_f = calcSurfFeature(img);
img_vec = calcFeatVec(img_f, centers,K);
data_vec = [data_vec;img_vec];
labels = [labels,i];
end
end
end
%train the SVM classifier
function Train(data_vec,labels)
model = libsvm_train(labels',data_vec,'-t 0');
save("g:/flowers/svm/svm_model.mat",'model');
fprintf("Training Done!\n");
end
%evaluate the SVM on the test set
function [predicted_label, accuracy, decision_values] = Test(path,K)
%load the SVM model
load("g:/flowers/svm/svm_model.mat");
%load the dictionary
load("g:/flowers/svm/svm_centers.mat");
%compute the feature vector of every test image
[data_vec,labels] = cal_vec(path,K);
[num_test , y] = size(data_vec);
[predicted_label, accuracy, decision_values] = libsvm_predict(labels',data_vec,model);
fprintf("Testing Done!\n");
end
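The post never shows libsvm_train and libsvm_predict themselves. A minimal sketch, assuming they are nothing more than thin wrappers around LIBSVM's standard MATLAB interface (svmtrain / svmpredict), could look like this; the wrapper bodies and the double() casts are my assumption, not part of the original code.

function model = libsvm_train(labels, data, options)
%hypothetical wrapper: forwards to LIBSVM's svmtrain,
%which expects double-precision labels and features
model = svmtrain(double(labels), double(data), options);
end

function [predicted_label, accuracy, decision_values] = libsvm_predict(labels, data, model)
%hypothetical wrapper: forwards to LIBSVM's svmpredict
[predicted_label, accuracy, decision_values] = svmpredict(double(labels), double(data), model);
end

Note that some LIBSVM distributions rename the compiled MEX files to libsvmtrain / libsvmpredict to avoid clashing with the old Statistics Toolbox svmtrain; use whichever names your build provides.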
Prediction script
clc
clear all
img_path = "e:\minist\predict\";
class_flower = ["0","1","2","3","4","5","6","7","8","9"];   %class names, one per training sub-folder, in the order dir() lists them
K = 100;
for i = 1:5
path = strcat(strcat(img_path,num2str(i)),".jpg");
img = imread(path);
figure(i);
imshow(img);
res = predict(img,K);
xlabel("Prediction: " + class_flower(res));
end
%predict a single image
function res = predict(img,K)
load("g:/flowers/svm/svm_centers.mat");
load("g:/flowers/svm/svm_model.mat");
features = calcSurfFeature(img);
featVec = calcFeatVec(features, centers,K);
res = libsvm_predict([1],featVec,model);   %the label argument is only a placeholder here
end
function f1 = calcSurfFeature(img)
%convert the image to grayscale
gray = rgb2gray(img);
%detect SURF interest points
points = detectSURFFeatures(gray);
%extract the SURF descriptors
[f1, vpts1] = extractFeatures(gray, points);
end
%%compute an image's bag-of-words feature vector from the dictionary
function featVec = calcFeatVec(features, centers,K)
featVec = zeros(1,K);
%m is the number of feature points, n is the descriptor dimension (64)
[m,n] = size(features);
for i = 1:m
fi = features(i,:);
%each descriptor is 1*64 and the dictionary is K*64:
%find the visual word (cluster center) closest to this descriptor
res = repmat(fi,K,1);
%Euclidean distance from the descriptor to every center
distance = sqrt(sum((res - centers).^2,2));
%the nearest center gets one vote in the histogram
[~,y] = min(distance);
featVec(y) = featVec(y)+1;
end
end
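Incidentally, the per-descriptor distance loop inside calcFeatVec can be collapsed into a single nearest-neighbour query. Below is a compact sketch that produces the same histogram; calcFeatVecFast is a hypothetical name, and it assumes knnsearch from the Statistics and Machine Learning Toolbox (which the post already relies on for kmeans) is available.

%%compact alternative to calcFeatVec (hypothetical helper, same output)
function featVec = calcFeatVecFast(features, centers, K)
%assign every SURF descriptor to its nearest visual word in one call
idx = knnsearch(centers, double(features));
%count how many descriptors vote for each of the K words (1-by-K histogram)
featVec = accumarray(idx, 1, [K 1])';
end

Either version can be dropped into cal_vec and predict without changing anything else, since the returned 1-by-K histogram is the same.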