update: 1024维的模型

This commit is contained in:
divenswu 2024-01-22 18:57:19 +08:00
parent 297d1b525f
commit 475bdc58a0
3 changed files with 62 additions and 9 deletions

View File

@ -8,8 +8,11 @@ import com.visual.face.search.core.domain.FaceInfo.Embedding;
import com.visual.face.search.core.domain.ImageMat;
import com.visual.face.search.core.utils.ArrayUtil;
import org.opencv.core.Scalar;
import java.util.Arrays;
import java.util.Collections;
import java.util.Map;
import java.util.stream.Collectors;
/**
* 人脸识别-人脸特征提取
@ -33,6 +36,19 @@ public class SeetaFaceOpenRecognition extends BaseOnnxInfer implements FaceRecog
*/
/**
 * Face-feature extraction entry point. Dispatches to the 512-dim ("light")
 * implementation when params contains model=light, otherwise to the
 * 1024-dim ("large") implementation.
 *
 * @param image  input image to embed
 * @param params optional parameters; may be null — a null map or any
 *               value other than "light" under key "model" selects the
 *               large (1024-dim) model
 * @return the extracted face embedding
 */
@Override
public Embedding inference(ImageMat image, Map<String, Object> params) {
    // Null-guard: callers may pass params == null (see the test file's
    // earlier usage), which previously threw a NullPointerException on
    // params.get("model"). Default to the large model in that case.
    if(null != params && "light".equals(params.get("model"))){
        return inferenceLight(image, params);
    }else{
        return inferenceLarge(image, params);
    }
}
/**
* 人脸识别人脸特征向量-512维
* @param image 图像信息
* @return
*/
private Embedding inferenceLight(ImageMat image, Map<String, Object> params) {
OnnxTensor tensor = null;
OrtSession.Result output = null;
try {
@ -56,4 +72,38 @@ public class SeetaFaceOpenRecognition extends BaseOnnxInfer implements FaceRecog
}
}
/**
 * Extract a 1024-dimension face feature vector (the "large" model path).
 *
 * @param image  image to embed; resized to 248x248 before inference
 *               (NOTE(review): 248x248 assumed to match the ONNX model's
 *               expected input — confirm against the model file)
 * @param params extra parameters (unused on this path)
 * @return L2-normalized embedding paired with the image encoded as base64
 * @throws RuntimeException wrapping any inference failure
 */
public Embedding inferenceLarge(ImageMat image, Map<String, Object> params) {
    OnnxTensor tensor = null;
    OrtSession.Result output = null;
    try {
        tensor = image.resizeAndNoReleaseMat(248,248)
            .blobFromImageAndDoReleaseMat(1.0, new Scalar(0, 0, 0), true)
            .to4dFloatOnnxTensorAndDoReleaseMat(true);
        output = getSession().run(Collections.singletonMap(getInputName(), tensor));
        // Output is indexed as [batch][channel][h][w]; taking [0] drops the
        // batch dimension, then each channel's [0][0] scalar is read below.
        float[][][] predictions = ((float[][][][]) output.get(0).getValue())[0];
        float[] embeds = new float[predictions.length];
        for(int i = 0; i < embeds.length; i++){
            // Plain (float) cast instead of Double.valueOf(...).floatValue():
            // same result, no needless boxing.
            // NOTE(review): sqrt of the raw output presumably matches the
            // model's training-time post-processing — confirm.
            embeds[i] = (float) Math.sqrt(predictions[i][0][0]);
        }
        // L2-normalize the feature vector so cosine similarity works directly.
        double normValue = ArrayUtil.matrixNorm(embeds);
        float[] embedding = ArrayUtil.division(embeds, (float) normValue);
        return Embedding.build(image.toBase64AndNoReleaseMat(), embedding);
    } catch (Exception e) {
        throw new RuntimeException(e);
    } finally {
        // Always release native ONNX resources, even on failure.
        if(null != tensor){
            tensor.close();
        }
        if(null != output){
            output.close();
        }
    }
}
}

View File

@ -20,33 +20,36 @@ import java.util.Map;
// Manual test driver for SeetaFaceOpenRecognition (run via main, not JUnit).
public class SeetaFaceOpenRecognitionTest extends BaseTest {
// Path to the 106-keypoint coordinate model used for alignment.
private static String modelCoordPath = "face-search-core/src/main/resources/model/onnx/keypoint_coordinate/coordinate_106_mobilenet_05.onnx";
// NOTE(review): modelSeetaPath is declared twice (here and three lines down).
// This looks like un-marked diff residue (old line kept next to new line);
// only one declaration — presumably the 1024-dim model below — should remain.
private static String modelSeetaPath = "face-search-core/src/main/resources/model/onnx/recognition_fcae_seeta/face_recognizer_512.onnx";
// private static String modelSeetaPath = "face-search-core/src/main/resources/model/onnx/recognition_fcae_seeta/face_recognizer_1024.onnx";
// private static String modelSeetaPath = "face-search-core/src/main/resources/model/onnx/recognition_face_seeta/face_recognizer_512.onnx";
private static String modelSeetaPath = "face-search-core/src/main/resources/model/onnx/recognition_face_seeta/face_recognizer_1024.onnx";
// Directory of face test images (imagePath itself appears unused in main below).
private static String imagePath = "face-search-core/src/test/resources/images/faces";
// private static String imagePath1 = "face-search-core/src/test/resources/images/faces/debug/debug_0001.jpg";
// private static String imagePath2 = "face-search-core/src/test/resources/images/faces/debug/debug_0004.jpeg";
private static String imagePath1 = "face-search-core/src/test/resources/images/faces/compare/1682052661610.jpg";
// NOTE(review): imagePath2 is also declared twice (here and below) — same
// un-marked diff residue; keep only one.
private static String imagePath2 = "face-search-core/src/test/resources/images/faces/compare/1682052669004.jpg";
// private static String imagePath2 = "face-search-core/src/test/resources/images/faces/compare/1682053163961.jpg";
// private static String imagePath2 = "face-search-core/src/test/resources/images/faces/compare/1682052669004.jpg";
private static String imagePath2 = "face-search-core/src/test/resources/images/faces/compare/1682053163961.jpg";
// Entry point: loads one image, runs the recognizer, and prints the raw
// embedding vector. The similarity comparison path is commented out.
public static void main(String[] args) {
Map<String, Object> params = new HashMap<>();
// NOTE(review): "light1" does not equal "light", so the recognizer will
// take the large (1024-dim) path — presumably intentional for this test,
// but confirm; a typo for "light" would silently change the model used.
params.put("model", "light1");
FaceAlignment simple005pFaceAlignment = new Simple005pFaceAlignment();
FaceKeyPoint insightCoordFaceKeyPoint = new InsightCoordFaceKeyPoint(modelCoordPath, 1);
FaceRecognition insightSeetaFaceRecognition = new SeetaFaceOpenRecognition(modelSeetaPath, 1);
Mat image1 = Imgcodecs.imread(imagePath1);
Mat image2 = Imgcodecs.imread(imagePath2);
// Mat image2 = Imgcodecs.imread(imagePath2);
// image1 = CropUtil.crop(image1, FaceInfo.FaceBox.build(54,27,310,380));
// image2 = CropUtil.crop(image2, FaceInfo.FaceBox.build(48,13,292,333));
// image2 = CropUtil.crop(image2, FaceInfo.FaceBox.build(52,9,235,263));
// simple005pFaceAlignment.inference()
// NOTE(review): embedding1 is declared twice below (here with params=null,
// and again further down with params). This looks like un-marked diff
// residue (removed line shown next to its replacement); only the later
// declaration — the one passing params — should remain. The params=null
// call would NPE in SeetaFaceOpenRecognition.inference as written.
FaceInfo.Embedding embedding1 = insightSeetaFaceRecognition.inference(ImageMat.fromCVMat(image1), null);
FaceInfo.Embedding embedding2 = insightSeetaFaceRecognition.inference(ImageMat.fromCVMat(image2), null);
float similarity = Similarity.cosineSimilarity(embedding1.embeds, embedding2.embeds);
System.out.println(similarity);
FaceInfo.Embedding embedding1 = insightSeetaFaceRecognition.inference(ImageMat.fromCVMat(image1), params);
System.out.println(Arrays.toString(embedding1.embeds));
// FaceInfo.Embedding embedding2 = insightSeetaFaceRecognition.inference(ImageMat.fromCVMat(image2), params);
// float similarity = Similarity.cosineSimilarity(embedding1.embeds, embedding2.embeds);
// System.out.println(similarity);
// System.out.println(Arrays.toString(embedding1.embeds));
// System.out.println(Arrays.toString(embedding2.embeds));
}