#include <stdio.h>
#include <ctime>
#include <stdlib.h>
#include <arm_neon.h> // enable NEON intrinsics
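// Build note (assumption about the toolchain, not from the original source):
// on 32-bit ARM targets, NEON intrinsics typically require compiling with
// -mfpu=neon (e.g. g++ -O2 -mfpu=neon main.cpp); on AArch64, NEON is part of
// the base instruction set and no extra flag is needed.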

#define SIZE 102400000 // element count; a multiple of 4, so the NEON loop covers every element

// Baseline (scalar) vector addition
void vector_add(float* A, float* B, float* C, int size)
{
    for (int i = 0; i < size; ++i)
    {
        C[i] = A[i] + B[i];
    }
}

// NEON-optimized vector addition
void vector_add_optimized(float* A, float* B, float* C, int size)
{
    int i = 0;
    for (; i + 4 <= size; i += 4)
    {
        // Load 4 floats from A and B into NEON registers
        float32x4_t vecA = vld1q_f32(&A[i]);
        float32x4_t vecB = vld1q_f32(&B[i]);
        // Vector addition
        float32x4_t vecC = vaddq_f32(vecA, vecB);
        // Store the result into C
        vst1q_f32(&C[i], vecC);
    }
    // Scalar tail for sizes that are not a multiple of 4
    for (; i < size; ++i)
    {
        C[i] = A[i] + B[i];
    }
}
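
/*
 * Optional sanity check (a minimal sketch, not part of the original program):
 * compares the scalar and the NEON results element by element. Both paths do
 * exactly one IEEE single-precision add per element, so exact equality is
 * expected. Using it requires a separate output buffer for each version.
 * The function name check_results is hypothetical.
 */
int check_results(const float* ref, const float* out, int size)
{
    for (int i = 0; i < size; ++i)
    {
        if (ref[i] != out[i])
        {
            return 0; // mismatch found
        }
    }
    return 1; // all elements agree
}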

// Baseline matrix multiplication
#define M 2000
#define N 2000
#define Q 2000
// Matrix dimensions: A is M*N, B is N*Q, C is M*Q
void matmul(float** A, float** B, float** C)
{
    for (int i = 0; i < M; ++i)
    {
        for (int j = 0; j < Q; ++j)
        {
            C[i][j] = 0;
            for (int k = 0; k < N; ++k)
            {
                C[i][j] += A[i][k] * B[k][j];
            }
        }
    }
}

// Transpose an N*Q matrix into a Q*N matrix (makes the columns of B contiguous in memory)
void transposeMatrix(float** matrix, float** transposed) {
    for (int i = 0; i < N; i++) {
        for (int j = 0; j < Q; j++) {
            transposed[j][i] = matrix[i][j];
        }
    }
}

// NEON-optimized matrix multiplication; B is expected to be the transposed
// matrix (Q*N), so each dot product reads two contiguous rows
void matmul_optimized(float** A, float** B, float** C) {
    for (int i = 0; i < M; i++) {
        for (int j = 0; j < Q; j++) {
            float sum = 0.0f;
            for (int k = 0; k < N; k += 4)
            {
                if (k + 4 <= N)
                {
                    // Load 4 elements from row i of A and row j of transposed B
                    float32x4_t vecA = vld1q_f32(&A[i][k]);
                    float32x4_t vecB = vld1q_f32(&B[j][k]);
                    // Element-wise multiply, then reduce the 4 lanes into sum
                    float32x4_t vecC = vmulq_f32(vecA, vecB);
                    sum += vgetq_lane_f32(vecC, 0) + vgetq_lane_f32(vecC, 1) +
                           vgetq_lane_f32(vecC, 2) + vgetq_lane_f32(vecC, 3);
                }
                else
                {
                    // Handle the remaining (fewer than 4) elements with scalar code
                    for (int m = k; m < N; m++)
                    {
                        sum += A[i][m] * B[j][m];
                    }
                }
            }
            C[i][j] = sum;
        }
    }
}
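
/*
 * A possible further optimization (a sketch, not from the original code):
 * accumulate the partial products in a NEON register with vmlaq_f32 and
 * reduce the 4 lanes only once per dot product, instead of reducing on every
 * iteration. The summation order differs from the scalar version, so the
 * result may differ in the last bits. The function name matmul_optimized_acc
 * is hypothetical.
 */
void matmul_optimized_acc(float** A, float** B, float** C) {
    for (int i = 0; i < M; i++) {
        for (int j = 0; j < Q; j++) {
            float32x4_t acc = vdupq_n_f32(0.0f);   // 4 running partial sums
            int k = 0;
            for (; k + 4 <= N; k += 4) {
                float32x4_t vecA = vld1q_f32(&A[i][k]);
                float32x4_t vecB = vld1q_f32(&B[j][k]);
                acc = vmlaq_f32(acc, vecA, vecB);  // acc += vecA * vecB, lane by lane
            }
            // Horizontal reduction of the 4 accumulator lanes
            float32x2_t lo = vget_low_f32(acc);
            float32x2_t hi = vget_high_f32(acc);
            float32x2_t s  = vadd_f32(lo, hi);
            s = vpadd_f32(s, s);
            float sum = vget_lane_f32(s, 0);
            // Scalar tail for N not divisible by 4
            for (; k < N; ++k) {
                sum += A[i][k] * B[j][k];
            }
            C[i][j] = sum;
        }
    }
}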

int main()
{
    srand(time(NULL));

    // Dynamically allocate the three arrays and check that allocation succeeded
    float *A = (float *)malloc(SIZE * sizeof(float));
    float *B = (float *)malloc(SIZE * sizeof(float));
    float *C = (float *)malloc(SIZE * sizeof(float));
    if (A == NULL || B == NULL || C == NULL)
    {
        printf("Memory allocation failed\n");
        return 1;
    }
    // Random initialization: floats in [0, 0.99] with two-decimal granularity
    for (int i = 0; i < SIZE; i++)
    {
        A[i] = (float)(rand() % 100) / 100.0f;
        B[i] = (float)(rand() % 100) / 100.0f;
    }

    printf("With SIZE = %d\n", SIZE);
    clock_t start1 = clock();
    vector_add(A, B, C, SIZE);
    clock_t end1 = clock();
    // Compute and print the time taken by the baseline vector addition
    double basic_time_spent = double(end1 - start1) / CLOCKS_PER_SEC;
    printf("Baseline vector addition took %lf seconds\n", basic_time_spent);

    clock_t start2 = clock();
    vector_add_optimized(A, B, C, SIZE);
    clock_t end2 = clock();
    // Compute and print the time taken by the NEON vector addition
    double time_spent = double(end2 - start2) / CLOCKS_PER_SEC;
    printf("NEON-optimized vector addition took %lf seconds\n", time_spent);
    // Free the dynamically allocated arrays
    free(A); free(B); free(C);

    // Allocate the matrices
    float** A2 = (float**)malloc(M * sizeof(float*));
    float** B2 = (float**)malloc(N * sizeof(float*));
    float** C2 = (float**)malloc(M * sizeof(float*));
    for (int i = 0; i < M; ++i)
    {
        A2[i] = (float*)malloc(N * sizeof(float));
        C2[i] = (float*)malloc(Q * sizeof(float));
    }
    for (int i = 0; i < N; ++i)
    {
        B2[i] = (float*)malloc(Q * sizeof(float));
    }
    // Initialize the matrices with random values
    for (int i = 0; i < M; i++) {
        for (int j = 0; j < N; j++) {
            A2[i][j] = (float)(rand() % 100) / 100.0f;
        }
    }
    for (int i = 0; i < N; i++) {
        for (int j = 0; j < Q; j++) {
            B2[i][j] = (float)(rand() % 100) / 100.0f;
        }
    }

    printf("\n");
    printf("With matrix A2 of size %d*%d and matrix B2 of size %d*%d\n", M, N, N, Q);
    // Time the baseline multiplication
    clock_t start3 = clock();
    matmul(A2, B2, C2);
    clock_t end3 = clock();
    double multiply_time_spent = double(end3 - start3) / CLOCKS_PER_SEC;
    printf("Baseline matrix multiplication took %lf seconds\n", multiply_time_spent);
    // Time the optimized multiplication (the transpose of B2 is done outside the timed region)
    float** transposed = (float**)malloc(Q * sizeof(float*));
    for (int i = 0; i < Q; ++i)
    {
        transposed[i] = (float*)malloc(N * sizeof(float));
    }
    transposeMatrix(B2, transposed);
    clock_t start4 = clock();
    matmul_optimized(A2, transposed, C2);
    clock_t end4 = clock();
    double optimized_multiply_time_spent = double(end4 - start4) / CLOCKS_PER_SEC;
    printf("NEON-optimized matrix multiplication took %lf seconds\n", optimized_multiply_time_spent);

    // Free the dynamically allocated matrices
    for (int i = 0; i < M; ++i) {
        free(A2[i]); free(C2[i]);
    }
    for (int i = 0; i < N; ++i) {
        free(B2[i]);
    }
    for (int i = 0; i < Q; ++i) {
        free(transposed[i]);
    }
    free(A2); free(B2); free(C2); free(transposed);

    return 0;
}