Partial emulation of ATLAS/BLAS gemm(), using caching for speedup. Matrix 'C' is assumed to have been set to the correct size (i.e. taking into account transposes).
#include <gemm.hpp>
Static Public Member Functions

template<typename eT >
static arma_hot void apply (Mat< eT > &C, const Mat< eT > &A, const Mat< eT > &B, const eT alpha = eT(1), const eT beta = eT(0))
Partial emulation of ATLAS/BLAS gemm(), using caching for speedup. Matrix 'C' is assumed to have been set to the correct size (i.e. taking into account transposes).
Definition at line 27 of file gemm.hpp.
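For context, the operation emulated here is the standard BLAS gemm update

    C = alpha * op(A) * op(B) + beta * C,   where op(X) is either X or X^T

with the template parameters do_trans_A and do_trans_B selecting whether op() transposes A and B, and use_alpha / use_beta selecting whether the alpha and beta scalings are applied at all.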
static arma_hot void gemm_emul_cache< do_trans_A, do_trans_B, use_alpha, use_beta >::apply ( Mat< eT > &        C,
                                                                                             const Mat< eT > &  A,
                                                                                             const Mat< eT > &  B,
                                                                                             const eT           alpha = eT(1),
                                                                                             const eT           beta = eT(0)
                                                                                           )   [inline, static]
Definition at line 37 of file gemm.hpp.
References Mat< eT >::at(), Mat< eT >::colptr(), Mat< eT >::n_cols, Mat< eT >::n_rows, and trans().
  {
  arma_extra_debug_sigprint();

  const u32 A_n_rows = A.n_rows;
  const u32 A_n_cols = A.n_cols;

  const u32 B_n_rows = B.n_rows;
  const u32 B_n_cols = B.n_cols;

  if( (do_trans_A == false) && (do_trans_B == false) )
    {
    arma_aligned podarray<eT> tmp(A_n_cols);
    eT* A_rowdata = tmp.memptr();

    for(u32 row_A=0; row_A < A_n_rows; ++row_A)
      {
      for(u32 col_A=0; col_A < A_n_cols; ++col_A)
        {
        A_rowdata[col_A] = A.at(row_A,col_A);
        }

      for(u32 col_B=0; col_B < B_n_cols; ++col_B)
        {
        const eT* B_coldata = B.colptr(col_B);

        eT acc = eT(0);
        for(u32 i=0; i < B_n_rows; ++i)
          {
          acc += A_rowdata[i] * B_coldata[i];
          }

        if     ( (use_alpha == false) && (use_beta == false) ) { C.at(row_A,col_B) = acc;                                }
        else if( (use_alpha == true ) && (use_beta == false) ) { C.at(row_A,col_B) = alpha * acc;                        }
        else if( (use_alpha == false) && (use_beta == true ) ) { C.at(row_A,col_B) = acc + beta*C.at(row_A,col_B);       }
        else if( (use_alpha == true ) && (use_beta == true ) ) { C.at(row_A,col_B) = alpha*acc + beta*C.at(row_A,col_B); }
        }
      }
    }
  else
  if( (do_trans_A == true) && (do_trans_B == false) )
    {
    for(u32 col_A=0; col_A < A_n_cols; ++col_A)
      {
      // col_A is interpreted as row_A when storing the results in matrix C

      const eT* A_coldata = A.colptr(col_A);

      for(u32 col_B=0; col_B < B_n_cols; ++col_B)
        {
        const eT* B_coldata = B.colptr(col_B);

        eT acc = eT(0);
        for(u32 i=0; i < B_n_rows; ++i)
          {
          acc += A_coldata[i] * B_coldata[i];
          }

        if     ( (use_alpha == false) && (use_beta == false) ) { C.at(col_A,col_B) = acc;                                }
        else if( (use_alpha == true ) && (use_beta == false) ) { C.at(col_A,col_B) = alpha * acc;                        }
        else if( (use_alpha == false) && (use_beta == true ) ) { C.at(col_A,col_B) = acc + beta*C.at(col_A,col_B);       }
        else if( (use_alpha == true ) && (use_beta == true ) ) { C.at(col_A,col_B) = alpha*acc + beta*C.at(col_A,col_B); }
        }
      }
    }
  else
  if( (do_trans_A == false) && (do_trans_B == true) )
    {
    Mat<eT> B_tmp = trans(B);

    gemm_emul_cache<false, false, use_alpha, use_beta>::apply(C, A, B_tmp, alpha, beta);
    }
  else
  if( (do_trans_A == true) && (do_trans_B == true) )
    {
    // mat B_tmp = trans(B);
    // dgemm_arma<true, false, use_alpha, use_beta>::apply(C, A, B_tmp, alpha, beta);

    // By using the trans(A)*trans(B) = trans(B*A) equivalency,
    // transpose operations are not needed

    arma_aligned podarray<eT> tmp(B.n_cols);
    eT* B_rowdata = tmp.memptr();

    for(u32 row_B=0; row_B < B_n_rows; ++row_B)
      {
      for(u32 col_B=0; col_B < B_n_cols; ++col_B)
        {
        B_rowdata[col_B] = B.at(row_B,col_B);
        }

      for(u32 col_A=0; col_A < A_n_cols; ++col_A)
        {
        const eT* A_coldata = A.colptr(col_A);

        eT acc = eT(0);
        for(u32 i=0; i < A_n_rows; ++i)
          {
          acc += B_rowdata[i] * A_coldata[i];
          }

        if     ( (use_alpha == false) && (use_beta == false) ) { C.at(col_A,row_B) = acc;                                }
        else if( (use_alpha == true ) && (use_beta == false) ) { C.at(col_A,row_B) = alpha * acc;                        }
        else if( (use_alpha == false) && (use_beta == true ) ) { C.at(col_A,row_B) = acc + beta*C.at(col_A,row_B);       }
        else if( (use_alpha == true ) && (use_beta == true ) ) { C.at(col_A,row_B) = alpha*acc + beta*C.at(col_A,row_B); }
        }
      }
    }
  }
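As an illustration of the interface documented above, the following is a minimal usage sketch (hypothetical, not taken from the source): it assumes Armadillo's internal gemm.hpp machinery is in scope and, as required, that the caller has already sized C to match the product. In normal use this class is selected internally by Armadillo's matrix multiplication operators rather than called directly.

    // minimal usage sketch (hypothetical; matrix sizes chosen for illustration)
    Mat<double> A(4, 3);   A.fill(1.0);
    Mat<double> B(3, 5);   B.fill(2.0);

    Mat<double> C(4, 5);   // C must be pre-sized by the caller: (4x3)*(3x5) -> 4x5

    // no transposes, no alpha/beta scaling, i.e. C = A*B
    gemm_emul_cache<false, false, false, false>::apply(C, A, B);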