29 #include "tiny_dnn/core/params/fully_params.h"
// NNPACK-backed forward pass of a fully-connected layer: out = W * in (+ bias).
//
// NOTE(review): this chunk is a lossy extraction. The stray integers fused at
// the start of many lines appear to be original source line numbers, and the
// jumps in that numbering (42->47, 51->59, 64->67, 72->76) mean several lines
// are missing from view: the parameters `W`, `bias` and `out_data` referenced
// below are not in the visible signature, the argument list of
// nnp_fully_connected_inference() is cut off, `status` is never assigned in
// view, and the #ifdef CNN_USE_NNPACK / #else guard plus all closing braces
// (the final throw at the bottom is presumably the #else branch) are absent.
// Do not treat this fragment as compilable; recover the full function before
// editing code.
35 fully_connected_op_nnpack(
// Input activations; only the first sample, in_data[0], is read below.
const tensor_t& in_data,
// Layer geometry/options (out_size_, has_bias_ are read below).
39 const fully_params& params,
// Passed to for_i to parallelize the bias addition loop.
40 const bool layer_parallelize) {
// Raw float pointers handed to the NNPACK C API.
// NOTE(review): `W`, `out_data` come from parameters missing from this view.
42 const float* kernel_ptr = W.data();
43 const float* input_ptr = in_data[0].data();
44 float* output_ptr =out_data[0].data();
// Single-threaded pool; the "mkl" in the name looks like a copy-paste
// leftover — this is an NNPACK pthreadpool, not MKL. TODO confirm intent.
47 const size_t num_mkl_threads = 1;
48 pthreadpool_t threadpool = pthreadpool_create(num_mkl_threads);
// Argument list missing from this extraction; presumably takes the three
// pointers above plus sizes from `params`, returning `status`.
51 nnp_fully_connected_inference(
59 if (status != nnp_status_success) {
// BUG(review): copy-paste error — the failing call is
// nnp_fully_connected_inference, not nnp_max_pooling_output. The message
// should be corrected once the full function is available to edit.
// NOTE(review): throwing here also skips pthreadpool_destroy below — the
// pool leaks on the error path; verify against the full source.
60 throw nn_error(
"Could not succeed with nnp_max_pooling_output");
64 pthreadpool_destroy(threadpool);
// Re-fetch the output pointer and add the bias term per output unit.
67 output_ptr =out_data[0].data();
68 if (params.has_bias_) {
69 for_i(layer_parallelize, params.out_size_, [&](
int i) {
72 output_ptr[i] += bias[i];
// Presumably the #else branch of a CNN_USE_NNPACK guard (guard not visible).
76 throw nn_error(
"TinyDNN has not been compiled with NNPACK support.");