// Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

#pragma once

#include <cstdlib>
#include <fstream>
#include <iostream>
#include <memory>
#include <string>
#include <vector>

#include <yaml-cpp/yaml.h>

#include <include/preprocess_op.h>
#include <include/utility.h>

namespace paddle_infer {
class Predictor;
}

namespace PaddleOCR {

class Classifier {
public:
  explicit Classifier(const std::string &model_dir, const bool &use_gpu,
                      const int &gpu_id, const int &gpu_mem,
                      const int &cpu_math_library_num_threads,
                      const bool &use_mkldnn, const double &cls_thresh,
                      const bool &use_tensorrt, const std::string &precision,
                      const int &cls_batch_num) noexcept {
    this->use_gpu_ = use_gpu;
    this->gpu_id_ = gpu_id;
    this->gpu_mem_ = gpu_mem;
    this->cpu_math_library_num_threads_ = cpu_math_library_num_threads;
    this->use_mkldnn_ = use_mkldnn;
    this->cls_thresh = cls_thresh;
    this->use_tensorrt_ = use_tensorrt;
    this->precision_ = precision;
    this->cls_batch_num_ = cls_batch_num;

    // Models exported with an inference.yml metadata file are not supported
    // by this classifier; reject them up front instead of failing later.
    std::string yaml_file_path = model_dir + "/inference.yml";
    std::ifstream yaml_file(yaml_file_path);
    if (yaml_file.is_open()) {
      std::string model_name;
      try {
        YAML::Node config = YAML::LoadFile(yaml_file_path);
        if (config["Global"] && config["Global"]["model_name"]) {
          model_name = config["Global"]["model_name"].as<std::string>();
        }
        if (!model_name.empty()) {
          std::cerr << "Error: " << model_name
                    << " is currently not supported." << std::endl;
          std::exit(EXIT_FAILURE);
        }
      } catch (const YAML::Exception &e) {
        std::cerr << "Failed to load YAML file: " << e.what() << std::endl;
      }
    }

    LoadModel(model_dir);
  }

  double cls_thresh = 0.9;

  // Load Paddle inference model
  void LoadModel(const std::string &model_dir) noexcept;

  // Run the text-direction classifier on a batch of cropped text images.
  void Run(const std::vector<cv::Mat> &img_list, std::vector<int> &cls_labels,
           std::vector<float> &cls_scores,
           std::vector<double> &times) noexcept;

private:
  std::shared_ptr<paddle_infer::Predictor> predictor_;

  bool use_gpu_ = false;
  int gpu_id_ = 0;
  int gpu_mem_ = 4000;
  int cpu_math_library_num_threads_ = 4;
  bool use_mkldnn_ = false;
  std::vector<float> mean_ = {0.5f, 0.5f, 0.5f};
  std::vector<float> scale_ = {1 / 0.5f, 1 / 0.5f, 1 / 0.5f};
  bool is_scale_ = true;
  bool use_tensorrt_ = false;
  std::string precision_ = "fp32";
  int cls_batch_num_ = 1;

  // pre-process
  ClsResizeImg resize_op_;
  Normalize normalize_op_;
  PermuteBatch permute_op_;

}; // class Classifier

} // namespace PaddleOCR
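
// ---------------------------------------------------------------------------
// Minimal usage sketch (comment only, not part of the API). The constructor
// argument values below are illustrative defaults, not a fixed contract, and
// the interpretation of the outputs assumes the usual two-class (0/180 degree)
// direction model:
//
//   PaddleOCR::Classifier cls("./inference/cls",  // model_dir
//                             false,              // use_gpu
//                             0,                  // gpu_id
//                             4000,               // gpu_mem (MB)
//                             10,                 // cpu_math_library_num_threads
//                             false,              // use_mkldnn
//                             0.9,                // cls_thresh
//                             false,              // use_tensorrt
//                             "fp32",             // precision
//                             1);                 // cls_batch_num
//
//   std::vector<cv::Mat> crops = ...;  // cropped text-line images
//   std::vector<int> labels;
//   std::vector<float> scores;
//   std::vector<double> times;
//   cls.Run(crops, labels, scores, times);
//   // A label of 1 with a score above cls.cls_thresh typically indicates the
//   // crop should be rotated 180 degrees before recognition.
// ---------------------------------------------------------------------------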