Move stackvm

pull/605/head
sunnycase 2022-04-26 19:55:31 +08:00
parent 082213e6ac
commit cd1c64824c
657 changed files with 7585 additions and 49107 deletions

View File

@ -1,8 +1,7 @@
---
BasedOnStyle: WebKit
BreakBeforeBraces: Allman
ConstructorInitializerAllOnOneLineOrOnePerLine: 'true'
BasedOnStyle: LLVM
ConstructorInitializerAllOnOneLineOrOnePerLine: true
UseTab: Never
PointerAlignment: Right
...
AllowShortLambdasOnASingleLine: All
IndentWidth: 4

View File

@ -69,8 +69,8 @@ include(${CMAKE_BINARY_DIR}/conan_paths.cmake)
include(cmake/dependencies.cmake)
if (BUILDING_RUNTIME)
set(NNCASE_MAIN_INCLUDE_DIR ${CMAKE_CURRENT_LIST_DIR}/include)
set(NNCASE_INCLUDE_DIR ${CMAKE_CURRENT_BINARY_DIR}/include)
set(NNCASE_MAIN_INCLUDE_DIR ${CMAKE_CURRENT_LIST_DIR}/src/Native/include)
set(NNCASE_INCLUDE_DIR ${CMAKE_CURRENT_BINARY_DIR}/src/Native/include)
set(THIRD_PARTY ${CMAKE_CURRENT_LIST_DIR}/third_party)
set_property(GLOBAL PROPERTY POSITION_INDEPENDENT_CODE ON)
@ -91,10 +91,10 @@ if (BUILDING_RUNTIME)
include_directories(${NNCASE_MAIN_INCLUDE_DIR})
include_directories(${NNCASE_INCLUDE_DIR})
add_subdirectory(include/nncase)
add_subdirectory(src/kernels)
add_subdirectory(src/runtime)
add_subdirectory(src/functional)
add_subdirectory(src/Native/include/nncase)
add_subdirectory(src/Native/src/kernels)
add_subdirectory(src/Native/src/runtime)
add_subdirectory(src/Native/src/functional)
if(BUILD_BENCHMARK)
add_subdirectory(benchmark)
endif()
@ -112,7 +112,7 @@ if (BUILDING_RUNTIME)
PATTERN "CMakeFiles" EXCLUDE
)
install(DIRECTORY include/nncase/kernels
install(DIRECTORY src/Native/include/nncase/kernels
DESTINATION include/nncase
COMPONENT nncase-headers
FILES_MATCHING
@ -124,7 +124,7 @@ if (BUILDING_RUNTIME)
PATTERN "LICENSE.TXT"
)
install(DIRECTORY include/nncase/runtime
install(DIRECTORY src/Native/include/nncase/runtime
DESTINATION include/nncase
COMPONENT nncase-headers
FILES_MATCHING
@ -138,8 +138,8 @@ if (BUILDING_RUNTIME)
else()
set(CMAKE_SKIP_RPATH OFF)
set(NNCASE_MAIN_INCLUDE_DIR ${CMAKE_CURRENT_LIST_DIR}/include)
set(NNCASE_INCLUDE_DIR ${CMAKE_CURRENT_BINARY_DIR}/include)
set(NNCASE_MAIN_INCLUDE_DIR ${CMAKE_CURRENT_LIST_DIR}/src/Native/include)
set(NNCASE_INCLUDE_DIR ${CMAKE_CURRENT_BINARY_DIR}/src/Native/include)
set(THIRD_PARTY ${CMAKE_CURRENT_LIST_DIR}/third_party)
set_property(GLOBAL PROPERTY POSITION_INDEPENDENT_CODE ON)
if (APPLE)
@ -170,21 +170,8 @@ else()
include_directories(${NNCASE_MAIN_INCLUDE_DIR})
include_directories(${NNCASE_INCLUDE_DIR})
add_subdirectory(include/nncase)
add_subdirectory(src/nncase)
add_subdirectory(src/data)
add_subdirectory(src/ir)
add_subdirectory(src/importer)
add_subdirectory(src/schedule)
add_subdirectory(src/evaluator)
add_subdirectory(src/functional)
add_subdirectory(src/transforms)
add_subdirectory(src/codegen)
add_subdirectory(src/kernels)
add_subdirectory(src/runtime)
add_subdirectory(src/targets)
add_subdirectory(src/plugin)
add_subdirectory(src/cli)
add_subdirectory(src/Native/include/nncase)
add_subdirectory(src/Native/src)
if(BUILD_TESTING)
add_subdirectory(tests/kernels)
@ -192,18 +179,18 @@ else()
# Python binding
if(BUILD_PYTHON_BINDING)
add_subdirectory(python/nncase/native)
add_subdirectory(python/nncaseruntime/native)
endif()
# Csharp binding
if(BUILD_CSHARP_BINDING)
add_subdirectory(csharp)
#add_subdirectory(csharp)
endif()
# Thrid party
add_subdirectory(third_party/onnx)
# add_subdirectory(third_party/onnx)
install(DIRECTORY include/nncase
install(DIRECTORY src/Native/include/nncase
DESTINATION include
COMPONENT nncase-headers
FILES_MATCHING
@ -236,11 +223,11 @@ else()
)
# Targets
add_subdirectory(targets/cpu)
add_subdirectory(targets/k210)
add_subdirectory(targets/vulkan)
#add_subdirectory(targets/cpu)
#add_subdirectory(targets/k210)
#add_subdirectory(targets/vulkan)
endif()
# Modules
add_subdirectory(modules/k210)
add_subdirectory(modules/vulkan)
#add_subdirectory(modules/k210)
#add_subdirectory(modules/vulkan)

View File

@ -9,18 +9,13 @@ if ((NOT BUILDING_RUNTIME) OR ENABLE_VULKAN_RUNTIME)
endif ()
if (NOT BUILDING_RUNTIME)
find_package(Flatbuffers REQUIRED)
find_package(libzip REQUIRED)
if(NOT CONAN_EXPORTED)
set(FLATBUFFERS_FLATC_EXECUTABLE ${flatbuffers_LIB_DIRS}/../bin/flatc)
set(LIBZIP_ZIPTOOL_EXECUTABLE ${libzip_zip_LIB_DIRS}/../bin/ziptool)
endif()
find_package(fmt REQUIRED)
find_package(lyra REQUIRED)
find_package(magic_enum REQUIRED)
find_package(nlohmann_json REQUIRED)
find_package(OpenCV REQUIRED)
find_package(Protobuf REQUIRED)
find_package(xtensor REQUIRED)
find_package(spdlog REQUIRED)
find_package(libzippp REQUIRED)

View File

@ -51,13 +51,9 @@ class nncaseConan(ConanFile):
self.requires('pybind11/2.6.1')
if not self.options.runtime:
self.requires('flatbuffers/2.0.0')
self.requires('fmt/7.1.3')
self.requires('lyra/1.5.0')
self.requires('magic_enum/0.7.0')
self.requires('nlohmann_json/3.9.1')
self.requires('opencv/4.5.1')
self.requires('protobuf/3.17.1')
self.requires('xtensor/0.21.5')
self.requires('spdlog/1.8.2')
self.requires('libzippp/4.0')
@ -82,19 +78,11 @@ class nncaseConan(ConanFile):
self.options.halide = False
if not self.options.runtime:
self.options["opencv"].contrib = False
self.options["opencv"].with_webp = False
self.options["opencv"].with_openexr = False
self.options["opencv"].with_eigen = False
self.options["opencv"].with_quirc = False
self.options["opencv"].dnn = False
self.options["flatbuffers"].options_from_context = False
self.options["xtensor"].xsimd = False
self.options["libzip"].with_bzip2 = False
self.options["libzip"].with_zstd = False
self.options["libzip"].crypto = False
if self.settings.os == 'Linux':
self.options["opencv"].with_gtk = False
self.options["spirv-tools"].link_libcpp = False
self.options["shaderc"].link_libcpp = False

View File

@ -3,7 +3,7 @@ cmake_minimum_required (VERSION 3.18)
set(SRCS interpreter.cpp)
add_library(nncase_csharp SHARED ${SRCS})
target_link_libraries(nncase_csharp PRIVATE nncase)
target_link_libraries(nncase_csharp PRIVATE simulator)
target_include_directories(nncase_csharp PUBLIC ${CMAKE_CURRENT_SOURCE_DIR})
install(TARGETS nncase_csharp
COMPONENT nncase-runtime

View File

@ -1,86 +0,0 @@
/* Copyright 2019-2021 Canaan Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#pragma once
#include <cassert>
#include <iostream>
#include <span>
namespace nncase
{
class binary_writer
{
public:
binary_writer(std::ostream &stream)
: stream_(stream), relative_offset_(0)
{
}
template <class T>
void write(T &&value)
{
stream_.write(reinterpret_cast<const char *>(&value), sizeof(value));
relative_offset_ += sizeof(value);
}
template <class T>
void write_array(std::span<T const> value)
{
stream_.write(reinterpret_cast<const char *>(value.data()), value.size_bytes());
relative_offset_ += value.size_bytes();
}
std::streampos position() const
{
assert(stream_);
return stream_.tellp();
}
void position(std::streampos pos)
{
auto old_pos = position();
stream_.seekp(pos);
assert(stream_);
relative_offset_ += pos - old_pos;
}
void skip(size_t len)
{
char zero = 0;
for (size_t i = 0; i < len; i++)
stream_.write(&zero, 1);
relative_offset_ += len;
}
std::streamoff align_position(size_t alignment)
{
auto pos = position();
auto rem = pos % alignment;
if (rem != 0)
{
auto off = std::streamoff(alignment - rem);
skip(off);
return off;
}
return 0;
}
int64_t relative_offset() const noexcept { return relative_offset_; }
private:
std::ostream &stream_;
int64_t relative_offset_;
};
}

View File

@ -1,62 +0,0 @@
/* Copyright 2019-2021 Canaan Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#pragma once
#include <iostream>
#include <nncase/codegen/binary_writer.h>
#include <nncase/ir/node.h>
#include <nncase/runtime/datatypes.h>
namespace nncase::codegen
{
class section_writer;
struct symbol_ref
{
std::string name;
std::streampos streampos;
size_t bitoffset;
size_t length;
};
struct symbol
{
std::string name;
std::streampos streampos;
};
class section_writer : public binary_writer
{
public:
using binary_writer::binary_writer;
NNCASE_API std::span<const symbol> symbols() const noexcept { return symbols_; }
NNCASE_API std::span<const symbol_ref> symbol_refs() const noexcept { return symbol_refs_; }
NNCASE_API void add_symbol_ref(size_t offset, size_t length, std::string_view name)
{
symbol_refs_.emplace_back(symbol_ref { std::string(name), position(), offset, length });
}
NNCASE_API void add_symbol(std::string_view name)
{
auto pos = position();
symbols_.emplace_back(symbol { std::string(name), pos });
}
private:
std::vector<symbol_ref> symbol_refs_;
std::vector<symbol> symbols_;
};
}

View File

@ -1,43 +0,0 @@
/* Copyright 2019-2021 Canaan Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#pragma once
#include "module_builder.h"
namespace nncase::codegen
{
struct build_model_result
{
size_t model_size;
};
class NNCASE_API model_builder
{
public:
model_builder(target &target, const schedule::model_schedule_result &sched);
model_builder(model_builder &) = delete;
model_builder(model_builder &&) = delete;
void config_dump(const std::filesystem::path &dump_dir, bool dump_asm);
build_model_result build(std::ostream &output);
size_t max_usage(memory_location_t location) const;
private:
target &target_;
const schedule::model_schedule_result &sched_;
std::filesystem::path dump_dir_;
bool dump_asm_;
};
}

View File

@ -1,134 +0,0 @@
/* Copyright 2019-2021 Canaan Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#pragma once
#include "codegen_types.h"
#include <filesystem>
#include <functional>
#include <iostream>
#include <map>
#include <nncase/ir/graph.h>
#include <nncase/runtime/datatypes.h>
#include <nncase/schedule/scheduler.h>
#include <sstream>
#include <unordered_set>
namespace nncase::codegen
{
class NNCASE_API section_decompiler
{
public:
virtual ~section_decompiler() = default;
virtual void decompile(std::span<const uint8_t> input, std::span<const symbol> symbols, std::ostream &output) = 0;
};
struct module_builder_params
{
const schedule::model_schedule_result &model_sched;
const schedule::module_schedule_result &module_sched;
};
struct function_call_id
{
size_t module_id;
size_t function_id;
};
class NNCASE_API module_builder
{
private:
struct section
{
std::stringstream stream;
section_writer writer;
std::vector<uint8_t> body;
section(std::in_place_t = std::in_place)
: writer(stream)
{
}
};
struct rdata_merge_info
{
size_t start;
size_t size;
rdata_merge_info(std::in_place_t = std::in_place)
{
}
};
public:
module_builder(uint32_t alignment, std::string_view module_name, const module_builder_params &params);
module_builder(module_builder &) = delete;
module_builder(module_builder &&) = delete;
virtual ~module_builder() = default;
uint32_t alignment() const noexcept { return alignment_; }
void config_dump(const std::filesystem::path &dump_dir, bool dump_asm);
void build(binary_writer &writer);
const schedule::buffer_allocation &allocation(ir::output_connector &conn) const;
const schedule::buffer_allocation &allocation(ir::input_connector &conn) const { return allocation(*conn.connection()); }
size_t max_usage(memory_location_t location) const;
section_writer &writer(std::string_view section_name);
virtual module_type_t module_type() const noexcept = 0;
virtual uint32_t module_version() const noexcept = 0;
virtual std::unique_ptr<section_decompiler> create_decompiler(std::string_view section_name);
protected:
section *find_section(std::string_view section_name);
void merge_to_rdata_section(std::string_view from);
function_call_id function_id(ir::graph *graph);
void set_current_entry_point(std::streampos pos);
void set_current_function_text_end(std::streampos pos);
virtual void begin_emit_module();
virtual void begin_emit_function(const schedule::function_schedule_result &function);
virtual void end_emit_function(const schedule::function_schedule_result &function);
virtual void emit(ir::node &node);
virtual void end_emit_module();
protected:
std::filesystem::path dump_dir_;
bool dump_asm_;
private:
std::vector<nncase::ir::node *> generate_current_runtime_ops();
void compile();
void decompile(std::string_view stage, std::string_view section_name, std::span<const uint8_t> input, std::span<const symbol> symbols);
void write_constants();
void generate_merge_info();
void generate_symbol_offsets();
void write_symbol_refs();
void link();
void write_binary(binary_writer &writer);
void write_function_binary(binary_writer &writer, const schedule::function_schedule_result &function_sched);
private:
uint32_t alignment_;
std::string module_name_;
const module_builder_params &params_;
std::map<std::string, section, std::less<>> section_writer_;
std::map<std::string, rdata_merge_info, std::less<>> rdata_section_merges_;
std::unordered_map<std::string_view, std::pair<size_t, std::string_view>> symbol_offsets_;
const schedule::function_schedule_result *current_function_;
std::unordered_map<const schedule::function_schedule_result *, std::streampos> entry_points_;
std::unordered_map<const schedule::function_schedule_result *, std::streampos> function_text_end_;
};
}

View File

@ -1,76 +0,0 @@
/* Copyright 2019-2021 Canaan Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#pragma once
#include "binary_writer.h"
#include <nncase/runtime/compiler_defs.h>
#include <nncase/runtime/nnil.h>
namespace nncase::codegen
{
class NNCASE_API nnil_builder
{
public:
nnil_builder(binary_writer &writer)
: writer_(writer) { }
void emit_nop() { emit_opcode(runtime::nnil_nop); }
void emit_dup() { emit_opcode(runtime::nnil_dup); }
void emit_pop() { emit_opcode(runtime::nnil_pop); }
void emit_lda_0() { emit_opcode(runtime::nnil_lda_0); }
void emit_ldc_r4_0() { emit_opcode(runtime::nnil_ldc_r4_0); }
void emit_ldc_r4_1() { emit_opcode(runtime::nnil_ldc_r4_1); }
void emit_ldc_r4(float value)
{
emit_opcode(runtime::nnil_ldc_r4);
writer_.write(runtime::nnil_ldc_r4_t { value });
}
void emit_abs() { emit_opcode(runtime::nnil_abs); }
void emit_acos() { emit_opcode(runtime::nnil_acos); }
void emit_asin() { emit_opcode(runtime::nnil_asin); }
void emit_ceil() { emit_opcode(runtime::nnil_ceil); }
void emit_cos() { emit_opcode(runtime::nnil_cos); }
void emit_exp() { emit_opcode(runtime::nnil_exp); }
void emit_floor() { emit_opcode(runtime::nnil_floor); }
void emit_log() { emit_opcode(runtime::nnil_log); }
void emit_neg() { emit_opcode(runtime::nnil_neg); }
void emit_rsqrt() { emit_opcode(runtime::nnil_rsqrt); }
void emit_sign() { emit_opcode(runtime::nnil_sign); }
void emit_sin() { emit_opcode(runtime::nnil_sin); }
void emit_sqrt() { emit_opcode(runtime::nnil_sqrt); }
void emit_square() { emit_opcode(runtime::nnil_square); }
void emit_tanh() { emit_opcode(runtime::nnil_tanh); }
void emit_bitwise_not() { emit_opcode(runtime::nnil_bitwise_not); }
void emit_logical_not() { emit_opcode(runtime::nnil_logical_not); }
void emit_round() { emit_opcode(runtime::nnil_round); }
void emit_add() { emit_opcode(runtime::nnil_add); }
void emit_sub() { emit_opcode(runtime::nnil_sub); }
void emit_mul() { emit_opcode(runtime::nnil_mul); }
void emit_div() { emit_opcode(runtime::nnil_div); }
void emit_min() { emit_opcode(runtime::nnil_min); }
void emit_max() { emit_opcode(runtime::nnil_max); }
void emit_pow() { emit_opcode(runtime::nnil_pow); }
void emit_clamp() { emit_opcode(runtime::nnil_clamp); }
void emit_ret() { emit_opcode(runtime::nnil_ret); }
private:
void emit_opcode(runtime::nnil_opcode_t opcode) { writer_.write((uint8_t)opcode); }
private:
binary_writer &writer_;
};
}

View File

@ -1,21 +0,0 @@
/* Copyright 2019-2021 Canaan Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#pragma once
#include "../module_builder.h"
namespace nncase::codegen
{
NNCASE_API std::unique_ptr<module_builder> create_stackvm_module_builder(std::string_view module_name, const module_builder_params &params);
}

File diff suppressed because it is too large Load Diff

View File

@ -1,120 +0,0 @@
/* Copyright 2019-2021 Canaan Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#pragma once
#include "plugin_loader.h"
#include <cstddef>
#include <cstdint>
#include <filesystem>
#include <iostream>
#include <memory>
#include <span>
#include <unordered_map>
#include <vector>
namespace nncase::ir
{
class graph;
}
namespace nncase
{
class target;
struct compile_options
{
bool dump_ir;
bool dump_asm;
bool dump_quant_error;
bool dump_import_op_range;
bool is_fpga;
bool use_dataset_as_input_stat = false;
bool benchmark_only = false;
bool preprocess = false;
bool swapRB = false;
std::string target;
std::filesystem::path dump_dir;
std::string input_type = "default";
std::string output_type = "float32";
std::string quant_type = "uint8";
std::vector<float> mean { 0.f, 0.f, 0.f };
std::vector<float> std { 1.f, 1.f, 1.f };
std::vector<float> input_range { 0.f, 1.f };
float letterbox_value = 0.f;
std::vector<int32_t> input_shape {};
std::string w_quant_type = "uint8";
bool use_mse_quant_w = false;
std::string input_layout = "NCHW";
std::string output_layout = "NCHW";
};
struct import_options
{
std::span<const std::string> output_arrays;
};
struct ptq_options_base
{
std::string calibrate_method = "no_clip";
std::function<void(size_t cnt, size_t total)> progress;
};
struct ptq_dataset_options : ptq_options_base
{
std::filesystem::path dataset;
std::string dataset_format;
};
struct ptq_tensor_options : ptq_options_base
{
std::vector<uint8_t> tensor_data;
size_t samples_count;
};
struct dump_range_options_base
{
std::string calibrate_method = "no_clip";
std::function<void(size_t cnt, size_t total)> progress;
};
struct dump_range_dataset_options : dump_range_options_base
{
std::filesystem::path dataset;
std::string dataset_format;
};
struct dump_range_tensor_options : dump_range_options_base
{
std::vector<uint8_t> tensor_data;
size_t samples_count;
};
class NNCASE_API compiler
{
public:
static std::unique_ptr<compiler> create(const compile_options &options);
virtual ~compiler();
virtual void import_tflite(std::span<const uint8_t> model, const import_options &options) = 0;
virtual void import_onnx(std::span<const uint8_t> model, const import_options &options) = 0;
virtual void import_caffe(std::span<const uint8_t> model, std::span<const uint8_t> prototxt) = 0;
virtual void use_ptq(ptq_dataset_options options) = 0;
virtual void use_ptq(ptq_tensor_options options) = 0;
virtual void dump_range_options(dump_range_dataset_options options) = 0;
virtual void dump_range_options(dump_range_tensor_options options) = 0;
virtual ir::graph &graph(uint32_t stage) = 0;
virtual nncase::target &target() = 0;
virtual void compile() = 0;
virtual void gencode(std::ostream &output) = 0;
};
}

View File

@ -1,166 +0,0 @@
/* Copyright 2019-2021 Canaan Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#pragma once
#include <filesystem>
#include <functional>
#include <nncase/io_utils.h>
#include <nncase/runtime/datatypes.h>
#include <nncase/runtime/host_runtime_tensor.h>
#include <nncase/runtime/runtime_tensor.h>
#include <optional>
#include <span>
#include <string>
#include <string_view>
#include <xtensor/xarray.hpp>
#include <xtensor/xshape.hpp>
namespace nncase::data
{
struct data_batch
{
std::vector<uint8_t> tensor;
std::span<const std::filesystem::path> filenames;
};
class NNCASE_API dataset
{
public:
class iterator
{
public:
using iterator_category = std::forward_iterator_tag;
using value_type = data_batch;
using pointer = data_batch *;
using reference = data_batch &;
bool operator==(const iterator &rhs) const noexcept { return from_ == rhs.from_; }
bool operator!=(const iterator &rhs) const noexcept { return from_ != rhs.from_; }
iterator &operator++()
{
value_ = dataset_->batch(from_ + dataset_->batch_size());
if (value_)
from_ += dataset_->batch_size();
else
*this = dataset_->end();
return *this;
}
iterator &operator=(const iterator &rhs)
{
value_ = rhs.value_;
from_ = rhs.from_;
return *this;
}
data_batch &operator*()
{
if (value_)
return *value_;
throw std::runtime_error("Invalid datast iterator");
}
data_batch *operator->()
{
if (value_)
return &value_.value();
throw std::runtime_error("Invalid datast iterator");
}
private:
friend class dataset;
iterator(dataset &dataset, size_t from)
: dataset_(&dataset), from_(from), value_(dataset.batch(from))
{
}
dataset *dataset_;
size_t from_;
std::optional<data_batch> value_;
};
dataset(const std::filesystem::path &path, std::function<bool(const std::filesystem::path &)> file_filter, xt::dynamic_shape<size_t> input_shape, std::string input_layout);
virtual ~dataset() = default;
iterator begin()
{
return { *this, 0 };
}
iterator end()
{
return { *this, filenames_.size() };
}
size_t batch_size() const noexcept { return 1; }
size_t total_size() const noexcept { return filenames_.size(); }
protected:
virtual void process(const std::vector<uint8_t> &src, float *dest, const xt::dynamic_shape<size_t> &shape, std::string layout) = 0;
virtual void process(const std::vector<uint8_t> &src, uint8_t *dest, const xt::dynamic_shape<size_t> &shape, std::string layout) = 0;
virtual void process(const std::vector<uint8_t> &src, int8_t *dest, const xt::dynamic_shape<size_t> &shape, std::string layout) = 0;
virtual bool do_normalize() const noexcept { return true; }
private:
std::optional<data_batch> batch(size_t from)
{
if (from + batch_size() <= filenames_.size())
{
size_t start = from;
auto file = read_file(filenames_[from++]);
// NOTE not support process
// process(file, batch.data(), batch.shape(), input_layout_);
std::span<const std::filesystem::path> filenames(filenames_.data() + start, filenames_.data() + from);
return data_batch { file, filenames };
}
return {};
}
private:
std::vector<std::filesystem::path> filenames_;
xt::dynamic_shape<size_t> input_shape_;
std::string input_layout_;
};
class NNCASE_API image_dataset : public dataset
{
public:
image_dataset(const std::filesystem::path &path, xt::dynamic_shape<size_t> input_shape, std::string input_layout);
protected:
void process(const std::vector<uint8_t> &src, float *dest, const xt::dynamic_shape<size_t> &shape, std::string layout) override;
void process(const std::vector<uint8_t> &src, uint8_t *dest, const xt::dynamic_shape<size_t> &shape, std::string layout) override;
void process(const std::vector<uint8_t> &src, int8_t *dest, const xt::dynamic_shape<size_t> &shape, std::string layout) override;
};
class NNCASE_API raw_dataset : public dataset
{
public:
raw_dataset(const std::filesystem::path &path, xt::dynamic_shape<size_t> input_shape);
protected:
void process(const std::vector<uint8_t> &src, float *dest, const xt::dynamic_shape<size_t> &shape, std::string layout) override;
void process(const std::vector<uint8_t> &src, uint8_t *dest, const xt::dynamic_shape<size_t> &shape, std::string layout) override;
void process(const std::vector<uint8_t> &src, int8_t *dest, const xt::dynamic_shape<size_t> &shape, std::string layout) override;
bool do_normalize() const noexcept override { return false; }
};
}

View File

@ -1,32 +0,0 @@
/* Copyright 2019-2021 Canaan Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#pragma once
#include <filesystem>
#include <nncase/ir/graph.h>
#include <span>
#include <unordered_map>
#include <vector>
namespace nncase::importer
{
struct import_options
{
std::span<const std::string> output_arrays;
};
void import_tflite(ir::graph &graph, std::span<const uint8_t> model, const import_options &options, std::string &real_inlayout, std::string &real_outlayout);
void import_onnx(ir::graph &graph, std::span<const uint8_t> model, const import_options &options, std::string &real_inlayout, std::string &real_outlayout);
void import_caffe(ir::graph &graph, std::span<const uint8_t> model, std::span<const uint8_t> prototxt, std::string &real_inlayout, std::string &real_outlayout);
}

View File

@ -1,103 +0,0 @@
/* Copyright 2019-2021 Canaan Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#pragma once
#include <nncase/ir/debug.h>
#include <nncase/ir/ir_types.h>
#include <nncase/ir/ops/convert.h>
#include <nncase/runtime/debug.h>
namespace nncase::importer
{
using ir::shape_t;
template <class Id>
void link_input_tensor_by_id(std::unordered_map<ir::input_connector *, Id> &input_tensors, ir::input_connector *conn, const Id &in_id,
datatype_t in_type, const shape_t &in_shape, const std::string &tensor_name)
{
input_tensors.emplace(conn, in_id);
if (in_type != conn->type())
{
throw std::runtime_error(
"Type must be same: \n"
+ conn->owner().name() + "[" + std::string(conn->owner().runtime_opcode().name) + "] != "
+ tensor_name + "[input]"
+ "\n has type mismatch: \n["
+ std::string(datatype_names(conn->type())) + "] != ["
+ std::string(datatype_names(in_type)) + "]");
}
if (in_shape != conn->shape())
{
throw std::runtime_error(
"Shape must be same: \n"
+ conn->owner().name() + "[" + std::string(conn->owner().runtime_opcode().name) + "] != "
+ tensor_name + "[output]"
+ "\n has shape mismatch: \n"
+ ir::to_string(conn->shape()) + " != "
+ ir::to_string(in_shape) + "");
}
}
template <class Id>
void link_output_tensor_by_id(std::unordered_map<Id, ir::output_connector *> &output_tensors, const Id &out_id, ir::output_connector *conn,
datatype_t out_type, const shape_t &out_shape, const std::string &tensor_name)
{
output_tensors.emplace(out_id, conn);
if (out_type != conn->type())
{
throw std::runtime_error(
"Type must be same: \n"
+ conn->owner().name() + "[" + std::string(conn->owner().runtime_opcode().name) + "] != "
+ tensor_name + "[output]"
+ "\n has type mismatch: \n["
+ std::string(datatype_names(conn->type())) + "] != ["
+ std::string(datatype_names(out_type)) + "]");
}
if (out_shape != conn->shape())
{
throw std::runtime_error(
"Shape must be same: \n"
+ conn->owner().name() + "[" + std::string(conn->owner().runtime_opcode().name) + "] != "
+ tensor_name + "[output]"
+ "\n has shape mismatch: \n"
+ ir::to_string(conn->shape()) + " != "
+ ir::to_string(out_shape) + "");
}
}
template <class T = int32_t>
T get_positive(int32_t v, size_t length)
{
return static_cast<T>(v < 0 ? v + length : v);
}
// place new node before exist node
// Node output -> NextNode input
template <class Node, class... Args>
Node *add_prev_node(ir::graph &graph, ir::input_connector &next_input, Args &&...args)
{
auto node = graph.emplace<Node>(std::forward<Args>(args)...);
next_input.connect(node->output());
return node;
}
template <class Node, class... Args>
Node *add_next_node(ir::graph &graph, ir::output_connector &prev_output, Args &&...args)
{
auto node = graph.emplace<Node>(std::forward<Args>(args)...);
node->input().connect(prev_output);
return node;
}
}

View File

@ -1,91 +0,0 @@
/* Copyright 2019-2021 Canaan Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#pragma once
#include "ir_types.h"
#include <optional>
#include <span>
#include <string>
#include <vector>
#include <xtensor/xshape.hpp>
namespace nncase::ir
{
class node;
class output_connector;
/** Common state shared by input and output connectors: the owning node,
 *  a display name, the element datatype and the tensor shape. */
class NNCASE_API base_connector
{
public:
    // Perfect-forwards name/shape so rvalue arguments are moved, not copied.
    template <class TName, class TShape>
    base_connector(node &owner, TName &&name, datatype_t type, TShape &&shape)
        : owner_(owner), name_(std::forward<TName>(name)), type_(type), shape_(std::forward<TShape>(shape))
    {
    }

    // Identity type: referenced by pointer elsewhere, so copying is disabled.
    base_connector(base_connector &) = delete;
    base_connector(base_connector &&) = default;

    // Node this connector belongs to.
    node &owner() const noexcept { return owner_; }
    const std::string &name() const noexcept { return name_; }
    datatype_t type() const noexcept { return type_; }
    const shape_t &shape() const noexcept { return shape_; }

    // Flag bits on this connector (see connector_attributes in ir_types).
    connector_attributes attributes() const noexcept { return attributes_; }
    void attributes(connector_attributes value) noexcept { attributes_ = value; }

private:
    node &owner_;
    std::string name_;
    datatype_t type_;
    shape_t shape_;
    connector_attributes attributes_ = cnctr_attr_none;
};
/** Consumer-side endpoint of an edge: holds at most one producing
 *  output_connector at a time. */
class NNCASE_API input_connector : public base_connector
{
public:
    using base_connector::base_connector;

    // Producer currently feeding this input, or nullptr when unconnected.
    output_connector *connection() const noexcept { return connection_; }

    // Attaches this input to a producer (defined out of line).
    void connect(output_connector &connector);
    // Detaches from the current producer, if any (defined out of line).
    void clear_connection();

private:
    output_connector *connection_ = nullptr;
};
/** Producer-side endpoint of an edge: may fan out to any number of
 *  input_connectors, and records which memory pool its buffer lives in. */
class NNCASE_API output_connector : public base_connector
{
public:
    template <class TName, class TShape>
    output_connector(node &owner, TName &&name, datatype_t type, TShape &&shape, memory_location_t memory_location = mem_data)
        : base_connector(owner, std::forward<TName>(name), type, std::forward<TShape>(shape)), memory_location_(memory_location)
    {
    }

    // All inputs currently fed by this output (read-only view).
    std::span<input_connector *const> connections() const noexcept { return connections_; }

    // Edge management (defined out of line).
    void connect(input_connector &connector);
    void disconnect(input_connector &connector);
    void clear_connections();

    // connector_attributes attributes() const noexcept { return attributes_; }
    // void attributes(connector_attributes value) noexcept { attributes_ = value; }

    // Memory pool this output's buffer is allocated in (defaults to mem_data).
    memory_location_t memory_location() const noexcept { return memory_location_; }
    void memory_location(memory_location_t value) noexcept { memory_location_ = value; }

private:
    std::vector<input_connector *> connections_;
    // connector_attributes attributes_ = cnctr_attr_none;
    memory_location_t memory_location_;
};
}

View File

@ -1,100 +0,0 @@
/* Copyright 2019-2021 Canaan Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#pragma once
#include "graph.h"
#include "ir_types.h"
#include <filesystem>
#include <map>
#include <string>
namespace nncase
{
// Renders a padding as "{before, after}".
inline std::string to_string(const padding &value)
{
    std::string result("{");
    result += std::to_string(value.before);
    result += ", ";
    result += std::to_string(value.after);
    result += "}";
    return result;
}
// Renders a quantization parameter as the formula "(q - zero_point) * scale".
inline std::string to_string(const quant_param_t &value)
{
    std::string result("(q - ");
    result += std::to_string(value.zero_point);
    result += ") * ";
    result += std::to_string(value.scale);
    return result;
}
// Printable name of a memory pool; unrecognized values map to "unknown".
inline std::string to_string(memory_location_t location)
{
    const char *name;
    switch (location)
    {
    case mem_input:
        name = "input";
        break;
    case mem_output:
        name = "output";
        break;
    case mem_rdata:
        name = "rdata";
        break;
    case mem_data:
        name = "data";
        break;
    default:
        name = "unknown";
        break;
    }
    return name;
}
// Linear search by identity (element address), not by value equality.
// Returns the index of `e` inside `v`, or SIZE_MAX when `e` is not an
// element of `v`.
template <typename Tv, typename T>
static size_t index_of(const Tv &v, const T &e)
{
    const auto count = v.size();
    for (size_t idx = 0; idx < count; idx++)
    {
        if (&v[idx] == &e)
            return idx;
    }
    return SIZE_MAX;
}
namespace ir
{
inline std::string to_string(const shape_t &shape)
{
std::string str { '[' };
for (size_t i = 0; i < shape.size(); i++)
{
if (i != 0)
{
str.append(",");
}
str.append(std::to_string(shape[i]));
}
str += ']';
return str;
}
inline std::string to_string(const axis_t &axis)
{
std::string str { '[' };
for (size_t i = 0; i < axis.size(); i++)
{
if (i != 0)
{
str.append(",");
}
str.append(std::to_string(axis[i]));
}
str += ']';
return str;
}
NNCASE_API void dump_graph(const ir::graph &src_graph, const std::filesystem::path &dst_path);
}
}

View File

@ -1,145 +0,0 @@
/* Copyright 2019-2021 Canaan Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#pragma once
#include "evaluate_types.h"
#include "quantizer.h"
#include <nncase/schedule/schedule_types.h>
namespace nncase
{
class target;
}
namespace nncase::ir
{
enum class eval_step
{
after_import,
after_calib,
after_quant
};
class module_evaluate_context;
class model_evaluate_context;
/** Per-function evaluation state: resolves connectors to tensor views over
 *  the function's scheduled buffers and runs evaluation for its nodes. */
class NNCASE_API function_evaluate_context
{
public:
    function_evaluate_context(const schedule::function_schedule_result &sched, module_evaluate_context &mod_eval);
    function_evaluate_context(const function_evaluate_context &) = delete;
    function_evaluate_context(function_evaluate_context &&) = default;

    // Tensor view over the buffer assigned to a producing connector.
    evaluate_tensor memory_at(const output_connector &conn);

    // An input connector aliases the buffer of the output it is connected to.
    evaluate_tensor memory_at(const input_connector &conn)
    {
        return memory_at(*conn.connection());
    }

    // Tensor view of the function's index-th input.
    evaluate_tensor input_at(size_t index)
    {
        return memory_at(*inputs_[index]);
    }

    // Tensor view of the function's index-th output.
    evaluate_tensor output_at(size_t index)
    {
        return memory_at(*outputs_[index]);
    }

    // Owning module-level context.
    module_evaluate_context &module() const noexcept { return mod_eval_; }

    // Runs one evaluation pass (defined out of line).
    void evaluate(eval_step step, size_t stage, bool record_output_buffers);

private:
    const schedule::function_schedule_result &sched_;
    module_evaluate_context &mod_eval_;
    // Dedicated pools backing the function's input/output buffers.
    std::unique_ptr<std::byte[]> input_pool_;
    std::unique_ptr<std::byte[]> output_pool_;
    std::vector<output_connector *> inputs_;
    std::vector<input_connector *> outputs_;
};
/** Per-module evaluation state: owns the module's memory pools, its
 *  optional quantizer, and one function context per function graph. */
class NNCASE_API module_evaluate_context
{
public:
    module_evaluate_context(const schedule::module_schedule_result &sched, model_evaluate_context &model_eval);
    module_evaluate_context(module_evaluate_context &) = delete;
    module_evaluate_context(module_evaluate_context &&) = default;

    const schedule::module_schedule_result &sched() const noexcept { return sched_; }

    // Base pointer of the pool for the given memory location.
    std::byte *memory_pool(memory_location_t location) const;

    // Quantizer for this module, or nullptr when PTQ is not enabled.
    ir::quantizer *quantizer() noexcept { return quantizer_.get(); }

    // Lazily-created evaluation context for one function graph.
    function_evaluate_context &function(ir::graph &function);

    // Owning model-level context.
    model_evaluate_context &model() const noexcept { return model_eval_; }

    // Post-training-quantization calibration workflow (defined out of line).
    void enable_ptq(target &target, ir::calibrate_method calib_method);
    void begin_collect_distribution();
    void end_sample();
    void end_collect_distribution(const std::function<void(size_t cnt, size_t total)> &progress);

private:
    const schedule::module_schedule_result &sched_;
    model_evaluate_context &model_eval_;
    // One allocation per memory location used by this module.
    std::unordered_map<memory_location_t, std::unique_ptr<std::byte[]>> memory_pools_;
    std::vector<output_connector *> inputs_;
    std::vector<input_connector *> outputs_;
    std::unique_ptr<ir::quantizer> quantizer_;
    std::unordered_map<ir::graph *, function_evaluate_context> functions_;
};
/** Top-level evaluation state for a scheduled model: one module context per
 *  module type. Connector/input/output lookups delegate to the entrypoint
 *  function. */
class NNCASE_API model_evaluate_context
{
public:
    model_evaluate_context(const schedule::model_schedule_result &sched);
    model_evaluate_context(const model_evaluate_context &) = delete;
    model_evaluate_context(model_evaluate_context &&) = default;

    // Evaluation context of the model's entry function.
    function_evaluate_context &entrypoint();

    // Module context for the given module type.
    module_evaluate_context &module(const module_type_t &module_type);

    evaluate_tensor memory_at(const output_connector &conn)
    {
        return entrypoint().memory_at(conn);
    }

    evaluate_tensor memory_at(const input_connector &conn)
    {
        return memory_at(*conn.connection());
    }

    evaluate_tensor input_at(size_t index)
    {
        return entrypoint().input_at(index);
    }

    evaluate_tensor output_at(size_t index)
    {
        return entrypoint().output_at(index);
    }

    // PTQ calibration workflow, applied across modules (defined out of line).
    void enable_ptq(nncase::target &target, ir::calibrate_method calib_method);
    void begin_collect_distribution();
    void end_sample();
    void end_collect_distribution(const std::function<void(size_t cnt, size_t total)> &progress);

    // Runs one evaluation pass over the model.
    void evaluate(eval_step step, size_t stage, bool record_output_buffers);

private:
    const schedule::model_schedule_result &sched_;
    std::unordered_map<module_type_t, module_evaluate_context> module_ctxs_;
};
}

View File

@ -1,36 +0,0 @@
/* Copyright 2019-2021 Canaan Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#pragma once
#include <nncase/runtime/datatypes.h>
namespace nncase::ir
{
/** Non-owning view of an evaluation buffer together with its logical layout
 *  (datatype, shape, strides). The bytes are owned by the evaluate context. */
class NNCASE_API evaluate_tensor
{
public:
    evaluate_tensor(datatype_t datatype, runtime_shape_t shape, runtime_shape_t strides, gsl::span<gsl::byte> buffer);

    datatype_t datatype() const noexcept { return datatype_; }
    const runtime_shape_t &shape() const noexcept { return shape_; }
    const runtime_shape_t &strides() const noexcept { return strides_; }
    // Raw byte view of the underlying storage.
    gsl::span<gsl::byte> buffer() const noexcept { return buffer_; }

private:
    datatype_t datatype_;
    runtime_shape_t shape_;
    runtime_shape_t strides_;
    gsl::span<gsl::byte> buffer_;
};
}

View File

@ -1,47 +0,0 @@
/* Copyright 2019-2021 Canaan Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#pragma once
#include "evaluate_context.h"
#include "evaluate_types.h"
namespace nncase::ir
{
/** Facade over model_evaluate_context: runs a scheduled model on the host
 *  and exposes tensor access plus the PTQ calibration workflow. */
class NNCASE_API evaluator
{
public:
    evaluator(const schedule::model_schedule_result &sched);
    evaluator(evaluator &) = delete;
    evaluator(evaluator &&) = default;

    // Enables post-training quantization with the given calibration method.
    void enable_ptq(target &target, ir::calibrate_method calib_method);

    // Runs one evaluation pass; defaults to the fresh-import stage.
    void evaluate(eval_step step = nncase::ir::eval_step::after_import, size_t stage = 0, bool record_output_buffers = false);

    // Quantizer of the module with the given type (see module_evaluate_context).
    ir::quantizer *quantizer(const module_type_t &module_type);

    // Calibration-sample lifecycle.
    void begin_collect_distribution();
    void end_sample();
    void end_collect_distribution(const std::function<void(size_t cnt, size_t total)> &progress);

    // Tensor accessors, delegated to the underlying model context.
    evaluate_tensor memory_at(const output_connector &conn);
    evaluate_tensor memory_at(const input_connector &conn);
    evaluate_tensor input_at(size_t index);
    evaluate_tensor output_at(size_t index);

private:
    model_evaluate_context model_eval_;
};
NNCASE_API void register_evaluator(ir::node_opcode opcode, std::function<void(ir::node &, function_evaluate_context &)> evaluator);
}

View File

@ -1,86 +0,0 @@
/* Copyright 2019-2021 Canaan Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#pragma once
#include "node.h"
#include "placeholders.h"
#include <memory>
#include <unordered_map>
#include <vector>
namespace nncase::ir
{
class graph;
/** Result of graph::split_subgraph: the extracted subgraph plus the maps
 *  describing how its boundary nodes correspond to connectors that remain
 *  in the original graph. */
struct split_graph_result
{
    std::unique_ptr<graph> subgraph;
    // New input_node in the subgraph -> producer left in the outer graph.
    std::unordered_map<input_node *, output_connector *> inputs;
    // New output_node in the subgraph -> consumers left in the outer graph.
    std::unordered_map<output_node *, std::vector<input_connector *>> outputs;
};
/** Owning container of IR nodes for one function/module region, plus any
 *  nested subgraphs. Nodes are created in place via emplace() and owned by
 *  the graph for its lifetime. */
class NNCASE_API graph
{
public:
    graph() noexcept;
    explicit graph(const module_type_t &module_type) noexcept
        : module_type_(module_type) { }
    graph(graph &) = delete;
    graph(graph &&) = delete;

    const std::string &name() const noexcept { return name_; }
    // Name transformed for safe use in identifiers/files (defined out of line).
    std::string escaped_name() const noexcept;
    void name(std::string value) { name_ = std::move(value); }

    const module_type_t &module_type() const noexcept { return module_type_; }
    void set_module_type(module_type_t type) { this->module_type_ = type; }

    // Mutable views.
    std::span<std::unique_ptr<node>> nodes() noexcept { return nodes_; }
    std::span<input_node *> inputs() noexcept { return inputs_; }
    std::span<output_node *> outputs() noexcept { return outputs_; }
    std::span<std::unique_ptr<graph>> subgraphs() noexcept { return subgraphs_; }
    // This graph plus all transitively nested subgraphs.
    std::vector<graph *> reachable_graphs() noexcept;

    // Read-only views.
    std::span<std::unique_ptr<node> const> nodes() const noexcept { return nodes_; }
    std::span<input_node *const> inputs() const noexcept { return inputs_; }
    std::span<output_node *const> outputs() const noexcept { return outputs_; }
    std::span<std::unique_ptr<graph> const> subgraphs() const noexcept { return subgraphs_; }

    // Creates a node owned by this graph; input/output nodes are also
    // registered in the corresponding boundary lists.
    template <class T, class... TArgs>
    T *emplace(TArgs &&...args)
    {
        auto node = static_cast<T *>(nodes_.emplace_back(new T(std::forward<TArgs>(args)...)).get());
        if constexpr (std::is_same_v<T, input_node>)
            inputs_.emplace_back(node);
        else if constexpr (std::is_same_v<T, output_node>)
            outputs_.emplace_back(node);
        return node;
    }

    // Graph-wide transformations (defined out of line):
    void assign_names();
    void dce(); // dead code elimination
    void cse(); // common subexpression elimination
    void merge_module_regions();
    // Extracts `nodes` into a new subgraph; see split_graph_result.
    split_graph_result split_subgraph(std::span<node *const> nodes);
    graph &add_subgraph(std::unique_ptr<graph> subgraph);

private:
    std::string name_;
    module_type_t module_type_;
    std::vector<std::unique_ptr<node>> nodes_;
    std::vector<std::unique_ptr<graph>> subgraphs_;
    std::vector<input_node *> inputs_;
    std::vector<output_node *> outputs_;
};
}

View File

@ -1,54 +0,0 @@
/* Copyright 2019-2021 Canaan Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#pragma once
#include <nncase/runtime/datatypes.h>
#include <span>
#include <type_traits>
#include <xtensor/xshape.hpp>
namespace nncase::ir
{
// Dynamic tensor shape (unsigned extents) and axis list (signed, so axes
// may be given relative to the end).
using shape_t = xt::dynamic_shape<std::size_t>;
using axis_t = xt::dynamic_shape<int32_t>;

// Bit flags attached to IR nodes (combinable via the bitmask operators below).
enum node_attributes
{
    node_attr_none = 0,
    node_attr_action = 1,
    node_attr_need_quantize = 2,
    node_attr_fuse_input_slice = 4,
    node_attr_fuse_output_concat = 8,
    node_attr_skip_constant_folding = 16
};

// Bit flags attached to connectors (combinable via the bitmask operators below).
enum connector_attributes
{
    cnctr_attr_none = 0,
    cnctr_attr_need_quantize = 1,
    cnctr_attr_no_layout_strides = 2,
    cnctr_attr_no_buffer_fusion = 4,
    cnctr_attr_buffer_slice = 8,
    cnctr_attr_no_dummy_for_benchmark = 16
};

// Enable |, &, ^, ~ etc. on the flag enums above.
DEFINE_ENUM_BITMASK_OPERATORS(node_attributes)
DEFINE_ENUM_BITMASK_OPERATORS(connector_attributes)
// Materializes a span of pointers into an owning vector (shallow copy of
// the pointers only). Restricted to pointer element types via SFINAE.
template <class T, class = std::enable_if_t<std::is_pointer_v<T>>>
std::vector<std::decay_t<T>> dup(std::span<T> source)
{
    std::vector<std::decay_t<T>> copy(source.begin(), source.end());
    return copy;
}
}

View File

@ -1,92 +0,0 @@
/* Copyright 2019-2021 Canaan Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#pragma once
#include "connectors.h"
#include "opcode.h"
#include <list>
#include <span>
#include <unordered_map>
namespace nncase::ir
{
// Injects the per-node opcode boilerplate into a node subclass: a static
// compile-time opcode() accessor plus the virtual runtime_opcode() override,
// both yielding `value`.
#define DEFINE_NODE_OPCODE(value)                                    \
    static constexpr node_opcode opcode() noexcept { return value; } \
    const node_opcode &runtime_opcode() const noexcept override { return value; }
/** Base class of every IR node. Owns its connectors; subclasses register
 *  them via add_input/add_output and identify themselves through
 *  DEFINE_NODE_OPCODE. Nodes are non-copyable identity objects. */
class NNCASE_API node
{
public:
    node(std::string name = "");
    node(node &) = delete;
    node &operator=(node &) = delete;
    virtual ~node();

    const std::string &name() const noexcept { return name_; }
    // Forwards any std::string::assign overload to rename the node.
    template <class TArg, class... TArgs>
    void name(TArg arg, TArgs... args) { name_.assign(std::forward<TArg>(arg), std::forward<TArgs>(args)...); }
    // Name transformed for safe use in identifiers/files (defined out of line).
    std::string escaped_name() const noexcept;

    const module_type_t &module_type() const noexcept { return module_type_; }
    void module_type(const module_type_t &type) noexcept { module_type_ = type; }

    // Connector access; *_at throws (std::out_of_range via .at) on bad index.
    std::span<input_connector *const> inputs() const noexcept { return input_connectors_; }
    std::span<output_connector *const> outputs() const noexcept { return output_connectors_; }
    input_connector &input_at(size_t index) const { return *input_connectors_.at(index); }
    output_connector &output_at(size_t index) const { return *output_connectors_.at(index); }

    // Concrete opcode of the subclass (see DEFINE_NODE_OPCODE).
    virtual const node_opcode &runtime_opcode() const noexcept = 0;

    node_attributes attributes() const noexcept { return attributes_; }
    void attributes(node_attributes value) noexcept { attributes_ = value; }

    // Structural equality (delegates to properties_equal for subclass state).
    bool equals(node &other) const;

    // Bookkeeping linking quantized connectors/nodes back to their
    // pre-quantization counterparts.
    void record_output_connectors_quant_map(output_connector &oc_after_quant, output_connector &oc_before_quant) noexcept { output_connectors_quant_map_.emplace(&oc_after_quant, &oc_before_quant); }
    std::unordered_map<output_connector *, output_connector *> get_output_connectors_quant_map() const noexcept { return output_connectors_quant_map_; }
    void record_node_name_before_quant(std::string name) noexcept { node_name_before_quant_.assign(name); }
    std::string get_node_name_before_quant() const noexcept { return node_name_before_quant_; }

protected:
    // Creates and registers an input connector owned by this node.
    template <class TName, class TShape>
    input_connector &add_input(TName &&name, datatype_t type, TShape &&shape)
    {
        auto ptr = input_connectors_storage_.emplace_back(std::make_unique<input_connector>(*this, std::forward<TName>(name), type, std::forward<TShape>(shape))).get();
        input_connectors_.emplace_back(ptr);
        return *ptr;
    }

    // Creates and registers an output connector owned by this node.
    template <class TName, class TShape>
    output_connector &add_output(TName &&name, datatype_t type, TShape &&shape, memory_location_t memory_location = mem_data)
    {
        auto ptr = output_connectors_storage_.emplace_back(std::make_unique<output_connector>(*this, std::forward<TName>(name), type, std::forward<TShape>(shape), memory_location)).get();
        output_connectors_.emplace_back(ptr);
        return *ptr;
    }

    // Subclass hook comparing subclass-specific properties for equals().
    virtual bool properties_equal(node &other) const = 0;

private:
    std::string name_;
    module_type_t module_type_;
    node_attributes attributes_ = node_attributes::node_attr_action;
    // Raw-pointer views (stable: storage vectors own the connectors).
    std::vector<input_connector *> input_connectors_;
    std::vector<output_connector *> output_connectors_;
    std::vector<std::unique_ptr<input_connector>> input_connectors_storage_;
    std::vector<std::unique_ptr<output_connector>> output_connectors_storage_;
    std::unordered_map<output_connector *, output_connector *> output_connectors_quant_map_;
    std::string node_name_before_quant_;
};
}

View File

@ -1,379 +0,0 @@
/* Copyright 2019-2021 Canaan Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#pragma once
#include "node.h"
#include <span>
#include <xtensor/xtensor.hpp>
namespace nncase::ir
{
// Applies permutation `perm` to a shape: result[i] = input_shape[perm[i]].
inline shape_t get_transposed_shape(const shape_t &input_shape, const axis_t &perm)
{
    const auto rank = input_shape.size();
    shape_t transposed(rank);
    for (size_t i = 0; i < rank; i++)
        transposed[i] = input_shape[perm[i]];
    return transposed;
}
// Output extent of a sliding window over `size` elements.
// `same` requests SAME padding semantics (output = ceil(size / stride));
// otherwise VALID semantics are used, with floor or ceil rounding per
// `ceil_mode`. The filter extent accounts for dilation.
inline size_t get_windowed_output_size(int32_t size, int32_t filter, int32_t stride, int32_t dilation, bool same, bool ceil_mode = false)
{
    const auto effective_filter_size = (filter - 1) * dilation + 1;
    if (same)
        return (size_t(size) + stride - 1) / stride;

    const auto numerator = size_t(size) - effective_filter_size + stride;
    if (ceil_mode)
        return static_cast<int>(ceil(static_cast<float>(numerator) / stride));
    return numerator / stride;
}
// Padding needed so `output_size` windows of the (dilated) filter with the
// given stride cover `input_size`; split as evenly as possible, with any odd
// remainder going to the trailing side. (Local renamed so it no longer
// shadows the `padding` type.)
inline padding get_windowed_padding(int32_t input_size, int32_t output_size, int32_t filter, int32_t stride, int32_t dilation)
{
    const auto effective_filter_size = (filter - 1) * dilation + 1;
    const int total = std::max(0, (output_size - 1) * stride + effective_filter_size - input_size);
    return { total / 2, total - total / 2 };
}
// Convenience overload: derives the output size from the SAME/VALID choice,
// then delegates to the explicit-output-size overload.
inline padding get_windowed_padding(int32_t input_size, int32_t filter, int32_t stride, int32_t dilation, bool same)
{
    const auto out_size = (int32_t)get_windowed_output_size(input_size, filter, stride, dilation, same);
    return get_windowed_padding(input_size, out_size, filter, stride, dilation);
}
// Size in bytes of one scalar of `type`. The switch body is generated from
// the master datatype table (datatypes.def) via X-macro expansion, so it
// stays in sync with the runtime's datatype list automatically.
// Throws std::invalid_argument for values not in the table.
inline constexpr size_t get_bytes(datatype_t type)
{
    switch (type)
    {
#define DEFINE_DATATYPE(id, t, name, value) \
    case (dt_##id):                         \
        return sizeof(t);
#include <nncase/runtime/datatypes.def>
#undef DEFINE_DATATYPE
    default:
        throw std::invalid_argument("Invalid datatype");
    }
}
// Total byte size of a tensor: element size times element count.
inline size_t get_bytes(datatype_t type, const shape_t &shape)
{
    return get_bytes(type) * xt::compute_size(shape);
}
// Row-major (C-order) strides, measured in elements, for `shape`.
inline nncase::ir::shape_t to_strides(const nncase::ir::shape_t &shape)
{
    nncase::ir::shape_t result(shape.size());
    xt::compute_strides(shape, xt::layout_type::row_major, result);
    return result;
}
// Maps a possibly-negative axis (counting from the back) onto [0, rank).
inline int32_t normalize_axis(const shape_t &input_shape, int32_t axis)
{
    if (axis < 0)
        return (int32_t)input_shape.size() + axis;
    return axis;
}
// Maps every negative axis in the list onto [0, rank); order is preserved.
inline axis_t normalize_axis(const shape_t &input_shape, const axis_t &axis)
{
    axis_t normalized = axis;
    const auto rank = (int32_t)input_shape.size();
    for (auto &a : normalized)
    {
        if (a < 0)
            a += rank;
    }
    return normalized;
}
// Normalizes negative axes, then returns them sorted ascending (the order
// expected by get_reduced_shape).
inline axis_t normalize_reduce_axis(const shape_t &input_shape, const axis_t &axis)
{
    auto sorted_axis = normalize_axis(input_shape, axis);
    std::sort(sorted_axis.begin(), sorted_axis.end());
    return sorted_axis;
}
// Shape after reducing over `axis` (which must be pre-sorted). Reduced
// dimensions are dropped, or kept as 1 when keep_dims is set; a fully
// reduced result collapses to the scalar-like shape [1].
inline shape_t get_reduced_shape(const shape_t &input_shape, const axis_t &axis, bool keep_dims)
{
    if (!std::is_sorted(axis.begin(), axis.end()))
        throw std::invalid_argument("axis must be sorted");

    shape_t reduced;
    for (size_t dim = 0; dim < input_shape.size(); dim++)
    {
        const bool is_reduced = std::find(axis.begin(), axis.end(), dim) != axis.end();
        if (!is_reduced)
            reduced.push_back(input_shape[dim]);
        else if (keep_dims)
            reduced.push_back(1);
    }

    if (reduced.empty())
        reduced.push_back(1);
    return reduced;
}
// Resolves a reshape spec against the input's element count. At most one
// entry of `new_shape` may be -1 (a wildcard); its extent is inferred as
// total_elements / product_of_known_dims. Two or more wildcards throw.
inline shape_t normalize_reshape(const shape_t &in_shape, const axis_t &new_shape)
{
    shape_t result(new_shape.size());
    size_t known_size = 1;
    std::optional<size_t> wildcard_index;
    for (size_t i = 0; i < new_shape.size(); i++)
    {
        const auto dim = new_shape[i];
        if (dim == -1)
        {
            if (wildcard_index)
                throw std::runtime_error("Reshape can only have 1 non-determined dimension at most");
            wildcard_index = i;
        }
        else
        {
            known_size *= dim;
            result[i] = (size_t)dim;
        }
    }

    if (wildcard_index)
        result[*wildcard_index] = xt::compute_size(in_shape) / known_size;
    return result;
}
// Numpy-style broadcast of two shapes: align from the trailing dimension,
// missing leading dims count as 1; at each position the dims must match or
// one of them must be 1, otherwise std::invalid_argument is thrown.
inline shape_t get_binary_output_shape(const shape_t &input_a_shape, const shape_t &input_b_shape)
{
    const auto dest_dims = (int32_t)std::max(input_a_shape.size(), input_b_shape.size());
    const auto in_a_ext = dest_dims - (int32_t)input_a_shape.size();
    const auto in_b_ext = dest_dims - (int32_t)input_b_shape.size();

    shape_t out_shape;
    for (int32_t i = 0; i < dest_dims; i++)
    {
        const auto a_dim = i - in_a_ext;
        const auto b_dim = i - in_b_ext;
        const size_t a = a_dim < 0 ? 1 : input_a_shape[a_dim];
        const size_t b = b_dim < 0 ? 1 : input_b_shape[b_dim];

        if (a == b || b == 1)
            out_shape.push_back(a);
        else if (a == 1)
            out_shape.push_back(b);
        else
            throw std::invalid_argument("inputs are not compatible to broadcast");
    }
    return out_shape;
}
// Collects the declared shape of every input connector.
inline std::vector<shape_t> get_input_shapes(std::span<input_connector *const> inputs)
{
    std::vector<shape_t> shapes;
    shapes.reserve(inputs.size());
    for (auto in : inputs)
        shapes.push_back(in->shape());
    return shapes;
}
// Shape of concatenating `input_shapes` along `axis`: all ranks must match
// and every non-axis dimension must agree; the axis dimension is summed.
// Throws std::invalid_argument on empty input or incompatible shapes.
inline shape_t get_concated_shape(std::span<shape_t> input_shapes, size_t axis)
{
    if (input_shapes.empty())
        throw std::invalid_argument("there must be at least one input");

    auto result = input_shapes[0];
    for (size_t i = 1; i < input_shapes.size(); i++)
    {
        const auto &shape = input_shapes[i];
        if (result.size() != shape.size())
            throw std::invalid_argument("inputs must have same ranks");

        for (size_t dim = 0; dim < result.size(); dim++)
        {
            if (dim == axis)
                result[dim] += shape[dim];
            else if (shape[dim] != result[dim])
                throw std::invalid_argument("inputs are not compatible to concat");
        }
    }
    return result;
}
// Factors out_shape around `axis` for concat copying:
//   inner_size = elem_size * product of dims after the axis (bytes per slice)
//   outer_size = product of dims before the axis (number of slices)
inline void get_concat_params(const shape_t &out_shape, size_t elem_size, size_t axis, uint64_t &inner_size, uint64_t &outer_size)
{
    inner_size = elem_size;
    outer_size = 1;
    for (size_t i = 0; i < out_shape.size(); i++)
    {
        if (i < axis)
            outer_size *= out_shape[i];
        else if (i > axis)
            inner_size *= out_shape[i];
    }
}
// Shape after applying per-dimension paddings, including interior padding
// inserted between elements (one gap per pair of adjacent elements).
inline shape_t get_padded_shape(const shape_t &in_shape, const xt::svector<padding> &paddings)
{
    auto padded = in_shape;
    for (size_t i = 0; i < in_shape.size(); i++)
    {
        const auto &pad = paddings[i];
        padded[i] = size_t(int32_t(padded[i]) + pad.sum() + (padded[i] - 1) * pad.interior);
    }
    return padded;
}
// Replaces the trailing two dimensions with new_size ({H, W}): the last dim
// becomes new_size[1] and the second-to-last becomes new_size[0].
// Requires rank >= 2.
inline shape_t get_resize_image_shape(const shape_t &in_shape, const std::array<int32_t, 2> &new_size)
{
    auto resized = in_shape;
    resized[resized.size() - 1] = new_size[1];
    resized[resized.size() - 2] = new_size[0];
    return resized;
}
// Resolves strided-slice begin indices to absolute positions.
// For each axis: when the corresponding begin_mask bit is set, begin[i] is
// ignored and replaced by the lowest position in stride direction (0 for a
// positive stride, last element for a negative one); otherwise a negative
// begin[i] counts from the end of the dimension.
inline axis_t normalize_strided_slice_begin(const shape_t &in_shape, const axis_t &begin, const axis_t &strides, int32_t begin_mask)
{
    axis_t new_shape(strides.size());
    for (size_t i = 0; i < new_shape.size(); i++)
    {
        auto stride = strides[i];
        assert(stride); // a zero stride would make the slice meaningless
        new_shape[i] = (begin_mask & (1 << i)) != 0
            ? stride > 0 ? 0 : (int32_t)in_shape[i] - 1
            : (begin[i] >= 0 ? begin[i] : (int32_t)in_shape[i] + begin[i]);
    }
    return new_shape;
}
// Resolves strided-slice end indices to absolute (exclusive) positions.
// For each axis: when the corresponding end_mask bit is set, end[i] is
// ignored and replaced by one-past-the-last position in stride direction
// (the extent for a positive stride, -1 for a negative one); otherwise a
// negative end[i] counts from the end of the dimension.
// NOTE(review): `in_shape[i] + end[i]` mixes size_t and int32_t, so that sum
// is computed in unsigned arithmetic before the final cast — correct as long
// as |end[i]| <= in_shape[i].
inline axis_t normalize_strided_slice_end(const shape_t &in_shape, [[maybe_unused]] const axis_t &begin, const axis_t &end, const axis_t &strides, int32_t end_mask)
{
    axis_t new_shape(strides.size());
    for (size_t i = 0; i < new_shape.size(); i++)
    {
        auto stride = strides[i];
        auto end_val = (end_mask & (1 << i)) != 0
            ? stride > 0 ? (int32_t)in_shape[i] : -1
            : (end[i] >= 0 ? end[i] : in_shape[i] + end[i]);
        new_shape[i] = (int32_t)end_val;
    }
    return new_shape;
}
// Number of elements selected per axis: ceil((end - begin) / stride).
// Non-zero ellipsis/new-axis masks are rejected. A rank-0 result
// degenerates to the shape [1].
inline shape_t get_strided_slice_output_shape(const axis_t &begin, const axis_t &end, const axis_t &strides, int32_t ellipsis_mask, int32_t new_axis_mask)
{
    if (ellipsis_mask)
        throw std::invalid_argument("Non-zero ellipsis_mask is not supported");
    if (new_axis_mask)
        throw std::invalid_argument("Non-zero new_axis_mask is not supported");

    shape_t out_shape;
    for (size_t i = 0; i < strides.size(); i++)
    {
        const auto extent = (int)std::ceil((end[i] - begin[i]) / (float)strides[i]);
        out_shape.push_back(extent);
    }
    return out_shape.empty() ? shape_t { 1 } : out_shape;
}
inline bool is_copy_slice(const axis_t &strides)
{
return std::all_of(strides.begin(), strides.end(), [](int32_t stride) { return stride == 1; });
}
// Detects a "simple" slice: all strides are 1, and at most one dimension is
// actually trimmed (begin != 0 or end != extent). Additionally, once a
// fully-kept dimension with extent > 1 has been seen, no later dimension
// may be trimmed — i.e. the trimmed dimension, if any, must be the
// outermost non-unit one.
inline bool is_simple_slice(const axis_t &begin, const axis_t &end, const axis_t &strides, const shape_t &input_shape)
{
    if (!is_copy_slice(strides))
        return false;
    bool is_simple_slice = true;
    // While true, one trimmed dimension is still permitted.
    bool allow_not_equal = true;
    for (size_t i = 0; i < begin.size(); i++)
    {
        if (begin[i] != 0
            || end[i] != input_shape[i])
        {
            // This dimension is trimmed: consume the single allowance,
            // or fail if it was already used.
            if (allow_not_equal)
            {
                allow_not_equal = false;
            }
            else
            {
                is_simple_slice = false;
                break;
            }
        }
        else if (input_shape[i] != 1)
        {
            // A fully-kept dimension with extent > 1: any later trim would
            // no longer be expressible as a single contiguous copy.
            allow_not_equal = false;
        }
    }
    return is_simple_slice;
}
// True when the two shapes are identical after stripping leading 1s, i.e.
// the bitcast only squeezes or expands outermost unit dimensions.
inline bool is_axis0_squeeze_or_expand_dim_bitcast(const shape_t &in_shape, const shape_t &out_shape)
{
    auto is_one = [](size_t dim) { return dim == 1; };
    auto in_begin = std::find_if_not(in_shape.begin(), in_shape.end(), is_one);
    auto out_begin = std::find_if_not(out_shape.begin(), out_shape.end(), is_one);
    if (std::distance(in_begin, in_shape.end()) != std::distance(out_begin, out_shape.end()))
        return false;
    return std::equal(in_begin, in_shape.end(), out_begin);
}
// Reinterprets a span of T as a span of U over the same bytes. The total
// byte length must divide evenly by sizeof(U) (checked by assert); the
// element count is recomputed from the byte size.
template <class U, class T>
std::span<U> as_span(const std::span<T> &src) noexcept
{
    assert(src.size_bytes() % sizeof(U) == 0);
    const auto count = src.size_bytes() / sizeof(U);
    return std::span<U>(reinterpret_cast<U *>(src.data()), count);
}
}
namespace xt
{
// Elementwise shape addition; both operands must have the same rank,
// otherwise std::invalid_argument is thrown.
inline nncase::ir::shape_t operator+(const nncase::ir::shape_t &lhs, const nncase::ir::shape_t &rhs)
{
    using namespace nncase::ir;
    if (lhs.size() != rhs.size())
        throw std::invalid_argument("Shape's rank mismatch");
    shape_t sum = lhs;
    auto rit = rhs.begin();
    for (auto &dim : sum)
        dim += *rit++;
    return sum;
}
// In-place elementwise shape addition; ranks must match, otherwise
// std::invalid_argument is thrown.
inline nncase::ir::shape_t &operator+=(nncase::ir::shape_t &lhs, const nncase::ir::shape_t &rhs)
{
    using namespace nncase::ir;
    if (lhs.size() != rhs.size())
        throw std::invalid_argument("Shape's rank mismatch");
    auto rit = rhs.begin();
    for (auto &dim : lhs)
        dim += *rit++;
    return lhs;
}
}

View File

@ -1,48 +0,0 @@
// Master list of neutral (target-independent) IR opcodes, consumed through
// the DEFINE_NEUTRAL_OPCODE(class-id, display-name, code) X-macro.

// Structural / graph-plumbing nodes (0x01 - 0x07).
DEFINE_NEUTRAL_OPCODE(input_node, Input, 0x01)
DEFINE_NEUTRAL_OPCODE(output_node, Output, 0x02)
DEFINE_NEUTRAL_OPCODE(ignore_node, Ignore, 0x03)
DEFINE_NEUTRAL_OPCODE(constant, Constant, 0x04)
DEFINE_NEUTRAL_OPCODE(uninitialized, Uninitialized, 0x05)
DEFINE_NEUTRAL_OPCODE(call, Call, 0x06)
DEFINE_NEUTRAL_OPCODE(copy, Copy, 0x07)

// Compute / tensor operators (0x100 and up).
DEFINE_NEUTRAL_OPCODE(conv2d, Conv2D, 0x100)
DEFINE_NEUTRAL_OPCODE(matmul, MatMul, 0x101)
DEFINE_NEUTRAL_OPCODE(transpose, Transpose, 0x102)
DEFINE_NEUTRAL_OPCODE(reduce, Reduce, 0x103)
DEFINE_NEUTRAL_OPCODE(reduce_window2d, ReduceWindow2D, 0x104)
DEFINE_NEUTRAL_OPCODE(binary, Binary, 0x105)
DEFINE_NEUTRAL_OPCODE(concat, Concat, 0x106)
DEFINE_NEUTRAL_OPCODE(unary, Unary, 0x107)
DEFINE_NEUTRAL_OPCODE(fused_unary, FusedUnary, 0x108)
DEFINE_NEUTRAL_OPCODE(quantize, Quantize, 0x109)
DEFINE_NEUTRAL_OPCODE(dequantize, Dequantize, 0x10A)
DEFINE_NEUTRAL_OPCODE(pad, Pad, 0x10B)
DEFINE_NEUTRAL_OPCODE(bitcast, Bitcast, 0x10C)
DEFINE_NEUTRAL_OPCODE(resize_image, ResizeImage, 0x10D)
DEFINE_NEUTRAL_OPCODE(slice, Slice, 0x10E)
DEFINE_NEUTRAL_OPCODE(table_lookup1d, TableLookup1D, 0x10F)
DEFINE_NEUTRAL_OPCODE(conv2d_transpose, Conv2DTranspose, 0x110)
DEFINE_NEUTRAL_OPCODE(clamp, Clamp, 0x111)
DEFINE_NEUTRAL_OPCODE(convert, Convert, 0x112)
DEFINE_NEUTRAL_OPCODE(broadcast, Broadcast, 0x113)
DEFINE_NEUTRAL_OPCODE(take, Take, 0x114)
DEFINE_NEUTRAL_OPCODE(space_to_batch, SpaceToBatch, 0x115)
DEFINE_NEUTRAL_OPCODE(batch_to_space, BatchToSpace, 0x116)
DEFINE_NEUTRAL_OPCODE(split, Split, 0x117)
DEFINE_NEUTRAL_OPCODE(gather, Gather, 0x118)
DEFINE_NEUTRAL_OPCODE(gather_nd, GatherND, 0x119)
DEFINE_NEUTRAL_OPCODE(onehot, OneHot, 0x11A)
DEFINE_NEUTRAL_OPCODE(lstm, LSTM, 0x11B)
DEFINE_NEUTRAL_OPCODE(reduce_arg, ReduceArg, 0x11C)
DEFINE_NEUTRAL_OPCODE(cumsum, CumSum, 0x11D)
DEFINE_NEUTRAL_OPCODE(hardmax, HardMax, 0x11E)
DEFINE_NEUTRAL_OPCODE(random_normal, RandomNormal, 0x11F)
DEFINE_NEUTRAL_OPCODE(random_uniform, RandomUniform, 0x120)
DEFINE_NEUTRAL_OPCODE(reduce_prod, ReduceProd, 0x121)
DEFINE_NEUTRAL_OPCODE(ternary, Ternary, 0x122)
DEFINE_NEUTRAL_OPCODE(topk, TopK, 0x123)
DEFINE_NEUTRAL_OPCODE(trilu, Trilu, 0x124)
DEFINE_NEUTRAL_OPCODE(sigmoid, Sigmoid, 0x125)
DEFINE_NEUTRAL_OPCODE(roi_align, RoiAlign, 0x126)
DEFINE_NEUTRAL_OPCODE(equal, Equal, 0x127)

View File

@ -1,62 +0,0 @@
/* Copyright 2019-2021 Canaan Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#pragma once
#include "../node.h"
#include <xtensor/xarray.hpp>
namespace nncase::ir
{
// IR node for BatchToSpace: moves blocks of the batch dimension back into
// the spatial (H, W) dimensions, then crops the spatial borders.
class NNCASE_API batch_to_space : public node
{
public:
    DEFINE_NODE_OPCODE(op_batch_to_space);

    // Single data input / single output.
    input_connector &input() { return input_at(0); }
    output_connector &output() { return output_at(0); }

    // Block sizes moved from batch into height / width.
    int32_t block_size_h() const noexcept { return block_size_h_; }
    int32_t block_size_w() const noexcept { return block_size_w_; }

    // Strided-slice style parameters applied to the rearranged tensor.
    const axis_t &begin() const noexcept { return begin_; }
    const axis_t &end() const noexcept { return end_; }
    const axis_t &strides() const noexcept { return strides_; }
    // NOTE(review): the mask accessors below expose members that the visible
    // constructor does not take — presumably initialized to defaults in the
    // .cpp; confirm against the implementation.
    int32_t begin_mask() const noexcept { return begin_mask_; }
    int32_t end_mask() const noexcept { return end_mask_; }
    int32_t ellipsis_mask() const noexcept { return ellipsis_mask_; }
    int32_t new_axis_mask() const noexcept { return new_axis_mask_; }
    int32_t shrink_axis_mask() const noexcept { return shrink_axis_mask_; }

    // Crop amounts as {before, after} pairs for height and width.
    std::array<int32_t, 2> crop_h() const noexcept { return crop_h_; }
    std::array<int32_t, 2> crop_w() const noexcept { return crop_w_; }

    batch_to_space(datatype_t input_type, shape_t input_shape, int32_t block_shape_h, int32_t block_shape_w, axis_t stride, axis_t begin, axis_t end, std::array<int32_t, 2> crop_h_, std::array<int32_t, 2> crop_w_);

protected:
    // True when `other` is a batch_to_space with identical attributes.
    bool properties_equal(node &other) const override;

private:
    int32_t block_size_h_;
    int32_t block_size_w_;
    axis_t begin_;
    axis_t end_;
    axis_t strides_;
    int32_t begin_mask_;
    int32_t end_mask_;
    int32_t ellipsis_mask_;
    int32_t new_axis_mask_;
    int32_t shrink_axis_mask_;
    std::array<int32_t, 2> crop_h_;
    std::array<int32_t, 2> crop_w_;
};
}

View File

@ -1,42 +0,0 @@
/* Copyright 2019-2021 Canaan Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#pragma once
#include "../node.h"
#include <xtensor/xtensor.hpp>
namespace nncase::ir
{
// IR node for an element-wise binary operation (add, mul, ...) on two
// inputs, with an optional fused activation clamp applied to the result.
class NNCASE_API binary : public node
{
public:
    DEFINE_NODE_OPCODE(op_binary);

    // Left / right operands and single output.
    input_connector &input_a() { return input_at(0); }
    input_connector &input_b() { return input_at(1); }
    output_connector &output() { return output_at(0); }

    // Which binary operation this node performs.
    binary_op_t binary_op() const noexcept { return binary_op_; }
    // Output value range of the fused activation (clamp) applied after the op.
    value_range<float> fused_activation() const noexcept { return fused_activation_; }

    binary(binary_op_t binary_op, shape_t input_a_shape, shape_t input_b_shape, value_range<float> input_fused_activation);

protected:
    // True when `other` is a binary node with the same op and activation.
    bool properties_equal(node &other) const override;

private:
    binary_op_t binary_op_;
    value_range<float> fused_activation_;
};
}

View File

@ -1,46 +0,0 @@
/* Copyright 2019-2021 Canaan Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#pragma once
#include "../node.h"
#include <xtensor/xtensor.hpp>
namespace nncase::ir
{
// IR node reinterpreting a tensor's type and/or shape without touching the
// underlying bytes (a reshape when the element type is unchanged).
class NNCASE_API bitcast : public node
{
public:
    DEFINE_NODE_OPCODE(op_bitcast);

    const input_connector &input() const { return input_at(0); }
    input_connector &input() { return input_at(0); }
    output_connector &output() { return output_at(0); }

    // A bitcast that keeps the element type is a pure reshape.
    bool is_reshape() const noexcept { return new_type_ == input().type(); }
    // Element type and shape of the output view.
    datatype_t new_type() const noexcept { return new_type_; }
    const shape_t &new_shape() const noexcept { return new_shape_; }

    // Overloads without `new_type` keep the input's element type.
    bitcast(datatype_t input_type, shape_t input_shape, axis_t new_shape);
    bitcast(datatype_t input_type, shape_t input_shape, shape_t new_shape);
    bitcast(datatype_t input_type, shape_t input_shape, datatype_t new_type, axis_t new_shape);
    bitcast(datatype_t input_type, shape_t input_shape, datatype_t new_type, shape_t new_shape);

protected:
    // True when `other` is a bitcast to the same type and shape.
    bool properties_equal(node &other) const override;

private:
    datatype_t new_type_;
    shape_t new_shape_;
};
}

View File

@ -1,40 +0,0 @@
/* Copyright 2019-2021 Canaan Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#pragma once
#include "../node.h"
#include <xtensor/xtensor.hpp>
namespace nncase::ir
{
// IR node broadcasting its input tensor to a larger target shape.
class NNCASE_API broadcast : public node
{
public:
    DEFINE_NODE_OPCODE(op_broadcast);

    const input_connector &input() const { return input_at(0); }
    input_connector &input() { return input_at(0); }
    output_connector &output() { return output_at(0); }

    // Target shape after broadcasting.
    const shape_t &new_shape() const noexcept { return new_shape_; }

    broadcast(datatype_t input_type, shape_t input_shape, shape_t new_shape);

protected:
    // True when `other` is a broadcast to the same target shape.
    bool properties_equal(node &other) const override;

private:
    shape_t new_shape_;
};
}

View File

@ -1,41 +0,0 @@
/* Copyright 2019-2021 Canaan Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#pragma once
#include "../graph.h"
#include <xtensor/xtensor.hpp>
namespace nncase::ir
{
// IR node invoking a sub-graph. Each input/output node of the target graph
// is mirrored by a connector on this node; the outer_connector overloads
// map between the inner graph's endpoints and this node's connectors.
class NNCASE_API call : public node
{
public:
    DEFINE_NODE_OPCODE(op_call);

    // The callee graph (not owned by this node).
    graph &target() const noexcept { return target_; }

    call(graph &target);

    // Connector on this call node corresponding to an input node of the
    // target graph (or to the connector attached to it).
    input_connector &outer_connector(input_node &target_input);
    input_connector &outer_connector(input_connector &target_input);
    // Connector on this call node corresponding to an output node of the
    // target graph (or to the connector attached to it).
    output_connector &outer_connector(output_node &target_output);
    output_connector &outer_connector(output_connector &target_output);

protected:
    // True when `other` calls the same target graph.
    bool properties_equal(node &other) const override;

private:
    graph &target_;
};
}

View File

@ -1,39 +0,0 @@
/* Copyright 2019-2021 Canaan Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#pragma once
#include "../node.h"
#include <xtensor/xtensor.hpp>
namespace nncase::ir
{
// IR node clamping its input element-wise between dynamic low/high bounds
// supplied as additional inputs (not compile-time attributes).
class NNCASE_API clamp : public node
{
public:
    DEFINE_NODE_OPCODE(op_clamp);

    input_connector &input() { return input_at(0); }
    // Lower bound tensor.
    input_connector &input_low() { return input_at(1); }
    const input_connector &input_low() const { return input_at(1); }
    // Upper bound tensor.
    input_connector &input_high() { return input_at(2); }
    const input_connector &input_high() const { return input_at(2); }
    output_connector &output() { return output_at(0); }
    const output_connector &output() const { return output_at(0); }

    clamp(shape_t input_shape, shape_t input_low_shape, shape_t input_high_shape);

protected:
    // clamp carries no attributes, so any two clamp nodes compare equal.
    bool properties_equal([[maybe_unused]] node &other) const override { return true; }
};
}

View File

@ -1,40 +0,0 @@
/* Copyright 2019-2021 Canaan Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#pragma once
#include "../node.h"
#include <xtensor/xtensor.hpp>
namespace nncase::ir
{
// IR node concatenating N inputs along a single axis. The number of input
// connectors equals the number of shapes passed to the constructor.
class NNCASE_API concat : public node
{
public:
    DEFINE_NODE_OPCODE(op_concat);

    output_connector &output() { return output_at(0); }

    // Concatenation axis.
    int32_t axis() const noexcept { return axis_; }
    // Per-input extent along the concat axis, in input order.
    std::span<const size_t> concat_dims() const noexcept { return concat_dims_; }

    concat(datatype_t type, std::span<shape_t> input_shapes, int32_t axis);

protected:
    // True when `other` is a concat with the same axis and input extents.
    bool properties_equal(node &other) const override;

private:
    int32_t axis_;
    std::vector<size_t> concat_dims_;
};
}

View File

@ -1,141 +0,0 @@
/* Copyright 2019-2021 Canaan Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#pragma once
#include "../debug.h"
#include "../node.h"
#include "../op_utils.h"
#include <nncase/runtime/debug.h>
#include <vector>
namespace nncase::ir
{
// IR node holding an immutable tensor constant. The node owns the raw
// payload bytes (data_) and exposes them read-only; its single output is
// allocated in the read-only data region (mem_rdata).
class NNCASE_API constant : public node
{
public:
    DEFINE_NODE_OPCODE(op_constant);

    output_connector &output() { return output_at(0); }
    const output_connector &output() const { return output_at(0); }

    // Required byte alignment of the payload (defaults to 8).
    size_t alignment() const noexcept { return alignment_; }
    void alignment(size_t value) { alignment_ = value; }

    // Read-only view over the owned payload bytes.
    std::span<const std::byte> data() const noexcept { return data_; }
    datatype_t data_type() { return datatype_; }

    // Convenience constructors: every overload converts its span/vector
    // flavor to raw bytes and funnels into the variadic constructor below.
    template <class TShape>
    constant(datatype_t type, TShape &&shape, std::span<const std::byte> data)
        : constant(type, std::forward<TShape>(shape), data.begin(), data.end())
    {
    }

    template <class TShape>
    constant(datatype_t type, TShape &&shape, gsl::span<const gsl::byte> data)
        : constant(type, std::forward<TShape>(shape), reinterpret_cast<const std::byte *>(data.begin()), reinterpret_cast<const std::byte *>(data.end()))
    {
    }

    template <class TShape, class T>
    constant(datatype_t type, TShape &&shape, std::span<const T> data)
        : constant(type, std::forward<TShape>(shape), std::as_bytes(data))
    {
    }

    template <class TShape, class T>
    constant(datatype_t type, TShape &&shape, gsl::span<const T> data)
        : constant(type, std::forward<TShape>(shape), gsl::as_bytes(data))
    {
    }

    template <class TShape, class T>
    constant(datatype_t type, TShape &&shape, std::span<T> data)
        : constant(type, std::forward<TShape>(shape), std::as_bytes(data))
    {
    }

    template <class TShape, class T>
    constant(datatype_t type, TShape &&shape, gsl::span<T> data)
        : constant(type, std::forward<TShape>(shape), gsl::as_bytes(data))
    {
    }

    template <class TShape, class T>
    constant(datatype_t type, TShape &&shape, const std::vector<T> &data)
        : constant(type, std::forward<TShape>(shape), std::as_bytes(std::span<const T>(data)))
    {
    }

    // Core constructor: forwards `data_args` to the byte vector and
    // validates that the payload size matches type x shape.
    template <class TShape, class... TDataArgs>
    constant(datatype_t type, TShape &&shape, TDataArgs... data_args)
        : data_(std::forward<TDataArgs>(data_args)...), datatype_(type)
    {
        if (ir::get_bytes(type, shape) != data_.size())
            throw std::invalid_argument("Shape and data size don't match");
        add_output("output", type, std::forward<TShape>(shape), mem_rdata)
            .attributes(cnctr_attr_no_layout_strides);
    }

    // Scalar constant: shape {1}, element type deduced from TScalar.
    template <class TScalar>
    constant(TScalar scalar)
        : constant(to_datatype<TScalar>(), shape_t { 1 }, std::span<const TScalar>(&scalar, 1))
    {
    }

    // Debug rendering: a single-element constant prints its value,
    // anything larger prints "[...]".
    std::string to_string() const
    {
        auto shape = this->output().shape();
        auto dtype = this->output().type();
        // size_t accumulator: an int product could overflow for large shapes.
        size_t total_size = 1;
        for (auto i : shape)
        {
            total_size *= i;
        }

        if (total_size == 1)
        {
            switch (dtype)
            {
            case dt_int8:
                return std::to_string(*(to_cpp_type_t<dt_int8> *)data_.data());
            case dt_uint8:
                return std::to_string(*(to_cpp_type_t<dt_uint8> *)data_.data());
#define DT_TO_STRING_CASE(dt) \
    case dt:                  \
        return std::to_string(*(to_cpp_type_t<dt> *)data_.data());
                DT_TO_STRING_CASE(dt_uint32);
                DT_TO_STRING_CASE(dt_float32);
                DT_TO_STRING_CASE(dt_bfloat16);
                DT_TO_STRING_CASE(dt_int32);
#undef DT_TO_STRING_CASE
            default:
                // Throw a real exception type instead of a raw std::string so
                // generic catch (std::exception &) handlers can observe it.
                throw std::runtime_error("unsupported dtype to_string: " + std::string(nncase::datatype_names(dtype)));
            }
        }
        else
        {
            return "[...]";
        }
    }

protected:
    // True when `other` is a constant with identical type/shape/payload.
    bool properties_equal(node &other) const override;

private:
    std::vector<std::byte> data_;
    datatype_t datatype_;
    size_t alignment_ = 8;
};
}

View File

@ -1,62 +0,0 @@
/* Copyright 2019-2021 Canaan Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#pragma once
#include "../node.h"
#include <xtensor/xarray.hpp>
namespace nncase::ir
{
// IR node for 2-D convolution. Weights are laid out [out_c, in_c/groups, kh, kw]
// (the filter/channel accessors below index that layout); a fused activation
// clamp is applied to the output.
class NNCASE_API conv2d : public node
{
public:
    DEFINE_NODE_OPCODE(op_conv2d);

    // Inputs: 0 = data, 1 = weights, 2 = bias.
    const input_connector &weights() const { return input_at(1); }
    input_connector &input() { return input_at(0); }
    input_connector &weights() { return input_at(1); }
    input_connector &bias() { return input_at(2); }
    output_connector &output() { return output_at(0); }

    // Filter geometry derived from the weights shape.
    int32_t filter_h() const noexcept { return (int32_t)weights().shape()[2]; }
    int32_t filter_w() const noexcept { return (int32_t)weights().shape()[3]; }
    int32_t input_channels() const noexcept { return (int32_t)weights().shape()[1] * groups(); }
    int32_t output_channels() const noexcept { return (int32_t)weights().shape()[0]; }
    int32_t groups() const noexcept { return groups_; }
    // Depthwise: one group per channel (in == out == groups, groups > 1).
    bool is_depthwise() const noexcept { return input_channels() == output_channels() && output_channels() == groups() && groups() != 1; }

    // Spatial parameters.
    padding padding_h() const noexcept { return padding_h_; }
    padding padding_w() const noexcept { return padding_w_; }
    int32_t stride_h() const noexcept { return stride_h_; }
    int32_t stride_w() const noexcept { return stride_w_; }
    int32_t dilation_h() const noexcept { return dilation_h_; }
    int32_t dilation_w() const noexcept { return dilation_w_; }
    // Output value range of the fused activation clamp.
    value_range<float> fused_activation() const noexcept { return fused_activation_; }

    conv2d(shape_t input_shape, shape_t weights_shape, int32_t groups, padding padding_h, padding padding_w, int32_t stride_h, int32_t stride_w, int32_t dilation_h, int32_t dilation_w, value_range<float> fused_activation);

protected:
    // True when `other` is a conv2d with identical attributes.
    bool properties_equal(node &other) const override;

private:
    int32_t groups_;
    padding padding_h_;
    padding padding_w_;
    int32_t stride_h_;
    int32_t stride_w_;
    int32_t dilation_h_;
    int32_t dilation_w_;
    value_range<float> fused_activation_;
};
}

View File

@ -1,65 +0,0 @@
/* Copyright 2019-2021 Canaan Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#pragma once
#include "../node.h"
#include <xtensor/xtensor.hpp>
namespace nncase::ir
{
// IR node for transposed (fractionally-strided) 2-D convolution. Mirrors
// conv2d's attributes plus explicit output padding; the target output shape
// is supplied to the constructor.
class NNCASE_API conv2d_transpose : public node
{
public:
    DEFINE_NODE_OPCODE(op_conv2d_transpose);

    // Inputs: 0 = data, 1 = weights, 2 = bias.
    const input_connector &weights() const { return input_at(1); }
    input_connector &input() { return input_at(0); }
    input_connector &weights() { return input_at(1); }
    input_connector &bias() { return input_at(2); }
    output_connector &output() { return output_at(0); }

    // Filter geometry derived from the weights shape.
    int32_t filter_h() const noexcept { return (int32_t)weights().shape()[2]; }
    int32_t filter_w() const noexcept { return (int32_t)weights().shape()[3]; }
    int32_t input_channels() const noexcept { return (int32_t)weights().shape()[1] * groups(); }
    int32_t output_channels() const noexcept { return (int32_t)weights().shape()[0]; }
    int32_t groups() const noexcept { return groups_; }

    // Spatial parameters; output_padding resolves the output-size ambiguity
    // inherent to strided transposed convolution.
    padding padding_h() const noexcept { return padding_h_; }
    padding padding_w() const noexcept { return padding_w_; }
    int32_t output_padding_h() const noexcept { return output_padding_h_; }
    int32_t output_padding_w() const noexcept { return output_padding_w_; }
    int32_t stride_h() const noexcept { return stride_h_; }
    int32_t stride_w() const noexcept { return stride_w_; }
    int32_t dilation_h() const noexcept { return dilation_h_; }
    int32_t dilation_w() const noexcept { return dilation_w_; }
    // Output value range of the fused activation clamp.
    value_range<float> fused_activation() const noexcept { return fused_activation_; }

    conv2d_transpose(shape_t input_shape, shape_t weights_shape, shape_t output_shape, int32_t groups, padding padding_h, padding padding_w, int32_t output_padding_h, int32_t output_padding_w, int32_t stride_h, int32_t stride_w, int32_t dilation_h, int32_t dilation_w, value_range<float> fused_activation);

protected:
    // True when `other` is a conv2d_transpose with identical attributes.
    bool properties_equal(node &other) const override;

private:
    int32_t groups_;
    padding padding_h_;
    padding padding_w_;
    int32_t output_padding_h_;
    int32_t output_padding_w_;
    int32_t stride_h_;
    int32_t stride_w_;
    int32_t dilation_h_;
    int32_t dilation_w_;
    value_range<float> fused_activation_;
};
}

View File

@ -1,39 +0,0 @@
/* Copyright 2019-2021 Canaan Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#pragma once
#include "../node.h"
#include <xtensor/xtensor.hpp>
namespace nncase::ir
{
// IR node converting (casting) a tensor's element type, value-preserving
// where representable — unlike bitcast, which reinterprets raw bytes.
class NNCASE_API convert : public node
{
public:
    DEFINE_NODE_OPCODE(op_convert);

    input_connector &input() { return input_at(0); }
    output_connector &output() { return output_at(0); }

    // Element type of the output.
    datatype_t new_type() const noexcept { return new_type_; }

    convert(datatype_t input_type, shape_t input_shape, datatype_t new_type);

protected:
    // True when `other` is a convert to the same element type.
    bool properties_equal(node &other) const override;

private:
    datatype_t new_type_;
};
}

View File

@ -1,35 +0,0 @@
/* Copyright 2019-2020 Canaan Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#pragma once
#include "../node.h"
#include <xtensor/xtensor.hpp>
namespace nncase::ir
{
// IR node copying its input to its output unchanged (identity data
// movement, e.g. between memory locations). No attributes.
class NNCASE_API copy : public node
{
public:
    DEFINE_NODE_OPCODE(op_copy);

    const input_connector &input() const { return input_at(0); }
    input_connector &input() { return input_at(0); }
    output_connector &output() { return output_at(0); }

    copy(datatype_t input_type, shape_t input_shape);

protected:
    bool properties_equal(node &other) const override;
};
}

View File

@ -1,42 +0,0 @@
/* Copyright 2019-2021 Canaan Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#pragma once
#include "../node.h"
namespace nncase::ir
{
// IR node computing a cumulative sum along one axis.
class NNCASE_API cumsum : public node
{
public:
    DEFINE_NODE_OPCODE(op_cumsum);

    input_connector &input() { return input_at(0); }
    output_connector &output() { return output_at(0); }

    // Axis along which the running sum is accumulated.
    int32_t axis() const noexcept { return axis_; }
    // exclusive: the i-th output excludes the i-th input element.
    bool exclusive() const noexcept { return exclusive_; }
    // reverse: accumulate from the end of the axis toward the start.
    bool reverse() const noexcept { return reverse_; }

    cumsum(datatype_t input_type, shape_t input_shape, int32_t axis, bool exclusive = false, bool reverse = false);

protected:
    // True when `other` is a cumsum with identical attributes.
    bool properties_equal(node &other) const override;

private:
    int32_t axis_;
    bool exclusive_;
    bool reverse_;
};
}

View File

@ -1,39 +0,0 @@
/* Copyright 2019-2021 Canaan Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#pragma once
#include "../node.h"
#include <xtensor/xtensor.hpp>
namespace nncase::ir
{
// IR node converting a quantized tensor back to a real-valued type using
// the stored quantization parameters (scale / zero point).
class NNCASE_API dequantize : public node
{
public:
    DEFINE_NODE_OPCODE(op_dequantize);

    input_connector &input() { return input_at(0); }
    output_connector &output() { return output_at(0); }

    // Quantization parameters used for the conversion.
    const quant_param_t quant_param() const noexcept { return quant_param_; }

    dequantize(datatype_t input_type, shape_t input_shape, datatype_t output_type, quant_param_t quant_param);

protected:
    // True when `other` is a dequantize with the same quant parameters.
    bool properties_equal(node &other) const override;

private:
    quant_param_t quant_param_;
};
}

View File

@ -1,37 +0,0 @@
/* Copyright 2019-2021 Canaan Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#pragma once
#include "../node.h"
#include <xtensor/xtensor.hpp>
namespace nncase::ir
{
// IR node for element-wise equality comparison of two tensors.
// Carries no attributes of its own.
class NNCASE_API equal : public node
{
public:
    DEFINE_NODE_OPCODE(op_equal);

    input_connector &input_a() { return input_at(0); }
    input_connector &input_b() { return input_at(1); }
    output_connector &output() { return output_at(0); }

    equal(datatype_t input_type, shape_t input_a_shape, shape_t input_b_shape);

protected:
    bool properties_equal(node &other) const override;

private:
};
}

View File

@ -1,150 +0,0 @@
/* Copyright 2019-2021 Canaan Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#pragma once
#include "../graph.h"
#include "../node.h"
#include <nncase/codegen/nnil_builder.h>
#include <nncase/runtime/nnil.h>
#include <xtensor/xtensor.hpp>
namespace nncase::ir
{
// Micro-op opcodes for the fused_unary sub-graph: a small expression
// program over a single tensor element, later compiled to NNIL.
enum fused_unary_opcode
{
    fu_constant,
    fu_identity,
    fu_ldx,
    fu_unary,
    fu_binary,
    fu_clamp
};

// Reference to a previously-emitted op in the sub-graph (by index).
struct fused_unary_arg
{
    size_t op_id;
};

// Pushes a literal float.
struct fused_unary_constant
{
    float value;
};

// Passes its input through unchanged.
struct fused_unary_identity
{
    fused_unary_arg input;
};

// Loads the element of the tensor being transformed (no operands).
struct fused_unary_ldx
{
};

// Applies a unary op to one operand.
struct fused_unary_unary
{
    unary_op_t unary_op;
    fused_unary_arg input;
};

// Applies a binary op to two operands.
struct fused_unary_binary
{
    binary_op_t binary_op;
    fused_unary_arg input_a;
    fused_unary_arg input_b;
};

// Clamps `input` between `low` and `high`.
struct fused_unary_clamp
{
    fused_unary_arg input;
    fused_unary_arg low;
    fused_unary_arg high;
};

// Tagged union of one fused micro-op: `opcode` selects the active member.
// Construct only through the make_* factories so the tag and payload agree.
struct fused_unary_op
{
    fused_unary_opcode opcode;
    union
    {
        fused_unary_constant constant;
        fused_unary_identity identity;
        fused_unary_ldx ldx;
        fused_unary_unary unary;
        fused_unary_binary binary;
        fused_unary_clamp clamp;
    };

    static fused_unary_op make_ldx() noexcept
    {
        fused_unary_op op { fu_ldx, {} };
        return op;
    }

    static fused_unary_op make_constant(float value) noexcept
    {
        fused_unary_op op { fu_constant, {} };
        op.constant.value = value;
        return op;
    }

    static fused_unary_op make_unary(unary_op_t unary_op, fused_unary_arg input) noexcept
    {
        fused_unary_op op { fu_unary, {} };
        op.unary = { unary_op, input };
        return op;
    }

    static fused_unary_op make_binary(binary_op_t binary_op, fused_unary_arg input_a, fused_unary_arg input_b) noexcept
    {
        fused_unary_op op { fu_binary, {} };
        op.binary = { binary_op, input_a, input_b };
        return op;
    }

    static fused_unary_op make_clamp(fused_unary_arg input, fused_unary_arg low, fused_unary_arg high) noexcept
    {
        fused_unary_op op { fu_clamp, {} };
        op.clamp = { input, low, high };
        return op;
    }

    static fused_unary_op make_identity(fused_unary_arg input) noexcept
    {
        fused_unary_op op { fu_identity, {} };
        op.identity = { input };
        return op;
    }
};

// Concatenates two fused sub-graphs into one program (src2 consumes the
// result of src1; see implementation for the exact re-indexing).
NNCASE_API std::vector<fused_unary_op> concat_subgraph(const std::vector<fused_unary_op> &src1, const std::vector<fused_unary_op> &src2);

// IR node applying a fused_unary_op program element-wise to its input.
class NNCASE_API fused_unary : public node
{
public:
    // Lowers the micro-op program to NNIL byte code via `builder`.
    static void compile_graph(const std::vector<fused_unary_op> &subgraph, codegen::nnil_builder &builder);

    DEFINE_NODE_OPCODE(op_fused_unary);

    input_connector &input() { return input_at(0); }
    output_connector &output() { return output_at(0); }

    // Mutable access to the micro-op program.
    std::vector<fused_unary_op> &subgraph() noexcept { return subgraph_; }

    fused_unary(std::vector<fused_unary_op> subgraph, shape_t in_shape);

protected:
    // True when `other` is a fused_unary with an equivalent sub-graph.
    bool properties_equal(node &other) const override;

private:
    std::vector<fused_unary_op> subgraph_;
};
}

View File

@ -1,40 +0,0 @@
/* Copyright 2019-2021 Canaan Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#pragma once
#include "../node.h"
#include <xtensor/xtensor.hpp>
namespace nncase::ir
{
// IR node gathering slices of the input along `axis` at positions given by
// the indices tensor.
class NNCASE_API gather : public node
{
public:
    DEFINE_NODE_OPCODE(op_gather);

    input_connector &input() { return input_at(0); }
    input_connector &indices() { return input_at(1); }
    output_connector &output() { return output_at(0); }

    // Axis of `input` that the indices select along.
    int32_t axis() const noexcept { return axis_; }

    gather(datatype_t in_type, shape_t input_shape, shape_t indices_shape, shape_t output_shape, int32_t axis);

protected:
    // True when `other` is a gather with the same axis.
    bool properties_equal(node &other) const override;

private:
    int32_t axis_;
};
}

View File

@ -1,40 +0,0 @@
/* Copyright 2019-2021 Canaan Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#pragma once
#include "../node.h"
#include <xtensor/xtensor.hpp>
namespace nncase::ir
{
// IR node for N-dimensional gather: the indices tensor's last dimension
// addresses coordinates into the input; leading `batch_dims` dimensions of
// input and indices are treated as batch dimensions.
class NNCASE_API gather_nd : public node
{
public:
    DEFINE_NODE_OPCODE(op_gather_nd);

    input_connector &input() { return input_at(0); }
    input_connector &indices() { return input_at(1); }
    output_connector &output() { return output_at(0); }

    // Number of leading batch dimensions shared by input and indices.
    int32_t batch_dims() const noexcept { return batch_dims_; }

    gather_nd(datatype_t type, shape_t input_shape, shape_t indices_shape, shape_t output_shape, int32_t batch_dims);

protected:
    // True when `other` is a gather_nd with the same batch_dims.
    bool properties_equal(node &other) const override;

private:
    int32_t batch_dims_;
};
}

View File

@ -1,37 +0,0 @@
/* Copyright 2019-2021 Canaan Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#pragma once
#include "../node.h"
namespace nncase::ir
{
// IR node for hardmax along one axis (one-hot of the argmax position).
class NNCASE_API hardmax : public node
{
public:
    DEFINE_NODE_OPCODE(op_hardmax);

    input_connector &input() { return input_at(0); }
    output_connector &output() { return output_at(0); }

    // Axis the hardmax is computed over.
    int32_t axis() const noexcept { return axis_; }

    hardmax(datatype_t input_type, shape_t input_shape, int32_t axis);

protected:
    // True when `other` is a hardmax with the same axis.
    bool properties_equal(node &other) const override;

private:
    int32_t axis_;
};
}

View File

@ -1,52 +0,0 @@
/* Copyright 2019-2021 Canaan Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#pragma once
#include "../node.h"
#include <xtensor/xtensor.hpp>
namespace nncase::ir
{
// IR node for an LSTM layer. Input connectors carry the sequence input, the
// input-to-cell (w_xc/b_xc) and recurrent (w_rc/b_rc) weights and biases,
// the initial hidden/cell states, and an optional static weight input;
// outputs are the sequence output plus the final hidden and cell states.
class NNCASE_API lstm : public node
{
public:
    DEFINE_NODE_OPCODE(op_lstm);

    input_connector &input() { return input_at(0); }
    input_connector &w_xc() { return input_at(1); }
    input_connector &b_xc() { return input_at(2); }
    input_connector &w_rc() { return input_at(3); }
    input_connector &b_rc() { return input_at(4); }
    input_connector &initial_h() { return input_at(5); }
    input_connector &initial_c() { return input_at(6); }
    // Only meaningful when has_static() is true.
    input_connector &w_static() { return input_at(7); }
    output_connector &output() { return output_at(0); }
    output_connector &output_h() { return output_at(1); }
    output_connector &output_c() { return output_at(2); }

    // Size of the hidden/output dimension.
    int32_t num_output() const noexcept { return num_output_; }
    // Whether the w_static input is present.
    bool has_static() const noexcept { return has_static_; }
    // Source framework tag (importer-specific semantics, e.g. gate order).
    std::string framework() const noexcept { return framework_; }

    lstm(shape_t input_shape, shape_t w_xc_shape, shape_t b_xc_shape, shape_t w_rc_shape, shape_t b_rc_shape, shape_t initial_h_shape, shape_t initial_c_shape, int32_t num_output, bool has_static, std::string framework);

protected:
    // True when `other` is an lstm with identical attributes.
    bool properties_equal(node &other) const override;

private:
    int32_t num_output_;
    bool has_static_;
    std::string framework_;
};
}

View File

@ -1,41 +0,0 @@
/* Copyright 2019-2021 Canaan Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#pragma once
#include "../node.h"
#include <xtensor/xtensor.hpp>
namespace nncase::ir
{
// IR node for matrix multiplication with bias and a fused activation clamp.
class NNCASE_API matmul : public node
{
public:
DEFINE_NODE_OPCODE(op_matmul);
// Left-hand operand.
input_connector &input_a() { return input_at(0); }
// Right-hand operand.
input_connector &input_b() { return input_at(1); }
// Bias added to the product.
input_connector &bias() { return input_at(2); }
// Result tensor.
output_connector &output() { return output_at(0); }
// Output clamp range applied after the multiply-add.
value_range<float> fused_activation() const noexcept { return fused_activation_; }
matmul(shape_t input_a_shape, shape_t input_b_shape, value_range<float> fused_activation);
protected:
// Node-specific property comparison; implemented out of line.
bool properties_equal(node &other) const override;
private:
value_range<float> fused_activation_;
};
}

View File

@ -1,44 +0,0 @@
/* Copyright 2019-2021 Canaan Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#pragma once
#include "../node.h"
#include <xtensor/xtensor.hpp>
namespace nncase::ir
{
// IR node for OneHot: expands integer indices into one-hot vectors along `axis`,
// filling with on/off values supplied as inputs.
class NNCASE_API onehot : public node
{
public:
DEFINE_NODE_OPCODE(op_onehot);
// Index tensor to expand.
input_connector &indices() { return input_at(0); }
// Size of the new one-hot dimension.
input_connector &depth() { return input_at(1); }
// Value written at the index position.
input_connector &on_value() { return input_at(2); }
// Value written everywhere else.
input_connector &off_value() { return input_at(3); }
output_connector &output() { return output_at(0); }
// Axis at which the one-hot dimension is inserted.
int32_t axis() const noexcept { return axis_; }
// Normal vs. framework-specific one-hot behavior.
onehot_mode_t mode() const noexcept { return mode_; }
onehot(datatype_t type, shape_t indices_shape, shape_t output_shape, int32_t axis, onehot_mode_t mode = onehot_mode_t::onehot_normal);
protected:
// Node-specific property comparison; implemented out of line.
bool properties_equal(node &other) const override;
private:
int32_t axis_;
onehot_mode_t mode_;
};
}

View File

@ -1,43 +0,0 @@
/* Copyright 2019-2021 Canaan Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#pragma once
#include "../node.h"
#include <xtensor/xtensor.hpp>
namespace nncase::ir
{
// IR node for tensor padding: one padding entry per dimension, with a
// configurable mode (e.g. constant/reflect) and fill value.
class NNCASE_API pad : public node
{
public:
DEFINE_NODE_OPCODE(op_pad);
input_connector &input() { return input_at(0); }
output_connector &output() { return output_at(0); }
// Per-dimension before/after padding amounts.
const xt::svector<padding> &paddings() const noexcept { return paddings_; }
// Padding strategy.
pad_mode_t pad_mode() const noexcept { return pad_mode_; }
// Fill value used by constant padding.
const scalar &pad_value() const noexcept { return pad_value_; }
pad(datatype_t type, shape_t input_shape, xt::svector<padding> paddings, pad_mode_t pad_mode, scalar pad_value);
protected:
// Node-specific property comparison; implemented out of line.
bool properties_equal(node &other) const override;
private:
xt::svector<padding> paddings_;
pad_mode_t pad_mode_;
scalar pad_value_;
};
}

View File

@ -1,39 +0,0 @@
/* Copyright 2019-2021 Canaan Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#pragma once
#include "../node.h"
#include <xtensor/xtensor.hpp>
namespace nncase::ir
{
// IR node that quantizes a tensor: converts data of `input_type` to
// `output_type` using the stored quantization parameters.
class NNCASE_API quantize : public node
{
public:
    DEFINE_NODE_OPCODE(op_quantize);

    // Tensor to be quantized.
    input_connector &input() { return input_at(0); }
    // Quantized result.
    output_connector &output() { return output_at(0); }
    // Quantization parameters applied to the input.
    // Fixed: previously returned `const quant_param_t` by value — a top-level
    // const on a by-value return is meaningless and forced a copy per call.
    // Returning by const reference is source-compatible for all callers.
    const quant_param_t &quant_param() const noexcept { return quant_param_; }

    // input_type/input_shape describe the incoming tensor; output_type is the
    // quantized element type; quant_param holds the conversion parameters.
    quantize(datatype_t input_type, shape_t input_shape, datatype_t output_type, quant_param_t quant_param);

protected:
    // Node-specific property comparison; implemented out of line.
    bool properties_equal(node &other) const override;

private:
    quant_param_t quant_param_;
};
}

View File

@ -1,40 +0,0 @@
/* Copyright 2019-2021 Canaan Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#pragma once
#include "../node.h"
namespace nncase::ir
{
// IR node producing a tensor of normally-distributed random values.
// Has no inputs; the output shape/type are fixed at construction.
class NNCASE_API random_normal : public node
{
public:
DEFINE_NODE_OPCODE(op_random_normal);
output_connector &output() { return output_at(0); }
// Mean of the distribution.
float mean() const noexcept { return mean_; }
// Standard deviation of the distribution.
float std() const noexcept { return std_; }
// RNG seed; stored as float to match the ONNX RandomNormal attribute type.
float seed() const noexcept { return seed_; }
random_normal(datatype_t output_type, shape_t output_shape, float mean, float std, float seed);
protected:
// Node-specific property comparison; implemented out of line.
bool properties_equal(node &other) const override;
private:
float mean_;
float std_;
float seed_;
};
}

View File

@ -1,40 +0,0 @@
/* Copyright 2019-2021 Canaan Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#pragma once
#include "../node.h"
namespace nncase::ir
{
// IR node producing a tensor of uniformly-distributed random values in [low, high).
// Has no inputs; the output shape/type are fixed at construction.
class NNCASE_API random_uniform : public node
{
public:
DEFINE_NODE_OPCODE(op_random_uniform);
output_connector &output() { return output_at(0); }
// Lower bound of the range.
float low() const noexcept { return low_; }
// Upper bound of the range.
float high() const noexcept { return high_; }
// RNG seed; stored as float to match the ONNX RandomUniform attribute type.
float seed() const noexcept { return seed_; }
random_uniform(datatype_t output_type, shape_t output_shape, float low, float high, float seed);
protected:
// Node-specific property comparison; implemented out of line.
bool properties_equal(node &other) const override;
private:
float low_;
float high_;
float seed_;
};
}

View File

@ -1,45 +0,0 @@
/* Copyright 2019-2021 Canaan Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#pragma once
#include "../node.h"
#include <xtensor/xtensor.hpp>
namespace nncase::ir
{
// IR node for axis-wise reductions (sum/mean/min/max/... per reduce_op_t).
class NNCASE_API reduce : public node
{
public:
DEFINE_NODE_OPCODE(op_reduce);
input_connector &input() { return input_at(0); }
output_connector &output() { return output_at(0); }
// Which reduction to perform.
reduce_op_t reduce_op() const noexcept { return reduce_op_; }
// Axes being reduced.
const axis_t &axis() const noexcept { return axis_; }
// Accumulator seed value for the reduction.
float init_value() const noexcept { return init_value_; }
// Keep reduced axes as size-1 dims instead of dropping them.
bool keep_dims() const noexcept { return keep_dims_; }
reduce(reduce_op_t reduce_op, shape_t input_shape, axis_t axis, float init_value, bool keep_dims);
protected:
// Node-specific property comparison; implemented out of line.
bool properties_equal(node &other) const override;
private:
reduce_op_t reduce_op_;
axis_t axis_;
float init_value_;
bool keep_dims_;
};
}

View File

@ -1,44 +0,0 @@
/* Copyright 2019-2021 Canaan Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#pragma once
#include "../node.h"
namespace nncase::ir
{
// IR node for argmin/argmax-style reductions: returns indices rather than values.
class NNCASE_API reduce_arg : public node
{
public:
DEFINE_NODE_OPCODE(op_reduce_arg);
input_connector &input() { return input_at(0); }
output_connector &output() { return output_at(0); }
// Which arg-reduction to perform (e.g. argmin/argmax).
reduce_arg_op_t reduce_arg_op() const noexcept { return reduce_arg_op_; }
// Single axis being reduced.
int32_t axis() const noexcept { return axis_; }
// Keep the reduced axis as a size-1 dim.
bool keep_dims() const noexcept { return keep_dims_; }
// On ties, report the last matching index instead of the first.
bool select_last_index() const noexcept { return select_last_index_; }
reduce_arg(reduce_arg_op_t op, datatype_t input_type, shape_t input_shape, datatype_t output_type, int32_t axis, bool keep_dims = true, bool select_last_index = false);
protected:
// Node-specific property comparison; implemented out of line.
bool properties_equal(node &other) const override;
private:
reduce_arg_op_t reduce_arg_op_;
int32_t axis_;
bool keep_dims_;
bool select_last_index_;
};
}

View File

@ -1,40 +0,0 @@
/* Copyright 2019-2021 Canaan Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#pragma once
#include "../node.h"
namespace nncase::ir
{
// IR node for product reduction over the given axes.
class NNCASE_API reduce_prod : public node
{
public:
DEFINE_NODE_OPCODE(op_reduce_prod);
input_connector &input() { return input_at(0); }
output_connector &output() { return output_at(0); }
// Axes being multiplied out.
const axis_t &axis() const noexcept { return axis_; }
// Keep reduced axes as size-1 dims instead of dropping them.
bool keep_dims() const noexcept { return keep_dims_; }
reduce_prod(shape_t input_shape, axis_t axis, bool keep_dims);
protected:
// Node-specific property comparison; implemented out of line.
bool properties_equal(node &other) const override;
private:
axis_t axis_;
bool keep_dims_;
};
}

View File

@ -1,67 +0,0 @@
/* Copyright 2019-2021 Canaan Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#pragma once
#include "../node.h"
#include <xtensor/xtensor.hpp>
namespace nncase::ir
{
// IR node for 2-D windowed reductions (pooling): applies reduce_op over an
// h×w window with per-dimension padding, stride and dilation, then a fused
// activation clamp.
class NNCASE_API reduce_window2d : public node
{
public:
DEFINE_NODE_OPCODE(op_reduce_window2d);
input_connector &input() { return input_at(0); }
output_connector &output() { return output_at(0); }
// Reduction applied inside each window (e.g. max/mean).
reduce_op_t reduce_op() const noexcept { return reduce_op_; }
// Accumulator seed value for the reduction.
float init_value() const noexcept { return init_value_; }
// Window size.
int32_t filter_h() const noexcept { return filter_h_; }
int32_t filter_w() const noexcept { return filter_w_; }
// Per-dimension before/after padding.
padding padding_h() const noexcept { return padding_h_; }
padding padding_w() const noexcept { return padding_w_; }
// Window step.
int32_t stride_h() const noexcept { return stride_h_; }
int32_t stride_w() const noexcept { return stride_w_; }
// Spacing between sampled window elements.
int32_t dilation_h() const noexcept { return dilation_h_; }
int32_t dilation_w() const noexcept { return dilation_w_; }
// Output clamp range applied after the reduction.
value_range<float> fused_activation() const noexcept { return fused_activation_; }
// Use ceil instead of floor when computing the output extent.
bool ceil_mode() const noexcept { return ceil_mode_; }
// Include padded elements in the divisor (average pooling semantics).
bool count_include_pad() const noexcept { return count_include_pad_; }
// Extra trailing padding {h, w}; NOTE(review): exact interaction with
// padding_h_/padding_w_ is defined by the kernel implementation — confirm there.
std::vector<int32_t> padding_h_w_after() const noexcept { return padding_h_w_after_; }
// Restrict windows to stay fully inside the (unpadded) input.
bool strict_inside_input() const noexcept { return strict_inside_input_; }
reduce_window2d(reduce_op_t reduce_op, shape_t input_shape, float init_value, int32_t filter_h, int32_t filter_w, padding padding_h, padding padding_w, int32_t stride_h, int32_t stride_w, int32_t dilation_h, int32_t dilation_w, value_range<float> fused_activation, bool ceil_mode = false, bool count_include_pad = false, std::vector<int32_t> padding_h_w_after = { 0, 0 }, bool strict_inside_input = false);
protected:
// Node-specific property comparison; implemented out of line.
bool properties_equal(node &other) const override;
private:
reduce_op_t reduce_op_;
float init_value_;
int32_t filter_h_;
int32_t filter_w_;
padding padding_h_;
padding padding_w_;
int32_t stride_h_;
int32_t stride_w_;
int32_t dilation_h_;
int32_t dilation_w_;
value_range<float> fused_activation_;
bool ceil_mode_;
bool count_include_pad_;
std::vector<int32_t> padding_h_w_after_;
bool strict_inside_input_;
};
}

View File

@ -1,45 +0,0 @@
/* Copyright 2019-2021 Canaan Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#pragma once
#include "../node.h"
#include <xtensor/xtensor.hpp>
namespace nncase::ir
{
// IR node for image resizing to a fixed {height, width} with a selectable
// interpolation mode and coordinate-transform options.
class NNCASE_API resize_image : public node
{
public:
DEFINE_NODE_OPCODE(op_resize_image);
input_connector &input() { return input_at(0); }
output_connector &output() { return output_at(0); }
// Target {height, width}.
const std::array<int32_t, 2> &new_size() const noexcept { return new_size_; }
// Interpolation mode (e.g. bilinear / nearest).
image_resize_mode_t mode() const noexcept { return mode_; }
// TF-style corner alignment when mapping output to input coordinates.
bool align_corners() const noexcept { return align_corners_; }
// Half-pixel-center coordinate transform.
bool half_pixel_centers() const noexcept { return half_pixel_centers_; }
resize_image(datatype_t type, image_resize_mode_t mode, shape_t input_shape, std::array<int32_t, 2> new_size,
bool align_corners = false, bool half_pixel_centers = false);
protected:
// Node-specific property comparison; implemented out of line.
bool properties_equal(node &other) const override;
private:
std::array<int32_t, 2> new_size_;
image_resize_mode_t mode_;
bool align_corners_;
bool half_pixel_centers_;
};
}

View File

@ -1,46 +0,0 @@
/* Copyright 2019-2021 Canaan Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#pragma once
#include "../node.h"
namespace nncase::ir
{
// IR node for RoiAlign: pools features from regions of interest.
class NNCASE_API roi_align : public node
{
public:
DEFINE_NODE_OPCODE(op_roi_align);
// Feature map.
input_connector &input() { return input_at(0); }
// Regions of interest.
input_connector &rois() { return input_at(1); }
// Batch index per ROI.
input_connector &batch_indices() { return input_at(2); }
output_connector &output() { return output_at(0); }
// Pooling mode (e.g. avg/max).
roi_align_mode_t mode() const noexcept { return mode_; }
// Scale from ROI coordinates to feature-map coordinates.
const float &spatial_scale() const noexcept { return spatial_scale_; }
// Samples per output bin.
const int64_t &sampling_ratio() const noexcept { return sampling_ratio_; }
// NOTE(review): output_height/output_width are not stored as members —
// presumably they only determine the output shape at construction; confirm
// in the out-of-line constructor.
roi_align(datatype_t input_type, shape_t input_shape, shape_t rois, shape_t batch_indices, roi_align_mode_t mode,
float spatial_scale, int64_t output_height, int64_t output_width, int64_t sampling_ratio);
protected:
// Node-specific property comparison; implemented out of line.
bool properties_equal(node &other) const override;
private:
roi_align_mode_t mode_;
float spatial_scale_;
int64_t sampling_ratio_;
};
}

View File

@ -1,35 +0,0 @@
/* Copyright 2019-2021 Canaan Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#pragma once
#include "../node.h"
namespace nncase::ir
{
// IR node for element-wise sigmoid; carries no attributes beyond its shape/type.
class NNCASE_API sigmoid : public node
{
public:
DEFINE_NODE_OPCODE(op_sigmoid);
input_connector &input() { return input_at(0); }
output_connector &output() { return output_at(0); }
sigmoid(datatype_t input_type, shape_t input_shape);
protected:
// Node-specific property comparison; implemented out of line.
bool properties_equal(node &other) const override;
private:
};
}

View File

@ -1,52 +0,0 @@
/* Copyright 2019-2021 Canaan Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#pragma once
#include "../node.h"
#include <xtensor/xtensor.hpp>
namespace nncase::ir
{
// IR node for strided slicing, following TensorFlow StridedSlice semantics
// (begin/end/strides plus bit-mask modifiers).
class NNCASE_API slice : public node
{
public:
DEFINE_NODE_OPCODE(op_slice);
input_connector &input() { return input_at(0); }
output_connector &output() { return output_at(0); }
// Per-axis start indices.
const axis_t &begin() const noexcept { return begin_; }
// Per-axis stop indices.
const axis_t &end() const noexcept { return end_; }
// Per-axis step sizes.
const axis_t &strides() const noexcept { return strides_; }
// Bit i set: ignore begin[i] and slice from the start of axis i.
int32_t begin_mask() const noexcept { return begin_mask_; }
// Bit i set: ignore end[i] and slice to the end of axis i.
int32_t end_mask() const noexcept { return end_mask_; }
// Marks an ellipsis position expanding to full-range axes.
int32_t ellipsis_mask() const noexcept { return ellipsis_mask_; }
// Bit i set: insert a new size-1 axis at position i.
int32_t new_axis_mask() const noexcept { return new_axis_mask_; }
// Convenience overload: unit strides, no masks.
slice(datatype_t type, shape_t input_shape, axis_t begin, axis_t end);
slice(datatype_t type, shape_t input_shape, axis_t begin, axis_t end, axis_t strides, int32_t begin_mask, int32_t end_mask, int32_t ellipsis_mask, int32_t new_axis_mask);
protected:
// Node-specific property comparison; implemented out of line.
bool properties_equal(node &other) const override;
private:
axis_t begin_;
axis_t end_;
axis_t strides_;
int32_t begin_mask_;
int32_t end_mask_;
int32_t ellipsis_mask_;
int32_t new_axis_mask_;
};
}

View File

@ -1,47 +0,0 @@
/* Copyright 2019-2021 Canaan Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#pragma once
#include "../node.h"
#include <xtensor/xarray.hpp>
namespace nncase::ir
{
// IR node for SpaceToBatch: pads the spatial dims, then rearranges h/w blocks
// into the batch dimension.
class NNCASE_API space_to_batch : public node
{
public:
DEFINE_NODE_OPCODE(op_space_to_batch);
input_connector &input() { return input_at(0); }
output_connector &output() { return output_at(0); }
// Block sizes; NOTE(review): the constructor parameters are named
// block_shape_h/block_shape_w — same quantities, inconsistent naming.
int32_t block_size_h() const noexcept { return block_size_h_; }
int32_t block_size_w() const noexcept { return block_size_w_; }
// Per-dimension before/after padding.
padding padding_h() const noexcept { return padding_h_; }
padding padding_w() const noexcept { return padding_w_; }
// Fill value for the padded region.
const scalar &pad_value() const noexcept { return pad_value_; }
space_to_batch(datatype_t input_type, shape_t input_shape, int32_t block_shape_h, int32_t block_shape_w, padding padding_h, padding padding_w, scalar pad_value);
protected:
// Node-specific property comparison; implemented out of line.
bool properties_equal(node &other) const override;
private:
int32_t block_size_h_;
int32_t block_size_w_;
padding padding_h_;
padding padding_w_;
scalar pad_value_;
};
}

View File

@ -1,42 +0,0 @@
/* Copyright 2019-2021 Canaan Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#pragma once
#include "../node.h"
namespace nncase::ir
{
// IR node for Split: partitions the input along `axis`, either at explicit
// indices or into sections (numpy-style split semantics).
class NNCASE_API split : public node
{
public:
    DEFINE_NODE_OPCODE(op_split);

    input_connector &input() { return input_at(0); }
    output_connector &output() { return output_at(0); }
    // Split points (when is_indices()) or section sizes otherwise.
    // Fixed: previously returned std::vector<size_t> by value, copying the
    // vector on every call; a const reference is free and source-compatible.
    const std::vector<size_t> &indices_or_sections() const noexcept { return indices_or_sections_; }
    // Axis along which the input is partitioned.
    int32_t axis() const noexcept { return axis_; }
    // True when indices_or_sections() holds indices rather than section sizes.
    bool is_indices() const noexcept { return is_indices_; }

    split(datatype_t type, shape_t inputs_shape, std::vector<shape_t> outputs_shape, std::vector<size_t> indices_or_sections, int32_t axis, bool is_indices);

protected:
    // Node-specific property comparison; implemented out of line.
    bool properties_equal(node &other) const override;

private:
    std::vector<size_t> indices_or_sections_;
    int32_t axis_;
    bool is_indices_;
};
}

View File

@ -1,35 +0,0 @@
/* Copyright 2019-2021 Canaan Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#pragma once
#include "../node.h"
#include <xtensor/xtensor.hpp>
namespace nncase::ir
{
// IR node mapping each input element through a 1-D lookup table (input 1).
class NNCASE_API table_lookup1d : public node
{
public:
DEFINE_NODE_OPCODE(op_table_lookup1d);
// Values used as table indices.
input_connector &input() { return input_at(0); }
// Lookup table of `table_size` entries.
input_connector &table() { return input_at(1); }
output_connector &output() { return output_at(0); }
table_lookup1d(datatype_t type, shape_t input_shape, size_t table_size);
protected:
// No node-specific attributes, so any two table_lookup1d nodes compare equal.
bool properties_equal([[maybe_unused]] node &other) const override { return true; }
};
}

View File

@ -1,42 +0,0 @@
/* Copyright 2019-2021 Canaan Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#pragma once
#include "../node.h"
#include <xtensor/xtensor.hpp>
namespace nncase::ir
{
// IR node for Take: gathers elements from `input` at `indices` along `axis`,
// with a string mode controlling out-of-range index handling.
class NNCASE_API take : public node
{
public:
DEFINE_NODE_OPCODE(op_take);
input_connector &input() { return input_at(0); }
input_connector &indices() { return input_at(1); }
output_connector &output() { return output_at(0); }
// Axis along which elements are gathered.
int32_t axis() const noexcept { return axis_; }
// Out-of-bounds behavior tag (numpy take-style, e.g. "clip"/"wrap" — confirm in kernel).
const std::string &mode() const noexcept { return mode_; }
take(datatype_t type, shape_t input_shape, shape_t indices_shape, shape_t output_shape, int32_t axis, std::string mode);
protected:
// Node-specific property comparison; implemented out of line.
bool properties_equal(node &other) const override;
private:
int32_t axis_;
std::string mode_;
};
}

View File

@ -1,38 +0,0 @@
/* Copyright 2019-2021 Canaan Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#pragma once
#include "../node.h"
#include <xtensor/xtensor.hpp>
namespace nncase::ir
{
// IR node for a three-input select: input_a chooses between input_b and
// input_c element-wise (b and c share one datatype, distinct from a's).
class NNCASE_API ternary : public node
{
public:
DEFINE_NODE_OPCODE(op_ternary);
// Condition operand.
input_connector &input_a() { return input_at(0); }
// Value chosen per-element alternative 1.
input_connector &input_b() { return input_at(1); }
// Value chosen per-element alternative 2.
input_connector &input_c() { return input_at(2); }
output_connector &output() { return output_at(0); }
ternary(datatype_t input_a_type, datatype_t input_bc_type, shape_t input_a_shape, shape_t input_b_shape, shape_t input_c_shape);
protected:
// Node-specific property comparison; implemented out of line.
bool properties_equal(node &other) const override;
private:
};
}

View File

@ -1,49 +0,0 @@
/* Copyright 2019-2021 Canaan Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#pragma once
#include "../node.h"
namespace nncase::ir
{
// IR node for TopK: returns the k largest (or smallest) values along `axis`
// together with their indices.
class NNCASE_API topk : public node
{
public:
DEFINE_NODE_OPCODE(op_topk);
input_connector &input() { return input_at(0); }
// output largest values
output_connector &output_a() { return output_at(0); }
// output indices of largest values
output_connector &output_b() { return output_at(1); }
// Number of elements to select.
const int64_t &k() const noexcept { return k_; }
// Axis along which selection happens.
const int32_t &axis() const noexcept { return axis_; }
// True: pick largest values; false: pick smallest.
bool largest() const noexcept { return largest_; }
// Whether results are returned in sorted order.
bool sorted() const noexcept { return sorted_; }
topk(datatype_t input_type, shape_t input_shape, int64_t k, int32_t axis, bool largest, bool sorted);
protected:
// Node-specific property comparison; implemented out of line.
bool properties_equal(node &other) const override;
private:
int64_t k_;
int32_t axis_;
bool largest_;
bool sorted_;
};
}

View File

@ -1,39 +0,0 @@
/* Copyright 2019-2021 Canaan Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#pragma once
#include "../node.h"
#include <xtensor/xtensor.hpp>
namespace nncase::ir
{
// IR node permuting the input's dimensions according to `perm`.
class NNCASE_API transpose : public node
{
public:
DEFINE_NODE_OPCODE(op_transpose);
input_connector &input() { return input_at(0); }
output_connector &output() { return output_at(0); }
// Dimension permutation: output dim i comes from input dim perm[i].
const axis_t &perm() const noexcept { return perm_; }
transpose(datatype_t type, shape_t input_shape, axis_t perm);
protected:
// Node-specific property comparison; implemented out of line.
bool properties_equal(node &other) const override;
private:
axis_t perm_;
};
}

View File

@ -1,40 +0,0 @@
/* Copyright 2019-2021 Canaan Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#pragma once
#include "../node.h"
namespace nncase::ir
{
// IR node for Trilu: keeps the upper or lower triangular part of the input,
// offset by diagonal k (ONNX Trilu semantics).
class NNCASE_API trilu : public node
{
public:
DEFINE_NODE_OPCODE(op_trilu);
input_connector &input() { return input_at(0); }
output_connector &output() { return output_at(0); }
// True: keep the upper triangle; false: keep the lower triangle.
bool upper() const noexcept { return upper_; }
// Diagonal offset (0 = main diagonal).
const int64_t &k() const noexcept { return k_; }
trilu(datatype_t input_type, shape_t input_shape, bool upper, int64_t k);
protected:
// Node-specific property comparison; implemented out of line.
bool properties_equal(node &other) const override;
private:
bool upper_;
int64_t k_;
};
}

View File

@ -1,39 +0,0 @@
/* Copyright 2019-2021 Canaan Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#pragma once
#include "../node.h"
#include <xtensor/xtensor.hpp>
namespace nncase::ir
{
// IR node for element-wise unary operators selected by unary_op_t.
class NNCASE_API unary : public node
{
public:
DEFINE_NODE_OPCODE(op_unary);
input_connector &input() { return input_at(0); }
output_connector &output() { return output_at(0); }
// Which unary operator this node applies.
unary_op_t unary_op() const noexcept { return unary_op_; }
unary(unary_op_t unary_op, shape_t input_shape);
protected:
// Node-specific property comparison; implemented out of line.
bool properties_equal(node &other) const override;
private:
unary_op_t unary_op_;
};
}

View File

@ -1,90 +0,0 @@
/* Copyright 2019-2021 Canaan Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#pragma once
#include "node.h"
namespace nncase::ir
{
// Graph entry point: produces a tensor of the given type/shape allocated in
// the mem_input location.
class NNCASE_API input_node : public node
{
public:
    DEFINE_NODE_OPCODE(op_input_node);
    output_connector &output() { return output_at(0); }

    template <class TShape>
    input_node(datatype_t type, TShape &&shape)
    {
        // The output connector is tagged cnctr_attr_no_layout_strides.
        add_output("output", type, std::forward<TShape>(shape), mem_input)
            .attributes(cnctr_attr_no_layout_strides);
    }

protected:
    // Input nodes carry no extra properties, so any two compare equal.
    bool properties_equal([[maybe_unused]] node &other) const override { return true; }
};
// Graph exit point: consumes a tensor of the given type/shape.
class NNCASE_API output_node : public node
{
public:
    DEFINE_NODE_OPCODE(op_output_node);
    input_connector &input() { return input_at(0); }

    template <class TShape>
    output_node(datatype_t type, TShape &&shape)
    {
        // Tagged node_attr_skip_constant_folding in addition to the defaults.
        attributes(attributes() | node_attr_skip_constant_folding);
        add_input("input", type, std::forward<TShape>(shape));
    }

protected:
    // Output nodes carry no extra properties, so any two compare equal.
    bool properties_equal([[maybe_unused]] node &other) const override { return true; }
};
// Sink node that consumes a value without producing anything (used to keep a
// producer alive while discarding its result — confirm against usages).
class NNCASE_API ignore_node : public node
{
public:
    DEFINE_NODE_OPCODE(op_ignore_node);
    ~ignore_node() = default;
    input_connector &input() { return input_at(0); }

    template <class TShape>
    ignore_node(datatype_t type, TShape &&shape)
    {
        add_input("input", type, std::forward<TShape>(shape));
    }

protected:
    // Ignore nodes carry no extra properties, so any two compare equal.
    bool properties_equal([[maybe_unused]] node &other) const override { return true; }
};
// Produces an uninitialized buffer of the given type/shape; the memory
// location defaults to mem_data but can be overridden.
class NNCASE_API uninitialized : public node
{
public:
    DEFINE_NODE_OPCODE(op_uninitialized);
    output_connector &output() { return output_at(0); }

    template <class TShape>
    uninitialized(datatype_t type, TShape &&shape, memory_location_t memory_location = mem_data)
    {
        add_output("output", type, std::forward<TShape>(shape), memory_location);
    }

protected:
    // Uninitialized nodes carry no extra properties, so any two compare equal.
    bool properties_equal([[maybe_unused]] node &other) const override { return true; }
};
}

View File

@ -1,155 +0,0 @@
/* Copyright 2019-2021 Canaan Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#pragma once
#include <cassert>
#include <nncase/ir/graph.h>
#include <unordered_map>
#include <unordered_set>
namespace nncase::ir
{
// Phase of the quantizer's calibration pipeline (see quantizer below).
enum class quantize_stage
{
    collect_range,        // recording per-tensor min/max
    collect_distribution, // filling histograms for range search
    finish
};

// Algorithm used to pick the optimal quantization range from a histogram.
enum class calibrate_method
{
    no_clip,
    l2,
    kld_m0,
    kld_m1,
    kld_m2,
    cdf
};
// Collects activation statistics (ranges, histograms, raw buffers) during
// calibration and derives quantization parameters from them.
class NNCASE_API quantizer
{
    // Per-tensor histogram used to search for an optimal quantization range.
    class histogram
    {
    public:
        histogram(value_range<float> range, size_t src_bins, size_t dest_bins, calibrate_method cali_method);
        // Accumulate sample values into the source bins.
        void record(std::span<const float> data);
        void record(std::span<const bfloat16> data);
        void record(std::span<const half> data);
        // Run the calibration algorithm; result is available via optimal_range().
        void finish();
        value_range<float> optimal_range() const noexcept { return optimal_range_; }

    private:
        std::vector<float> src_bins_;
        std::vector<float> dest_bins_;
        value_range<float> range_;
        float src_bin_interval_;
        value_range<float> optimal_range_;
        calibrate_method cali_method_;
    };

public:
    quantizer(calibrate_method cali_method, size_t bins);

    // Min/max over [begin, end), skipping NaN/infinite samples.
    // NOTE(review): an empty or all-non-finite input yields an inverted
    // range {max, lowest} — confirm callers handle that case.
    template <class TIt>
    static value_range<float> get_range(TIt begin, TIt end)
    {
        using value_t = std::decay_t<decltype(*begin)>;
        auto min = std::numeric_limits<value_t>::max();
        auto max = std::numeric_limits<value_t>::lowest();
        while (begin != end)
        {
            auto value = *begin++;
            auto fc = std::fpclassify((float)value);
            // Only finite values (normal/subnormal/zero) participate.
            if (fc == FP_NORMAL || fc == FP_SUBNORMAL || fc == FP_ZERO)
            {
                min = std::min(min, value);
                max = std::max(max, value);
            }
        }
        return { min, max };
    }

    // Normalize a range for quantization: symmetric ranges are centered on
    // zero (width >= 0.02); asymmetric ranges are forced to contain zero and
    // get a minimum width so the derived scale is never zero.
    static value_range<float> fixup_range(value_range<float> range, bool symmetric = false)
    {
        if (symmetric)
        {
            auto r = std::max({ std::abs(range.min), std::abs(range.max), 0.01f });
            return { -r, r };
        }
        else
        {
            if (range.max < 0)
                range.max = 0;
            if (range.min > 0)
                range.min = 0;
            auto r = range.max - range.min;
            if (r == 0)
                r = 0.1f;
            else if (r < 0.01f)
                r = 0.01f;
            range.max = range.min + r;
        }
        return range;
    }

    // How the integer grid is laid out relative to zero.
    enum class quant_mode
    {
        unsigned_mode,
        signed_symmetric_mode,
        signed_asymmetric_mode
    };

    static quant_param_t get_quant_param(value_range<float> range, int32_t bits, quant_mode qm);
    static fixed_mul get_fixed_mul(float value, int32_t max_bits, uint8_t max_shift, bool is_signed);

    // Range bookkeeping per output connector (record vs. set semantics are
    // defined out-of-line).
    void record(ir::output_connector &connector, value_range<float> range);
    void set(ir::output_connector &connector, value_range<float> range);
    bool has_record(ir::output_connector &connector) const;
    // Sample recording overloads for the supported float formats.
    void record(ir::output_connector &connector, std::span<const float> data);
    void record(ir::output_connector &connector, std::span<const bfloat16> data);
    void record(ir::output_connector &connector, std::span<const half> data);
    void record_buffers(ir::output_connector &connector, std::span<const float> data);
    void record_buffers(ir::output_connector &connector, std::span<const bfloat16> data);
    void record_buffers(ir::output_connector &connector, std::span<const half> data);
    void record_quant_buffers(ir::output_connector &connector, std::span<const float> data);
    void record_quant_buffers(ir::output_connector &connector, std::span<const bfloat16> data);
    void record_quant_buffers(ir::output_connector &connector, std::span<const half> data);
    value_range<float> get(ir::output_connector &connector) const;
    // Propagate ranges across the given opcodes (implementation out-of-line).
    void broadcast_output(ir::graph &graph, const std::unordered_set<node_opcode> &ops);
    void broadcast_output(ir::node &node, const value_range<float> &range, const std::unordered_set<node_opcode> &ops);
    // Stage transitions for the histogram pass.
    void begin_collect_distribution();
    void end_collect_distribution(std::function<void(size_t cnt, size_t total)> progress);
    size_t histograms_count() const noexcept { return histograms_.size(); }
    // Clears the per-sample record flags between calibration samples.
    void end_sample() { has_record_.clear(); }
    // Accessors below return copies of the internal maps/vectors.
    std::unordered_map<ir::output_connector *, std::vector<float>> output_buffers() const noexcept { return output_buffers_; }
    std::vector<ir::output_connector *> quant_buffers_insert_order() const noexcept { return quant_buffers_insert_order_; }
    std::unordered_map<ir::output_connector *, value_range<float>> ranges() const noexcept { return quant_ranges_; }
    std::vector<ir::output_connector *> ranges_insert_order() const noexcept { return ranges_insert_order_; }

private:
    calibrate_method cali_method_;
    quantize_stage stage_ = quantize_stage::collect_range;
    const size_t bins_;
    std::unordered_map<ir::output_connector *, value_range<float>> quant_ranges_;
    std::unordered_map<ir::output_connector *, histogram> histograms_;
    std::unordered_map<ir::output_connector *, bool> has_record_;
    std::unordered_map<ir::output_connector *, std::vector<float>> output_buffers_;
    std::vector<ir::output_connector *> quant_buffers_insert_order_;
    std::vector<ir::output_connector *> ranges_insert_order_;
};
}

View File

@ -1,51 +0,0 @@
/* Copyright 2019-2021 Canaan Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#pragma once
#include "ir_types.h"
namespace nncase::ir
{
// Element-wise conversion of an xtensor dynamic shape into the
// small_vector representation used by the runtime.
template <class T = size_t, class U>
itlib::small_vector<T, 4> to(const xt::dynamic_shape<U> &in_shape, [[maybe_unused]] T default_val = 1)
{
    itlib::small_vector<T, 4> result(in_shape.size());
    size_t idx = 0;
    for (auto &&dim : in_shape)
        result[idx++] = (T)dim;
    return result;
}
// Copies an xtensor padding vector into the runtime small_vector form.
inline itlib::small_vector<padding, 4> to(const xt::svector<padding> &paddings)
{
    itlib::small_vector<padding, 4> converted(paddings.size());
    size_t idx = 0;
    for (const auto &pad : paddings)
        converted[idx++] = pad;
    return converted;
}
// Converts a compile-time shape/permutation pair to runtime form,
// normalizing negative permutation axes to their positive equivalents.
inline void extend_transpose_shape(const shape_t &in_shape, const axis_t &perm, runtime_shape_t &r_in_shape, runtime_shape_t &r_perm)
{
    r_in_shape = to(in_shape);
    const auto rank = perm.size();
    r_perm.resize(rank);
    for (size_t i = 0; i < rank; i++)
    {
        const auto axis = perm[i];
        // Negative axes count from the end of the permutation.
        r_perm[i] = axis < 0 ? (size_t)((int32_t)rank + axis) : (size_t)axis;
    }
}
}

View File

@ -1,159 +0,0 @@
/* Copyright 2019-2021 Canaan Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#pragma once
#include "graph.h"
#include <span>
#include <unordered_set>
namespace nncase::ir
{
// Base class for IR traversals. Tracks visited nodes so each node is
// processed at most once; subclasses supply the traversal order via
// visit_strategy.
class NNCASE_API ir_visitor
{
public:
    void visit(graph &graph);
    void visit(std::span<output_node *> outputs);

    // True when `node` was already marked in this traversal.
    bool visited(node &node) const noexcept;

protected:
    void mark_visit(node &node);

    // Traversal order hook implemented by the concrete visitors below.
    virtual bool visit_strategy(node &node) = 0;
    // Per-node callback (default implementation is out-of-line).
    virtual bool visit(node &node);

private:
    std::unordered_set<node *> visited_;
};
// Depth-first traversal, pre-order variant.
class NNCASE_API dfs_ir_pre_order_visitor : public ir_visitor
{
protected:
    bool visit_strategy(node &node) final override;
};

// Depth-first traversal, post-order variant.
class NNCASE_API dfs_ir_post_order_visitor : public ir_visitor
{
protected:
    bool visit_strategy(node &node) final override;
};

// Breadth-first traversal, pre-order variant.
class NNCASE_API bfs_ir_pre_order_visitor : public ir_visitor
{
protected:
    bool visit_strategy(node &node) final override;
};
// Adapter that forwards every visited node to a user-supplied callable.
// The callable may return void (traversal always continues) or bool
// (the value is forwarded to the traversal, per ir_visitor::visit).
// Fixes: removed a duplicated `private:` access specifier and marked the
// overriding visit() with `override` explicitly.
template <class TBaseVisitor, class TVisitor>
class relay_ir_visitor : public TBaseVisitor
{
public:
    using TBaseVisitor::visit;

    relay_ir_visitor(TVisitor &&visitor)
        : visitor_(std::forward<TVisitor>(visitor))
    {
    }

protected:
    bool visit(node &node) override
    {
        // Dispatch on the callable's return type at compile time.
        constexpr auto is_void = std::is_void_v<decltype(visitor_(node))>;
        if constexpr (is_void)
        {
            visitor_(node);
            return false;
        }
        else
        {
            return visitor_(node);
        }
    }

private:
    TVisitor visitor_;
};
// Convenience factory: wraps a callable in a relay_ir_visitor, defaulting
// to post-order DFS traversal.
template <class TBaseVisitor = dfs_ir_post_order_visitor, class TVisitor>
auto make_relay_ir_visitor(TVisitor &&visitor)
{
    return relay_ir_visitor<TBaseVisitor, TVisitor>(std::forward<TVisitor>(visitor));
}
// Returns the first consumer of any of `node`'s outputs whose runtime
// opcode matches TNode, or nullptr when no such consumer exists.
template <class TNode, class = std::enable_if_t<std::is_base_of_v<node, TNode>>>
TNode *try_get_direct_child(node &node)
{
    for (auto output : node.outputs())
    {
        for (auto sink : output->connections())
        {
            auto &consumer = sink->owner();
            if (consumer.runtime_opcode() == TNode::opcode())
                return static_cast<TNode *>(&consumer);
        }
    }
    return nullptr;
}
// Returns the producer feeding the first input whose owner's opcode
// matches TNode, or nullptr when none matches.
template <class TNode, class = std::enable_if_t<std::is_base_of_v<node, TNode>>>
TNode *try_get_direct_parent(node &node)
{
    for (auto input : node.inputs())
    {
        auto *producer = input->connection();
        if (producer && producer->owner().runtime_opcode() == TNode::opcode())
            return static_cast<TNode *>(&producer->owner());
    }
    return nullptr;
}
// Like the unindexed overload but inspects only input `index`;
// an out-of-range index simply yields nullptr.
template <class TNode, class = std::enable_if_t<std::is_base_of_v<node, TNode>>>
TNode *try_get_direct_parent(node &node, size_t index)
{
    if (index >= node.inputs().size())
        return nullptr;
    auto *producer = node.input_at(index).connection();
    if (producer && producer->owner().runtime_opcode() == TNode::opcode())
        return static_cast<TNode *>(&producer->owner());
    return nullptr;
}
// Opcode-checked downcast: nullptr when the node is not a TNode.
template <class TNode, class = std::enable_if_t<std::is_base_of_v<node, TNode>>>
TNode *node_cast(node &node)
{
    return node.runtime_opcode() == TNode::opcode()
        ? static_cast<TNode *>(&node)
        : nullptr;
}
// Finds which of n's inputs is fed by `input`.
// Throws std::out_of_range when no input is connected to it.
inline size_t get_input_index(node &n, output_connector &input)
{
    const auto count = n.inputs().size();
    for (size_t idx = 0; idx < count; idx++)
    {
        if (n.input_at(idx).connection() == &input)
            return idx;
    }
    throw std::out_of_range("Input connection not found");
}
}

View File

@ -1,146 +0,0 @@
/* Copyright 2019-2021 Canaan Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#pragma once
#include <nncase/kernels/kernel_context.h>
#include <nncase/runtime/datatypes.h>
#include <nncase/runtime/error.h>
#include <nncase/runtime/result.h>
BEGIN_NS_NNCASE_KERNELS
// Reference tensor kernels. Conventions visible from the signatures:
//  - untyped kernels take raw gsl::byte buffers plus a datatype_t selector;
//  - shapes/strides are passed explicitly as runtime_shape_t;
//  - failures are reported through result<void>, not exceptions;
//  - most kernels accept an optional kernel_context.

// ---- Data movement / layout ----
NNCASE_API result<void> batch_to_space(datatype_t type, const gsl::byte *input, gsl::byte *output, const runtime_shape_t &in_shape,
const runtime_shape_t &block_shape, const runtime_paddings_t &crops, const runtime_shape_t &in_strides, const runtime_shape_t &out_strides,
kernel_context &context = default_kernel_context()) noexcept;
NNCASE_API result<void> broadcast(datatype_t type, const gsl::byte *input, gsl::byte *output, const runtime_shape_t &in_shape,
const runtime_shape_t &in_strides, const runtime_shape_t &out_shape, const runtime_shape_t &out_strides, kernel_context &context = default_kernel_context()) noexcept;
NNCASE_API result<void> concat(datatype_t type, gsl::span<const gsl::byte *const> inputs, gsl::byte *output, const runtime_shape_t &out_shape,
gsl::span<const runtime_shape_t> in_strides, const runtime_shape_t &out_strides, size_t axis, const runtime_shape_t &concat_dims,
kernel_context &context = default_kernel_context()) noexcept;
NNCASE_API result<void> convert(datatype_t in_type, datatype_t out_type, const gsl::byte *input, gsl::byte *output,
const runtime_shape_t &in_shape, const runtime_shape_t &in_strides, const runtime_shape_t &out_strides, kernel_context &context = default_kernel_context()) noexcept;
NNCASE_API result<void> copy(datatype_t type, const gsl::byte *src, gsl::byte *dest,
const runtime_shape_t &shape, const runtime_shape_t &src_strides, const runtime_shape_t &dest_strides, kernel_context &context = default_kernel_context()) noexcept;
NNCASE_API result<void> transpose(datatype_t type, const gsl::byte *input, gsl::byte *output, const runtime_shape_t &in_shape,
const runtime_shape_t &perm, const runtime_shape_t &in_strides, const runtime_shape_t &out_strides, kernel_context &context = default_kernel_context()) noexcept;

// ---- Element-wise arithmetic ----
NNCASE_API result<void> binary(binary_op_t op, const float *input_a, const float *input_b, float *output,
const runtime_shape_t &in_a_shape, const runtime_shape_t &in_a_strides, const runtime_shape_t &in_b_shape,
const runtime_shape_t &in_b_strides, const runtime_shape_t &out_strides, value_range<float> fused_activation, kernel_context &context = default_kernel_context()) noexcept;
NNCASE_API result<void> dequantize(datatype_t in_type, datatype_t out_type, const gsl::byte *input, gsl::byte *output,
const runtime_shape_t &in_shape, const runtime_shape_t &in_strides, const runtime_shape_t &out_strides, float scale, float bias,
kernel_context &context = default_kernel_context()) noexcept;
template <typename T>
NNCASE_API result<void> equal(const T *input_a, const T *input_b, bool *output,
const runtime_shape_t &in_a_shape, const runtime_shape_t &in_a_strides, const runtime_shape_t &in_b_shape,
const runtime_shape_t &in_b_strides, const runtime_shape_t &out_strides) noexcept;
// Table lookup keyed by the (min, max) quantized input range.
NNCASE_API result<void> lut1d(datatype_t type, const gsl::byte *input, const gsl::byte *table, gsl::byte *output, const runtime_shape_t &shape,
const runtime_shape_t &in_strides, const runtime_shape_t &out_strides, const scalar &min, const scalar &max) noexcept;
template <typename T>
NNCASE_API result<void> matmul(const T *input_a, const T *input_b, const T *bias, T *output,
const runtime_shape_t &in_a_shape, const runtime_shape_t &in_b_shape, value_range<float> fused_activation) noexcept;
NNCASE_API result<void> onehot(datatype_t type, const int32_t *indices, gsl::byte *output, const runtime_shape_t &indices_shape, const runtime_shape_t &out_shape,
const runtime_shape_t &out_strides, gsl::byte *depth, gsl::byte *off_value, gsl::byte *on_value, size_t axis, onehot_mode_t mode,
kernel_context &context = default_kernel_context()) noexcept;
NNCASE_API result<void> pad(datatype_t type, const gsl::byte *input, gsl::byte *output, const runtime_shape_t &in_shape,
const runtime_shape_t &in_strides, const runtime_shape_t &out_strides, const runtime_paddings_t &paddings, pad_mode_t mode, const scalar &pad_value,
kernel_context &context = default_kernel_context()) noexcept;
NNCASE_API result<void> quantize(datatype_t in_type, datatype_t out_type, const gsl::byte *input, gsl::byte *output,
const runtime_shape_t &in_shape, const runtime_shape_t &in_strides, const runtime_shape_t &out_strides, float scale, float bias,
kernel_context &context = default_kernel_context()) noexcept;
NNCASE_API result<void> unary(unary_op_t op, const float *input, float *output, const runtime_shape_t &shape,
const runtime_shape_t &in_strides, const runtime_shape_t &out_strides, kernel_context &context = default_kernel_context()) noexcept;

// ---- Reductions ----
NNCASE_API result<void> reduce(reduce_op_t op, float init_value, const float *input, float *output, const runtime_shape_t &in_shape, const runtime_shape_t &axis,
const runtime_shape_t &in_strides, const runtime_shape_t &out_strides, bool keep_dims, kernel_context &context = default_kernel_context()) noexcept;
template <typename T>
NNCASE_API result<void> reduce_arg(reduce_arg_op_t op, const float *input, T *output, const runtime_shape_t &in_shape,
const runtime_shape_t &in_strides, const runtime_shape_t &out_strides,
const runtime_shape_t &axis, bool keep_dims, bool select_last_index, kernel_context &context = default_kernel_context()) noexcept;
template <typename T>
result<void> reduce_prod(const T *input, T *output, const runtime_shape_t &in_shape,
const runtime_shape_t &in_strides, const runtime_shape_t &out_strides,
const runtime_shape_t &axes, bool keep_dims) noexcept;

// ---- Resize / indexing ----
NNCASE_API result<void> resize_bilinear(datatype_t type, const gsl::byte *input, gsl::byte *output, const runtime_shape_t &in_shape,
const runtime_shape_t &in_strides, const runtime_shape_t &out_strides, int32_t out_h, int32_t out_w, bool align_corners, bool half_pixel_centers,
kernel_context &context = default_kernel_context()) noexcept;
NNCASE_API result<void> resize_nearest_neighbor(datatype_t type, const gsl::byte *input, gsl::byte *output, const runtime_shape_t &in_shape,
const runtime_shape_t &in_strides, const runtime_shape_t &out_strides, int32_t out_h, int32_t out_w, bool align_corners, bool half_pixel_centers,
kernel_context &context = default_kernel_context()) noexcept;
NNCASE_API result<void> slice(datatype_t type, const gsl::byte *input, gsl::byte *output, const runtime_shape_t &in_shape,
const runtime_shape_t &in_strides, const runtime_shape_t &out_strides, const runtime_shape_t &begins, const runtime_axis_t &ends, const runtime_axis_t &strides,
kernel_context &context = default_kernel_context()) noexcept;
NNCASE_API result<void> gather(datatype_t in_type, const gsl::byte *input, gsl::byte *output, const runtime_shape_t &in_shape, const runtime_shape_t &out_shape,
const runtime_shape_t &in_strides, const runtime_shape_t &out_strides, const int32_t *indices, const runtime_shape_t &indices_shape, int32_t axis, kernel_context &context = default_kernel_context()) noexcept;
NNCASE_API result<void> gather_nd(datatype_t type, const gsl::byte *input, gsl::byte *output, const runtime_shape_t &in_shape, const runtime_shape_t &out_shape,
const runtime_shape_t &in_strides, const runtime_shape_t &out_strides, const int32_t *indices, const runtime_shape_t &indices_shape, int32_t batch_dims, kernel_context &context = default_kernel_context()) noexcept;

// ---- Misc typed kernels ----
template <typename T>
NNCASE_API result<void> cumsum(const T *input, T *output, const runtime_shape_t &in_shape,
int32_t axis, bool exclusive, bool reverse) noexcept;
template <typename T>
NNCASE_API result<void> hardmax(const T *input, const runtime_shape_t &in_shape, const runtime_shape_t &in_strides,
T *output, int32_t axis) noexcept;
template <typename T>
NNCASE_API result<void> random_normal(T *output, const runtime_shape_t &out_shape, float mean, float std, float seed) noexcept;
template <typename T>
NNCASE_API result<void> random_uniform(T *output, const runtime_shape_t &out_shape, float low, float high, float seed) noexcept;
template <typename T>
NNCASE_API result<void> roi_align(const T *input, const T *rois, int64_t *batch_indices, T *output, const runtime_shape_t &in_shape,
const runtime_shape_t &out_shape, roi_align_mode_t mode, float spatial_scale, int64_t sampling_ratio) noexcept;
template <typename T>
NNCASE_API result<void> sigmoid(const T *input, T *output, const runtime_shape_t &in_shape, const runtime_shape_t &in_strides) noexcept;
// Element-wise select: input_a chooses between input_b and input_c.
template <typename T>
NNCASE_API result<void> ternary(const float *input_a, const T *input_b, const T *input_c, T *output,
const runtime_shape_t &in_a_shape, const runtime_shape_t &in_a_strides, const runtime_shape_t &in_b_shape,
const runtime_shape_t &in_b_strides, const runtime_shape_t &in_c_shape, const runtime_shape_t &in_c_strides,
const runtime_shape_t &out_strides) noexcept;
template <typename T>
NNCASE_API result<void> topk(const T *input, T *output_values, int64_t *output_indices,
const runtime_shape_t &in_shape, const runtime_shape_t &in_strides,
const runtime_shape_t &output_values_shape, const runtime_shape_t &output_values_strides,
const runtime_shape_t &output_indices_shape, const runtime_shape_t &output_indices_strides,
const int64_t k, const int32_t axis, const bool largest, const bool sorted) noexcept;
template <typename T>
NNCASE_API result<void> trilu(const T *input, T *output, const runtime_shape_t &in_shape, const bool upper, const int64_t k) noexcept;
END_NS_NNCASE_KERNELS

View File

@ -1,27 +0,0 @@
/* Copyright 2019-2021 Canaan Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#pragma once
#include <memory>
#include <nncase/targets/target.h>
#include <string_view>
namespace nncase::plugin_loader
{
// Factory signature a target plugin exports.
typedef target *(*target_activator_t)();
// Symbol name the loader resolves in each plugin.
#define TARGET_ACTIVATOR_NAME create_target
// Instantiate the target registered under `name` (presumably resolved from
// a plugin shared library — see the implementation).
NNCASE_API std::unique_ptr<target> create_target(std::string_view name);
}

View File

@ -1,12 +0,0 @@
// X-macro table of the primitive datatypes:
//   DEFINE_DATATYPE(id, cpp-type, short-name, value)
// `value` becomes the dt_<id> enumerator; these values appear to be stable
// identifiers (used in serialized metadata) — do not renumber casually.
DEFINE_DATATYPE(int8, int8_t, i8, 0x00)
DEFINE_DATATYPE(int16, int16_t, i16, 0x01)
DEFINE_DATATYPE(int32, int32_t, i32, 0x02)
DEFINE_DATATYPE(int64, int64_t, i64, 0x03)
DEFINE_DATATYPE(uint8, uint8_t, u8, 0x04)
DEFINE_DATATYPE(uint16, uint16_t, u16, 0x05)
DEFINE_DATATYPE(uint32, uint32_t, u32, 0x06)
DEFINE_DATATYPE(uint64, uint64_t, u64, 0x07)
DEFINE_DATATYPE(float16, half, f16, 0x08)
DEFINE_DATATYPE(float32, float, f32, 0x09)
DEFINE_DATATYPE(float64, double, f64, 0x0A)
DEFINE_DATATYPE(bfloat16, bfloat16, bf16, 0x0B)

View File

@ -1,524 +0,0 @@
/* Copyright 2019-2021 Canaan Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#pragma once
#include "bfloat16.h"
#include "compiler_defs.h"
#include "half.h"
#include "small_vector.hpp"
#include <array>
#include <cmath>
#include <cstddef>
#include <cstdint>
#include <cstring>
#include <limits>
#include <numeric>
#include <stdexcept>
#include <string>
#include <type_traits>
#include <vector>
namespace nncase
{
// Runtime datatype tag; enumerators and values come from datatypes.def.
typedef enum _datatype : uint8_t
{
#define DEFINE_DATATYPE(id, t, name, value) dt_##id = value,
#include "datatypes.def"
#undef DEFINE_DATATYPE
} datatype_t;

// Parses a datatype id string (e.g. "int8") into its datatype_t tag.
// Throws std::runtime_error for unknown names.
inline datatype_t parse_datatype_str(const std::string &name)
{
#define DEFINE_DATATYPE(id, t, n, value) \
    if (name == #id)                     \
        return datatype_t::dt_##id;
#include "datatypes.def"
#undef DEFINE_DATATYPE
    throw std::runtime_error("Unsupported data type:" + std::string(name));
}
namespace detail
{
// Maps a datatype_t tag to its C++ type (specializations generated below).
template <datatype_t Type>
struct datatype_to_cpp_type
{
};

// Maps a C++ type to its datatype_t tag (specializations generated below).
template <class T>
struct cpp_type_to_datatype
{
};

#if NNCASE_HAVE_STD_BYTE
// std::byte is treated as dt_uint8.
template <>
struct cpp_type_to_datatype<std::byte>
{
    static constexpr datatype_t type = dt_uint8;
};
#endif

// Generate both mappings for every entry in datatypes.def.
#define DEFINE_DATATYPE(id, t, name, value) \
    template <>                             \
    struct datatype_to_cpp_type<dt_##id>    \
    {                                       \
        using type = t;                     \
    };                                      \
    template <>                             \
    struct cpp_type_to_datatype<t>          \
    {                                       \
        static constexpr datatype_t type = dt_##id; \
    };
#include "datatypes.def"
#undef DEFINE_DATATYPE

// Size in bytes of one element of `type`.
// NOTE(review): returns -1 (wraps to SIZE_MAX since the result is size_t)
// for unknown tags — callers must not treat that as a valid size.
inline constexpr size_t datatype_bytes(datatype_t type)
{
    switch (type)
    {
#define DEFINE_DATATYPE(id, t, name, value) \
    case (dt_##id):                         \
        return sizeof(t);
#include "datatypes.def"
#undef DEFINE_DATATYPE
    default:
        return -1;
    }
}
}
// Compile-time lookup of the datatype_t tag for a C++ type.
template <class T>
constexpr datatype_t to_datatype() noexcept
{
    return detail::cpp_type_to_datatype<T>::type;
}

// Compile-time lookup of the C++ type for a datatype_t tag.
template <datatype_t Type>
using to_cpp_type_t = typename detail::datatype_to_cpp_type<Type>::type;
// Per-dimension padding amounts, in elements.
struct padding
{
    int32_t before;
    int32_t after;
    // Padding inserted between elements; defaults to none.
    int32_t interior = 0;

    // Total edge padding. NOTE: does not include `interior`.
    int32_t sum() const noexcept { return before + after; }
    static padding zero() noexcept { return {}; }
};
// Closed interval [min, max] of values of type T.
template <class T>
struct value_range
{
    T min;
    T max;

    // Widest representable range: +/-infinity for the float-like types,
    // [lowest, max] otherwise.
    static constexpr value_range<T> full() noexcept
    {
        if (std::is_floating_point<T>::value || std::is_same<T, bfloat16>::value || std::is_same<T, half>::value)
            return { -std::numeric_limits<T>::infinity(), std::numeric_limits<T>::infinity() };
        else
            return { std::numeric_limits<T>::lowest(), std::numeric_limits<T>::max() };
    }

    // [0, max] of T.
    static constexpr value_range<T> nonnegative() noexcept
    {
        return { 0, std::numeric_limits<T>::max() };
    }

    constexpr T length() const noexcept { return max - min; }
};
// Reduction kind for the `reduce` kernel/node.
typedef enum _reduce_op
{
    reduce_mean,
    reduce_min,
    reduce_max,
    reduce_sum
} reduce_op_t;
// Human-readable name of a reduce_op_t; "unknown" for out-of-range values.
inline std::string reduce_op_to_string(reduce_op_t op)
{
    if (op == reduce_mean)
        return "reduce_mean";
    if (op == reduce_min)
        return "reduce_min";
    if (op == reduce_max)
        return "reduce_max";
    if (op == reduce_sum)
        return "reduce_sum";
    return "unknown";
}
// Arg-reduction kind (argmin/argmax) for the `reduce_arg` kernel/node.
typedef enum _reduce_arg_op
{
    reduce_arg_min,
    reduce_arg_max,
} reduce_arg_op_t;
// Human-readable name of a reduce_arg_op_t;
// "unknown reduce arg op" for out-of-range values.
inline std::string reduce_arg_op_to_string(reduce_arg_op_t op)
{
    if (op == reduce_arg_min)
        return "reduce_arg_min";
    if (op == reduce_arg_max)
        return "reduce_arg_max";
    return "unknown reduce arg op";
}
// Element-wise binary operation kind for the `binary` kernel/node.
typedef enum _binary_op
{
    binary_add,
    binary_sub,
    binary_mul,
    binary_div,
    binary_min,
    binary_max,
    binary_pow,
    binary_floor_div,
    binary_floor_mod,
    binary_bitwise_and,
    binary_bitwise_or,
    binary_bitwise_xor,
    binary_logical_and,
    binary_logical_or,
    binary_logical_xor
} binary_op_t;
// Human-readable name of a binary_op_t; "unknown" for out-of-range values.
inline std::string binary_op_to_string(binary_op_t op)
{
    // Table indexed by the sequential, zero-based enumerator value.
    static const char *const names[] = {
        "binary_add", "binary_sub", "binary_mul", "binary_div",
        "binary_min", "binary_max", "binary_pow", "binary_floor_div",
        "binary_floor_mod", "binary_bitwise_and", "binary_bitwise_or",
        "binary_bitwise_xor", "binary_logical_and", "binary_logical_or",
        "binary_logical_xor"
    };
    if (op >= binary_add && op <= binary_logical_xor)
        return names[op];
    return "unknown";
}
// Element-wise unary operation kind for the `unary` kernel/node.
typedef enum _unary_op
{
    unary_abs,
    unary_acos,
    unary_asin,
    unary_ceil,
    unary_cos,
    unary_exp,
    unary_floor,
    unary_log,
    unary_neg,
    unary_round,
    unary_rsqrt,
    unary_sign,
    unary_sin,
    unary_sqrt,
    unary_square,
    unary_tanh,
    unary_bitwise_not,
    unary_logical_not
} unary_op_t;
// Human-readable name of a unary_op_t; "unknown" for out-of-range values.
inline std::string unary_op_to_string(unary_op_t op)
{
    // Table indexed by the sequential, zero-based enumerator value.
    static const char *const names[] = {
        "unary_abs", "unary_acos", "unary_asin", "unary_ceil",
        "unary_cos", "unary_exp", "unary_floor", "unary_log",
        "unary_neg", "unary_round", "unary_rsqrt", "unary_sign",
        "unary_sin", "unary_sqrt", "unary_square", "unary_tanh",
        "unary_bitwise_not", "unary_logical_not"
    };
    if (op >= unary_abs && op <= unary_logical_not)
        return names[op];
    return "unknown";
}
// Interpolation used by the resize kernels.
typedef enum _image_resize_mode
{
    image_resize_bilinear,
    image_resize_nearest_neighbor
} image_resize_mode_t;

// Handling of negative indices in the onehot kernel.
typedef enum _onehot_mode
{
    onehot_normal,
    onehot_process_neg
} onehot_mode_t;

// Fill behavior for the pad kernel.
typedef enum _pad_mode
{
    pad_constant,
    pad_reflect,
    pad_symmetric,
    pad_edge
} pad_mode_t;

// Pooling mode of the roi_align kernel.
typedef enum _roi_align_mode
{
    roi_align_avg,
    roi_align_max
} roi_align_mode_t;
// Affine quantization parameters: real = (quantized - zero_point) * scale.
typedef struct _quant_param
{
    int32_t zero_point;
    float scale;

    // Real-valued range representable by integer type T under these params.
    template <class T>
    constexpr value_range<float> range() const noexcept
    {
        return {
            (std::numeric_limits<T>::lowest() - zero_point) * scale, (std::numeric_limits<T>::max() - zero_point) * scale
        };
    }
} quant_param_t;

// Exact equality of quantization parameters.
inline bool operator==(const quant_param_t &lhs, const quant_param_t &rhs) noexcept
{
    return lhs.zero_point == rhs.zero_point && lhs.scale == rhs.scale;
}

// Equality with tolerance on the scale.
// NOTE(review): uses an absolute epsilon, which is only meaningful for
// scales near 1.0 — confirm this matches callers' expectations.
inline bool almost_equal(const quant_param_t &lhs, const quant_param_t &rhs) noexcept
{
    return lhs.zero_point == rhs.zero_point
        && fabs(lhs.scale - rhs.scale) <= std::numeric_limits<float>::epsilon();
}
// Fixed-point multiplier: a float multiplier plus a shift amount
// (produced by quantizer::get_fixed_mul).
struct fixed_mul
{
    float mul;
    int8_t shift;

    // `mul` rounded to the nearest integer.
    int32_t rounded_mul() const noexcept { return (int32_t)lrintf(mul); }
};
// Logical memory pools addressed by the allocator/runtime.
using memory_location_t = uint8_t;
NNCASE_INLINE_VAR constexpr memory_location_t mem_input = 0;
NNCASE_INLINE_VAR constexpr memory_location_t mem_output = 1;
NNCASE_INLINE_VAR constexpr memory_location_t mem_rdata = 2;
NNCASE_INLINE_VAR constexpr memory_location_t mem_data = 3;
NNCASE_INLINE_VAR constexpr memory_location_t mem_shared_data = 4;
// Values >= mem_private_base are reserved for target-private locations.
NNCASE_INLINE_VAR constexpr memory_location_t mem_private_base = 64;

// Small-vector aliases used throughout the runtime kernel interfaces.
using runtime_shape_t = itlib::small_vector<size_t, 4>;
using runtime_axis_t = itlib::small_vector<int32_t, 4>;
using runtime_paddings_t = itlib::small_vector<padding, 4>;
// Tagged scalar value: a datatype_t tag plus 8 bytes of aligned storage
// reinterpreted via as<T>(). Constructors set the tag to match the argument.
struct scalar
{
    datatype_t type;
    std::aligned_storage_t<8> storage;

    scalar() = default;

    scalar(int8_t value) noexcept
    {
        type = dt_int8;
        as<int8_t>() = value;
    }

    scalar(int16_t value) noexcept
    {
        type = dt_int16;
        as<int16_t>() = value;
    }

    scalar(int32_t value) noexcept
    {
        type = dt_int32;
        as<int32_t>() = value;
    }

    scalar(uint8_t value) noexcept
    {
        type = dt_uint8;
        as<uint8_t>() = value;
    }

    scalar(uint16_t value) noexcept
    {
        type = dt_uint16;
        as<uint16_t>() = value;
    }

    scalar(uint32_t value) noexcept
    {
        type = dt_uint32;
        as<uint32_t>() = value;
    }

    scalar(bfloat16 value) noexcept
    {
        type = dt_bfloat16;
        as<bfloat16>() = value;
    }

    scalar(half value) noexcept
    {
        type = dt_float16;
        as<half>() = value;
    }

    scalar(float value) noexcept
    {
        type = dt_float32;
        as<float>() = value;
    }

    // Reinterpret the storage as T. The caller must pass the T matching
    // `type`; no check is performed.
    template <class T>
    T &as() noexcept { return *reinterpret_cast<T *>(&storage); }
    template <class T>
    const T &as() const noexcept { return *reinterpret_cast<const T *>(&storage); }
};
// Typed sub-range within one logical memory pool.
struct memory_range
{
    memory_location_t memory_location; // pool this range lives in
    datatype_t datatype; // element type of the stored data
    uint16_t shared_module; // NOTE(review): presumably the owning module index when location is mem_shared_data — confirm
    uint32_t start; // offset from the pool base (presumably bytes — confirm)
    uint32_t size; // length (presumably bytes — confirm)
};
// Fixed-size, NUL-padded module-kind identifier.
NNCASE_INLINE_VAR constexpr size_t MAX_MODULE_TYPE_LENGTH = 16;
typedef std::array<char, MAX_MODULE_TYPE_LENGTH> module_type_t;
// Implementation detail: expands each character of the literal into the array
// via an index sequence.
template <std::size_t N, std::size_t... Is>
constexpr module_type_t
to_module_type(const char (&a)[N], std::index_sequence<Is...>)
{
    return { { a[Is]... } };
}
// Builds a module_type_t from a string literal at compile time. Copies all N
// characters, including the trailing '\0'; remaining slots are value-initialized.
template <std::size_t N>
constexpr module_type_t to_module_type(const char (&a)[N])
{
    return to_module_type(a, std::make_index_sequence<N>());
}
// Component-wise sum of two paddings.
inline padding operator+(const padding &lhs, const padding &rhs) noexcept
{
    padding sum { lhs.before + rhs.before, lhs.after + rhs.after };
    return sum;
}
// Paddings are equal iff both components match.
inline bool operator==(const padding &lhs, const padding &rhs) noexcept
{
    if (lhs.before != rhs.before)
        return false;
    return lhs.after == rhs.after;
}
// Negation of padding equality.
inline bool operator!=(const padding &lhs, const padding &rhs) noexcept
{
    return !(lhs.before == rhs.before && lhs.after == rhs.after);
}
// Value ranges are equal iff both bounds match.
template <class T>
bool operator==(const value_range<T> &lhs, const value_range<T> &rhs) noexcept
{
    if (lhs.min != rhs.min)
        return false;
    return lhs.max == rhs.max;
}
// Negation of value_range equality.
template <class T>
bool operator!=(const value_range<T> &lhs, const value_range<T> &rhs) noexcept
{
    return !(lhs.min == rhs.min && lhs.max == rhs.max);
}
// Scalars are equal iff they share a type tag and the type-relevant prefix of
// their raw storage compares equal byte-for-byte.
inline bool operator==(const scalar &lhs, const scalar &rhs) noexcept
{
    if (lhs.type != rhs.type)
        return false;
    const auto valid_bytes = detail::datatype_bytes(lhs.type);
    return memcmp(&lhs.storage, &rhs.storage, valid_bytes) == 0;
}
// Negation of scalar equality: different type tags, or any differing byte
// within the type-relevant prefix of the storage.
inline bool operator!=(const scalar &lhs, const scalar &rhs) noexcept
{
    if (lhs.type != rhs.type)
        return true;
    const auto valid_bytes = detail::datatype_bytes(lhs.type);
    return memcmp(&lhs.storage, &rhs.storage, valid_bytes) != 0;
}
}
// Polynomial (base-31) hash over the 16 raw chars of a module type identifier.
template <>
struct std::hash<nncase::module_type_t>
{
    auto operator()(const nncase::module_type_t &key) const noexcept
    {
        constexpr size_t prime = 31;
        size_t hash_value = 0;
        for (auto ch : key)
            hash_value = hash_value * prime + ch;
        return hash_value;
    }
};

View File

@ -1,100 +0,0 @@
/* Copyright 2019-2021 Canaan Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#pragma once
#include "runtime_tensor_impl.h"
#include "shared_runtime_tensor.h"
BEGIN_NS_NNCASE_RUNTIME
namespace detail
{
// CPU-cache coherence state of a host memory block relative to its physical backing.
enum class cache_status_t
{
    valid, // cache and memory agree; no maintenance needed
    need_invalidate, // device wrote the memory; CPU cache must be invalidated before reads
    need_write_back // CPU wrote the cache; it must be written back before device reads
};
// Owning descriptor for one host tensor allocation: the virtual mapping, its
// deleter, cache state, and the associated physical block. Move-only.
struct host_memory_block
{
    host_runtime_tensor::memory_pool_t pool; // pool the block was allocated from
    uintptr_t virtual_address; // start of the CPU-visible mapping
    size_t size_bytes; // length of the mapping
    host_runtime_tensor::data_deleter_t deleter; // frees virtual_address; empty when not owned
    cache_status_t cache_status; // pending cache maintenance, if any
    physical_memory_block physical_block; // physical backing (shared pool)
    host_memory_block() = default;
    host_memory_block(const host_memory_block &) = delete;
    host_memory_block(host_memory_block &&other) noexcept;
    host_memory_block &operator=(const host_memory_block &) = delete;
    host_memory_block &operator=(host_memory_block &&other) noexcept;
    ~host_memory_block()
    {
        free();
    }
    // Releases the virtual buffer (via the deleter, if set) and the physical block.
    // Safe to call more than once: the deleter is cleared after use.
    void free()
    {
        if (auto d = std::move(deleter))
            d(reinterpret_cast<gsl::byte *>(virtual_address));
        deleter = {};
        physical_block.free(*this);
    }
    // Non-owning view over the whole virtual mapping.
    gsl::span<gsl::byte> virtual_buffer() const noexcept
    {
        return { reinterpret_cast<gsl::byte *>(virtual_address), size_bytes };
    }
};
// Host (CPU) implementation of runtime_tensor_impl. Owns a host_memory_block
// and supports mapping, cache synchronization, and copies to/from other tensor types.
class NNCASE_API host_runtime_tensor_impl : public runtime_tensor_impl
{
public:
    host_runtime_tensor_impl(datatype_t datatype, runtime_shape_t shape, runtime_shape_t strides, host_memory_block memory_block);
    datatype_t datatype() const noexcept override;
    const runtime_shape_t &shape() const noexcept override;
    const runtime_shape_t &strides() const noexcept override;
    runtime_tensor_type &tensor_type() const noexcept override;
    const quant_param_t &quant_param() const noexcept override;
    void quant_param(const quant_param_t &quant_param) noexcept override;
    bool can_copy_from_different_type(const runtime_tensor_impl &src) const noexcept override;
    bool can_copy_to_different_type(const runtime_tensor_impl &dest) const noexcept override;
    result<void> copy_to_same_type(runtime_tensor_impl &dest) noexcept override;
    result<void> copy_from_different_type(runtime_tensor_impl &src) noexcept override;
    result<void> copy_to_different_type(runtime_tensor_impl &dest) noexcept override;
    result<void> copy_from_host(runtime_tensor_impl &src) noexcept override;
    result<void> copy_to_host(runtime_tensor_impl &dest) noexcept override;
    // Maps the buffer for CPU access; pair with unmap for the same access mode.
    result<host_runtime_tensor::mapped_buffer> map(host_runtime_tensor::map_access_t access) noexcept;
    result<void> unmap(host_runtime_tensor::map_access_t access) noexcept;
    // Performs cache maintenance; `force` applies the op even if tracking says it is unneeded.
    result<void> sync(host_runtime_tensor::sync_op_t op, bool force = false) noexcept;
    const host_memory_block &memory_block() const noexcept { return memory_block_; }
    host_memory_block &memory_block() noexcept { return memory_block_; }
private:
    datatype_t datatype_;
    runtime_shape_t shape_;
    runtime_shape_t strides_;
    host_memory_block memory_block_;
    quant_param_t quant_;
};
}
END_NS_NNCASE_RUNTIME

View File

@ -1,373 +0,0 @@
/* Copyright 2019-2021 Canaan Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#pragma once
#include "compiler_defs.h"
#include <functional>
#include <mpark/variant.hpp>
#include <system_error>
#include <type_traits>
namespace nncase
{
// Evaluates expression `x` (a result<T>); on error, returns the error from the
// enclosing function. The ok value is discarded.
#define try_(x) \
    { \
        auto v = (x); \
        if (!v.is_ok()) \
            return nncase::err(std::move(v.unwrap_err())); \
    }
// Declares `name` of the result's ok type and assigns the ok value of `x` to it;
// on error, returns the error from the enclosing function.
#define try_var(name, x) \
    typename decltype((x))::traits::ok_type name; \
    { \
        auto v = (x); \
        if (v.is_ok()) \
            name = std::move(v.unwrap()); \
        else \
            return nncase::err(std::move(v.unwrap_err())); \
    }
// Like try_var, but for void-returning contexts: on error, stores the error
// into `e` and plain-returns.
#define try_var_err(name, x, e) \
    typename decltype((x))::traits::ok_type name; \
    { \
        auto v = (x); \
        if (v.is_ok()) \
        { \
            name = std::move(v.unwrap()); \
        } \
        else \
        { \
            e = nncase::err(std::move(v.unwrap_err())); \
            return; \
        } \
    }
// Assigns the ok value of `x` to an already-declared `name`; on error, returns
// the error from the enclosing function.
#define try_set(name, x) \
    { \
        auto v = (x); \
        if (v.is_ok()) \
            name = std::move(v.unwrap()); \
        else \
            return nncase::err(std::move(v.unwrap_err())); \
    }
// Success wrapper for result<T>: holds the ok value.
template <class T>
struct Ok
{
    constexpr Ok(T &&value)
        : value(std::move(value)) { }
    constexpr Ok(const T &value)
        : value(value) { }
    // In-place construction of the ok value from arbitrary arguments.
    template <class... Args>
    constexpr explicit Ok(mpark::in_place_t, Args &&...args)
        : value(std::forward<Args>(args)...) { }
    T value;
};
// Success wrapper for result<void>: carries no value.
template <>
struct Ok<void>
{
};
// Error wrapper for result<T>: holds a std::error_condition.
struct Err
{
    // Implicit construction from any error-condition enum.
    template <class ErrCode, class = std::enable_if_t<std::is_error_condition_enum<ErrCode>::value>>
    Err(ErrCode value)
        : err(value) { }
    Err(std::error_condition err)
        : err(std::move(err)) { }
    std::error_condition err;
};
// Creates a success value for result<void>.
inline constexpr Ok<void> ok()
{
    return {};
}
// Creates a success value of explicit type T, constructing it in place.
template <class T, class... Args>
constexpr Ok<T> ok(Args &&...args)
{
    return Ok<T>(mpark::in_place, std::forward<Args>(args)...);
}
// Creates a success value, deducing the ok type from the argument.
template <class T>
constexpr Ok<std::decay_t<T>> ok(T &&value)
{
    return Ok<std::decay_t<T>>(std::forward<T>(value));
}
// Creates an error value from a std::error_condition.
inline Err err(std::error_condition value) noexcept
{
    return Err(std::move(value));
}
// Creates an error value from any error-condition enum.
template <class ErrCode, class = std::enable_if_t<std::is_error_condition_enum<ErrCode>::value>>
Err err(ErrCode value)
{
    return err(std::error_condition(value));
}
template <class T>
class NNCASE_NODISCARD result;
namespace detail
{
// Trait: true iff T is a result<...> specialization.
template <class T>
NNCASE_INLINE_VAR bool constexpr is_result_v = false;
template <class T>
NNCASE_INLINE_VAR bool constexpr is_result_v<result<T>> = true;
// Exposes the ok type of a result and forbids result<result<...>> nesting.
template <class T>
struct result_traits
{
    static_assert(!is_result_v<T>, "Cannot use nested result");
    using ok_type = T;
};
// Invokes `func` on the ok value and wraps the return in a fresh result.
// Declared `struct` (was `class`): with class-key `class`, operator() defaults
// to private access, which makes the call from map_traits ill-formed when the
// template is instantiated.
template <class T, class U, class Func>
struct map_call_impl
{
    result<U> operator()(Func &&func, Ok<T> &value) noexcept
    {
        return ok(func(value.value));
    }
};
template <class T, class Func>
struct map_traits;
// Invokes a no-argument `func` (ok type is void) and wraps the return.
// Declared `struct` (was `class`): the default-private operator() of a `class`
// makes the call from map_traits<void, Func> ill-formed when instantiated.
template <class U, class Func>
struct map_call_void_impl
{
    result<U> operator()(Func &&func) noexcept
    {
        return ok(func());
    }
};
// map() dispatch for result<void>: the callback takes no arguments.
template <class Func>
struct map_traits<void, Func>
{
    using U = invoke_result_t<Func>;
    static_assert(!is_result_v<U>, "Cannot map a callback returning result, use and_then instead");
    result<U> operator()(Func &&func, NNCASE_UNUSED Ok<void> &value) noexcept
    {
        return map_call_void_impl<U, Func>()(std::forward<Func>(func));
    }
};
template <class T, class Func>
struct map_err_traits;
// and_then() dispatch: the callback consumes the ok value and must itself
// return a result (monadic bind).
template <class T, class Func>
struct and_then_traits
{
    using result_t = invoke_result_t<Func, T>;
    using traits_t = typename result_t::traits;
    using U = typename traits_t::ok_type;
    static_assert(is_result_v<result_t>, "Cannot then a callback not returning result, use map instead");
    result_t operator()(Func &&func, Ok<T> &value) noexcept
    {
        return func(value.value);
    }
};
// and_then() dispatch for result<void>: the callback takes no arguments and
// must itself return a result.
template <class Func>
struct and_then_traits<void, Func>
{
    using result_t = invoke_result_t<Func>;
    using traits_t = typename result_t::traits;
    using U = typename traits_t::ok_type;
    static_assert(is_result_v<result_t>, "Cannot then a callback not returning result, use map instead");
    result_t operator()(Func &&func, NNCASE_UNUSED Ok<void> &value) noexcept
    {
        return func();
    }
};
// Extracts the stored value from an Ok<T>, preserving value category.
template <class T>
struct unwrap_impl
{
    T &operator()(Ok<T> &value) noexcept
    {
        return value.value;
    }
    T &&operator()(Ok<T> &&value) noexcept
    {
        return std::move(value.value);
    }
};
// result<void> has nothing to extract; unwrapping is a no-op.
template <>
struct unwrap_impl<void>
{
    void operator()(NNCASE_UNUSED Ok<void> &value) noexcept
    {
    }
    void operator()(NNCASE_UNUSED Ok<void> &&value) noexcept
    {
    }
};
}
// Rust-style result type: holds either Ok<T> (variant index 0) or Err (index 1).
// Marked nodiscard so callers cannot silently drop errors.
template <class T>
class NNCASE_NODISCARD result
{
public:
    using traits = detail::result_traits<T>;
    constexpr result(Ok<T> value)
        : ok_or_err_(std::move(value)) { }
    result(Err err)
        : ok_or_err_(std::move(err)) { }
    constexpr bool is_ok() const noexcept { return ok_or_err_.index() == 0; }
    constexpr bool is_err() const noexcept { return ok_or_err_.index() == 1; }
    // Returns the ok value; terminates the process if this holds an error.
    constexpr decltype(auto) unwrap() noexcept
    {
        if (is_ok())
            return detail::unwrap_impl<T>()(value());
        else
            std::terminate();
    }
    // Returns the ok value; throws std::runtime_error with the error message otherwise.
    constexpr decltype(auto) unwrap_or_throw() &
    {
        if (is_ok())
            return detail::unwrap_impl<T>()(value());
        else
            throw std::runtime_error(unwrap_err().message());
    }
    constexpr decltype(auto) unwrap_or_throw() &&
    {
        if (is_ok())
            return detail::unwrap_impl<T>()(std::move(value()));
        else
            throw std::runtime_error(unwrap_err().message());
    }
    // Returns the error; terminates the process if this holds an ok value.
    constexpr std::error_condition &unwrap_err() noexcept
    {
        if (is_ok())
            std::terminate();
        else
            return err().err;
    }
    // Like unwrap(); `message` is currently unused diagnostics text.
    constexpr auto expect(NNCASE_UNUSED gsl::cstring_span message) noexcept
    {
        if (is_ok())
            return detail::unwrap_impl<T>()(value());
        else
            std::terminate();
    }
    // Transforms the ok value with `func` (which must NOT return a result);
    // errors pass through unchanged.
    template <class Func, class Traits = detail::map_traits<T, Func>>
    constexpr typename Traits::result_t map(Func &&func) noexcept
    {
        if (is_ok())
            return Traits()(std::forward<Func>(func), value());
        else
            return err();
    }
    // Transforms the error with `func`; ok values pass through unchanged.
    template <class Func, class Traits = detail::map_err_traits<T, Func>>
    constexpr typename Traits::result_t map_err(Func &&func) noexcept
    {
        if (is_ok())
            return value();
        else
            return Traits()(std::forward<Func>(func), err());
    }
    // Monadic bind: `func` consumes the ok value and must return a result.
    template <class Func, class Traits = detail::and_then_traits<T, Func>>
    constexpr typename Traits::result_t and_then(Func &&func) noexcept
    {
        if (is_ok())
            return Traits()(std::forward<Func>(func), value());
        else
            return err();
    }
private:
    constexpr Ok<T> &&value() &&noexcept { return mpark::get<Ok<T>>(ok_or_err_); }
    constexpr Ok<T> &value() &noexcept { return mpark::get<Ok<T>>(ok_or_err_); }
    constexpr Err &err() noexcept { return mpark::get<Err>(ok_or_err_); }
private:
    mpark::variant<Ok<T>, Err> ok_or_err_;
};
namespace detail
{
// map() dispatch for non-void ok types: callback consumes T and must not
// itself return a result.
template <class T, class Func>
struct map_traits
{
    using U = invoke_result_t<Func, T>;
    static_assert(!is_result_v<U>, "Cannot map a callback returning result, use and_then instead");
    using result_t = result<U>;
    result<U> operator()(Func &&func, Ok<T> &value) noexcept
    {
        return map_call_impl<T, U, Func>()(std::forward<Func>(func), value);
    }
};
template <class T, class Func>
struct map_err_traits
{
using U = invoke_result_t<Func, Err>;
static_assert(!is_result_v<U>, "Cannot map a callback returning result, use and_then instead");
result<U> operator()(Func &&func, Err &value) noexcept
{
return err(func(value.err));
}
};
// map() call for callbacks returning void: invoke for the side effect, then
// return a success result<void>. Declared `struct` (was `class`): the
// default-private operator() of a `class` makes the call from map_traits
// ill-formed when instantiated.
template <class T, class Func>
struct map_call_impl<T, void, Func>
{
    result<void> operator()(Func &&func, Ok<T> &value) noexcept
    {
        func(value.value);
        return ok();
    }
};
// map() call for result<void> with a void-returning callback: invoke for the
// side effect, then return a success result<void>. Declared `struct` (was
// `class`) so operator() is publicly accessible from map_traits<void, Func>.
template <class Func>
struct map_call_void_impl<void, Func>
{
    result<void> operator()(Func &&func) noexcept
    {
        func();
        return ok();
    }
};
}
}

View File

@ -1,86 +0,0 @@
/* Copyright 2019-2021 Canaan Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#pragma once
#include "model.h"
#include "result.h"
#include "runtime_tensor.h"
BEGIN_NS_NNCASE_RUNTIME
class interpreter;
class runtime_module;
struct runtime_module_init_context;
// Context handed to a runtime_function during initialization: exposes the
// owning module's init context, the function header, and the function body bytes.
struct NNCASE_API runtime_function_init_context
{
    virtual runtime_module_init_context &module_init_context() noexcept = 0;
    virtual const function_header &header() noexcept = 0;
    virtual gsl::span<const gsl::byte> body() noexcept = 0;
};
// One callable function inside a runtime module: owns per-function input/output
// tensor bindings and drives invocation. Non-copyable; subclassed per target.
class NNCASE_API runtime_function
{
private:
    // Bookkeeping for one input or output slot: its geometry plus up to three
    // tensors (user-bound, staging for copies, and the device-side tensor).
    struct inout_tensor_info
    {
        runtime_shape_t shape;
        runtime_shape_t strides;
        memory_range range;
        runtime_tensor bind_tensor;
        runtime_tensor staging_tensor;
        runtime_tensor device_tensor;
    };
public:
    runtime_function(runtime_module &rt_module);
    runtime_function(const runtime_function &) = delete;
    virtual ~runtime_function() = default;
    runtime_function &operator=(const runtime_function &) = delete;
    // Parses the function payload and calls initialize_core().
    result<void> initialize(gsl::span<const gsl::byte> payload, runtime_module_init_context &module_init_context) noexcept;
    runtime_module &module() const noexcept;
    uint32_t inputs_size() const noexcept;
    const runtime_shape_t &input_shape(size_t index) const noexcept;
    const memory_range &input_desc(size_t index) const noexcept;
    result<runtime_tensor> input_tensor(size_t index) noexcept;
    result<void> input_tensor(size_t index, runtime_tensor tensor) noexcept;
    uint32_t outputs_size() const noexcept;
    const runtime_shape_t &output_shape(size_t index) const noexcept;
    const memory_range &output_desc(size_t index) const noexcept;
    result<runtime_tensor> output_tensor(size_t index) noexcept;
    result<void> output_tensor(size_t index, runtime_tensor tensor) noexcept;
    // Runs the function with the currently bound tensors.
    result<void> invoke() noexcept;
protected:
    // Target-specific hooks implemented by subclasses.
    virtual result<void> initialize_core(runtime_function_init_context &context) noexcept = 0;
    virtual result<runtime_tensor> allocate_input_tensor(size_t index) noexcept = 0;
    virtual result<runtime_tensor> allocate_output_tensor(size_t index) noexcept = 0;
    virtual result<void> validate_input_tensor(size_t index, runtime_tensor tensor) noexcept = 0;
    virtual result<void> validate_output_tensor(size_t index, runtime_tensor tensor) noexcept = 0;
    result<runtime_tensor> device_input_tensor(size_t index) noexcept;
    result<runtime_tensor> device_output_tensor(size_t index) noexcept;
    virtual result<void> invoke_core() noexcept = 0;
private:
    function_header header_;
    std::vector<inout_tensor_info> input_tensors_;
    std::vector<inout_tensor_info> output_tensors_;
    runtime_module &rt_module_;
};
END_NS_NNCASE_RUNTIME

View File

@ -1,150 +0,0 @@
/* Copyright 2019-2021 Canaan Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#pragma once
#include "model.h"
#include "result.h"
#include <functional>
#include <memory>
BEGIN_NS_NNCASE_RUNTIME
// Identity tag for a family of runtime tensors (e.g. host tensors). Each family
// exposes one non-copyable instance; types compare by object address (see
// operator== below).
struct runtime_tensor_type
{
    const char *data; // human-readable name of the tensor family
    explicit runtime_tensor_type(const char *data) noexcept
        : data(data)
    {
    }
    runtime_tensor_type(runtime_tensor_type &) = delete;
    runtime_tensor_type &operator=(runtime_tensor_type &) = delete;
};
// Tensor types are singletons: equality is object identity, not name comparison.
inline bool operator==(runtime_tensor_type &lhs, runtime_tensor_type &rhs) noexcept
{
    return &lhs == &rhs;
}
inline bool operator!=(runtime_tensor_type &lhs, runtime_tensor_type &rhs) noexcept
{
    return &lhs != &rhs;
}
namespace detail
{
class runtime_tensor_impl;
class host_runtime_tensor_impl;
}
// Shared handle to a tensor implementation. Cheap to copy (shared_ptr inside);
// an empty handle has no impl.
class NNCASE_API runtime_tensor
{
public:
    // Constructs an empty (impl-less) tensor handle.
    runtime_tensor() noexcept;
    runtime_tensor(std::shared_ptr<detail::runtime_tensor_impl> impl) noexcept;
    datatype_t datatype() const noexcept;
    const runtime_shape_t &shape() const noexcept;
    const runtime_shape_t &strides() const noexcept;
    runtime_tensor_type &tensor_type() const noexcept;
    const quant_param_t &quant_param() const noexcept;
    void quant_param(const quant_param_t &quant_param) const noexcept;
    bool empty() const noexcept;
    bool is_host() const noexcept;
    bool is_contiguous() const noexcept;
    detail::runtime_tensor_impl *impl() noexcept { return impl_.get(); }
    const detail::runtime_tensor_impl *impl() const noexcept { return impl_.get(); }
    // True if a copy to `dest` needs no intermediate staging tensor.
    bool can_copy_to_without_staging(const runtime_tensor &dest) const noexcept;
    result<void> copy_to(runtime_tensor &dest) noexcept;
    // Returns a host-accessible view/copy of this tensor.
    result<runtime_tensor> as_host() noexcept;
    // Releases the underlying impl, leaving this handle empty.
    void reset() noexcept;
private:
    std::shared_ptr<detail::runtime_tensor_impl> impl_;
};
NNCASE_API bool operator==(const runtime_tensor &lhs, const runtime_tensor &rhs) noexcept;
NNCASE_API bool operator!=(const runtime_tensor &lhs, const runtime_tensor &rhs) noexcept;
namespace host_runtime_tensor
{
// Which pool to allocate host tensor memory from.
typedef enum memory_pool_
{
    pool_cpu_only, // plain CPU memory, not device-sharable
    pool_shared // memory sharable with devices/accelerators
} memory_pool_t;
// Cache maintenance operation for sync().
typedef enum sync_op_
{
    sync_invalidate, // drop CPU cache lines before reading device-written data
    sync_write_back // flush CPU writes before the device reads
} sync_op_t;
// Access mode for map(); bitmask (see DEFINE_ENUM_BITMASK_OPERATORS below).
typedef enum map_access_
{
    map_none = 0,
    map_read = 1,
    map_write = 2,
    map_read_write = 3
} map_access_t;
DEFINE_ENUM_BITMASK_OPERATORS(map_access_t)
// RAII mapping of a host tensor's memory for CPU access; unmaps on destruction.
// Move-only.
class NNCASE_API mapped_buffer
{
public:
    mapped_buffer() noexcept;
    mapped_buffer(detail::host_runtime_tensor_impl &impl, map_access_t access, uintptr_t address, size_t size_bytes) noexcept;
    mapped_buffer(mapped_buffer &&other) noexcept;
    mapped_buffer(const mapped_buffer &) = delete;
    ~mapped_buffer();
    mapped_buffer &operator=(mapped_buffer &&) noexcept;
    mapped_buffer &operator=(const mapped_buffer &) = delete;
    // Explicitly releases the mapping before destruction.
    result<void> unmap() noexcept;
    // Non-owning view over the mapped bytes.
    gsl::span<gsl::byte> buffer() const noexcept
    {
        return { reinterpret_cast<gsl::byte *>(address_), size_bytes_ };
    }
private:
    detail::host_runtime_tensor_impl *impl_;
    map_access_t access_;
    uintptr_t address_;
    size_t size_bytes_;
};
// Custom deleter invoked to release externally supplied tensor data.
typedef std::function<void(gsl::byte *)> data_deleter_t;
// The singleton tensor type tag for host tensors.
NNCASE_API runtime_tensor_type &tensor_type() noexcept;
// Factory overloads: allocate fresh memory, wrap/copy caller data, or wrap
// caller data with a custom deleter; each with or without explicit strides.
NNCASE_API result<runtime_tensor> create(datatype_t datatype, runtime_shape_t shape, memory_pool_t pool = pool_cpu_only, uintptr_t physical_address = 0) noexcept;
NNCASE_API result<runtime_tensor> create(datatype_t datatype, runtime_shape_t shape, gsl::span<gsl::byte> data, bool copy, memory_pool_t pool = pool_cpu_only, uintptr_t physical_address = 0) noexcept;
NNCASE_API result<runtime_tensor> create(datatype_t datatype, runtime_shape_t shape, gsl::span<gsl::byte> data, data_deleter_t data_deleter, memory_pool_t pool = pool_cpu_only, uintptr_t physical_address = 0) noexcept;
NNCASE_API result<runtime_tensor> create(datatype_t datatype, runtime_shape_t shape, runtime_shape_t strides, memory_pool_t pool = pool_cpu_only, uintptr_t physical_address = 0) noexcept;
NNCASE_API result<runtime_tensor> create(datatype_t datatype, runtime_shape_t shape, runtime_shape_t strides, gsl::span<gsl::byte> data, bool copy, memory_pool_t pool = pool_cpu_only, uintptr_t physical_address = 0) noexcept;
NNCASE_API result<runtime_tensor> create(datatype_t datatype, runtime_shape_t shape, runtime_shape_t strides, gsl::span<gsl::byte> data, data_deleter_t data_deleter, memory_pool_t pool = pool_cpu_only, uintptr_t physical_address = 0) noexcept;
// Queries which pool a host tensor was allocated from.
NNCASE_API result<memory_pool_t> memory_pool(const runtime_tensor &tensor) noexcept;
// Maps a host tensor's memory for CPU access.
NNCASE_API result<mapped_buffer> map(runtime_tensor &tensor, map_access_t access) noexcept;
// Performs cache maintenance on a host tensor.
NNCASE_API result<void> sync(runtime_tensor &tensor, sync_op_t op, bool force = false) noexcept;
}
namespace hrt = host_runtime_tensor;
END_NS_NNCASE_RUNTIME

View File

@ -1,52 +0,0 @@
/* Copyright 2019-2021 Canaan Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#pragma once
#include "runtime_tensor.h"
BEGIN_NS_NNCASE_RUNTIME
namespace detail
{
// Abstract base for tensor implementations. Subclasses (e.g. the host impl)
// supply geometry accessors and type-specific copy paths; the base provides the
// generic copy/staging logic.
class NNCASE_API runtime_tensor_impl
{
public:
    virtual ~runtime_tensor_impl() = default;
    virtual datatype_t datatype() const noexcept = 0;
    virtual const runtime_shape_t &shape() const noexcept = 0;
    virtual const runtime_shape_t &strides() const noexcept = 0;
    virtual runtime_tensor_type &tensor_type() const noexcept = 0;
    virtual const quant_param_t &quant_param() const noexcept = 0;
    virtual void quant_param(const quant_param_t &quant_param) noexcept = 0;
    bool is_host() const noexcept;
    bool is_contiguous() const noexcept;
    // True if a copy to `dest` needs no intermediate staging tensor.
    bool can_copy_to_without_staging(const runtime_tensor &dest) const noexcept;
    result<void> copy_to(runtime_tensor &dest) noexcept;
    result<runtime_tensor> copy_as_host() noexcept;
    // Cross-type copy hooks; defaults presumably decline — confirm in the .cpp.
    virtual bool can_copy_from_different_type(const runtime_tensor_impl &src) const noexcept;
    virtual bool can_copy_to_different_type(const runtime_tensor_impl &dest) const noexcept;
    virtual result<void> copy_to_same_type(runtime_tensor_impl &dest) noexcept;
    virtual result<void> copy_from_different_type(runtime_tensor_impl &src) noexcept;
    virtual result<void> copy_to_different_type(runtime_tensor_impl &dest) noexcept;
    virtual result<void> copy_from_host(runtime_tensor_impl &src) noexcept;
    virtual result<void> copy_to_host(runtime_tensor_impl &dest) noexcept;
};
}
END_NS_NNCASE_RUNTIME

File diff suppressed because it is too large Load Diff

File diff suppressed because it is too large Load Diff

View File

@ -1,77 +0,0 @@
/* Copyright 2019-2021 Canaan Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#pragma once
#include "buffers.h"
#include "freelist.h"
#include <list>
#include <nncase/ir/ir_types.h>
#include <nncase/runtime/datatypes.h>
#include <optional>
#include <unordered_map>
namespace nncase::schedule
{
// Abstract scheduler-side buffer allocator: subclasses mark() each physical
// buffer to be placed, then finish() computes the final offsets recorded in
// allocations() and the peak usage in max_usage().
class NNCASE_API buffer_allocator
{
public:
    // A placed buffer: memory_span offset/size plus the source buffer and the
    // number of bytes actually valid within the span.
    struct allocated_buffer : memory_span
    {
        const physical_buffer *buffer;
        size_t valid_size;
    };
    virtual ~buffer_allocator() = default;
    // Shifts all allocations by a fixed base offset.
    virtual void base_offset(size_t value) = 0;
    // Registers one buffer for placement.
    virtual void mark(const physical_buffer &buffer) = 0;
    // Finalizes placement of all marked buffers.
    virtual void finish() = 0;
    size_t max_usage() const noexcept { return max_usage_; }
    const std::unordered_map<const physical_buffer *, allocated_buffer> &allocations() const noexcept { return allocations_; }
    virtual size_t get_size_in_bytes(const logical_buffer &buffer);
protected:
    virtual allocated_buffer make_alloc(const physical_buffer &buffer);
    virtual size_t alignment() const noexcept;
protected:
    size_t max_usage_;
    std::unordered_map<const physical_buffer *, allocated_buffer> allocations_;
};
// Allocator that places buffers back-to-back with no reuse of freed space.
class NNCASE_API linear_buffer_allocator : public buffer_allocator
{
public:
    void base_offset(size_t value) override;
    void mark(const physical_buffer &buffer) override;
    void finish() override;
};
// First-fit allocator backed by a freelist: reuses space of buffers whose
// lifetime has ended. Optionally constrained to a fixed total size.
class NNCASE_API first_fit_allocator : public buffer_allocator
{
public:
    first_fit_allocator(std::optional<size_t> fixed_size = std::nullopt);
    void base_offset(size_t value) override;
    void mark(const physical_buffer &buffer) override;
    void finish() override;
private:
    freelist list_;
    // Buffers currently alive, tracked so their space can be freed as ages advance.
    std::vector<const physical_buffer *> living_buffers_;
};
using allocator_map_t = std::unordered_map<memory_location_t, buffer_allocator *>;
using shared_allocator_map_t = std::unordered_map<module_type_t, buffer_allocator *>;
}

View File

@ -1,132 +0,0 @@
/* Copyright 2019-2021 Canaan Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#pragma once
#include <nncase/ir/connectors.h>
#include <nncase/ir/ir_types.h>
#include <nncase/runtime/datatypes.h>
#include <optional>
#include <vector>
namespace nncase::schedule
{
class logical_buffer;
class physical_buffer;
// Half-open [start, start + size) region of scheduler-managed memory.
struct memory_span
{
    size_t start;
    size_t size;
    // One past the last unit of the region.
    size_t end() const noexcept { return start + size; }
};
// Describes a buffer carved out of a parent logical buffer at a given offset
// with its own shape.
struct sub_buffer_desc
{
    logical_buffer *parent = nullptr;
    size_t offset;
    ir::shape_t shape;
};
// Liveness interval of a buffer in scheduler "age" units.
struct buffer_lifetime
{
    size_t used_count; // outstanding uses; buffer is alive while > 0
    size_t birth; // age at which the buffer becomes live
    size_t age; // duration of the live interval
    bool is_alive() const noexcept { return used_count > 0; }
    // Age at which the interval ends.
    size_t end() const noexcept { return birth + age; }
};
// Scheduler-side view of one IR output connector's storage: location, optional
// parent (for sub-buffers), strides information, lifetime, and the physical
// buffer it is eventually folded into.
class NNCASE_API logical_buffer
{
public:
    logical_buffer(size_t id, ir::output_connector &owner, memory_location_t location)
        : id_(id), owner_(owner), memory_location_(location), strides_parent_(nullptr), physical_(nullptr) { }
    size_t id() const noexcept { return id_; }
    ir::output_connector &owner() const noexcept { return owner_; }
    // Type and shape are delegated to the owning connector.
    datatype_t type() const noexcept { return owner_.type(); }
    const ir::shape_t &shape() const noexcept { return owner_.shape(); }
    // Set when this buffer is a sub-range of another buffer.
    const std::optional<sub_buffer_desc> &parent() const noexcept { return parent_; }
    std::optional<sub_buffer_desc> &parent() noexcept { return parent_; }
    const std::optional<size_t> &absolute_offset() const noexcept { return absolute_offset_; }
    std::optional<size_t> &absolute_offset() noexcept { return absolute_offset_; }
    const std::optional<ir::shape_t> &strides_shape() const noexcept { return strides_shape_; }
    std::optional<ir::shape_t> &strides_shape() noexcept { return strides_shape_; }
    logical_buffer *strides_parent() const noexcept { return strides_parent_; }
    logical_buffer *&strides_parent() noexcept { return strides_parent_; }
    const buffer_lifetime &lifetime() const noexcept { return lifetime_; }
    buffer_lifetime &lifetime() noexcept { return lifetime_; }
    physical_buffer *physical() const noexcept { return physical_; }
    physical_buffer *&physical() noexcept { return physical_; }
    memory_location_t memory_location() const noexcept { return memory_location_; }
    memory_location_t &memory_location() noexcept { return memory_location_; }
    // Owning module kind when the buffer lives in shared memory.
    module_type_t shared_module() const noexcept { return shared_module_; }
    void shared_module(const module_type_t &type) noexcept { shared_module_ = type; }
    bool no_action_concat_with_strides() const noexcept { return no_action_concat_with_strides_; }
    bool &no_action_concat_with_strides() noexcept { return no_action_concat_with_strides_; }
private:
    size_t id_;
    ir::output_connector &owner_;
    memory_location_t memory_location_;
    module_type_t shared_module_;
    std::optional<sub_buffer_desc> parent_;
    std::optional<size_t> absolute_offset_;
    std::optional<ir::shape_t> strides_shape_;
    logical_buffer *strides_parent_;
    buffer_lifetime lifetime_ {};
    physical_buffer *physical_;
    bool no_action_concat_with_strides_ = false;
};
// Concrete allocation unit: groups one or more logical buffers that share
// storage, and records the final memory_span assigned by the allocator.
class NNCASE_API physical_buffer
{
public:
    physical_buffer(size_t id, logical_buffer &owner)
        : id_(id), owner_(owner) { }
    size_t id() const noexcept { return id_; }
    // The representative logical buffer; lifetime is delegated to it.
    logical_buffer &owner() const noexcept { return owner_; }
    const buffer_lifetime &lifetime() const noexcept { return owner().lifetime(); }
    buffer_lifetime &lifetime() noexcept { return owner().lifetime(); }
    std::span<logical_buffer *const> logical_buffers() const noexcept { return logical_buffers_; }
    std::vector<logical_buffer *> &logical_buffers() noexcept { return logical_buffers_; }
    memory_span allocation() const noexcept { return allocation_; }
    memory_span &allocation() noexcept { return allocation_; }
    size_t alignment() const noexcept { return alignment_; }
    void alignment(size_t value) { alignment_ = value; }
private:
    size_t id_;
    logical_buffer &owner_;
    std::vector<logical_buffer *> logical_buffers_;
    memory_span allocation_;
    size_t alignment_ = 8; // default alignment in bytes
};
}

View File

@ -1,47 +0,0 @@
/* Copyright 2019-2021 Canaan Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#pragma once
#include "buffers.h"
#include <list>
#include <map>
#include <nncase/runtime/datatypes.h>
#include <optional>
#include <stdint.h>
namespace nncase::schedule
{
// Free-space tracker for the first-fit allocator: maintains free regions keyed
// by start offset, growing the heap end unless constructed with a fixed size.
class NNCASE_API freelist
{
    // Free regions keyed by start offset (ordered for merging of neighbors).
    using free_nodes_t = std::map<size_t, memory_span>;
public:
    // A fixed_size caps the heap; nullopt lets the heap grow as needed.
    freelist(std::optional<size_t> fixed_size);
    size_t max_usage() const noexcept { return heap_end_; }
    bool can_allocate(size_t size);
    memory_span allocate(size_t size);
    // Returns a span to the free pool, merging with adjacent free regions.
    void free(const memory_span &node);
    std::vector<memory_span> free_nodes() const;
private:
    // Finds (or creates, by growing the heap) a free region of at least `size`.
    free_nodes_t::iterator reserve(size_t size);
    void merge(free_nodes_t::iterator offset);
private:
    bool is_fixed_;
    free_nodes_t free_nodes_;
    size_t heap_end_ = 0;
};
}

View File

@ -1,38 +0,0 @@
/* Copyright 2019-2021 Canaan Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#pragma once
#include "schedule_types.h"
namespace nncase::schedule
{
// Records logical-buffer lifetimes against a monotonically growing age
// counter, appending new buffers to `buffers` and indexing them by their
// producing output connector in `buffer_map`.
class lifetime_recorder
{
public:
lifetime_recorder(std::list<logical_buffer> &buffers, std::unordered_map<const ir::output_connector *, logical_buffer *> &buffer_map);

// Current logical time step.
size_t current_age() const noexcept { return cnt_age_; }
// Set the current logical time step.
void current_age(size_t age);

// Begin the lifetime of the buffer produced by `conn` at `location`.
void allocate(ir::output_connector &conn, memory_location_t location);
// End the lifetime of the buffer produced by `conn`.
void release(ir::output_connector &conn);
// Advance the logical clock by one step.
void grow_age();

private:
size_t next_buffer_id_ = 0; // id given to the next recorded buffer
size_t cnt_age_ = 0;        // current logical age (see current_age())
std::list<logical_buffer> &buffers_;     // buffer storage owned by the caller
std::unordered_map<const ir::output_connector *, logical_buffer *> &buffer_map_; // connector -> buffer index
};
}

View File

@ -1,135 +0,0 @@
/* Copyright 2019-2021 Canaan Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#pragma once
#include "buffer_allocator.h"
#include "liveness_analysis.h"
#include "schedule_types.h"
#include <filesystem>
namespace nncase
{
class target;
}
namespace nncase::schedule
{
class module_schedule_context;
class model_schedule_context;
// Context handed from a calling graph into a callee so that the callee
// records buffer lifetimes on the caller's lifetime clock.
struct caller_context
{
lifetime_recorder &lifetime;
};
// Schedules a single function (ir::graph): builds logical and physical
// buffers, assigns allocations and fills the function_schedule_result base
// (compute sequence, pool sizes).
class function_schedule_context : public function_schedule_result
{
public:
function_schedule_context(ir::graph &graph, module_schedule_context &mod_sched);
// Non-copyable (holds references and buffer lists), movable.
function_schedule_context(const function_schedule_context &) = delete;
function_schedule_context(function_schedule_context &&) = default;
function_schedule_context &operator=(const function_schedule_context &) = delete;

// Module type of the scheduled graph (`graph` lives in the base result).
const module_type_t &module_type() const noexcept { return graph->module_type(); }
// Output nodes of this function.
std::span<ir::output_node *> outputs() const noexcept { return outputs_; }
// Producing connector -> logical buffer.
std::unordered_map<const ir::output_connector *, logical_buffer *> &logical_buffer_map() noexcept { return logical_buffer_map_; }
std::list<logical_buffer> &logical_buffers() noexcept { return logical_buffers_; }
std::vector<physical_buffer> &physical_buffers() noexcept { return physical_buffers_; }

// Walk the function, recording buffer lifetimes on the caller's clock.
void visit_function(caller_context &caller_ctx);
// Finish scheduling this function — presumably runs the private
// allocation/assignment steps below; bodies not visible here.
void end_schedule();

private:
// Internal pipeline steps of the scheduling process.
void create_allocators();
void generate_compute_sequence();
void make_logical_buffers(caller_context &caller_ctx);
void analyze_buffer_alias();
void update_offset();
void fix_lifetime();
void make_physical_buffers();
void allocate_physical_buffers();
void assign_allocations();
// Write scheduling diagnostics into `dump_dir`.
void dump(const std::filesystem::path &dump_dir);

private:
module_schedule_context &mod_sched_; // owning module context
std::span<ir::output_node *> outputs_;
allocator_map_t allocators_;
std::vector<std::shared_ptr<buffer_allocator>> allocator_holder_; // keeps allocators alive
std::unordered_map<const ir::output_connector *, logical_buffer *> logical_buffer_map_;
std::list<logical_buffer> logical_buffers_;
std::vector<physical_buffer> physical_buffers_;
};
// Schedules all functions belonging to one module type and owns the
// module-level buffer allocators (including allocators shared with other
// module types).
class module_schedule_context
{
public:
module_schedule_context(module_schedule_result &result, model_schedule_context &model_sched, module_type_t type);
// Non-copyable, movable.
module_schedule_context(const module_schedule_context &) = delete;
module_schedule_context(module_schedule_context &&) = default;
module_schedule_context &operator=(const module_schedule_context &) = delete;

// Result record this context writes into.
module_schedule_result &module_result() const noexcept { return result_; }
// Owning model-level context.
model_schedule_context &model_sched() const noexcept { return model_sched_; }
allocator_map_t &allocators() noexcept { return allocators_; }
// Allocator shared with the module of the given `type`.
buffer_allocator &shared_allocator(const module_type_t &type);

// Schedule `graph` as a function of this module.
void visit_function(ir::graph &graph, caller_context &caller_ctx);
// Finish scheduling all functions of this module.
void end_schedule();

private:
module_schedule_result &result_;
model_schedule_context &model_sched_;
module_type_t type_;
allocator_map_t allocators_;
std::vector<std::shared_ptr<buffer_allocator>> allocator_holder_; // keeps allocators alive
shared_allocator_map_t shared_allocators_; // per-module-type shared allocators
std::vector<function_schedule_context> functions_; // one per scheduled function
std::filesystem::path dump_dir_;
};
// Top-level scheduling driver: dispatches each visited function to the
// module_schedule_context matching its module type, starting from the
// entry function.
class model_schedule_context
{
public:
model_schedule_context(model_schedule_result &result, nncase::target &target, bool skip_buffer_alias);
// Non-copyable, movable.
model_schedule_context(const model_schedule_context &) = delete;
model_schedule_context(model_schedule_context &&) = default;
model_schedule_context &operator=(const model_schedule_context &) = delete;

nncase::target &target() const noexcept { return target_; }
// When true, the buffer-alias analysis step is skipped.
bool skip_buffer_alias() const noexcept { return skip_buffer_alias_; }
// Enable dumping of scheduling diagnostics into `dump_dir`.
void config_dump(std::filesystem::path dump_dir);
const std::filesystem::path &dump_dir() const noexcept { return dump_dir_; }
// Result record this context writes into.
model_schedule_result &model_result() const noexcept { return result_; }

// Schedule the whole model rooted at `entry_function`.
void schedule(ir::graph &entry_function);
// Visit one function, routing it to its module context.
void visit_function(ir::graph &graph, caller_context &caller_ctx);

private:
void end_schedule();

private:
model_schedule_result &result_;
nncase::target &target_;
bool skip_buffer_alias_;
std::filesystem::path dump_dir_;
module_schedule_context *entry_module_;  // module owning the entry function
ir::graph *entry_function_;
std::unordered_map<module_type_t, module_schedule_context> module_contexts_; // one context per module type
};
}

View File

@ -1,75 +0,0 @@
/* Copyright 2019-2021 Canaan Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#pragma once
#include "buffer_allocator.h"
#include "buffers.h"
#include <nncase/ir/graph.h>
#include <unordered_map>
#include <vector>
namespace nncase::schedule
{
// Final placement of one buffer: where it lives, its byte range and the
// shape/stride information needed by the runtime.
struct buffer_allocation
{
memory_location_t memory_location;
datatype_t type;
size_t shared_module; // index of the module this buffer is shared with
size_t start;         // byte offset within memory_location
size_t size;          // byte size
ir::shape_t shape;
ir::shape_t strides;
ir::shape_t strides_shape;

// One-past-the-end byte offset of this allocation.
size_t linear_end() const noexcept { return start + size; }

// True when both allocations are non-empty, live in the same memory
// location, and their [start, linear_end) byte ranges intersect.
bool overlap(const buffer_allocation &rhs) const noexcept
{
return size != 0 && rhs.size != 0 && memory_location == rhs.memory_location && (start < rhs.linear_end() && linear_end() > rhs.start);
}

// Convert to the runtime memory_range representation, narrowing
// shared_module/start/size to the runtime's fixed-width fields.
memory_range runtime_type() const
{
return { .memory_location = memory_location, .datatype = type, .shared_module = (uint16_t)shared_module, .start = (uint32_t)start, .size = (uint32_t)size };
}
};
using allocation_map_t = std::unordered_map<const ir::output_connector *, buffer_allocation>;
struct module_schedule_result;
// Scheduling result for one function: execution order plus the sizes of
// its input/output buffer pools.
struct function_schedule_result
{
ir::graph *graph;  // the scheduled function
module_schedule_result *module; // owning module's result
std::vector<ir::node *> compute_sequence; // nodes in execution order
size_t input_pool_size;
size_t output_pool_size;
};
// Scheduling results for all functions of one module type.
struct module_schedule_result
{
module_type_t type;
std::vector<function_schedule_result> functions;
std::unordered_map<ir::graph *, function_schedule_result *> functions_map; // graph -> its entry in `functions`
allocation_map_t allocations; // output connector -> final buffer placement
std::unordered_map<memory_location_t, size_t> max_usages; // peak usage per memory location
std::unordered_map<module_type_t, size_t> shared_max_usages; // peak usage of memory shared with other module types
};
// Scheduling results for the whole model.
struct model_schedule_result
{
std::vector<module_schedule_result> modules;
function_schedule_result *entry_function; // entry point; points into `modules`
};
}

View File

@ -1,42 +0,0 @@
/* Copyright 2019-2021 Canaan Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#pragma once
#include "schedule_context.h"
#include "schedule_types.h"
#include <filesystem>
#include <span>
namespace nncase
{
class target;
namespace schedule
{
// Public entry point of the scheduler: produces a model_schedule_result
// for `main_graph` on the given target.
class NNCASE_API scheduler
{
public:
scheduler(target &target, ir::graph &main_graph, std::span<ir::output_node *> outputs);

// Run scheduling.  When `skip_buffer_alias` is true the buffer-alias
// analysis is skipped.
model_schedule_result schedule(bool skip_buffer_alias = false);
// Enable dumping of scheduling diagnostics into `dump_dir`.
void config_dump(std::filesystem::path dump_dir);

private:
target &target_;
ir::graph &main_graph_;
std::span<ir::output_node *> outputs_; // output nodes of main_graph_
std::filesystem::path dump_dir_;
};
}
}

View File

@ -1,46 +0,0 @@
/* Copyright 2019-2021 Canaan Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#pragma once
#include <cstddef>
#include <cstdint>
#include <filesystem>
#include <memory>
#include <nncase/runtime/datatypes.h>
#include <span>
#include <vector>
namespace nncase
{
// Options controlling a simulation run over a dataset.
struct simulate_options
{
std::filesystem::path output_path; // where simulation results are written
std::filesystem::path dataset;     // input dataset path
std::string dataset_format;
// Progress callback invoked as (processed count, total count).
// NOTE(review): std::function requires <functional>, which this header
// does not include — confirm it arrives transitively.
std::function<void(size_t cnt, size_t total)> progress;
std::string input_layout = "NCHW";
float input_mean = 0.f; // input normalization mean — presumably (x - mean) / std; confirm
float input_std = 1.f;  // input normalization std
};
// Runs a serialized model in simulation.  Construct via create().
class NNCASE_API simulator
{
public:
// Create a simulator over the given model bytes and options.
static std::unique_ptr<simulator> create(std::vector<uint8_t> model, const simulate_options &options);
virtual ~simulator();
// Execute the simulation run.
virtual void run() = 0;
};
}

View File

@ -1,42 +0,0 @@
/* Copyright 2019-2021 Canaan Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#pragma once
#include <nncase/targets/target.h>
namespace nncase::targets
{
// The architecture-neutral (CPU) target: implements the pure-virtual
// registration hooks of `target` and offers reusable transform bundles
// for derived targets.
class NNCASE_API neutral_target : public target
{
public:
using target::target;

// target interface — see nncase::target for the contract of each hook.
void register_allocators(const module_type_t &type, schedule::allocator_map_t &allocators, std::vector<std::shared_ptr<schedule::buffer_allocator>> &allocator_holders) override;
void register_evaluator_ops() override;
void register_target_independent_passes(const module_type_t &type, ir::transforms::pass_manager &pass_mgr) override;
void register_target_dependent_passes(const module_type_t &type, ir::transforms::pass_manager &pass_mgr, bool use_ptq) override;
void register_quantize_annotation_passes(const module_type_t &type, ir::transforms::pass_manager &pass_mgr) override;
void register_quantize_passes(const module_type_t &type, ir::transforms::pass_manager &pass_mgr, datatype_t quant_type, std::string_view w_quant_type, bool use_mse_quant_w) override;
void register_allocation_passes(const module_type_t &type, ir::transforms::pass_manager &pass_mgr) override;
void add_quantization_broadcast(std::unordered_set<ir::node_opcode> &opcodes) override;

protected:
// Reusable transform bundles appended to `pass`; each optionally adds a
// trailing constant-folding step.
void move_transpose_transform(ir::transforms::transform_pass &pass, bool add_constant_folding = true);
void fold_pad_conv_transform(ir::transforms::transform_pass &pass, bool add_constant_folding = true);
void fold_dilated_conv_transform(ir::transforms::transform_pass &pass, bool add_constant_folding = true);
void add_default_transforms(ir::transforms::transform_pass &pass, bool add_constant_folding = true);

// Creates the neutral target's options object (see target::options()).
std::unique_ptr<target_options> on_create_options() override;
};
}

View File

@ -1,95 +0,0 @@
/* Copyright 2019-2021 Canaan Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#pragma once
#include <memory>
#include <nncase/ir/quantizer.h>
#include <nncase/runtime/model.h>
#include <nncase/schedule/buffer_allocator.h>
#include <unordered_map>
#include <unordered_set>
#include <vector>
namespace nncase::ir
{
struct node_opcode;
class quantizer;
}
namespace nncase::codegen
{
struct module_builder_params;
class module_builder;
}
namespace nncase::ir::transforms
{
class pass;
class transform_pass;
class pass_manager;
}
namespace nncase
{
// Compilation options common to all targets.  Concrete targets may
// subclass this (created via target::on_create_options) to add their own.
struct target_options
{
virtual ~target_options() = default;
std::string input_type;     // requested model input datatype name
std::string inference_type; // requested inference datatype name
float weights_quantize_threshold;
uint32_t output_quantize_threshold;
bool quantize_binary;
bool is_fpga;
};

// Attribute bag filled by target::config_attributes; currently empty.
struct target_attributes
{
};
// Abstract compilation target.  A concrete target registers its
// allocators, evaluator ops, transform passes and codegen module builder
// through the virtual hooks below.
class NNCASE_API target
{
public:
virtual ~target() = default;

// Lazily created target options (see on_create_options()).
target_options &options();

// Build the attribute set by letting the target fill a fresh
// target_attributes via config_attributes().
target_attributes attributes()
{
target_attributes attrs {};
config_attributes(attrs);
return attrs;
}

// Register buffer allocators for the given module type; ownership is
// retained in `allocator_holders`.
virtual void register_allocators(const module_type_t &type, schedule::allocator_map_t &allocators, std::vector<std::shared_ptr<schedule::buffer_allocator>> &allocator_holders) = 0;
// Register operator evaluators for this target.
virtual void register_evaluator_ops() = 0;
// Pass-registration hooks, invoked per module type in compilation order.
virtual void register_target_independent_passes(const module_type_t &type, ir::transforms::pass_manager &pass_mgr) = 0;
virtual void register_target_dependent_passes(const module_type_t &type, ir::transforms::pass_manager &pass_mgr, bool use_ptq) = 0;
virtual void register_quantize_annotation_passes(const module_type_t &type, ir::transforms::pass_manager &pass_mgr);
// Create the quantizer used for the given module type and calibration method.
virtual std::unique_ptr<ir::quantizer> create_quantizer(const module_type_t &type, ir::calibrate_method calib_method);
virtual void register_quantize_passes(const module_type_t &type, ir::transforms::pass_manager &pass_mgr, datatype_t quant_type, std::string_view w_quant_type, bool use_mse_quant_w);
virtual void register_target_dependent_after_quantization_passes(const module_type_t &type, ir::transforms::pass_manager &pass_mgr);
virtual void register_target_dependent_after_buffer_fusion_passes(const module_type_t &type, ir::transforms::pass_manager &pass_mgr);
virtual void register_allocation_passes(const module_type_t &type, ir::transforms::pass_manager &pass_mgr) = 0;
// Create the codegen builder for one module of the compiled model.
virtual std::unique_ptr<codegen::module_builder> create_module_builder(const module_type_t &type, std::string_view module_name, const codegen::module_builder_params &params);
// Add opcodes that require quantization broadcast to `opcodes`.
virtual void add_quantization_broadcast(std::unordered_set<ir::node_opcode> &opcodes) = 0;

protected:
// Factory for this target's options object, called once by options().
virtual std::unique_ptr<target_options> on_create_options() = 0;
// Fill `attrs`; default behavior not visible here.
virtual void config_attributes(target_attributes &attrs);

private:
std::unique_ptr<target_options> options_; // cache behind options()
};
}

View File

@ -1,45 +0,0 @@
/* Copyright 2019-2021 Canaan Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#pragma once
#include "../transform.h"
#include <unordered_set>
namespace nncase::ir::transforms
{
// Transform that adds quantization checkpoints for nodes whose opcode is
// in the configured set.
class NNCASE_API add_quant_checkpoints_transform : public transform
{
public:
// Construct from an iterator range of opcodes.
template <class TIt>
add_quant_checkpoints_transform(TIt begin, TIt end)
: opcodes_(begin, end)
{
}

// Construct from opcodes listed inline:
//   add_quant_checkpoints_transform(std::in_place, op_a, op_b);
template <class... TArgs>
add_quant_checkpoints_transform(std::in_place_t, TArgs &&...opcodes)
: opcodes_({ std::forward<TArgs>(opcodes)... })
{
}

void process(transform_context &context) override;

protected:
// This transform opts out of the self-containment check on matched subgraphs.
bool skip_self_contained_check() const noexcept override { return true; }
bool on_try_match(ir::node &node, transform_context &context) override;

private:
std::unordered_set<ir::node_opcode> opcodes_; // opcodes to checkpoint
};
}

View File

@ -1,49 +0,0 @@
/* Copyright 2019-2021 Canaan Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#pragma once
#include "../transform.h"
namespace nncase::ir::transforms
{
// Transform that inserts a dequantize step after model inputs of the
// given datatype.
class NNCASE_API add_input_dequantize_transform : public transform
{
public:
// dt: datatype the model inputs are expected to carry.
add_input_dequantize_transform(datatype_t dt) noexcept
: input_type_(dt) { }

void process(transform_context &context) override;

protected:
// Opts out of the self-containment check on matched subgraphs.
bool skip_self_contained_check() const noexcept override { return true; }
bool on_try_match(ir::node &node, transform_context &context) override;

private:
datatype_t input_type_; // datatype configured at construction
};
// Transform that inserts a quantize step before model outputs to produce
// the given datatype.
class NNCASE_API add_output_quantize_transform : public transform
{
public:
// dt: datatype the model outputs should carry.
add_output_quantize_transform(datatype_t dt) noexcept
: output_type_(dt) { }

void process(transform_context &context) override;

protected:
// Opts out of the self-containment check on matched subgraphs.
bool skip_self_contained_check() const noexcept override { return true; }
bool on_try_match(ir::node &node, transform_context &context) override;

private:
datatype_t output_type_; // datatype configured at construction
};
}

View File

@ -1,28 +0,0 @@
/* Copyright 2019-2021 Canaan Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#pragma once
#include "../transform.h"
namespace nncase::ir::transforms
{
// Transform that rewrites a matched add into an equivalent conv2d
// (per its name; match/rewrite logic lives in the .cpp).
class NNCASE_API add_to_conv2d_transform : public transform
{
public:
void process(transform_context &context) override;

protected:
bool on_try_match(ir::node &node, transform_context &context) override;
};
}

View File

@ -1,33 +0,0 @@
/* Copyright 2019-2021 Canaan Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#pragma once
#include "../transform.h"
namespace nncase::ir::transforms
{
// Declares a transform class `binary_<name>_motion_up_transform` that
// moves a binary operation upward across the named op kind (per the
// class name).  No comments inside the macro body: a `//` would swallow
// the line-continuation backslash.
#define DEFINE_BINARY_MOTION(name) \
class NNCASE_API binary_##name##_motion_up_transform : public transform \
{ \
public: \
void process(transform_context &context) override; \
\
protected: \
bool on_try_match(ir::node &node, transform_context &context) override; \
};
// Declares binary_reduce_window2d_motion_up_transform.
DEFINE_BINARY_MOTION(reduce_window2d)
#undef DEFINE_BINARY_MOTION
}

View File

@ -1,29 +0,0 @@
/* Copyright 2019-2021 Canaan Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#pragma once
#include "../transform.h"
namespace nncase::ir::transforms
{
// Transform that moves a clamp across a bitcast (per its name; the
// match/rewrite logic lives in the .cpp).
class NNCASE_API bitcast_clamp_motion_transform : public transform
{
public:
void process(transform_context &context) override;

protected:
// Opts out of the self-containment check on matched subgraphs.
bool skip_self_contained_check() const noexcept override { return true; }
bool on_try_match(ir::node &node, transform_context &context) override;
};
}

View File

@ -1,29 +0,0 @@
/* Copyright 2019-2021 Canaan Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#pragma once
#include "../transform.h"
namespace nncase::ir::transforms
{
// Transform that rewrites a clamp into equivalent binary ops (per its
// name; the match/rewrite logic lives in the .cpp).
class NNCASE_API clamp_to_binary_transform : public transform
{
public:
void process(transform_context &context) override;

protected:
// Opts out of the self-containment check on matched subgraphs.
bool skip_self_contained_check() const noexcept override { return true; }
bool on_try_match(ir::node &node, transform_context &context) override;
};
}

Some files were not shown because too many files have changed in this diff Show More