TensorInfo

This commit is contained in:
2025-11-17 12:44:50 +04:00
parent 41f5634ce9
commit bbd9c67c96
6 changed files with 395 additions and 609 deletions

View File

@@ -1,6 +1,181 @@
# Smoke test for the pybind11 tensor bindings: build an integer matrix,
# scale it in place by a float scalar, and print the result.
from tensor.tensor import *
# 2x3 integer matrix filled with the value 2.
a = iMatrix([2, 3], 2)
# In-place scalar multiplication (exercises __imul__ with a float argument).
a *= 3.0
# Relies on the binding's __repr__/toString for human-readable output.
print(a)
def test_matrix_operations():
    """Walk through the Matrix binding feature by feature, printing results.

    Covers construction, shape/axes/size accessors, element get/set,
    scalar and element-wise arithmetic, matrix multiplication,
    transposition, in-place operators and reflected (right-hand) operators.
    Failures of individual operations are caught and reported, not raised.
    """
    print("=" * 50)
    print("ТЕСТИРОВАНИЕ БИБЛИОТЕКИ MATRIX")
    print("=" * 50)

    # -- 1. Construction ------------------------------------------------
    print("\n1. СОЗДАНИЕ МАТРИЦ:")
    print("-" * 30)
    # Fill constructor: every element gets the same value.
    m1 = Matrix([2, 3], 1.0)
    print(f"Matrix([2, 3], 1.0) = {m1}")
    # Two-scalar constructor (random fill within [min, max]).
    m2 = Matrix([2, 3], 2.0, 3.0)
    print(f"Matrix([2, 3], 2.0, 3.0) = {m2}")
    # Shape chosen so that m1 @ m3 is a valid product.
    m3 = Matrix([3, 2], 2.0)
    print(f"Matrix([3, 2], 2.0) = {m3}")

    # -- 2. Introspection -----------------------------------------------
    print("\n2. СВОЙСТВА МАТРИЦ:")
    print("-" * 30)
    print(f"m1.get_shape() = {m1.get_shape()}")
    print(f"m1.get_axes() = {m1.get_axes()}")
    print(f"m1.get_size() = {m1.get_size()}")

    # -- 3. Element access ----------------------------------------------
    print("\n3. ДОСТУП К ЭЛЕМЕНТАМ:")
    print("-" * 30)
    print(f"m1[0] = {m1[0]}")
    print(f"m1[0, 1] = {m1[0, 1]}")
    # Tuple-indexed assignment.
    m1[0, 1] = 5.0
    print(f"После m1[0, 1] = 5.0: {m1}")

    # -- 4. Arithmetic ---------------------------------------------------
    print("\n4. АРИФМЕТИЧЕСКИЕ ОПЕРАЦИИ:")
    print("-" * 30)
    total = m1 + m2
    print(f"m1 + m2 = {total}")
    difference = m1 - m2
    print(f"m1 - m2 = {difference}")
    scaled = m1 * 2.0
    print(f"m1 * 2.0 = {scaled}")
    hadamard = m1 * m2
    print(f"m1 * m2 (поэлементно) = {hadamard}")
    halved = m1 / 2.0
    print(f"m1 / 2.0 = {halved}")
    negated = -m1
    print(f"-m1 = {negated}")
    identity_copy = +m1
    print(f"+m1 = {identity_copy}")

    # -- 5. Matrix multiplication ----------------------------------------
    print("\n5. МАТРИЧНОЕ УМНОЖЕНИЕ:")
    print("-" * 30)
    try:
        product = m1 @ m3
        print(f"m1 @ m3 = {product}")
    except Exception as e:
        print(f"Ошибка при матричном умножении: {e}")

    # -- 6. Transposition -------------------------------------------------
    print("\n6. ТРАНСПОНИРОВАНИЕ:")
    print("-" * 30)
    # NOTE: t() appears to transpose in place and return self — TODO confirm.
    transposed = m1.t()
    print(f"m1.t() = {transposed}")
    try:
        swapped_axes = m1.transpose(0, 1)
        print(f"m1.transpose(0, 1) = {swapped_axes}")
    except Exception as e:
        print(f"Ошибка при transpose(0, 1): {e}")
    try:
        permuted = m1.transpose([0, 1])
        print(f"m1.transpose([0, 1]) = {permuted}")
    except Exception as e:
        print(f"Ошибка при transpose([0, 1]): {e}")

    # -- 7. In-place operators --------------------------------------------
    print("\n7. ОПЕРАЦИИ НА МЕСТЕ:")
    print("-" * 30)
    m_test = Matrix([2, 2], 1.0)
    print(f"Исходная матрица: {m_test}")
    m_test += 2.0
    print(f"После m_test += 2.0: {m_test}")
    m_test -= 1.0
    print(f"После m_test -= 1.0: {m_test}")
    m_test *= 3.0
    print(f"После m_test *= 3.0: {m_test}")
    m_test /= 2.0
    print(f"После m_test /= 2.0: {m_test}")

    # -- 8. Integer fill values -------------------------------------------
    print("\n8. ТЕСТ С ВАШИМИ МАТРИЦАМИ:")
    print("-" * 30)
    a = Matrix([2, 3], 2)
    b = Matrix([3, 2], 1)
    print(f"a = {a}")
    print(f"b = {b}")
    try:
        result = a @ b
        print(f"a @ b = {result}")
    except Exception as e:
        print(f"Ошибка при a @ b: {e}")

    # -- 9. Reflected (right-hand) operators ------------------------------
    print("\n9. ОБРАТНЫЕ ОПЕРАЦИИ:")
    print("-" * 30)
    m_base = Matrix([2, 2], 3.0)
    print(f"Исходная матрица: {m_base}")
    radd = 2.0 + m_base
    print(f"2.0 + m_base = {radd}")
    rmul = 2.0 * m_base
    print(f"2.0 * m_base = {rmul}")
    rsub = 10.0 - m_base
    print(f"10.0 - m_base = {rsub}")

    print("\n" + "=" * 50)
    print("ТЕСТИРОВАНИЕ ЗАВЕРШЕНО")
    print("=" * 50)
def test_edge_cases():
    """Probe boundary behavior of the Matrix binding.

    Checks that a zero-sized shape is rejected and that out-of-range
    tuple indexing is either caught or reported; exceptions are printed
    rather than propagated.
    """
    print("\n\n10. ТЕСТ ГРАНИЧНЫХ СЛУЧАЕВ:")
    print("=" * 50)

    # A [0, 0] shape should be rejected by the constructor.
    try:
        degenerate = Matrix([0, 0])
        print(f"Matrix([0, 0]) = {degenerate}")
    except Exception as e:
        print(f"Ошибка при создании Matrix([0, 0]): {e}")

    # Indexing far outside the 2x2 bounds.
    try:
        probe = Matrix([2, 2], 1.0)
        print(f"Попытка доступа к m_test[5, 5]: ", end="")
        value = probe[5, 5]
        print(value)
    except Exception as e:
        print(f"Ошибка: {e}")
# Run the full demo suite only when executed as a script, not on import.
if __name__ == "__main__":
    test_matrix_operations()
    test_edge_cases()

View File

@@ -1,5 +1,5 @@
CXX = g++
CXXFLAGS = -Wall -Wextra -O1 -g -std=c++23
CXXFLAGS = -Wall -Wextra -Wpedantic -O1 -g -std=c++23
ifeq ($(OS),Windows_NT)
DETECTED_OS := Windows

View File

@@ -1,2 +1,8 @@
#include "tensor.hpp"
#include <iostream>
int main() { return 0; }
int main() {
Tensor<float, 2> a = Tensors::rand<float>(1, 3);
std::cout << a.toString();
return 0;
}

View File

@@ -15,10 +15,9 @@ void register_tensor(py::module &m, const std::string &name) {
const std::vector<T> &>())
.def(py::init<const std::array<size_t, Dim> &, T, T>())
.def("get_shape", &Tensor<T, Dim>::getShape)
.def("get_data", &Tensor<T, Dim>::getData)
.def("get_size", &Tensor<T, Dim>::getSize)
.def("get_axes", &Tensor<T, Dim>::getAxes)
.def("get_shape", &TensorInfo<T, Dim>::getShape)
.def("get_axes", &TensorInfo<T, Dim>::getAxes)
.def("get_size", &TensorInfo<T, Dim>::getSize)
.def(py::self + py::self)
.def(py::self - py::self)
@@ -44,6 +43,15 @@ void register_tensor(py::module &m, const std::string &name) {
.def("__repr__", &Tensor<T, Dim>::toString);
if constexpr (Dim >= 2) {
tensor
.def("transpose", py::overload_cast<const std::array<int, Dim> &>(
&Tensor<T, Dim>::transpose))
.def("transpose",
py::overload_cast<int, int>(&Tensor<T, Dim>::transpose))
.def("t", &Tensor<T, Dim>::t);
}
if constexpr (Dim != 0)
tensor
.def(
@@ -86,15 +94,6 @@ void register_tensor(py::module &m, const std::string &name) {
if constexpr (Dim == 1 || Dim == 2)
tensor.def("__matmul__", &Tensor<T, Dim>::operator%);
if constexpr (Dim >= 2) {
tensor
.def("transpose", py::overload_cast<const std::array<int, Dim> &>(
&Tensor<T, Dim>::transpose))
.def("transpose",
py::overload_cast<int, int>(&Tensor<T, Dim>::transpose))
.def("t", &Tensor<T, Dim>::t);
}
}
PYBIND11_MODULE(tensor, m) {
@@ -103,10 +102,10 @@ PYBIND11_MODULE(tensor, m) {
register_tensor<float, 0>(m, "Scalar");
register_tensor<float, 1>(m, "Vector");
register_tensor<float, 2>(m, "Matrix");
register_tensor<float, 3>(m, "Tensor3");
register_tensor<int, 0>(m, "iScalar");
register_tensor<int, 1>(m, "iVector");
register_tensor<int, 2>(m, "iMatrix");
register_tensor<int, 3>(m, "iTensor3");
// register_tensor<float, 3>(m, "Tensor3");
//
// register_tensor<int, 0>(m, "iScalar");
// register_tensor<int, 1>(m, "iVector");
// register_tensor<int, 2>(m, "iMatrix");
// register_tensor<int, 3>(m, "iTensor3");
}

View File

@@ -2,14 +2,14 @@
#include <random>
#include <sstream>
#include <stdexcept>
#include <type_traits>
#include <vector>
template <typename T, int Dim> class Tensor {
private:
template <typename T, int Dim> class Tensor;
template <typename T, int Dim> class TensorInfo {
protected:
std::array<size_t, Dim> shape_;
std::array<int, Dim> axes_;
std::vector<T> data_;
template <typename... Indices> size_t computeIndex(Indices... indices) const {
static_assert(sizeof...(Indices) == Dim, "Invalid number of indices");
@@ -26,7 +26,7 @@ private:
return index;
}
void checkItHasSameShape(const Tensor &other) {
void checkItHasSameShape(const TensorInfo &other) {
if (getShape() != other.getShape())
throw std::invalid_argument("Tensor shapes must match");
}
@@ -36,83 +36,50 @@ private:
}
public:
Tensor() = delete;
Tensor(const std::array<size_t, Dim> &shape) {
typedef class Tensor<T, Dim> Ten;
TensorInfo() = delete;
TensorInfo(const std::array<size_t, Dim> &shape) {
for (size_t d : shape)
if (d == 0)
throw std::invalid_argument("Invalid shape");
shape_ = shape;
for (int i = 0; i < Dim; ++i)
axes_[i] = i;
size_t total_size = 1;
for (size_t dim : shape)
total_size *= dim;
data_.resize(total_size);
}
Tensor(const std::array<size_t, Dim> &shape, T fill) : Tensor(shape) {
std::fill(data_.begin(), data_.end(), fill);
}
Tensor(const std::array<size_t, Dim> &shape, const std::vector<T> &data)
: Tensor(shape) {
if (data.size() != data_.size())
throw std::invalid_argument("Invalid data size");
data_ = data;
}
Tensor(const std::array<size_t, Dim> &shape, T min, T max) : Tensor(shape) {
static std::random_device rd;
static std::mt19937 gen(rd());
if constexpr (std::is_integral_v<T>) {
std::uniform_int_distribution<T> dis(min, max);
for (auto &element : data_)
element = dis(gen);
} else if constexpr (std::is_floating_point_v<T>) {
std::uniform_real_distribution<T> dis(min, max);
for (auto &element : data_)
element = dis(gen);
} else
throw std::invalid_argument("Invalid randomized type");
}
Tensor(const Tensor &other)
: shape_(other.shape_), axes_(other.axes_), data_(other.data_) {}
Tensor &operator=(const Tensor &other) {
TensorInfo(const TensorInfo &other)
: shape_(other.shape_), axes_(other.axes_) {}
TensorInfo &operator=(const TensorInfo &other) {
shape_ = other.shape_;
axes_ = other.axes_;
data_ = other.data_;
return *this;
}
Tensor(Tensor &&other) noexcept
: shape_(std::move(other.shape_)), axes_(std::move(other.axes_)),
data_(std::move(other.data_)) {}
Tensor &operator=(Tensor &&other) noexcept {
TensorInfo(TensorInfo &&other) noexcept
: shape_(std::move(other.shape_)), axes_(std::move(other.axes_)) {}
TensorInfo &operator=(TensorInfo &&other) noexcept {
shape_ = std::move(other.shape_);
axes_ = std::move(other.axes_);
data_ = std::move(other.data_);
return *this;
}
~Tensor() = default;
~TensorInfo() = default;
const std::array<int, Dim> &getAxes() const { return axes_; }
const std::vector<T> &getData() const { return data_; }
size_t getSize() const { return data_.size(); }
const std::array<size_t, Dim> getShape() const {
std::array<size_t, Dim> result;
for (int i = 0; i < Dim; ++i)
result[i] = shape_[axes_[i]];
return result;
}
size_t getSize() const {
size_t size = 1;
for (size_t i = 0; i < shape_.size(); ++i)
size *= shape_[i];
return size;
};
T &operator[](size_t i) { return data_[i]; }
const T &operator[](size_t i) const { return data_[i]; }
template <typename... Indices> T &operator()(Indices... indices) {
return data_[computeIndex(indices...)];
}
template <typename... Indices> const T &operator()(Indices... indices) const {
return data_[computeIndex(indices...)];
}
Tensor &transpose(const std::array<int, Dim> &new_axes) {
Ten &transpose(const std::array<int, Dim> &new_axes) {
std::array<bool, Dim> used{};
for (int axis : new_axes) {
checkAxisInDim(axis);
@@ -121,123 +88,209 @@ public:
used[axis] = true;
}
axes_ = new_axes;
return *this;
return static_cast<Ten &>(*this);
}
Tensor &transpose(int axis_a, int axis_b) {
Ten &transpose(int axis_a, int axis_b) {
checkAxisInDim(axis_a);
checkAxisInDim(axis_b);
if (axis_a == axis_b)
throw std::invalid_argument("Duplicate axis index");
std::swap(axes_[axis_a], axes_[axis_b]);
return *this;
return static_cast<Ten &>(*this);
}
Tensor &t() {
Ten &t() {
static_assert(Dim >= 2, "Can't change the only axis");
std::swap(axes_[Dim - 1], axes_[Dim - 2]);
return *this;
return static_cast<Ten &>(*this);
}
Tensor operator+() const { return *this; }
Tensor operator-() const {
virtual Ten operator+() const = 0;
virtual Ten operator-() const = 0;
virtual Ten &operator+=(const T &scalar) = 0;
virtual Ten &operator*=(const T &scalar) = 0;
Ten operator+(const T &scalar) const {
Ten result = static_cast<const Ten &>(*this);
result += scalar;
return result;
}
friend Ten operator+(const T &scalar, const Ten &tensor) {
return tensor + scalar;
}
Ten &operator-=(const T &scalar) {
*this += -scalar;
return static_cast<Ten &>(*this);
}
Ten operator-(const T &scalar) const {
Ten result = static_cast<const Ten &>(*this);
result -= scalar;
return result;
}
friend Ten operator-(const T &scalar, const Ten &tensor) {
return tensor + (-scalar);
}
Ten operator*(const T &scalar) const {
Ten result = static_cast<const Ten &>(*this);
result *= scalar;
return result;
}
friend Ten operator*(const T &scalar, const Ten &tensor) {
return tensor * scalar;
}
Ten &operator/=(const T &scalar) {
*this *= T(1) / scalar;
return static_cast<Ten &>(*this);
}
Ten operator/(const T &scalar) const {
Ten result = static_cast<const Ten &>(*this);
result /= scalar;
return result;
}
virtual Ten &operator+=(const Ten &other) = 0;
virtual Ten &operator*=(const Ten &other) = 0;
Ten operator+(const Ten &other) const {
Ten result = static_cast<const Ten &>(*this);
result += other;
return result;
}
Ten &operator-=(const Ten &other) {
checkItHasSameShape(other);
*this += -other;
return static_cast<Ten &>(*this);
}
Ten operator-(const Ten &other) const {
Ten result = static_cast<const Ten &>(*this);
result -= other;
return result;
}
Ten operator*(const Ten &other) const {
Ten result = static_cast<const Ten &>(*this);
result *= other;
return result;
}
virtual std::string toString() const = 0;
};
template <typename T, int Dim> class Tensor : public TensorInfo<T, Dim> {
private:
std::vector<T> data_;
public:
typedef class TensorInfo<T, Dim> TensorInfo;
using TensorInfo::axes_;
using TensorInfo::checkAxisInDim;
using TensorInfo::checkItHasSameShape;
using TensorInfo::computeIndex;
using TensorInfo::getSize;
using TensorInfo::shape_;
Tensor() = delete;
Tensor(const std::array<size_t, Dim> &shape) : TensorInfo(shape) {
size_t size = 1;
for (size_t dim : shape)
size *= dim;
data_.resize(size);
}
Tensor(const std::array<size_t, Dim> &shape, T value) : Tensor(shape) {
std::fill(data_.begin(), data_.end(), value);
}
Tensor(const std::array<size_t, Dim> &shape, const std::vector<T> &data)
: Tensor(shape) {
if (data.size() != data_.size())
throw std::invalid_argument("Invalid fill data size");
data_ = data;
}
Tensor(const std::array<size_t, Dim> &shape, T min, T max) : Tensor(shape) {
static std::random_device rd;
static std::mt19937 gen(rd());
if constexpr (std::is_integral_v<T>) {
std::uniform_int_distribution<T> dis(min, max);
for (T &e : data_)
e = dis(gen);
} else if constexpr (std::is_floating_point_v<T>) {
std::uniform_real_distribution<T> dis(min, max);
for (T &e : data_)
e = dis(gen);
} else
throw std::invalid_argument("Invalid randomized type");
}
Tensor(const Tensor &other) : TensorInfo(other), data_(other.data_) {}
Tensor &operator=(const Tensor &other) {
TensorInfo::operator=(other);
data_ = other.data_;
return *this;
}
Tensor(Tensor &&other) noexcept
: TensorInfo(std::move(other)), data_(std::move(other.data_)) {}
Tensor &operator=(Tensor &&other) noexcept {
TensorInfo::operator=(std::move(other));
data_ = std::move(other.data_);
return *this;
}
~Tensor() = default;
T &operator[](size_t i) { return data_[i]; }
const T &operator[](size_t i) const { return data_[i]; }
template <typename... Indices> T &operator()(Indices... indices) {
return data_[computeIndex(indices...)];
}
template <typename... Indices> const T &operator()(Indices... indices) const {
return data_[computeIndex(indices...)];
}
using TensorInfo::operator+;
using TensorInfo::operator-;
Tensor operator+() const override {
Tensor result = *this;
for (T &e : result.data_)
e = +e;
return result;
}
Tensor operator-() const override {
Tensor result = *this;
for (T &e : result.data_)
e = -e;
return result;
}
Tensor &operator+=(const T &scalar) {
Tensor &operator+=(const T &scalar) override {
for (T &e : data_)
e += scalar;
return *this;
}
Tensor operator+(const T &scalar) const {
Tensor result = *this;
result += scalar;
return result;
}
friend Tensor operator+(const T &scalar, const Tensor &tensor) {
return tensor + scalar;
}
Tensor &operator-=(const T &scalar) {
for (T &e : data_)
e -= scalar;
return *this;
}
Tensor operator-(const T &scalar) const {
Tensor result = *this;
result -= scalar;
return result;
}
friend Tensor operator-(const T &scalar, const Tensor &tensor) {
Tensor result = tensor;
for (T &e : result.data_)
e = scalar - e;
return result;
}
Tensor &operator*=(const T &scalar) {
Tensor &operator*=(const T &scalar) override {
for (T &e : data_)
e *= scalar;
return *this;
}
Tensor operator*(const T &scalar) const {
Tensor result = *this;
result *= scalar;
return result;
}
friend Tensor operator*(const T &scalar, const Tensor &tensor) {
return tensor * scalar;
}
Tensor &operator/=(const T &scalar) {
if (scalar == T(0))
throw std::invalid_argument("Division by zero");
for (T &e : data_)
e /= scalar;
return *this;
}
Tensor operator/(const T &scalar) const {
Tensor result = *this;
result /= scalar;
return result;
}
Tensor &operator+=(const Tensor &other) {
Tensor &operator+=(const Tensor &other) override {
checkItHasSameShape(other);
for (size_t i = 0; i < data_.size(); ++i)
data_[i] += other.data_[i];
return *this;
}
Tensor operator+(const Tensor &other) const {
Tensor result = *this;
result += other;
return result;
}
Tensor &operator-=(const Tensor &other) {
checkItHasSameShape(other);
for (size_t i = 0; i < data_.size(); ++i)
data_[i] -= other.data_[i];
return *this;
}
Tensor operator-(const Tensor &other) const {
Tensor result = *this;
result -= other;
return result;
}
Tensor &operator*=(const Tensor &other) {
Tensor &operator*=(const Tensor &other) override {
checkItHasSameShape(other);
for (size_t i = 0; i < data_.size(); ++i)
data_[i] *= other.data_[i];
return *this;
}
Tensor operator*(const Tensor &other) const {
Tensor result = *this;
result *= other;
return result;
}
Tensor<T, Dim == 1 ? 0 : 2> operator%(const Tensor &other) const {
static_assert(Dim == 1 || Dim == 2,
@@ -270,7 +323,7 @@ public:
}
}
std::string toString() const {
std::string toString() const override {
std::ostringstream oss;
if constexpr (Dim == 0) {
oss << "Scalar<" << typeid(T).name() << ">: " << data_[0];

View File

@@ -4,7 +4,7 @@ Tensor math library
from __future__ import annotations
import collections.abc
import typing
__all__: list[str] = ['Matrix', 'Scalar', 'Tensor3', 'Vector', 'iMatrix', 'iScalar', 'iTensor3', 'iVector']
__all__: list[str] = ['Matrix', 'Scalar', 'Vector']
class Matrix:
@typing.overload
def __add__(self, arg0: Matrix) -> Matrix:
@@ -86,8 +86,6 @@ class Matrix:
...
def get_axes(self) -> typing.Annotated[list[int], "FixedSize(2)"]:
...
def get_data(self) -> list[float]:
...
def get_shape(self) -> typing.Annotated[list[int], "FixedSize(2)"]:
...
def get_size(self) -> int:
@@ -167,105 +165,10 @@ class Scalar:
...
def get_axes(self) -> typing.Annotated[list[int], "FixedSize(0)"]:
...
def get_data(self) -> list[float]:
...
def get_shape(self) -> typing.Annotated[list[int], "FixedSize(0)"]:
...
def get_size(self) -> int:
...
class Tensor3:
@typing.overload
def __add__(self, arg0: Tensor3) -> Tensor3:
...
@typing.overload
def __add__(self, arg0: typing.SupportsFloat) -> Tensor3:
...
@typing.overload
def __getitem__(self, arg0: typing.SupportsInt) -> float:
...
@typing.overload
def __getitem__(self, arg0: tuple) -> float:
...
@typing.overload
def __iadd__(self, arg0: Tensor3) -> Tensor3:
...
@typing.overload
def __iadd__(self, arg0: typing.SupportsFloat) -> Tensor3:
...
@typing.overload
def __imul__(self, arg0: Tensor3) -> Tensor3:
...
@typing.overload
def __imul__(self, arg0: typing.SupportsFloat) -> Tensor3:
...
@typing.overload
def __init__(self, arg0: typing.Annotated[collections.abc.Sequence[typing.SupportsInt], "FixedSize(3)"]) -> None:
...
@typing.overload
def __init__(self, arg0: typing.Annotated[collections.abc.Sequence[typing.SupportsInt], "FixedSize(3)"], arg1: typing.SupportsFloat) -> None:
...
@typing.overload
def __init__(self, arg0: typing.Annotated[collections.abc.Sequence[typing.SupportsInt], "FixedSize(3)"], arg1: collections.abc.Sequence[typing.SupportsFloat]) -> None:
...
@typing.overload
def __init__(self, arg0: typing.Annotated[collections.abc.Sequence[typing.SupportsInt], "FixedSize(3)"], arg1: typing.SupportsFloat, arg2: typing.SupportsFloat) -> None:
...
@typing.overload
def __isub__(self, arg0: Tensor3) -> Tensor3:
...
@typing.overload
def __isub__(self, arg0: typing.SupportsFloat) -> Tensor3:
...
def __itruediv__(self, arg0: typing.SupportsFloat) -> Tensor3:
...
@typing.overload
def __mul__(self, arg0: Tensor3) -> Tensor3:
...
@typing.overload
def __mul__(self, arg0: typing.SupportsFloat) -> Tensor3:
...
def __neg__(self) -> Tensor3:
...
def __pos__(self) -> Tensor3:
...
def __radd__(self, arg0: typing.SupportsFloat) -> Tensor3:
...
def __repr__(self) -> str:
...
def __rmul__(self, arg0: typing.SupportsFloat) -> Tensor3:
...
def __rsub__(self, arg0: typing.SupportsFloat) -> Tensor3:
...
@typing.overload
def __setitem__(self, arg0: typing.SupportsInt, arg1: typing.SupportsFloat) -> None:
...
@typing.overload
def __setitem__(self, arg0: tuple, arg1: typing.SupportsFloat) -> None:
...
@typing.overload
def __sub__(self, arg0: Tensor3) -> Tensor3:
...
@typing.overload
def __sub__(self, arg0: typing.SupportsFloat) -> Tensor3:
...
def __truediv__(self, arg0: typing.SupportsFloat) -> Tensor3:
...
def get_axes(self) -> typing.Annotated[list[int], "FixedSize(3)"]:
...
def get_data(self) -> list[float]:
...
def get_shape(self) -> typing.Annotated[list[int], "FixedSize(3)"]:
...
def get_size(self) -> int:
...
def t(self) -> Tensor3:
...
@typing.overload
def transpose(self, arg0: typing.Annotated[collections.abc.Sequence[typing.SupportsInt], "FixedSize(3)"]) -> Tensor3:
...
@typing.overload
def transpose(self, arg0: typing.SupportsInt, arg1: typing.SupportsInt) -> Tensor3:
...
class Vector:
@typing.overload
def __add__(self, arg0: Vector) -> Vector:
@@ -347,356 +250,6 @@ class Vector:
...
def get_axes(self) -> typing.Annotated[list[int], "FixedSize(1)"]:
...
def get_data(self) -> list[float]:
...
def get_shape(self) -> typing.Annotated[list[int], "FixedSize(1)"]:
...
def get_size(self) -> int:
...
class iMatrix:
@typing.overload
def __add__(self, arg0: iMatrix) -> iMatrix:
...
@typing.overload
def __add__(self, arg0: typing.SupportsInt) -> iMatrix:
...
@typing.overload
def __getitem__(self, arg0: typing.SupportsInt) -> int:
...
@typing.overload
def __getitem__(self, arg0: tuple) -> int:
...
@typing.overload
def __iadd__(self, arg0: iMatrix) -> iMatrix:
...
@typing.overload
def __iadd__(self, arg0: typing.SupportsInt) -> iMatrix:
...
@typing.overload
def __imul__(self, arg0: iMatrix) -> iMatrix:
...
@typing.overload
def __imul__(self, arg0: typing.SupportsInt) -> iMatrix:
...
@typing.overload
def __init__(self, arg0: typing.Annotated[collections.abc.Sequence[typing.SupportsInt], "FixedSize(2)"]) -> None:
...
@typing.overload
def __init__(self, arg0: typing.Annotated[collections.abc.Sequence[typing.SupportsInt], "FixedSize(2)"], arg1: typing.SupportsInt) -> None:
...
@typing.overload
def __init__(self, arg0: typing.Annotated[collections.abc.Sequence[typing.SupportsInt], "FixedSize(2)"], arg1: collections.abc.Sequence[typing.SupportsInt]) -> None:
...
@typing.overload
def __init__(self, arg0: typing.Annotated[collections.abc.Sequence[typing.SupportsInt], "FixedSize(2)"], arg1: typing.SupportsInt, arg2: typing.SupportsInt) -> None:
...
@typing.overload
def __isub__(self, arg0: iMatrix) -> iMatrix:
...
@typing.overload
def __isub__(self, arg0: typing.SupportsInt) -> iMatrix:
...
def __itruediv__(self, arg0: typing.SupportsInt) -> iMatrix:
...
def __matmul__(self, arg0: iMatrix) -> iMatrix:
...
@typing.overload
def __mul__(self, arg0: iMatrix) -> iMatrix:
...
@typing.overload
def __mul__(self, arg0: typing.SupportsInt) -> iMatrix:
...
def __neg__(self) -> iMatrix:
...
def __pos__(self) -> iMatrix:
...
def __radd__(self, arg0: typing.SupportsInt) -> iMatrix:
...
def __repr__(self) -> str:
...
def __rmul__(self, arg0: typing.SupportsInt) -> iMatrix:
...
def __rsub__(self, arg0: typing.SupportsInt) -> iMatrix:
...
@typing.overload
def __setitem__(self, arg0: typing.SupportsInt, arg1: typing.SupportsInt) -> None:
...
@typing.overload
def __setitem__(self, arg0: tuple, arg1: typing.SupportsInt) -> None:
...
@typing.overload
def __sub__(self, arg0: iMatrix) -> iMatrix:
...
@typing.overload
def __sub__(self, arg0: typing.SupportsInt) -> iMatrix:
...
def __truediv__(self, arg0: typing.SupportsInt) -> iMatrix:
...
def get_axes(self) -> typing.Annotated[list[int], "FixedSize(2)"]:
...
def get_data(self) -> list[int]:
...
def get_shape(self) -> typing.Annotated[list[int], "FixedSize(2)"]:
...
def get_size(self) -> int:
...
def t(self) -> iMatrix:
...
@typing.overload
def transpose(self, arg0: typing.Annotated[collections.abc.Sequence[typing.SupportsInt], "FixedSize(2)"]) -> iMatrix:
...
@typing.overload
def transpose(self, arg0: typing.SupportsInt, arg1: typing.SupportsInt) -> iMatrix:
...
class iScalar:
@typing.overload
def __add__(self, arg0: iScalar) -> iScalar:
...
@typing.overload
def __add__(self, arg0: typing.SupportsInt) -> iScalar:
...
@typing.overload
def __iadd__(self, arg0: iScalar) -> iScalar:
...
@typing.overload
def __iadd__(self, arg0: typing.SupportsInt) -> iScalar:
...
@typing.overload
def __imul__(self, arg0: iScalar) -> iScalar:
...
@typing.overload
def __imul__(self, arg0: typing.SupportsInt) -> iScalar:
...
@typing.overload
def __init__(self, arg0: typing.Annotated[collections.abc.Sequence[typing.SupportsInt], "FixedSize(0)"]) -> None:
...
@typing.overload
def __init__(self, arg0: typing.Annotated[collections.abc.Sequence[typing.SupportsInt], "FixedSize(0)"], arg1: typing.SupportsInt) -> None:
...
@typing.overload
def __init__(self, arg0: typing.Annotated[collections.abc.Sequence[typing.SupportsInt], "FixedSize(0)"], arg1: collections.abc.Sequence[typing.SupportsInt]) -> None:
...
@typing.overload
def __init__(self, arg0: typing.Annotated[collections.abc.Sequence[typing.SupportsInt], "FixedSize(0)"], arg1: typing.SupportsInt, arg2: typing.SupportsInt) -> None:
...
@typing.overload
def __isub__(self, arg0: iScalar) -> iScalar:
...
@typing.overload
def __isub__(self, arg0: typing.SupportsInt) -> iScalar:
...
def __itruediv__(self, arg0: typing.SupportsInt) -> iScalar:
...
@typing.overload
def __mul__(self, arg0: iScalar) -> iScalar:
...
@typing.overload
def __mul__(self, arg0: typing.SupportsInt) -> iScalar:
...
def __neg__(self) -> iScalar:
...
def __pos__(self) -> iScalar:
...
def __radd__(self, arg0: typing.SupportsInt) -> iScalar:
...
def __repr__(self) -> str:
...
def __rmul__(self, arg0: typing.SupportsInt) -> iScalar:
...
def __rsub__(self, arg0: typing.SupportsInt) -> iScalar:
...
@typing.overload
def __sub__(self, arg0: iScalar) -> iScalar:
...
@typing.overload
def __sub__(self, arg0: typing.SupportsInt) -> iScalar:
...
def __truediv__(self, arg0: typing.SupportsInt) -> iScalar:
...
def get_axes(self) -> typing.Annotated[list[int], "FixedSize(0)"]:
...
def get_data(self) -> list[int]:
...
def get_shape(self) -> typing.Annotated[list[int], "FixedSize(0)"]:
...
def get_size(self) -> int:
...
class iTensor3:
@typing.overload
def __add__(self, arg0: iTensor3) -> iTensor3:
...
@typing.overload
def __add__(self, arg0: typing.SupportsInt) -> iTensor3:
...
@typing.overload
def __getitem__(self, arg0: typing.SupportsInt) -> int:
...
@typing.overload
def __getitem__(self, arg0: tuple) -> int:
...
@typing.overload
def __iadd__(self, arg0: iTensor3) -> iTensor3:
...
@typing.overload
def __iadd__(self, arg0: typing.SupportsInt) -> iTensor3:
...
@typing.overload
def __imul__(self, arg0: iTensor3) -> iTensor3:
...
@typing.overload
def __imul__(self, arg0: typing.SupportsInt) -> iTensor3:
...
@typing.overload
def __init__(self, arg0: typing.Annotated[collections.abc.Sequence[typing.SupportsInt], "FixedSize(3)"]) -> None:
...
@typing.overload
def __init__(self, arg0: typing.Annotated[collections.abc.Sequence[typing.SupportsInt], "FixedSize(3)"], arg1: typing.SupportsInt) -> None:
...
@typing.overload
def __init__(self, arg0: typing.Annotated[collections.abc.Sequence[typing.SupportsInt], "FixedSize(3)"], arg1: collections.abc.Sequence[typing.SupportsInt]) -> None:
...
@typing.overload
def __init__(self, arg0: typing.Annotated[collections.abc.Sequence[typing.SupportsInt], "FixedSize(3)"], arg1: typing.SupportsInt, arg2: typing.SupportsInt) -> None:
...
@typing.overload
def __isub__(self, arg0: iTensor3) -> iTensor3:
...
@typing.overload
def __isub__(self, arg0: typing.SupportsInt) -> iTensor3:
...
def __itruediv__(self, arg0: typing.SupportsInt) -> iTensor3:
...
@typing.overload
def __mul__(self, arg0: iTensor3) -> iTensor3:
...
@typing.overload
def __mul__(self, arg0: typing.SupportsInt) -> iTensor3:
...
def __neg__(self) -> iTensor3:
...
def __pos__(self) -> iTensor3:
...
def __radd__(self, arg0: typing.SupportsInt) -> iTensor3:
...
def __repr__(self) -> str:
...
def __rmul__(self, arg0: typing.SupportsInt) -> iTensor3:
...
def __rsub__(self, arg0: typing.SupportsInt) -> iTensor3:
...
@typing.overload
def __setitem__(self, arg0: typing.SupportsInt, arg1: typing.SupportsInt) -> None:
...
@typing.overload
def __setitem__(self, arg0: tuple, arg1: typing.SupportsInt) -> None:
...
@typing.overload
def __sub__(self, arg0: iTensor3) -> iTensor3:
...
@typing.overload
def __sub__(self, arg0: typing.SupportsInt) -> iTensor3:
...
def __truediv__(self, arg0: typing.SupportsInt) -> iTensor3:
...
def get_axes(self) -> typing.Annotated[list[int], "FixedSize(3)"]:
...
def get_data(self) -> list[int]:
...
def get_shape(self) -> typing.Annotated[list[int], "FixedSize(3)"]:
...
def get_size(self) -> int:
...
def t(self) -> iTensor3:
...
@typing.overload
def transpose(self, arg0: typing.Annotated[collections.abc.Sequence[typing.SupportsInt], "FixedSize(3)"]) -> iTensor3:
...
@typing.overload
def transpose(self, arg0: typing.SupportsInt, arg1: typing.SupportsInt) -> iTensor3:
...
class iVector:
@typing.overload
def __add__(self, arg0: iVector) -> iVector:
...
@typing.overload
def __add__(self, arg0: typing.SupportsInt) -> iVector:
...
@typing.overload
def __getitem__(self, arg0: typing.SupportsInt) -> int:
...
@typing.overload
def __getitem__(self, arg0: tuple) -> int:
...
@typing.overload
def __iadd__(self, arg0: iVector) -> iVector:
...
@typing.overload
def __iadd__(self, arg0: typing.SupportsInt) -> iVector:
...
@typing.overload
def __imul__(self, arg0: iVector) -> iVector:
...
@typing.overload
def __imul__(self, arg0: typing.SupportsInt) -> iVector:
...
@typing.overload
def __init__(self, arg0: typing.Annotated[collections.abc.Sequence[typing.SupportsInt], "FixedSize(1)"]) -> None:
...
@typing.overload
def __init__(self, arg0: typing.Annotated[collections.abc.Sequence[typing.SupportsInt], "FixedSize(1)"], arg1: typing.SupportsInt) -> None:
...
@typing.overload
def __init__(self, arg0: typing.Annotated[collections.abc.Sequence[typing.SupportsInt], "FixedSize(1)"], arg1: collections.abc.Sequence[typing.SupportsInt]) -> None:
...
@typing.overload
def __init__(self, arg0: typing.Annotated[collections.abc.Sequence[typing.SupportsInt], "FixedSize(1)"], arg1: typing.SupportsInt, arg2: typing.SupportsInt) -> None:
...
@typing.overload
def __isub__(self, arg0: iVector) -> iVector:
...
@typing.overload
def __isub__(self, arg0: typing.SupportsInt) -> iVector:
...
def __itruediv__(self, arg0: typing.SupportsInt) -> iVector:
...
def __matmul__(self, arg0: iVector) -> iScalar:
...
@typing.overload
def __mul__(self, arg0: iVector) -> iVector:
...
@typing.overload
def __mul__(self, arg0: typing.SupportsInt) -> iVector:
...
def __neg__(self) -> iVector:
...
def __pos__(self) -> iVector:
...
def __radd__(self, arg0: typing.SupportsInt) -> iVector:
...
def __repr__(self) -> str:
...
def __rmul__(self, arg0: typing.SupportsInt) -> iVector:
...
def __rsub__(self, arg0: typing.SupportsInt) -> iVector:
...
@typing.overload
def __setitem__(self, arg0: typing.SupportsInt, arg1: typing.SupportsInt) -> None:
...
@typing.overload
def __setitem__(self, arg0: tuple, arg1: typing.SupportsInt) -> None:
...
@typing.overload
def __sub__(self, arg0: iVector) -> iVector:
...
@typing.overload
def __sub__(self, arg0: typing.SupportsInt) -> iVector:
...
def __truediv__(self, arg0: typing.SupportsInt) -> iVector:
...
def get_axes(self) -> typing.Annotated[list[int], "FixedSize(1)"]:
...
def get_data(self) -> list[int]:
...
def get_shape(self) -> typing.Annotated[list[int], "FixedSize(1)"]:
...
def get_size(self) -> int: