forked from kaldi-asr/kaldi
-
Notifications
You must be signed in to change notification settings - Fork 0
Expand file tree
/
Copy pathtensor-common.h
More file actions
164 lines (126 loc) · 4.58 KB
/
tensor-common.h
File metadata and controls
164 lines (126 loc) · 4.58 KB
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
// tensor/tensor-common.h
// Copyright 2019 Johns Hopkins University (author: Daniel Povey)
// See ../../COPYING for clarification regarding multiple authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// THIS CODE IS PROVIDED *AS IS* BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
// KIND, EITHER EXPRESS OR IMPLIED, INCLUDING WITHOUT LIMITATION ANY IMPLIED
// WARRANTIES OR CONDITIONS OF TITLE, FITNESS FOR A PARTICULAR PURPOSE,
// MERCHANTABLITY OR NON-INFRINGEMENT.
// See the Apache 2 License for the specific language governing permissions and
// limitations under the License.
#ifndef KALDI_TENSOR_TENSOR_COMMON_H_
#define KALDI_TENSOR_TENSOR_COMMON_H_ 1
#include <cstdint>
#include <vector>
#include <string>
/**
   These are notes on plans for the kaldi10 tensor code; nothing is fully
   fleshed out yet.
*/
namespace kaldi {
namespace tensor {
// Shorthand names for fixed-width integer types, used throughout this code.
typedef int64_t int64;
typedef uint64_t uint64;
typedef int32_t int32;
typedef uint32_t uint32;
/// The kind of device on which a Tensor's data resides.
enum DeviceType {
  kCpuDevice = 0,
  kCudaDevice = 1
};
// We may later add a device number (like which GPU we are using),
// once we support multiple GPUs.
struct Device {
DeviceType device_type;
Device(): device_type(kCpuDevice) { }
Device(DeviceType t): device_type(t) { }
std::string ToString() const;
// TODO: operator ==
// maybe in future we'll make a way to set the default device.
};
enum DataType {
  // We will of course later extend this with many more types, including
  // integer types and half-precision floats.
  kFloatDtype = 1,
  kDoubleDtype = 2,
  kInt32Dtype = 3,
  // The following enum members are to be used when we want a case statement
  // over pairs of dtypes, say dtype1 and dtype2.  We would do this as: DataType
  // pair_dtype = static_cast<DataType>((int32(dtype1) << 4) + int32(dtype2));
  // NOTE(review): the original comment shifted dtype2, but the values below
  // place dtype1 in the high nibble (e.g. kFloatDoubleDtypes = 0x12 =
  // (kFloatDtype << 4) + kDoubleDtype), consistent with the member names,
  // so the formula above is the one that matches the values.
  kFloatFloatDtypes = 0x11,
  kFloatDoubleDtypes = 0x12,
  kFloatInt32Dtypes = 0x13,
  kDoubleFloatDtype = 0x21,
  kDoubleDoubleDtype = 0x22,
  kDoubleInt32Dtype = 0x23
};
/// Returns the size in bytes of one element of the given data-type.
/// Raises an error via KALDI_ERR if `dtype` is not one of the single
/// data-type values (the pair-dtype values are not valid inputs here).
inline int32 SizeOf(DataType dtype) {
  switch (dtype) {
    // Bug fix: the previous version switched on the raw values 0/1/2,
    // which do not match the enum (kFloatDtype = 1, kDoubleDtype = 2,
    // kInt32Dtype = 3), so every valid dtype hit the wrong case.
    case kFloatDtype: return 4;   // single-precision float.
    case kDoubleDtype: return 8;  // double-precision float.
    case kInt32Dtype: return 4;   // 32-bit signed integer.
    default:
      KALDI_ERR << "Invalid data-type " << int32(dtype);
      return 0;  // Not reached if KALDI_ERR throws; silences compiler warning.
  }
}
/// Enumeration that says what strides we should choose when allocating
/// a Tensor.
enum StridePolicy {
  kKeepStrideOrder, // Means: keep the size-ordering of the strides from the
                    // source Tensor (but the chosen strides will all be
                    // positive even if some of the source Tensor's strides
                    // were negative).
  kNormalized,      // Means: strides for dimensions that are != 1 are ordered from
                    // greatest to smallest as in a "C" array in the public
                    // numbering, or smallest to greatest in the private numbering.
                    // Per our policy, any dimension that is 1 will be given a zero stride.
                    // C.f. "Normalized strides" in pattern.h
  kCopyStrides      // Means: use the exact strides provided.
};
/// Enumeration that says whether to zero a freshly initialized Tensor.  Note:
/// the Tensor won't actually be zeroed when you construct it, it will be zeroed
/// whenever it's actually needed (delayed allocation).
enum InitializePolicy {
  kZeroData,      // The data will (lazily) be zeroed.
  kUninitialized  // The data is left uninitialized.
};
/// This enumeration value lists the unary functions that we might
/// want to apply to Tensors; it exists so that much of the glue
/// code can be templated.
enum UnaryFunctionEnum {
  kUnaryFunctionExp,
  kUnaryFunctionLog,
  kUnaryFunctionRelu,
  kUnaryFunctionInvert,  // NOTE(review): presumably elementwise 1/x — confirm
                         // against the implementation that dispatches on this.
  kUnaryFunctionSquare
  // TODO: add more.
};
/// This enumeration value lists the binary functions that we might
/// want to apply to Tensors; it exists so that much of the glue
/// code can be templated.  (Note: multiplication is not counted
/// here; that is a special case as it will generally go to BLAS).
enum BinaryFunctionEnum {
  kBinaryFunctionAdd,
  kBinaryFunctionDivide,
  kBinaryFunctionMax,
  kBinaryFunctionMin
};
/// Enumeration describing how a Tensor's data is about to be used by an
/// operation.  NOTE(review): the per-member meanings below are inferred from
/// the names and from the delayed-allocation comment on InitializePolicy —
/// confirm against the code that consumes these values.
enum TensorUseEnum {
  kRead,                // The data is only read.
  kReadWrite,           // The data is read and also written.
  kWrite,               // The data is written without being read.
  kCheckUninitialized,  // Presumably: assert the data is still uninitialized.
  kInitialize,          // Presumably: the data is being initialized.
  kReadAndInvalidate,   // Presumably: read, then contents may be discarded.
  kInvalidate           // Presumably: contents may be discarded unread.
};
// Maximum number of axes a Tensor may have.
// In practice we don't expect user-owned tensors with num-axes greater than 5
// to exist, but there are certain manipulations we do when simplifying matrix
// multiplications that temporarily add an extra dimension, and it's most
// convenient to just increase the maximum.
#define KALDI_TENSOR_MAX_AXES 6
} // namespace tensor
} // namespace kaldi
#endif // KALDI_TENSOR_TENSOR_COMMON_H_