-
Notifications
You must be signed in to change notification settings - Fork 74k
/
types.h
530 lines (448 loc) · 19.9 KB
/
types.h
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
231
232
233
234
235
236
237
238
239
240
241
242
243
244
245
246
247
248
249
250
251
252
253
254
255
256
257
258
259
260
261
262
263
264
265
266
267
268
269
270
271
272
273
274
275
276
277
278
279
280
281
282
283
284
285
286
287
288
289
290
291
292
293
294
295
296
297
298
299
300
301
302
303
304
305
306
307
308
309
310
311
312
313
314
315
316
317
318
319
320
321
322
323
324
325
326
327
328
329
330
331
332
333
334
335
336
337
338
339
340
341
342
343
344
345
346
347
348
349
350
351
352
353
354
355
356
357
358
359
360
361
362
363
364
365
366
367
368
369
370
371
372
373
374
375
376
377
378
379
380
381
382
383
384
385
386
387
388
389
390
391
392
393
394
395
396
397
398
399
400
401
402
403
404
405
406
407
408
409
410
411
412
413
414
415
416
417
418
419
420
421
422
423
424
425
426
427
428
429
430
431
432
433
434
435
436
437
438
439
440
441
442
443
444
445
446
447
448
449
450
451
452
453
454
455
456
457
458
459
460
461
462
463
464
465
466
467
468
469
470
471
472
473
474
475
476
477
478
479
480
481
482
483
484
485
486
487
488
489
490
491
492
493
494
495
496
497
498
499
500
501
502
503
504
505
506
507
508
509
510
511
512
513
514
515
516
517
518
519
520
521
522
523
524
525
526
527
528
529
530
/* Copyright 2015 The TensorFlow Authors. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
==============================================================================*/
#ifndef TENSORFLOW_CORE_FRAMEWORK_TYPES_H_
#define TENSORFLOW_CORE_FRAMEWORK_TYPES_H_
#include <cstddef>
#include <map>
#include <set>
#include <string>
#include "absl/numeric/bits.h"
#include "unsupported/Eigen/CXX11/Tensor" // from @eigen_archive
#include "tensorflow/core/framework/bfloat16.h"
#include "tensorflow/core/framework/full_type.pb.h"
#include "tensorflow/core/framework/numeric_types.h"
#include "tensorflow/core/framework/resource_handle.h"
#include "tensorflow/core/framework/types.pb.h"
#include "tensorflow/core/lib/core/stringpiece.h"
#include "tensorflow/core/lib/gtl/array_slice.h"
#include "tensorflow/core/lib/gtl/inlined_vector.h"
#include "tensorflow/core/platform/logging.h"
#include "tensorflow/core/platform/types.h"
#include "tsl/framework/device_type.h"
namespace tensorflow {
class Variant;
// MemoryType is used to describe whether input or output Tensors of
// an OpKernel should reside in "Host memory" (e.g., CPU memory) or
// "Device" Memory (CPU memory for CPU devices, GPU memory for GPU
// devices).
enum MemoryType {
  DEVICE_MEMORY = 0,  // Memory local to the executing device.
  HOST_MEMORY = 1,    // Host (CPU-accessible) memory.
};
// Re-export tsl::DeviceType into the tensorflow namespace.
using tsl::DeviceType;  // NOLINT

// Convenient constants that can be passed to a DeviceType constructor.
// See comments for CreateOpKernel in op_kernel.h for uses of DEVICE_DEFAULT
// and other device types.
TF_EXPORT extern const char* const DEVICE_DEFAULT;     // "DEFAULT"
TF_EXPORT extern const char* const DEVICE_CPU;         // "CPU"
TF_EXPORT extern const char* const DEVICE_GPU;         // "GPU"
TF_EXPORT extern const char* const DEVICE_TPU;         // "TPU"
TF_EXPORT extern const char* const DEVICE_TPU_SYSTEM;  // "TPU_SYSTEM"
// DeviceName<Device>::value maps an Eigen device class to its device name
// string. Only the specializations below are usable; the primary template is
// intentionally empty, so using an unsupported Device fails to compile.
// `value` is only declared here; it is defined out of line.
template <typename Device>
struct DeviceName {};

template <>
struct DeviceName<Eigen::ThreadPoolDevice> {
  static const std::string value;
};

#if (defined(GOOGLE_CUDA) && GOOGLE_CUDA) || \
    (defined(TENSORFLOW_USE_ROCM) && TENSORFLOW_USE_ROCM)
// The GPU specialization exists only in CUDA/ROCm builds.
template <>
struct DeviceName<Eigen::GpuDevice> {
  static const std::string value;
};
#endif  // GOOGLE_CUDA || TENSORFLOW_USE_ROCM
// Common container aliases. The InlinedVector variants store up to 4
// elements without a heap allocation; the Span ("Slice") variants are
// non-owning views.
typedef absl::InlinedVector<MemoryType, 4UL> MemoryTypeVector;
typedef absl::Span<const MemoryType> MemoryTypeSlice;

typedef absl::InlinedVector<DataType, 4UL> DataTypeVector;
typedef absl::Span<const DataType> DataTypeSlice;

typedef absl::InlinedVector<DeviceType, 4UL> DeviceTypeVector;
// Pairs a DeviceType with an integer priority.
typedef absl::InlinedVector<std::pair<DeviceType, int32>, 4UL>
    PrioritizedDeviceTypeVector;
// Convert the enums to strings for errors:
std::string DataTypeString(DataType dtype);
std::string DeviceTypeString(const DeviceType& device_type);
std::string DataTypeSliceString(const DataTypeSlice dtypes);

// Convenience overload: stringifies a DataTypeVector by viewing it as a
// DataTypeSlice.
inline std::string DataTypeVectorString(const DataTypeVector& dtypes) {
  return DataTypeSliceString(dtypes);
}
// DataTypeSet represents a set of DataType values as a simple and efficient
// bit mask. Note that DataTypeSet cannot represent all DataType values; it
// cannot represent any of the DT_*_REF values.
class DataTypeSet {
 private:
  // Bit i is set iff the set contains the DataType whose enum value is i.
  const uint32 mask_;
  // Width of the mask; DataType enum values must be < kNumBits to be
  // representable (DT_*_REF values are >= 100 and therefore are not).
  static constexpr uint32 kNumBits = 32;

 public:
  // Copying is trivial, so let the compiler generate it (the defaulted copy
  // constructor is constexpr for this literal type).
  constexpr DataTypeSet(const DataTypeSet& other) = default;
  explicit constexpr DataTypeSet(uint32 mask) : mask_(mask) {}

  // Returns true iff `dt` is in the set. Values that cannot be represented
  // (enum value >= kNumBits) are always reported as absent.
  constexpr bool Contains(DataType dt) const {
    return (static_cast<uint32>(dt) < kNumBits) &&
           ((mask_ >> static_cast<uint32>(dt)) & 1u) != 0u;
  }

  // Forward iterator over the DataType values present in the set, visited in
  // increasing enum order.
  class Iterator {
    const DataTypeSet& set_;
    uint32 pos_;

   public:
    Iterator(const DataTypeSet& set, uint32 pos) : set_(set), pos_(pos) {
      DCHECK_LE(pos, kNumBits);
    }
    DataType operator*() const { return static_cast<DataType>(pos_); }
    // Advances to the next set bit, or just past the highest set bit (the
    // end position) when no set bits remain.
    Iterator& operator++() {
      ++pos_;
      DCHECK_LE(pos_, kNumBits);
      if (pos_ < kNumBits) {
        const uint32 remaining_mask = set_.mask_ >> pos_;
        if (remaining_mask != 0u) {
          // Jump directly to the next bit that is set to 1.
          pos_ += absl::countr_zero(remaining_mask);
        }
      }
      DCHECK_LE(pos_, kNumBits);
      return *this;
    }
    bool operator==(const Iterator& other) const { return pos_ == other.pos_; }
    bool operator!=(const Iterator& other) const { return !(*this == other); }
    size_t operator-(const Iterator& other) const {
      return this->pos_ - other.pos_;
    }
  };

  Iterator begin() const {
    // The begin position is the index of the first bit set to 1 in the entire
    // bit mask. If there are no bits set to 1, then the index is 0.
    if (mask_ != 0) {
      return Iterator(*this, absl::countr_zero(mask_));
    }
    // The set is empty.
    return Iterator(*this, 0);
  }

  Iterator end() const {
    // The end position is the index of the highest bit that is set, plus 1.
    // If there are no bits set to 1, then the index is 0.
    if (mask_ != 0) {
      return Iterator(*this, kNumBits - absl::countl_zero(mask_));
    }
    // The set is empty.
    return Iterator(*this, 0);
  }

  // Number of DataType values in the set (population count of the mask).
  size_t size() const { return absl::popcount(mask_); }

  // Set union.
  constexpr DataTypeSet operator|(const DataTypeSet& other) const {
    return DataTypeSet(mask_ | other.mask_);
  }
};
// If "sp" names a valid type, store it in "*dt" and return true. Otherwise,
// return false.
bool DataTypeFromString(StringPiece sp, DataType* dt);

// Returns the singleton DataTypeSet containing exactly `dt`, by setting the
// bit at the enum's position. Only valid for non-ref types (ref enum values
// exceed the 32-bit mask; see kDataTypeRefOffset below).
constexpr inline DataTypeSet ToSet(DataType dt) {
  return DataTypeSet(1u << static_cast<uint32>(dt));
}
// DT_FLOAT + kDataTypeRefOffset == DT_FLOAT_REF, etc.
enum { kDataTypeRefOffset = 100 };

// Returns true iff `dtype` is one of the DT_*_REF values, i.e. its enum
// value lies above kDataTypeRefOffset.
inline bool IsRefType(DataType dtype) {
  return dtype > static_cast<DataType>(kDataTypeRefOffset);
}
// Returns the DT_*_REF counterpart of a non-ref `dtype`.
// Precondition (DCHECK'd): `dtype` is not already a ref type.
inline DataType MakeRefType(DataType dtype) {
  DCHECK(!IsRefType(dtype));
  return static_cast<DataType>(dtype + kDataTypeRefOffset);
}
// Returns the non-ref counterpart of a DT_*_REF `dtype`.
// Precondition (DCHECK'd): `dtype` is a ref type.
inline DataType RemoveRefType(DataType dtype) {
  DCHECK(IsRefType(dtype));
  return static_cast<DataType>(dtype - kDataTypeRefOffset);
}
// Strips the ref offset if `dtype` is a DT_*_REF value; non-ref values are
// returned unchanged.
inline DataType BaseType(DataType dtype) {
  if (IsRefType(dtype)) {
    return RemoveRefType(dtype);
  }
  return dtype;
}
// Returns true if the actual type is the same as or ref of the expected type.
inline bool TypesCompatible(DataType expected, DataType actual) {
  if (expected == actual) {
    return true;
  }
  return BaseType(actual) == expected;
}
// The set of every supported non-reference DataType.
// Does not include _ref types.
constexpr DataTypeSet kAllTypes =
    ToSet(DT_FLOAT) | ToSet(DT_DOUBLE) | ToSet(DT_INT32) | ToSet(DT_UINT8) |
    ToSet(DT_INT16) | ToSet(DT_UINT16) | ToSet(DT_INT8) | ToSet(DT_STRING) |
    ToSet(DT_COMPLEX64) | ToSet(DT_COMPLEX128) | ToSet(DT_INT64) |
    ToSet(DT_BOOL) | ToSet(DT_QINT8) | ToSet(DT_QUINT8) | ToSet(DT_QINT16) |
    ToSet(DT_QUINT16) | ToSet(DT_QINT32) | ToSet(DT_HALF) | ToSet(DT_RESOURCE) |
    ToSet(DT_VARIANT) | ToSet(DT_UINT32) | ToSet(DT_UINT64) |
    ToSet(DT_BFLOAT16) | ToSet(DT_FLOAT8_E5M2) | ToSet(DT_FLOAT8_E4M3FN) |
    ToSet(DT_INT4) | ToSet(DT_UINT4);
inline const DataTypeSet& AllTypes() { return kAllTypes; }
// Full (non-mobile, or selective-registration) build: the complete type sets.
#if !defined(IS_MOBILE_PLATFORM) || defined(SUPPORT_SELECTIVE_REGISTRATION)

// Types that support '<' and '>'.
constexpr DataTypeSet kRealNumberTypes =
    ToSet(DT_FLOAT) | ToSet(DT_DOUBLE) | ToSet(DT_INT32) | ToSet(DT_INT64) |
    ToSet(DT_UINT8) | ToSet(DT_INT16) | ToSet(DT_INT8) | ToSet(DT_UINT16) |
    ToSet(DT_HALF) | ToSet(DT_UINT32) | ToSet(DT_UINT64) | ToSet(DT_BFLOAT16);
inline const DataTypeSet& RealNumberTypes() { return kRealNumberTypes; }
// Return the list of all numeric types.
// Includes complex and quantized types.
// NOTE: On Android, we only include the float and int32 types for now.
// Declared constexpr for consistency with the sibling set constants in this
// branch (kRealNumberTypes, kQuantizedTypes); ToSet and operator| are
// constexpr, so the initializer is a constant expression.
constexpr DataTypeSet kNumberTypes =
    ToSet(DT_FLOAT) | ToSet(DT_DOUBLE) | ToSet(DT_INT64) | ToSet(DT_INT32) |
    ToSet(DT_UINT8) | ToSet(DT_UINT16) | ToSet(DT_INT16) | ToSet(DT_INT8) |
    ToSet(DT_COMPLEX64) | ToSet(DT_COMPLEX128) | ToSet(DT_QINT8) |
    ToSet(DT_QUINT8) | ToSet(DT_QINT16) | ToSet(DT_QUINT16) | ToSet(DT_QINT32) |
    ToSet(DT_HALF) | ToSet(DT_UINT32) | ToSet(DT_UINT64) | ToSet(DT_BFLOAT16);
inline const DataTypeSet& NumberTypes() { return kNumberTypes; }
// The quantized (DT_Q*) types.
constexpr DataTypeSet kQuantizedTypes = ToSet(DT_QINT8) | ToSet(DT_QUINT8) |
                                        ToSet(DT_QINT16) | ToSet(DT_QUINT16) |
                                        ToSet(DT_QINT32);
inline const DataTypeSet& QuantizedTypes() { return kQuantizedTypes; }
// Types that support '<' and '>', including quantized types.
// Declared constexpr for consistency with the sibling set constants in this
// branch; the initializer is a constant expression.
constexpr DataTypeSet kRealAndQuantizedTypes =
    ToSet(DT_FLOAT) | ToSet(DT_DOUBLE) | ToSet(DT_INT32) | ToSet(DT_INT64) |
    ToSet(DT_UINT8) | ToSet(DT_UINT16) | ToSet(DT_INT16) | ToSet(DT_INT8) |
    ToSet(DT_QINT8) | ToSet(DT_QUINT8) | ToSet(DT_QINT16) | ToSet(DT_QUINT16) |
    ToSet(DT_QINT32) | ToSet(DT_HALF) | ToSet(DT_BFLOAT16);
inline const DataTypeSet& RealAndQuantizedTypes() {
  return kRealAndQuantizedTypes;
}
#elif defined(__ANDROID_TYPES_FULL__)

// Mobile build with __ANDROID_TYPES_FULL__: reduced type sets. Note the
// accessors return by value in the mobile branches, unlike the non-mobile
// branch above which returns by const reference.
constexpr DataTypeSet kRealNumberTypes =
    ToSet(DT_FLOAT) | ToSet(DT_INT32) | ToSet(DT_INT64) | ToSet(DT_HALF);
inline DataTypeSet RealNumberTypes() { return kRealNumberTypes; }

constexpr DataTypeSet kNumberTypes =
    ToSet(DT_FLOAT) | ToSet(DT_INT32) | ToSet(DT_INT64) | ToSet(DT_QINT8) |
    ToSet(DT_QUINT8) | ToSet(DT_QINT32) | ToSet(DT_HALF);
inline DataTypeSet NumberTypes() { return kNumberTypes; }

constexpr DataTypeSet kQuantizedTypes = ToSet(DT_QINT8) | ToSet(DT_QUINT8) |
                                        ToSet(DT_QINT16) | ToSet(DT_QUINT16) |
                                        ToSet(DT_QINT32);
inline DataTypeSet QuantizedTypes() { return kQuantizedTypes; }

constexpr DataTypeSet kRealAndQuantizedTypes =
    ToSet(DT_FLOAT) | ToSet(DT_INT32) | ToSet(DT_INT64) | ToSet(DT_QINT8) |
    ToSet(DT_QUINT8) | ToSet(DT_QINT16) | ToSet(DT_QUINT16) | ToSet(DT_QINT32) |
    ToSet(DT_HALF);
inline DataTypeSet RealAndQuantizedTypes() { return kRealAndQuantizedTypes; }

#else  // defined(IS_MOBILE_PLATFORM) && !defined(__ANDROID_TYPES_FULL__)

// Minimal mobile build: float and int32 (plus a few quantized types) only.
constexpr DataTypeSet kRealNumberTypes = ToSet(DT_FLOAT) | ToSet(DT_INT32);
inline DataTypeSet RealNumberTypes() { return kRealNumberTypes; }

constexpr DataTypeSet kNumberTypes = ToSet(DT_FLOAT) | ToSet(DT_INT32) |
                                     ToSet(DT_QINT8) | ToSet(DT_QUINT8) |
                                     ToSet(DT_QINT32);
inline DataTypeSet NumberTypes() { return kNumberTypes; }

constexpr DataTypeSet kQuantizedTypes = ToSet(DT_QINT8) | ToSet(DT_QUINT8) |
                                        ToSet(DT_QINT16) | ToSet(DT_QUINT16) |
                                        ToSet(DT_QINT32);
inline DataTypeSet QuantizedTypes() { return kQuantizedTypes; }

constexpr DataTypeSet kRealAndQuantizedTypes =
    ToSet(DT_FLOAT) | ToSet(DT_INT32) | ToSet(DT_QINT8) | ToSet(DT_QUINT8) |
    ToSet(DT_QINT16) | ToSet(DT_QUINT16) | ToSet(DT_QINT32);
inline DataTypeSet RealAndQuantizedTypes() { return kRealAndQuantizedTypes; }

#endif  // defined(IS_MOBILE_PLATFORM)
// Validates type T for whether it is a supported DataType.
template <class T>
struct IsValidDataType;

// DataTypeToEnum<T>::v() and DataTypeToEnum<T>::value are the DataType
// constants for T, e.g. DataTypeToEnum<float>::v() is DT_FLOAT.
// The primary template fires a static_assert for any T that has no
// specialization below, producing a clear compile error.
template <class T>
struct DataTypeToEnum {
  static_assert(IsValidDataType<T>::value, "Specified Data Type not supported");
};  // Specializations below

// EnumToDataType<VALUE>::Type is the type for DataType constant VALUE, e.g.
// EnumToDataType<DT_FLOAT>::Type is float.
template <DataType VALUE>
struct EnumToDataType {};  // Specializations below
// Template specialization for both DataTypeToEnum and EnumToDataType.
// For each (TYPE, ENUM) pair this emits three specializations:
//   - DataTypeToEnum<TYPE> with v(), ref() and value == ENUM,
//   - IsValidDataType<TYPE> with value == true,
//   - EnumToDataType<ENUM> with Type == TYPE.
// (No comments inside the macro body: they would hide the '\' continuations.)
#define MATCH_TYPE_AND_ENUM(TYPE, ENUM)           \
  template <>                                     \
  struct DataTypeToEnum<TYPE> {                   \
    static DataType v() { return ENUM; }          \
    static DataType ref() { return MakeRefType(ENUM); } \
    static constexpr DataType value = ENUM;       \
  };                                              \
  template <>                                     \
  struct IsValidDataType<TYPE> {                  \
    static constexpr bool value = true;           \
  };                                              \
  template <>                                     \
  struct EnumToDataType<ENUM> {                   \
    typedef TYPE Type;                            \
  }
// The canonical fixed-width / well-known (TYPE, ENUM) pairs. Platform-width
// integer types (long, long long, ...) are handled by the hand-written
// specializations that follow instead.
MATCH_TYPE_AND_ENUM(float, DT_FLOAT);
MATCH_TYPE_AND_ENUM(double, DT_DOUBLE);
MATCH_TYPE_AND_ENUM(int32, DT_INT32);
MATCH_TYPE_AND_ENUM(uint32, DT_UINT32);
MATCH_TYPE_AND_ENUM(uint16, DT_UINT16);
MATCH_TYPE_AND_ENUM(uint8, DT_UINT8);
MATCH_TYPE_AND_ENUM(int16, DT_INT16);
MATCH_TYPE_AND_ENUM(int8, DT_INT8);
MATCH_TYPE_AND_ENUM(tstring, DT_STRING);
MATCH_TYPE_AND_ENUM(complex64, DT_COMPLEX64);
MATCH_TYPE_AND_ENUM(complex128, DT_COMPLEX128);
MATCH_TYPE_AND_ENUM(bool, DT_BOOL);
MATCH_TYPE_AND_ENUM(qint8, DT_QINT8);
MATCH_TYPE_AND_ENUM(quint8, DT_QUINT8);
MATCH_TYPE_AND_ENUM(qint16, DT_QINT16);
MATCH_TYPE_AND_ENUM(quint16, DT_QUINT16);
MATCH_TYPE_AND_ENUM(qint32, DT_QINT32);
MATCH_TYPE_AND_ENUM(bfloat16, DT_BFLOAT16);
MATCH_TYPE_AND_ENUM(Eigen::half, DT_HALF);
MATCH_TYPE_AND_ENUM(float8_e5m2, DT_FLOAT8_E5M2);
MATCH_TYPE_AND_ENUM(float8_e4m3fn, DT_FLOAT8_E4M3FN);
MATCH_TYPE_AND_ENUM(int4, DT_INT4);
MATCH_TYPE_AND_ENUM(uint4, DT_UINT4);
MATCH_TYPE_AND_ENUM(ResourceHandle, DT_RESOURCE);
MATCH_TYPE_AND_ENUM(Variant, DT_VARIANT);
// `long` has a platform-dependent width, so it cannot go through
// MATCH_TYPE_AND_ENUM: map it to DT_INT32 where sizeof(long) == 4 and to
// DT_INT64 otherwise.
template <>
struct DataTypeToEnum<long> {
  static DataType v() { return value; }
  static DataType ref() { return MakeRefType(value); }
  static constexpr DataType value = sizeof(long) == 4 ? DT_INT32 : DT_INT64;
};
template <>
struct IsValidDataType<long> {
  static constexpr bool value = true;
};
// The reverse mapping for DT_INT64 always uses the fixed-width int64_t.
template <>
struct EnumToDataType<DT_INT64> {
  typedef int64_t Type;
};

// Same platform-width treatment for `unsigned long`.
template <>
struct DataTypeToEnum<unsigned long> {
  static DataType v() { return value; }
  static DataType ref() { return MakeRefType(value); }
  static constexpr DataType value =
      sizeof(unsigned long) == 4 ? DT_UINT32 : DT_UINT64;
};
template <>
struct IsValidDataType<unsigned long> {
  static constexpr bool value = true;
};
template <>
struct EnumToDataType<DT_UINT64> {
  typedef tensorflow::uint64 Type;
};

// `long long` is at least 64 bits on all supported platforms, so it maps
// unconditionally to DT_INT64 (and `unsigned long long` to DT_UINT64).
template <>
struct DataTypeToEnum<long long> {
  static DataType v() { return DT_INT64; }
  static DataType ref() { return MakeRefType(DT_INT64); }
  static constexpr DataType value = DT_INT64;
};
template <>
struct IsValidDataType<long long> {
  static constexpr bool value = true;
};
template <>
struct DataTypeToEnum<unsigned long long> {
  static DataType v() { return DT_UINT64; }
  static DataType ref() { return MakeRefType(DT_UINT64); }
  static constexpr DataType value = DT_UINT64;
};
template <>
struct IsValidDataType<unsigned long long> {
  static constexpr bool value = true;
};
#undef MATCH_TYPE_AND_ENUM

// All types not specialized are marked invalid.
template <class T>
struct IsValidDataType {
  static constexpr bool value = false;
};

// Extra validity checking; not part of public API.
static_assert(IsValidDataType<int64_t>::value, "Incorrect impl for int64");
static_assert(IsValidDataType<int32>::value, "Incorrect impl for int32");
// TODO(jeff): Maybe unify this with Tensor::CanUseDMA, or the underlying
// is_simple<T> in tensor.cc (and possible choose a more general name?)
// Note this set excludes DT_STRING, DT_RESOURCE and DT_VARIANT.
constexpr DataTypeSet kDataTypesCanUseMemcpy =
    ToSet(DT_FLOAT) | ToSet(DT_DOUBLE) | ToSet(DT_INT32) | ToSet(DT_UINT32) |
    ToSet(DT_UINT8) | ToSet(DT_UINT16) | ToSet(DT_INT16) | ToSet(DT_INT8) |
    ToSet(DT_COMPLEX64) | ToSet(DT_COMPLEX128) | ToSet(DT_INT64) |
    ToSet(DT_UINT64) | ToSet(DT_BOOL) | ToSet(DT_QINT8) | ToSet(DT_QUINT8) |
    ToSet(DT_QINT16) | ToSet(DT_QUINT16) | ToSet(DT_QINT32) |
    ToSet(DT_BFLOAT16) | ToSet(DT_HALF) | ToSet(DT_FLOAT8_E5M2) |
    ToSet(DT_FLOAT8_E4M3FN) | ToSet(DT_INT4) | ToSet(DT_UINT4);
// Returns true iff tensors of type `dt` can be copied with memcpy.
inline bool DataTypeCanUseMemcpy(DataType dt) {
  return kDataTypesCanUseMemcpy.Contains(dt);
}

// Returns true iff 'dt' is a real, non-quantized floating point type.
constexpr DataTypeSet kDataTypeIsFloating =
    ToSet(DT_HALF) | ToSet(DT_BFLOAT16) | ToSet(DT_FLOAT) | ToSet(DT_DOUBLE) |
    ToSet(DT_FLOAT8_E4M3FN) | ToSet(DT_FLOAT8_E5M2);
inline bool DataTypeIsFloating(DataType dt) {
  return kDataTypeIsFloating.Contains(dt);
}
// Returns true iff 'dt' is a numeric type.
inline bool DataTypeIsNumeric(DataType dt) { return kNumberTypes.Contains(dt); }

// Returns true iff 'dt' is a complex type.
constexpr DataTypeSet kDataTypeIsComplex =
    ToSet(DT_COMPLEX64) | ToSet(DT_COMPLEX128);
inline bool DataTypeIsComplex(DataType dt) {
  return kDataTypeIsComplex.Contains(dt);
}

// Returns true iff 'dt' is one of the quantized (DT_Q*) types.
inline bool DataTypeIsQuantized(DataType dt) {
  return kQuantizedTypes.Contains(dt);
}

// Is the dtype nonquantized integral?
constexpr DataTypeSet kDataTypeIsInteger =
    ToSet(DT_INT4) | ToSet(DT_UINT4) | ToSet(DT_INT8) | ToSet(DT_UINT8) |
    ToSet(DT_INT16) | ToSet(DT_UINT16) | ToSet(DT_INT32) | ToSet(DT_UINT32) |
    ToSet(DT_INT64) | ToSet(DT_UINT64);
inline bool DataTypeIsInteger(DataType dt) {
  return kDataTypeIsInteger.Contains(dt);
}

// Is the dtype a signed integral type?
constexpr DataTypeSet kDataTypeIsSigned = ToSet(DT_INT4) | ToSet(DT_INT8) |
                                          ToSet(DT_INT16) | ToSet(DT_INT32) |
                                          ToSet(DT_INT64);
inline bool DataTypeIsSigned(DataType dt) {
  return kDataTypeIsSigned.Contains(dt);
}

// Is the dtype an unsigned integral type?
constexpr DataTypeSet kDataTypeIsUnsigned = ToSet(DT_UINT4) | ToSet(DT_UINT8) |
                                            ToSet(DT_UINT16) |
                                            ToSet(DT_UINT32) | ToSet(DT_UINT64);
inline bool DataTypeIsUnsigned(DataType dt) {
  return kDataTypeIsUnsigned.Contains(dt);
}
// Size of a single element of type `dt`.
// Returns a 0 on failure.
int DataTypeSize(DataType dt);

// Returns HOST_MEMORY if `dtype` is always on host or is a DT_INT32,
// DEVICE_MEMORY otherwise.
MemoryType MTypeFromDType(const DataType dtype);

// Returns HOST_MEMORY if `dtype` is always on host, DEVICE_MEMORY otherwise.
// The reason we have MTypeFromDType() and MTypeFromDTypeIntsOnDevice(): for
// GPUs, we would like to keep int operations on host for performance concerns.
// But for TPUs (and other devices), int operations are placed on device.
MemoryType MTypeFromDTypeIntsOnDevice(const DataType dtype);

// Types that always sit on host: DT_STRING, DT_STRING_REF, DT_RESOURCE.
// For DT_RESOURCE, the handle always sits on host (even if the underlying
// object has device-allocated resources).
bool DataTypeAlwaysOnHost(DataType dt);
// FullType implementation.

// Reference container for a type definition. These values are usually interned.
// These containers admit a notion of ordering for efficient access. The
// ordering has no semantic otherwise.
struct TypeRef {
  std::shared_ptr<FullTypeDef> full_type;

  // Equality and ordering compare the serialized proto bytes.
  // NOTE(review): this presumes serialization is deterministic for equal
  // messages (true within a process for the same build) — confirm before
  // relying on it across serializer versions.
  bool operator==(const TypeRef& other) const {
    // TODO(mdan): This should be more efficient.
    return full_type->SerializeAsString() ==
           other.full_type->SerializeAsString();
  }
  bool operator<(const TypeRef& other) const {
    return full_type->SerializeAsString() <
           other.full_type->SerializeAsString();
  }
};

// Hash functor consistent with TypeRef::operator==: hashes the serialized
// proto bytes.
struct TypeHasher {
  std::size_t operator()(const TypeRef& k) const {
    return std::hash<std::string>()(k.full_type->SerializeAsString());
  }
};
// Maps a legacy DType proto enum to an equivalent FullType ID,
// i.e. sets the type_id of t based on dtype.
void map_dtype_to_tensor(const DataType& dtype, FullTypeDef& t);

// Sets the type_id of t to TFT_TENSOR and adds a child arg by mapping
// a legacy DType proto enum to an equivalent FullType ID, e.g.
// if dtype is DT_FLOAT, sets t to TFT_TENSOR[TFT_FLOAT].
void map_dtype_to_child_of_tensor(const DataType& dtype, FullTypeDef& t);

}  // namespace tensorflow

#endif  // TENSORFLOW_CORE_FRAMEWORK_TYPES_H_