diff --git a/.zenodo.json b/.zenodo.json
new file mode 100644
index 00000000000000..7161180c51ae3e
--- /dev/null
+++ b/.zenodo.json
@@ -0,0 +1,13 @@
+{
+ "description": "TensorFlow is an end-to-end open source platform for machine learning. It has a comprehensive, flexible ecosystem of tools, libraries, and community resources that lets researchers push the state-of-the-art in ML and developers easily build and deploy ML-powered applications.",
+ "license": "Apache-2.0",
+ "title": "TensorFlow",
+ "upload_type": "software",
+ "creators": [
+ {
+ "name": "TensorFlow Developers"
+ }
+ ],
+ "access_right": "open",
+ "notes": "Specific TensorFlow versions can be found in the \"Versions\" list on the right side of this page.\nSee the full list of authors on GitHub."
+}
diff --git a/RELEASE.md b/RELEASE.md
index 5dc9456da9c8b1..39db9362064462 100644
--- a/RELEASE.md
+++ b/RELEASE.md
@@ -1,3 +1,208 @@
+# Release 2.3.4
+
+This release introduces several vulnerability fixes:
+
+* Fixes a heap out of bounds access in sparse reduction operations ([CVE-2021-37635](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2021-37635))
+* Fixes a floating point exception in `SparseDenseCwiseDiv` ([CVE-2021-37636](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2021-37636))
+* Fixes a null pointer dereference in `CompressElement` ([CVE-2021-37637](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2021-37637))
+* Fixes a null pointer dereference in `RaggedTensorToTensor` ([CVE-2021-37638](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2021-37638))
+* Fixes a null pointer dereference and a heap OOB read arising from operations restoring tensors ([CVE-2021-37639](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2021-37639))
+* Fixes an integer division by 0 in sparse reshaping ([CVE-2021-37640](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2021-37640))
+* Fixes a division by 0 in `ResourceScatterDiv` ([CVE-2021-37642](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2021-37642))
+* Fixes a heap OOB in `RaggedGather` ([CVE-2021-37641](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2021-37641))
+* Fixes a `std::abort` raised from `TensorListReserve` ([CVE-2021-37644](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2021-37644))
+* Fixes a null pointer dereference in `MatrixDiagPartOp` ([CVE-2021-37643](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2021-37643))
+* Fixes an integer overflow due to conversion to unsigned ([CVE-2021-37645](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2021-37645))
+* Fixes a bad allocation error in `StringNGrams` caused by integer conversion ([CVE-2021-37646](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2021-37646))
+* Fixes a null pointer dereference in `SparseTensorSliceDataset` ([CVE-2021-37647](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2021-37647))
+* Fixes an incorrect validation of `SaveV2` inputs ([CVE-2021-37648](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2021-37648))
+* Fixes a null pointer dereference in `UncompressElement` ([CVE-2021-37649](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2021-37649))
+* Fixes a segfault and a heap buffer overflow in `{Experimental,}DatasetToTFRecord` ([CVE-2021-37650](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2021-37650))
+* Fixes a heap buffer overflow in `FractionalAvgPoolGrad` ([CVE-2021-37651](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2021-37651))
+* Fixes a use after free in boosted trees creation ([CVE-2021-37652](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2021-37652))
+* Fixes a division by 0 in `ResourceGather` ([CVE-2021-37653](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2021-37653))
+* Fixes a heap OOB and a `CHECK` fail in `ResourceGather` ([CVE-2021-37654](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2021-37654))
+* Fixes a heap OOB in `ResourceScatterUpdate` ([CVE-2021-37655](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2021-37655))
+* Fixes an undefined behavior arising from reference binding to nullptr in `RaggedTensorToSparse` ([CVE-2021-37656](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2021-37656))
+* Fixes an undefined behavior arising from reference binding to nullptr in `MatrixDiagV*` ops ([CVE-2021-37657](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2021-37657))
+* Fixes an undefined behavior arising from reference binding to nullptr in `MatrixSetDiagV*` ops ([CVE-2021-37658](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2021-37658))
+* Fixes an undefined behavior arising from reference binding to nullptr and heap OOB in binary cwise ops ([CVE-2021-37659](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2021-37659))
+* Fixes a division by 0 in inplace operations ([CVE-2021-37660](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2021-37660))
+* Fixes a crash caused by integer conversion to unsigned ([CVE-2021-37661](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2021-37661))
+* Fixes an undefined behavior arising from reference binding to nullptr in boosted trees ([CVE-2021-37662](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2021-37662))
+* Fixes a heap OOB in boosted trees ([CVE-2021-37664](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2021-37664))
+* Fixes vulnerabilities arising from incomplete validation in `QuantizeV2` ([CVE-2021-37663](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2021-37663))
+* Fixes vulnerabilities arising from incomplete validation in MKL requantization ([CVE-2021-37665](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2021-37665))
+* Fixes an undefined behavior arising from reference binding to nullptr in `RaggedTensorToVariant` ([CVE-2021-37666](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2021-37666))
+* Fixes an undefined behavior arising from reference binding to nullptr in unicode encoding ([CVE-2021-37667](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2021-37667))
+* Fixes an FPE in `tf.raw_ops.UnravelIndex` ([CVE-2021-37668](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2021-37668))
+* Fixes a crash in NMS ops caused by integer conversion to unsigned ([CVE-2021-37669](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2021-37669))
+* Fixes a heap OOB in `UpperBound` and `LowerBound` ([CVE-2021-37670](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2021-37670))
+* Fixes an undefined behavior arising from reference binding to nullptr in map operations ([CVE-2021-37671](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2021-37671))
+* Fixes a heap OOB in `SdcaOptimizerV2` ([CVE-2021-37672](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2021-37672))
+* Fixes a `CHECK`-fail in `MapStage` ([CVE-2021-37673](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2021-37673))
+* Fixes a vulnerability arising from incomplete validation in `MaxPoolGrad` ([CVE-2021-37674](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2021-37674))
+* Fixes an undefined behavior arising from reference binding to nullptr in shape inference ([CVE-2021-37676](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2021-37676))
+* Fixes a division by 0 in most convolution operators ([CVE-2021-37675](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2021-37675))
+* Fixes vulnerabilities arising from missing validation in shape inference for `Dequantize` ([CVE-2021-37677](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2021-37677))
+* Fixes an arbitrary code execution due to YAML deserialization ([CVE-2021-37678](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2021-37678))
+* Fixes a heap OOB in nested `tf.map_fn` with `RaggedTensor`s ([CVE-2021-37679](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2021-37679))
+* Fixes a division by zero in TFLite ([CVE-2021-37680](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2021-37680))
+* Fixes an NPE in TFLite ([CVE-2021-37681](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2021-37681))
+* Fixes a vulnerability arising from use of uninitialized value in TFLite ([CVE-2021-37682](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2021-37682))
+* Fixes an FPE in TFLite division operations ([CVE-2021-37683](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2021-37683))
+* Fixes an FPE in TFLite pooling operations ([CVE-2021-37684](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2021-37684))
+* Fixes an infinite loop in TFLite ([CVE-2021-37686](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2021-37686))
+* Fixes a heap OOB in TFLite ([CVE-2021-37685](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2021-37685))
+* Fixes a heap OOB in TFLite's `Gather*` implementations ([CVE-2021-37687](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2021-37687))
+* Fixes an undefined behavior arising from null pointer dereference in TFLite ([CVE-2021-37688](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2021-37688))
+* Fixes an undefined behavior arising from null pointer dereference in TFLite MLIR optimizations ([CVE-2021-37689](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2021-37689))
+* Fixes an FPE in LSH in TFLite ([CVE-2021-37691](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2021-37691))
+* Fixes a segfault on strings tensors with mismatched dimensions, arising in Go code ([CVE-2021-37692](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2021-37692))
+* Fixes a use after free and a potential segfault in shape inference functions ([CVE-2021-37690](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2021-37690))
+* Updates `curl` to `7.77.0` to handle [CVE-2021-22876](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2021-22876), [CVE-2021-22897](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2021-22897), [CVE-2021-22898](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2021-22898), and [CVE-2021-22901](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2021-22901).
+
+# Release 2.3.3
+
+This release introduces several vulnerability fixes:
+
+* Fixes a heap buffer overflow in `RaggedBinCount` ([CVE-2021-29512](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2021-29512))
+* Fixes a heap out of bounds write in `RaggedBinCount` ([CVE-2021-29514](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2021-29514))
+* Fixes a type confusion during tensor casts which leads to dereferencing null pointers ([CVE-2021-29513](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2021-29513))
+* Fixes a reference binding to null pointer in `MatrixDiag*` ops ([CVE-2021-29515](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2021-29515))
+* Fixes a null pointer dereference via invalid Ragged Tensors ([CVE-2021-29516](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2021-29516))
+* Fixes a division by zero in `Conv3D` ([CVE-2021-29517](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2021-29517))
+* Fixes vulnerabilities where session operations in eager mode lead to null pointer dereferences ([CVE-2021-29518](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2021-29518))
+* Fixes a `CHECK`-fail in `SparseCross` caused by type confusion ([CVE-2021-29519](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2021-29519))
+* Fixes a segfault in `SparseCountSparseOutput` ([CVE-2021-29521](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2021-29521))
+* Fixes a heap buffer overflow in `Conv3DBackprop*` ([CVE-2021-29520](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2021-29520))
+* Fixes a division by 0 in `Conv3DBackprop*` ([CVE-2021-29522](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2021-29522))
+* Fixes a `CHECK`-fail in `AddManySparseToTensorsMap` ([CVE-2021-29523](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2021-29523))
+* Fixes a division by 0 in `Conv2DBackpropFilter` ([CVE-2021-29524](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2021-29524))
+* Fixes a division by 0 in `Conv2DBackpropInput` ([CVE-2021-29525](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2021-29525))
+* Fixes a division by 0 in `Conv2D` ([CVE-2021-29526](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2021-29526))
+* Fixes a division by 0 in `QuantizedConv2D` ([CVE-2021-29527](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2021-29527))
+* Fixes a division by 0 in `QuantizedMul` ([CVE-2021-29528](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2021-29528))
+* Fixes vulnerabilities caused by invalid validation in `SparseMatrixSparseCholesky` ([CVE-2021-29530](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2021-29530))
+* Fixes a heap buffer overflow caused by rounding ([CVE-2021-29529](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2021-29529))
+* Fixes a `CHECK`-fail in `tf.raw_ops.EncodePng` ([CVE-2021-29531](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2021-29531))
+* Fixes a heap out of bounds read in `RaggedCross` ([CVE-2021-29532](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2021-29532))
+* Fixes a `CHECK`-fail in `DrawBoundingBoxes` ([CVE-2021-29533](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2021-29533))
+* Fixes a heap buffer overflow in `QuantizedMul` ([CVE-2021-29535](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2021-29535))
+* Fixes a `CHECK`-fail in `SparseConcat` ([CVE-2021-29534](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2021-29534))
+* Fixes a heap buffer overflow in `QuantizedResizeBilinear` ([CVE-2021-29537](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2021-29537))
+* Fixes a heap buffer overflow in `QuantizedReshape` ([CVE-2021-29536](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2021-29536))
+* Fixes a division by zero in `Conv2DBackpropFilter` ([CVE-2021-29538](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2021-29538))
+* Fixes a heap buffer overflow in `Conv2DBackpropFilter` ([CVE-2021-29540](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2021-29540))
+* Fixes a heap buffer overflow in `StringNGrams` ([CVE-2021-29542](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2021-29542))
+* Fixes a null pointer dereference in `StringNGrams` ([CVE-2021-29541](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2021-29541))
+* Fixes a `CHECK`-fail in `QuantizeAndDequantizeV4Grad` ([CVE-2021-29544](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2021-29544))
+* Fixes a `CHECK`-fail in `CTCGreedyDecoder` ([CVE-2021-29543](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2021-29543))
+* Fixes a heap buffer overflow in `SparseTensorToCSRSparseMatrix` ([CVE-2021-29545](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2021-29545))
+* Fixes a division by 0 in `QuantizedBiasAdd` ([CVE-2021-29546](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2021-29546))
+* Fixes a heap out of bounds in `QuantizedBatchNormWithGlobalNormalization` ([CVE-2021-29547](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2021-29547))
+* Fixes a division by 0 in `QuantizedBatchNormWithGlobalNormalization` ([CVE-2021-29548](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2021-29548))
+* Fixes a division by 0 in `QuantizedAdd` ([CVE-2021-29549](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2021-29549))
+* Fixes a division by 0 in `FractionalAvgPool` ([CVE-2021-29550](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2021-29550))
+* Fixes an OOB read in `MatrixTriangularSolve` ([CVE-2021-29551](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2021-29551))
+* Fixes a heap OOB in `QuantizeAndDequantizeV3` ([CVE-2021-29553](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2021-29553))
+* Fixes a `CHECK`-failure in `UnsortedSegmentJoin` ([CVE-2021-29552](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2021-29552))
+* Fixes a division by 0 in `DenseCountSparseOutput` ([CVE-2021-29554](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2021-29554))
+* Fixes a division by 0 in `FusedBatchNorm` ([CVE-2021-29555](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2021-29555))
+* Fixes a division by 0 in `SparseMatMul` ([CVE-2021-29557](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2021-29557))
+* Fixes a division by 0 in `Reverse` ([CVE-2021-29556](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2021-29556))
+* Fixes a heap buffer overflow in `SparseSplit` ([CVE-2021-29558](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2021-29558))
+* Fixes a heap OOB access in unicode ops ([CVE-2021-29559](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2021-29559))
+* Fixes a heap buffer overflow in `RaggedTensorToTensor` ([CVE-2021-29560](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2021-29560))
+* Fixes a `CHECK`-fail in `LoadAndRemapMatrix` ([CVE-2021-29561](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2021-29561))
+* Fixes a `CHECK`-fail in `tf.raw_ops.IRFFT` ([CVE-2021-29562](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2021-29562))
+* Fixes a `CHECK`-fail in `tf.raw_ops.RFFT` ([CVE-2021-29563](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2021-29563))
+* Fixes a null pointer dereference in `EditDistance` ([CVE-2021-29564](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2021-29564))
+* Fixes a null pointer dereference in `SparseFillEmptyRows` ([CVE-2021-29565](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2021-29565))
+* Fixes a heap OOB access in `Dilation2DBackpropInput` ([CVE-2021-29566](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2021-29566))
+* Fixes a reference binding to null in `ParameterizedTruncatedNormal` ([CVE-2021-29568](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2021-29568))
+* Fixes a set of vulnerabilities caused by lack of validation in `SparseDenseCwiseMul` ([CVE-2021-29567](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2021-29567))
+* Fixes a heap out of bounds read in `MaxPoolGradWithArgmax` ([CVE-2021-29570](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2021-29570))
+* Fixes a heap out of bounds read in `RequantizationRange` ([CVE-2021-29569](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2021-29569))
+* Fixes a memory corruption in `DrawBoundingBoxesV2` ([CVE-2021-29571](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2021-29571))
+* Fixes a reference binding to nullptr in `SdcaOptimizer` ([CVE-2021-29572](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2021-29572))
+* Fixes an overflow and a denial of service in `tf.raw_ops.ReverseSequence` ([CVE-2021-29575](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2021-29575))
+* Fixes a division by 0 in `MaxPoolGradWithArgmax` ([CVE-2021-29573](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2021-29573))
+* Fixes an undefined behavior in `MaxPool3DGradGrad` ([CVE-2021-29574](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2021-29574))
+* Fixes a heap buffer overflow in `MaxPool3DGradGrad` ([CVE-2021-29576](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2021-29576))
+* Fixes a heap buffer overflow in `AvgPool3DGrad` ([CVE-2021-29577](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2021-29577))
+* Fixes an undefined behavior and a `CHECK`-fail in `FractionalMaxPoolGrad` ([CVE-2021-29580](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2021-29580))
+* Fixes a heap buffer overflow in `FractionalAvgPoolGrad` ([CVE-2021-29578](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2021-29578))
+* Fixes a heap buffer overflow in `MaxPoolGrad` ([CVE-2021-29579](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2021-29579))
+* Fixes a segfault in `CTCBeamSearchDecoder` ([CVE-2021-29581](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2021-29581))
+* Fixes a heap OOB read in `tf.raw_ops.Dequantize` ([CVE-2021-29582](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2021-29582))
+* Fixes a `CHECK`-fail due to integer overflow ([CVE-2021-29584](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2021-29584))
+* Fixes a heap buffer overflow and undefined behavior in `FusedBatchNorm` ([CVE-2021-29583](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2021-29583))
+* Fixes a division by zero in padding computation in TFLite ([CVE-2021-29585](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2021-29585))
+* Fixes a division by zero in optimized pooling implementations in TFLite ([CVE-2021-29586](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2021-29586))
+* Fixes a division by zero in TFLite's implementation of `SpaceToDepth` ([CVE-2021-29587](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2021-29587))
+* Fixes a division by zero in TFLite's implementation of `GatherNd` ([CVE-2021-29589](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2021-29589))
+* Fixes a division by zero in TFLite's implementation of `TransposeConv` ([CVE-2021-29588](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2021-29588))
+* Fixes a heap OOB read in TFLite's implementation of `Minimum` or `Maximum` ([CVE-2021-29590](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2021-29590))
+* Fixes a null pointer dereference in TFLite's `Reshape` operator ([CVE-2021-29592](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2021-29592))
+* Fixes a stack overflow due to looping TFLite subgraph ([CVE-2021-29591](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2021-29591))
+* Fixes a division by zero in TFLite's implementation of `DepthToSpace` ([CVE-2021-29595](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2021-29595))
+* Fixes a division by zero in TFLite's convolution code ([CVE-2021-29594](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2021-29594))
+* Fixes a division by zero in TFLite's implementation of `EmbeddingLookup` ([CVE-2021-29596](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2021-29596))
+* Fixes a division by zero in TFLite's implementation of `BatchToSpaceNd` ([CVE-2021-29593](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2021-29593))
+* Fixes a division by zero in TFLite's implementation of `SpaceToBatchNd` ([CVE-2021-29597](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2021-29597))
+* Fixes a division by zero in TFLite's implementation of `SVDF` ([CVE-2021-29598](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2021-29598))
+* Fixes a division by zero in TFLite's implementation of `Split` ([CVE-2021-29599](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2021-29599))
+* Fixes a division by zero in TFLite's implementation of `OneHot` ([CVE-2021-29600](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2021-29600))
+* Fixes a division by zero in TFLite's implementation of `DepthwiseConv` ([CVE-2021-29602](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2021-29602))
+* Fixes a division by zero in TFLite's implementation of hashtable lookup ([CVE-2021-29604](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2021-29604))
+* Fixes an integer overflow in TFLite concatenation ([CVE-2021-29601](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2021-29601))
+* Fixes an integer overflow in TFLite memory allocation ([CVE-2021-29605](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2021-29605))
+* Fixes a heap OOB write in TFLite ([CVE-2021-29603](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2021-29603))
+* Fixes a heap OOB read in TFLite ([CVE-2021-29606](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2021-29606))
+* Fixes a heap OOB and null pointer dereference in `RaggedTensorToTensor` ([CVE-2021-29608](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2021-29608))
+* Fixes vulnerabilities caused by incomplete validation in `SparseAdd` ([CVE-2021-29609](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2021-29609))
+* Fixes vulnerabilities caused by incomplete validation in `SparseSparseMinimum` ([CVE-2021-29607](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2021-29607))
+* Fixes vulnerabilities caused by incomplete validation in `SparseReshape` ([CVE-2021-29611](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2021-29611))
+* Fixes vulnerabilities caused by invalid validation in `QuantizeAndDequantizeV2` ([CVE-2021-29610](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2021-29610))
+* Fixes a heap buffer overflow in `BandedTriangularSolve` ([CVE-2021-29612](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2021-29612))
+* Fixes vulnerabilities caused by incomplete validation in `tf.raw_ops.CTCLoss` ([CVE-2021-29613](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2021-29613))
+* Fixes an interpreter crash from vulnerabilities in `tf.io.decode_raw` ([CVE-2021-29614](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2021-29614))
+* Fixes a stack overflow in `ParseAttrValue` with nested tensors ([CVE-2021-29615](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2021-29615))
+* Fixes a null dereference in Grappler's `TrySimplify` ([CVE-2021-29616](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2021-29616))
+* Fixes a crash in `tf.transpose` with complex inputs ([CVE-2021-29618](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2021-29618))
+* Fixes a crash in `tf.strings.substr` due to `CHECK`-fail ([CVE-2021-29617](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2021-29617))
+* Fixes a segfault in `tf.raw_ops.SparseCountSparseOutput` ([CVE-2021-29619](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2021-29619))
+* Fixes a segfault in `tf.raw_ops.ImmutableConst` ([CVE-2021-29539](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2021-29539))
+* Updates `curl` to `7.76.0` to handle [CVE-2020-8169](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2020-8169), [CVE-2020-8177](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2020-8177), [CVE-2020-8231](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2020-8231), [CVE-2020-8284](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2020-8284), [CVE-2020-8285](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2020-8285) and [CVE-2020-8286](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2020-8286).
+
+# Release 2.3.2
+
+## Bug Fixes and Other Changes
+* Fixes an access to uninitialized memory in Eigen code
+ ([CVE-2020-26266](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2020-26266))
+* Fixes a security vulnerability caused by lack of validation in
+ `tf.raw_ops.DataFormatVecPermute` and `tf.raw_ops.DataFormatDimMap`
+ ([CVE-2020-26267](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2020-26267))
+* Fixes a vulnerability caused by attempting to write to immutable memory region in
+ `tf.raw_ops.ImmutableConst`
+ ([CVE-2020-26268](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2020-26268))
+* Fixes a `CHECK`-fail in LSTM with zero-length input
+ ([CVE-2020-26270](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2020-26270))
+* Fixes a security vulnerability caused by accessing heap data outside of bounds
+ when loading a specially crafted `SavedModel`
+ ([CVE-2020-26271](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2020-26271))
+* Solves an OOM issue on TPUs when XLA contexts use fused average updates
+* Updates `libjpeg-turbo` to `2.0.5` to handle
+ [CVE-2020-13790](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2020-13790).
+* Updates `junit` to `4.13.1` to handle
+ [CVE-2020-15250](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2020-15250).
+* Updates `PCRE` to `8.44` to handle
+ [CVE-2019-20838](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2019-20838)
+ and
+ [CVE-2020-14155](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2020-14155).
+* Updates `sqlite3` to `3.44.0` to keep in sync with master branch.
+
# Release 2.3.1
## Bug Fixes and Other Changes
diff --git a/tensorflow/compiler/mlir/lite/transforms/optimize.cc b/tensorflow/compiler/mlir/lite/transforms/optimize.cc
index 30ae4b81f4f324..0be9d2d109ff82 100644
--- a/tensorflow/compiler/mlir/lite/transforms/optimize.cc
+++ b/tensorflow/compiler/mlir/lite/transforms/optimize.cc
@@ -56,6 +56,9 @@ constexpr char kRelu6[] = "RELU6";
constexpr char kRelu1[] = "RELU_N1_TO_1";
bool L2NormalizeReduceAxis(Value sq_op, DenseElementsAttr axis) {
+ if (axis.getNumElements() == 0) {
+ return false;
+ }
if (sq_op.getType().cast().getRank() - 1 ==
*axis.getValues().begin() ||
*axis.getValues().begin() == -1) {
diff --git a/tensorflow/core/common_runtime/graph_constructor.cc b/tensorflow/core/common_runtime/graph_constructor.cc
index ab5b086b25c55b..4456c28be9ea21 100644
--- a/tensorflow/core/common_runtime/graph_constructor.cc
+++ b/tensorflow/core/common_runtime/graph_constructor.cc
@@ -44,6 +44,7 @@ limitations under the License.
#include "tensorflow/core/lib/gtl/inlined_vector.h"
#include "tensorflow/core/lib/strings/scanner.h"
#include "tensorflow/core/lib/strings/str_util.h"
+#include "tensorflow/core/platform/errors.h"
#include "tensorflow/core/platform/logging.h"
#include "tensorflow/core/platform/macros.h"
#include "tensorflow/core/public/version.h"
@@ -1425,6 +1426,17 @@ void GraphConstructor::Undo() {
Status GraphConstructor::MakeEdge(Node* src, int output_index, Node* dst,
int input_index) {
+ if (output_index >= src->num_outputs()) {
+ return errors::InvalidArgument(
+ "Output ", output_index, " of node ", src->name(),
+ " does not exist. Node only has ", src->num_outputs(), " outputs.");
+ }
+ if (input_index >= dst->num_inputs()) {
+ return errors::InvalidArgument(
+ "Input ", input_index, " of node ", dst->name(),
+ " does not exist. Node only has ", dst->num_inputs(), " inputs.");
+ }
+
DataType src_out = src->output_type(output_index);
DataType dst_in = dst->input_type(input_index);
if (!TypesCompatible(dst_in, src_out)) {
diff --git a/tensorflow/core/common_runtime/shape_refiner.cc b/tensorflow/core/common_runtime/shape_refiner.cc
index a968aaf09b6ad3..3c5421a9507076 100644
--- a/tensorflow/core/common_runtime/shape_refiner.cc
+++ b/tensorflow/core/common_runtime/shape_refiner.cc
@@ -117,9 +117,26 @@ Status InferShapesForFunctionSubNode(const Node* node, ShapeRefiner* refiner,
TF_RETURN_IF_ERROR(outer_context->MakeShapeFromShapeProto(proto, &handle));
outer_context->set_output(index, handle);
- auto* resource = node_context->input_handle_shapes_and_types(0);
+ const std::vector* resource =
+ node_context->input_handle_shapes_and_types(0);
if (resource) {
- outer_context->set_output_handle_shapes_and_types(index, *resource);
+ // `ShapesAndType`s contain `ShapeHandle`s. These `ShapeHandle`s point
+ // to `Shape`s that are owned by a different inference context too. We
+ // need to copy them to the outer context to prevent them from being
+ // destroyed before they are used.
+ std::vector copied_shapes_and_types;
+ for (auto& shape_and_type : *resource) {
+ ShapeHandle handle;
+ TensorShapeProto proto;
+ node_context->ShapeHandleToProto(shape_and_type.shape, &proto);
+ TF_RETURN_IF_ERROR(
+ outer_context->MakeShapeFromShapeProto(proto, &handle));
+ copied_shapes_and_types.push_back(
+ ShapeAndType(handle, shape_and_type.dtype));
+ }
+
+ outer_context->set_output_handle_shapes_and_types(
+ index, copied_shapes_and_types);
}
}
diff --git a/tensorflow/core/data/compression_utils.cc b/tensorflow/core/data/compression_utils.cc
index d132bdca8dabfc..f550b150ce945d 100644
--- a/tensorflow/core/data/compression_utils.cc
+++ b/tensorflow/core/data/compression_utils.cc
@@ -29,9 +29,10 @@ Status CompressElement(const std::vector& element,
int64 total_size = 0;
for (auto& component : element) {
if (DataTypeCanUseMemcpy(component.dtype())) {
- // Some datatypes can be memcopied, allowing us to save two copies
- // (AsProtoTensorContent and SerializeToArray).
- total_size += DMAHelper::buffer(&component)->size();
+ const TensorBuffer* buffer = DMAHelper::buffer(&component);
+ if (buffer) {
+ total_size += buffer->size();
+ }
} else {
non_memcpy_components.emplace_back();
component.AsProtoTensorContent(&non_memcpy_components.back());
@@ -53,8 +54,10 @@ Status CompressElement(const std::vector& element,
component.shape().AsProto(metadata->mutable_tensor_shape());
if (DataTypeCanUseMemcpy(component.dtype())) {
const TensorBuffer* buffer = DMAHelper::buffer(&component);
- memcpy(position, buffer->data(), buffer->size());
- metadata->set_tensor_size_bytes(buffer->size());
+ if (buffer) {
+ memcpy(position, buffer->data(), buffer->size());
+ metadata->set_tensor_size_bytes(buffer->size());
+ }
} else {
TensorProto& proto = non_memcpy_components[non_memcpy_component_index++];
proto.SerializeToArray(position, proto.ByteSizeLong());
@@ -94,8 +97,13 @@ Status UncompressElement(const CompressedElement& compressed,
if (DataTypeCanUseMemcpy(metadata.dtype())) {
out->emplace_back(metadata.dtype(), metadata.tensor_shape());
TensorBuffer* buffer = DMAHelper::buffer(&out->back());
- iov[i].iov_base = buffer->data();
- iov[i].iov_len = buffer->size();
+ if (buffer) {
+ iov[i].iov_base = buffer->data();
+ iov[i].iov_len = buffer->size();
+ } else {
+ iov[i].iov_base = nullptr;
+ iov[i].iov_len = 0;
+ }
} else {
// Allocate an empty Tensor. We will fill it out later after
// uncompressing into the tensor_proto_str.
diff --git a/tensorflow/core/framework/attr_value_util.cc b/tensorflow/core/framework/attr_value_util.cc
index a307c8a18c1862..ca1f316409b39b 100644
--- a/tensorflow/core/framework/attr_value_util.cc
+++ b/tensorflow/core/framework/attr_value_util.cc
@@ -38,6 +38,9 @@ namespace {
// Do not construct large tensors to compute their hash or compare for equality.
constexpr int kMaxAttrValueTensorByteSize = 32 * 1024 * 1024; // 32mb
+// Limit nesting of tensors to 100 deep to prevent memory overflow.
+constexpr int kMaxTensorNestDepth = 100;
+
// Return the size of the tensor represented by this TensorProto. If shape is
// not fully defined return -1.
int64 TensorByteSize(const TensorProto& t) {
@@ -224,6 +227,54 @@ string SummarizeFunc(const NameAttrList& func) {
return strings::StrCat(func.name(), "[", absl::StrJoin(entries, ", "), "]");
}
+bool ParseAttrValueHelper_TensorNestsUnderLimit(int limit, string to_parse) {
+ int nests = 0;
+ int maxed_out = to_parse.length();
+ int open_curly = to_parse.find('{');
+ int open_bracket = to_parse.find('<');
+ int close_curly = to_parse.find('}');
+ int close_bracket = to_parse.find('>');
+ if (open_curly == -1) {
+ open_curly = maxed_out;
+ }
+ if (open_bracket == -1) {
+ open_bracket = maxed_out;
+ }
+ int min = std::min(open_curly, open_bracket);
+ do {
+ if (open_curly == maxed_out && open_bracket == maxed_out) {
+ return true;
+ }
+ if (min == open_curly) {
+ nests += 1;
+ open_curly = to_parse.find('{', open_curly + 1);
+ if (open_curly == -1) {
+ open_curly = maxed_out;
+ }
+ } else if (min == open_bracket) {
+ nests += 1;
+ open_bracket = to_parse.find('<', open_bracket + 1);
+ if (open_bracket == -1) {
+ open_bracket = maxed_out;
+ }
+ } else if (min == close_curly) {
+ nests -= 1;
+ close_curly = to_parse.find('}', close_curly + 1);
+ if (close_curly == -1) {
+ close_curly = maxed_out;
+ }
+ } else if (min == close_bracket) {
+ nests -= 1;
+ close_bracket = to_parse.find('>', close_bracket + 1);
+ if (close_bracket == -1) {
+ close_bracket = maxed_out;
+ }
+ }
+ min = std::min({open_curly, open_bracket, close_curly, close_bracket});
+ } while (nests < 100);
+ return false;
+}
+
} // namespace
string SummarizeAttrValue(const AttrValue& attr_value) {
@@ -448,7 +499,12 @@ bool ParseAttrValue(StringPiece type, StringPiece text, AttrValue* out) {
} else {
to_parse = strings::StrCat(field_name, ": ", text);
}
-
+ if (field_name == "tensor") {
+ if (!ParseAttrValueHelper_TensorNestsUnderLimit(kMaxTensorNestDepth,
+ to_parse)) {
+ return false;
+ }
+ }
return ProtoParseFromString(to_parse, out);
}
diff --git a/tensorflow/core/framework/common_shape_fns.cc b/tensorflow/core/framework/common_shape_fns.cc
index b9efddf4cdbc99..a81f7400389843 100644
--- a/tensorflow/core/framework/common_shape_fns.cc
+++ b/tensorflow/core/framework/common_shape_fns.cc
@@ -659,6 +659,8 @@ Status Conv2DShapeImpl(shape_inference::InferenceContext* c,
if (c->ValueKnown(input_depth_dim) && c->ValueKnown(filter_input_depth_dim)) {
int64 input_depth_value = c->Value(input_depth_dim),
filter_input_depth_value = c->Value(filter_input_depth_dim);
+ if (filter_input_depth_value == 0)
+ return errors::InvalidArgument("Depth of filter must not be 0");
if (input_depth_value % filter_input_depth_value != 0)
return errors::InvalidArgument(
"Depth of input (", input_depth_value,
@@ -668,6 +670,8 @@ Status Conv2DShapeImpl(shape_inference::InferenceContext* c,
int64 num_groups = input_depth_value / filter_input_depth_value;
if (c->ValueKnown(output_depth_dim)) {
int64 output_depth_value = c->Value(output_depth_dim);
+ if (num_groups == 0)
+ return errors::InvalidArgument("Number of groups must not be 0");
if (output_depth_value % num_groups != 0)
return errors::InvalidArgument(
"Depth of output (", output_depth_value,
@@ -798,6 +802,8 @@ Status Conv3DShape(shape_inference::InferenceContext* c) {
if (c->ValueKnown(input_depth_dim) && c->ValueKnown(filter_input_depth_dim)) {
int64 input_depth_value = c->Value(input_depth_dim),
filter_input_depth_value = c->Value(filter_input_depth_dim);
+ if (filter_input_depth_value == 0)
+ return errors::InvalidArgument("Depth of filter must not be 0");
if (input_depth_value % filter_input_depth_value != 0)
return errors::InvalidArgument(
"Depth of input (", input_depth_value,
@@ -807,6 +813,8 @@ Status Conv3DShape(shape_inference::InferenceContext* c) {
int64 num_groups = input_depth_value / filter_input_depth_value;
if (c->ValueKnown(output_depth_dim)) {
int64 output_depth_value = c->Value(output_depth_dim);
+ if (num_groups == 0)
+ return errors::InvalidArgument("Number of groups must not be 0");
if (output_depth_value % num_groups != 0)
return errors::InvalidArgument(
"Depth of output (", output_depth_value,
@@ -2364,6 +2372,9 @@ Status SparseReduceShapeFn(InferenceContext* c) {
int64 ndims = shape_vec.size();
absl::flat_hash_set axes;
+ if (ndims == 0)
+ return errors::InvalidArgument(
+ "Number of dims in shape tensor must not be 0");
for (int i = 0; i < axes_vec.size(); i++) {
axes.insert((axes_vec(i) + ndims) % ndims);
}
diff --git a/tensorflow/core/grappler/optimizers/arithmetic_optimizer.cc b/tensorflow/core/grappler/optimizers/arithmetic_optimizer.cc
index 520346b0166a33..2aeeed75ef0b22 100644
--- a/tensorflow/core/grappler/optimizers/arithmetic_optimizer.cc
+++ b/tensorflow/core/grappler/optimizers/arithmetic_optimizer.cc
@@ -2000,6 +2000,12 @@ class ReorderCastLikeAndValuePreserving : public ArithmeticOptimizerStage {
Status TrySimplify(NodeDef* consumer, string* simplified_node_name) override {
NodeDef* producer;
+
+ if (consumer->input_size() < 1) {
+ return errors::FailedPrecondition("Node ", simplified_node_name,
+ " lacks inputs");
+ }
+
TF_RETURN_IF_ERROR(GetInputNode(consumer->input(0), &producer));
const bool producer_is_cast = IsCastLike(*producer);
const bool can_optimize =
@@ -2402,6 +2408,11 @@ class ReplaceMulWithSquare : public ArithmeticOptimizerStage {
~ReplaceMulWithSquare() override = default;
bool IsSupported(const NodeDef* node) const override {
+ if (!node || node->input_size() < 2) {
+ // Invalid node
+ return false;
+ }
+
return IsAnyMul(*node) && node->input(0) == node->input(1);
}
diff --git a/tensorflow/core/grappler/optimizers/dependency_optimizer.cc b/tensorflow/core/grappler/optimizers/dependency_optimizer.cc
index 58ef14e3d3d60f..1febfc01e2d741 100644
--- a/tensorflow/core/grappler/optimizers/dependency_optimizer.cc
+++ b/tensorflow/core/grappler/optimizers/dependency_optimizer.cc
@@ -68,6 +68,12 @@ bool DependencyOptimizer::SafeToRemoveIdentity(const NodeDef& node) const {
// The output values of this node may be needed.
return false;
}
+
+ if (node.input_size() < 1) {
+ // Node lacks input, is invalid
+ return false;
+ }
+
const NodeDef* input = node_map_->GetNode(NodeName(node.input(0)));
CHECK(input != nullptr) << "node = " << node.name()
<< " input = " << node.input(0);
diff --git a/tensorflow/core/kernels/banded_triangular_solve_op.cc b/tensorflow/core/kernels/banded_triangular_solve_op.cc
index 666282e52c8031..ad2467ebefaf1a 100644
--- a/tensorflow/core/kernels/banded_triangular_solve_op.cc
+++ b/tensorflow/core/kernels/banded_triangular_solve_op.cc
@@ -217,6 +217,7 @@ class BandedTriangularSolveOpCpu : public OpKernel {
const Tensor& in1 = ctx->input(1);
ValidateInputTensors(ctx, in0, in1);
+ if (!ctx->status().ok()) return;
MatMulBCast bcast(in0.shape().dim_sizes(), in1.shape().dim_sizes());
OP_REQUIRES(
@@ -275,6 +276,14 @@ class BandedTriangularSolveOpCpu : public OpKernel {
OP_REQUIRES(
ctx, in1.dims() >= 2,
errors::InvalidArgument("In[1] ndims must be >= 2: ", in1.dims()));
+
+ OP_REQUIRES(ctx, in0.NumElements() > 0,
+ errors::InvalidArgument("In[0] must not be an empty tensor: ",
+ in0.DebugString()));
+
+ OP_REQUIRES(ctx, in1.NumElements() > 0,
+ errors::InvalidArgument("In[1] must not be an empty tensor: ",
+ in1.DebugString()));
}
bool lower_;
bool adjoint_;
diff --git a/tensorflow/core/kernels/bincount_op.cc b/tensorflow/core/kernels/bincount_op.cc
index a84b25f2541013..c75f67bb56009a 100644
--- a/tensorflow/core/kernels/bincount_op.cc
+++ b/tensorflow/core/kernels/bincount_op.cc
@@ -414,6 +414,15 @@ class RaggedBincountOp : public OpKernel {
int num_values = values.size();
int batch_idx = 0;
+ OP_REQUIRES(ctx, splits(0) == 0,
+ errors::InvalidArgument("Splits must start with 0, not with ",
+ splits(0)));
+
+ OP_REQUIRES(ctx, splits(num_rows) == num_values,
+ errors::InvalidArgument(
+ "Splits must end with the number of values, got ",
+ splits(num_rows), " instead of ", num_values));
+
Tensor* out_t;
OP_REQUIRES_OK(
ctx, ctx->allocate_output(0, TensorShape({num_rows, size}), &out_t));
diff --git a/tensorflow/core/kernels/boosted_trees/quantile_ops.cc b/tensorflow/core/kernels/boosted_trees/quantile_ops.cc
index 0065bdd66aa708..916db1f436148b 100644
--- a/tensorflow/core/kernels/boosted_trees/quantile_ops.cc
+++ b/tensorflow/core/kernels/boosted_trees/quantile_ops.cc
@@ -116,6 +116,9 @@ class BoostedTreesCreateQuantileStreamResourceOp : public OpKernel {
const Tensor* num_streams_t;
OP_REQUIRES_OK(context, context->input(kNumStreamsName, &num_streams_t));
int64 num_streams = num_streams_t->scalar()();
+ OP_REQUIRES(context, num_streams >= 0,
+ errors::InvalidArgument(
+ "Num_streams input cannot be a negative integer"));
auto result =
new QuantileStreamResource(epsilon, max_elements_, num_streams);
diff --git a/tensorflow/core/kernels/boosted_trees/resource_ops.cc b/tensorflow/core/kernels/boosted_trees/resource_ops.cc
index ac1fb5652da5f9..8036f2b20f36bb 100644
--- a/tensorflow/core/kernels/boosted_trees/resource_ops.cc
+++ b/tensorflow/core/kernels/boosted_trees/resource_ops.cc
@@ -53,6 +53,7 @@ class BoostedTreesCreateEnsembleOp : public OpKernel {
if (!result->InitFromSerialized(
tree_ensemble_serialized_t->scalar()(), stamp_token)) {
result->Unref();
+ result.release(); // Needed due to the `->Unref` above, to prevent UAF
OP_REQUIRES(
context, false,
errors::InvalidArgument("Unable to parse tree ensemble proto."));
diff --git a/tensorflow/core/kernels/boosted_trees/stats_ops.cc b/tensorflow/core/kernels/boosted_trees/stats_ops.cc
index 851e5b78e847b7..dc8c4110b47259 100644
--- a/tensorflow/core/kernels/boosted_trees/stats_ops.cc
+++ b/tensorflow/core/kernels/boosted_trees/stats_ops.cc
@@ -14,6 +14,7 @@ limitations under the License.
==============================================================================*/
#include
+#include
#include
#include "third_party/eigen3/Eigen/Core"
@@ -22,6 +23,7 @@ limitations under the License.
#include "tensorflow/core/framework/tensor_shape.h"
#include "tensorflow/core/kernels/boosted_trees/boosted_trees.pb.h"
#include "tensorflow/core/kernels/boosted_trees/tree_helper.h"
+#include "tensorflow/core/platform/errors.h"
#include "tensorflow/core/platform/logging.h"
namespace tensorflow {
@@ -51,6 +53,16 @@ class BoostedTreesCalculateBestGainsPerFeatureOp : public OpKernel {
// node_id_range
const Tensor* node_id_range_t;
OP_REQUIRES_OK(context, context->input("node_id_range", &node_id_range_t));
+ OP_REQUIRES(
+ context, node_id_range_t->dims() == 1,
+ errors::InvalidArgument("node_id_range must be a rank 1 tensor, but "
+ "given node_id_range has dims of ",
+ node_id_range_t->dims()));
+ OP_REQUIRES(context, node_id_range_t->dim_size(0) == 2,
+ errors::InvalidArgument(
+ "node_id_range must be a rank 1 tensor with shape=[2], but "
+ "given node_id_range has shape ",
+ node_id_range_t->dim_size(0), " on its first dim"));
const auto node_id_range = node_id_range_t->vec();
const int32 node_id_first = node_id_range(0); // inclusive
const int32 node_id_last = node_id_range(1); // exclusive
@@ -244,12 +256,18 @@ class BoostedTreesCalculateBestFeatureSplitOp : public OpKernel {
// node_id_range
const Tensor* node_id_range_t;
OP_REQUIRES_OK(context, context->input("node_id_range", &node_id_range_t));
+ OP_REQUIRES(
+ context, node_id_range_t->NumElements() == 2,
+ errors::InvalidArgument("node_id_range argument must have shape [2]"));
const auto node_id_range = node_id_range_t->vec();
const int32 node_id_first = node_id_range(0); // inclusive
const int32 node_id_last = node_id_range(1); // exclusive
const Tensor* stats_summary_t;
OP_REQUIRES_OK(context, context->input("stats_summary", &stats_summary_t));
+ OP_REQUIRES(
+ context, stats_summary_t->shape().dims() == 4,
+ errors::InvalidArgument("stats_summary argument must have rank 4"));
TTypes::ConstTensor stats_summary =
stats_summary_t->tensor();
const int32 feature_dims = stats_summary_t->dim_size(1);
@@ -262,6 +280,8 @@ class BoostedTreesCalculateBestFeatureSplitOp : public OpKernel {
const Tensor* l1_t;
OP_REQUIRES_OK(context, context->input("l1", &l1_t));
+ OP_REQUIRES(context, l1_t->NumElements() == 1,
+ errors::InvalidArgument("l1 argument must be a scalar"));
const auto l1 = l1_t->scalar()();
DCHECK_GE(l1, 0);
if (logits_dim_ > 1) {
@@ -271,17 +291,25 @@ class BoostedTreesCalculateBestFeatureSplitOp : public OpKernel {
const Tensor* l2_t;
OP_REQUIRES_OK(context, context->input("l2", &l2_t));
+ OP_REQUIRES(context, l2_t->NumElements() == 1,
+ errors::InvalidArgument("l2 argument must be a scalar"));
const auto l2 = l2_t->scalar()();
DCHECK_GE(l2, 0);
const Tensor* tree_complexity_t;
OP_REQUIRES_OK(context,
context->input("tree_complexity", &tree_complexity_t));
+ OP_REQUIRES(
+ context, tree_complexity_t->NumElements() == 1,
+ errors::InvalidArgument("tree_complexity argument must be a scalar"));
const auto tree_complexity = tree_complexity_t->scalar()();
const Tensor* min_node_weight_t;
OP_REQUIRES_OK(context,
context->input("min_node_weight", &min_node_weight_t));
+ OP_REQUIRES(
+ context, min_node_weight_t->NumElements() == 1,
+ errors::InvalidArgument("min_node_weight argument must be a scalar"));
const auto min_node_weight = min_node_weight_t->scalar()();
std::vector output_node_ids;
@@ -290,7 +318,7 @@ class BoostedTreesCalculateBestFeatureSplitOp : public OpKernel {
std::vector output_thresholds;
std::vector output_left_node_contribs;
std::vector output_right_node_contribs;
- std::vector output_split_types;
+ std::vector output_split_types;
// TODO(tanzheny) parallelize the computation.
// Iterate each node and find the best gain per node.
@@ -567,6 +595,16 @@ class BoostedTreesCalculateBestFeatureSplitV2 : public OpKernel {
// node_id_range
const Tensor* node_id_range_t;
OP_REQUIRES_OK(context, context->input("node_id_range", &node_id_range_t));
+ OP_REQUIRES(
+ context, node_id_range_t->dims() == 1,
+ errors::InvalidArgument("node_id_range must be a rank 1 tensor, but "
+ "given node_id_range has dims of ",
+ node_id_range_t->dims()));
+ OP_REQUIRES(context, node_id_range_t->dim_size(0) == 2,
+ errors::InvalidArgument(
+ "node_id_range must be a rank 1 tensor with shape=[2], but "
+ "given node_id_range has shape ",
+ node_id_range_t->dim_size(0), " on its first dim"));
const auto node_id_range = node_id_range_t->vec();
const int32 node_id_first = node_id_range(0); // Inclusive.
const int32 node_id_last = node_id_range(1); // Exclusive.
@@ -1025,6 +1063,13 @@ class BoostedTreesSparseCalculateBestFeatureSplitOp : public OpKernel {
const int32 feature_dim = stats_summary_indices(idx, 1);
const int32 bucket_id = stats_summary_indices(idx, 2);
const int32 stat_dim = stats_summary_indices(idx, 3);
+ OP_REQUIRES(context, stat_dim < stats_dims,
+ errors::InvalidArgument(
+ "Stat dim, the sum of logits dim and hessian dim in "
+ "stats_summary_indices, cannot be greater than or equal to stats "
+ "dims, the last value in stats_summary_shape, which was ",
+ stats_dims, ". At index (", idx,
+ ", 4), stats_summary_indices contains value ", stat_dim));
std::pair const& f_insert_result = f_map.insert(
FeatureMapIterator::value_type(feature_dim, BucketMap()));
auto& b_map = f_insert_result.first->second;
diff --git a/tensorflow/core/kernels/conv_grad_filter_ops.cc b/tensorflow/core/kernels/conv_grad_filter_ops.cc
index b16d3c7270fde0..d37ac5af59470a 100644
--- a/tensorflow/core/kernels/conv_grad_filter_ops.cc
+++ b/tensorflow/core/kernels/conv_grad_filter_ops.cc
@@ -496,6 +496,14 @@ class Conv2DCustomBackpropFilterOp : public OpKernel {
const int filter_total_size = dims.spatial_dims[0].filter_size *
dims.spatial_dims[1].filter_size *
dims.in_depth;
+ OP_REQUIRES(
+ context,
+ filter_total_size * dims.out_depth == filter_backprop->NumElements(),
+ errors::InvalidArgument(
+ "filter_size does not have enough elements, requested ",
+ filter_total_size * dims.out_depth, ", got ",
+ filter_backprop->NumElements()));
+
// The output image size is the spatial size of the output.
const int output_image_size =
dims.spatial_dims[0].output_size * dims.spatial_dims[1].output_size;
@@ -519,6 +527,11 @@ class Conv2DCustomBackpropFilterOp : public OpKernel {
const size_t work_unit_size = size_A + size_B + size_C;
+ OP_REQUIRES(
+ context, work_unit_size != 0,
+ errors::InvalidArgument(
+ "Work size for convolution would be 0, which is not acceptable"));
+
const size_t shard_size =
(target_working_set_size + work_unit_size - 1) / work_unit_size;
diff --git a/tensorflow/core/kernels/conv_grad_input_ops.cc b/tensorflow/core/kernels/conv_grad_input_ops.cc
index 2dd63d1f4d05b7..a89e5c7185c0f6 100644
--- a/tensorflow/core/kernels/conv_grad_input_ops.cc
+++ b/tensorflow/core/kernels/conv_grad_input_ops.cc
@@ -668,6 +668,11 @@ class Conv2DCustomBackpropInputOp : public OpKernel {
dims.batch_size == 1 ||
thread_work_unit_size >= min_thread_work_unit_size;
+ OP_REQUIRES(
+ context, work_unit_size > 0,
+ errors::InvalidArgument("input, filter_sizes and out_backprop tensors "
+ "must all have at least 1 element"));
+
const size_t shard_size =
use_parallel_contraction
? 1
diff --git a/tensorflow/core/kernels/conv_grad_ops_3d.cc b/tensorflow/core/kernels/conv_grad_ops_3d.cc
index 322da2537f0da5..1ef931a97d93da 100644
--- a/tensorflow/core/kernels/conv_grad_ops_3d.cc
+++ b/tensorflow/core/kernels/conv_grad_ops_3d.cc
@@ -239,6 +239,28 @@ class Conv3DBackpropInputOp : public OpKernel {
input_shape = context->input(0).shape();
}
+ OP_REQUIRES(context, input_shape.dims() == 5,
+ errors::InvalidArgument("input tensor must have 5 dimensions"));
+ OP_REQUIRES(
+ context, filter_shape.dims() == 5,
+ errors::InvalidArgument("filter_sizes tensor must have 5 dimensions"));
+ OP_REQUIRES(
+ context, out_backprop_shape.dims() == 5,
+ errors::InvalidArgument("out_backprop tensor must have 5 dimensions"));
+ OP_REQUIRES(
+ context, input_shape.dim_size(4) == filter_shape.dim_size(3),
+ errors::InvalidArgument("input and filter_sizes must have the same "
+ "number of channels. Got ",
+ input_shape.dim_size(4), " for input and ",
+ filter_shape.dim_size(3), " for filter_sizes"));
+ OP_REQUIRES(
+ context, out_backprop_shape.dim_size(4) == filter_shape.dim_size(4),
+ errors::InvalidArgument("out_backprop and filter_sizes must have the "
+ "same number of channels. Got ",
+ out_backprop_shape.dim_size(4),
+ " for out_backprop and ",
+ filter_shape.dim_size(4), " for filter_sizes"));
+
ConvBackpropDimensions dims;
OP_REQUIRES_OK(context, ConvBackpropComputeDimensions(
"Conv3DBackpropInputOp", /*num_spatial_dims=*/3,
@@ -346,6 +368,28 @@ class Conv3DCustomBackpropInputOp : public OpKernel {
input_shape = context->input(0).shape();
}
+ OP_REQUIRES(context, input_shape.dims() == 5,
+ errors::InvalidArgument("input tensor must have 5 dimensions"));
+ OP_REQUIRES(
+ context, filter_shape.dims() == 5,
+ errors::InvalidArgument("filter_sizes tensor must have 5 dimensions"));
+ OP_REQUIRES(
+ context, out_backprop_shape.dims() == 5,
+ errors::InvalidArgument("out_backprop tensor must have 5 dimensions"));
+ OP_REQUIRES(
+ context, input_shape.dim_size(4) == filter_shape.dim_size(3),
+ errors::InvalidArgument("input and filter_sizes must have the same "
+ "number of channels. Got ",
+ input_shape.dim_size(4), " for input and ",
+ filter_shape.dim_size(3), " for filter_sizes"));
+ OP_REQUIRES(
+ context, out_backprop_shape.dim_size(4) == filter_shape.dim_size(4),
+ errors::InvalidArgument("out_backprop and filter_sizes must have the "
+ "same number of channels. Got ",
+ out_backprop_shape.dim_size(4),
+ " for out_backprop and ",
+ filter_shape.dim_size(4), " for filter_sizes"));
+
ConvBackpropDimensions dims;
OP_REQUIRES_OK(context, ConvBackpropComputeDimensions(
"Conv3DBackpropInputOp", /*num_spatial_dims=*/3,
@@ -416,6 +460,11 @@ class Conv3DCustomBackpropInputOp : public OpKernel {
// contraction compared to sharding and matmuls.
const bool use_parallel_contraction = dims.batch_size == 1;
+ OP_REQUIRES(
+ context, work_unit_size > 0,
+ errors::InvalidArgument("input, filter_sizes and out_backprop tensors "
+ "must all have at least 1 element"));
+
const size_t shard_size =
use_parallel_contraction
? 1
@@ -696,6 +745,28 @@ class Conv3DBackpropFilterOp : public OpKernel {
filter_shape = context->input(1).shape();
}
+ OP_REQUIRES(context, input_shape.dims() == 5,
+ errors::InvalidArgument("input tensor must have 5 dimensions"));
+ OP_REQUIRES(
+ context, filter_shape.dims() == 5,
+ errors::InvalidArgument("filter_sizes tensor must have 5 dimensions"));
+ OP_REQUIRES(
+ context, out_backprop_shape.dims() == 5,
+ errors::InvalidArgument("out_backprop tensor must have 5 dimensions"));
+ OP_REQUIRES(
+ context, input_shape.dim_size(4) == filter_shape.dim_size(3),
+ errors::InvalidArgument("input and filter_sizes must have the same "
+ "number of channels. Got ",
+ input_shape.dim_size(4), " for input and ",
+ filter_shape.dim_size(3), " for filter_sizes"));
+ OP_REQUIRES(
+ context, out_backprop_shape.dim_size(4) == filter_shape.dim_size(4),
+ errors::InvalidArgument("out_backprop and filter_sizes must have the "
+ "same number of channels. Got ",
+ out_backprop_shape.dim_size(4),
+ " for out_backprop and ",
+ filter_shape.dim_size(4), " for filter_sizes"));
+
ConvBackpropDimensions dims;
OP_REQUIRES_OK(context,
ConvBackpropComputeDimensions(
@@ -808,6 +879,28 @@ class Conv3DCustomBackpropFilterOp : public OpKernel {
filter_shape = context->input(1).shape();
}
+ OP_REQUIRES(context, input_shape.dims() == 5,
+ errors::InvalidArgument("input tensor must have 5 dimensions"));
+ OP_REQUIRES(
+ context, filter_shape.dims() == 5,
+ errors::InvalidArgument("filter_sizes tensor must have 5 dimensions"));
+ OP_REQUIRES(
+ context, out_backprop_shape.dims() == 5,
+ errors::InvalidArgument("out_backprop tensor must have 5 dimensions"));
+ OP_REQUIRES(
+ context, input_shape.dim_size(4) == filter_shape.dim_size(3),
+ errors::InvalidArgument("input and filter_sizes must have the same "
+ "number of channels. Got ",
+ input_shape.dim_size(4), " for input and ",
+ filter_shape.dim_size(3), " for filter_sizes"));
+ OP_REQUIRES(
+ context, out_backprop_shape.dim_size(4) == filter_shape.dim_size(4),
+ errors::InvalidArgument("out_backprop and filter_sizes must have the "
+ "same number of channels. Got ",
+ out_backprop_shape.dim_size(4),
+ " for out_backprop and ",
+ filter_shape.dim_size(4), " for filter_sizes"));
+
ConvBackpropDimensions dims;
OP_REQUIRES_OK(context,
ConvBackpropComputeDimensions(
@@ -880,6 +973,11 @@ class Conv3DCustomBackpropFilterOp : public OpKernel {
const int64 work_unit_size = size_A + size_B + size_C;
+ OP_REQUIRES(
+ context, work_unit_size > 0,
+ errors::InvalidArgument("input, filter_sizes and out_backprop tensors "
+ "must all have at least 1 element"));
+
const size_t shard_size =
(target_working_set_size + work_unit_size - 1) / work_unit_size;
diff --git a/tensorflow/core/kernels/conv_grad_shape_utils.cc b/tensorflow/core/kernels/conv_grad_shape_utils.cc
index acb052968e1708..942e085b8ac3b2 100644
--- a/tensorflow/core/kernels/conv_grad_shape_utils.cc
+++ b/tensorflow/core/kernels/conv_grad_shape_utils.cc
@@ -127,6 +127,10 @@ Status ConvBackpropComputeDimensionsV2(
// dimensions of the filter Tensor.
VLOG(2) << "input vs filter_in depth " << dims->in_depth << " "
<< filter_shape.dim_size(num_dims - 2);
+ if (filter_shape.dim_size(num_dims - 2) <= 0) {
+ return errors::InvalidArgument(
+ label, ": filter depth must be strictly greater than zero");
+ }
if (dims->in_depth % filter_shape.dim_size(num_dims - 2)) {
return errors::InvalidArgument(
label, ": input depth must be evenly divisible by filter depth");
diff --git a/tensorflow/core/kernels/conv_ops.cc b/tensorflow/core/kernels/conv_ops.cc
index ab8e24a311ff68..287cf4a923b31c 100644
--- a/tensorflow/core/kernels/conv_ops.cc
+++ b/tensorflow/core/kernels/conv_ops.cc
@@ -425,6 +425,9 @@ Status ComputeConv2DDimension(const Conv2DParameters& params,
errors::InvalidArgument("Patch depth too large"));
const int in_depth = static_cast(in_depth_raw);
const int patch_depth = static_cast(patch_depth_raw);
+ TF_REQUIRES(patch_depth > 0,
+ errors::InvalidArgument(
+ "filter depth must be strictly positive, got ", patch_depth));
TF_REQUIRES(in_depth % patch_depth == 0,
errors::InvalidArgument(
"input depth must be evenly divisible by filter depth: ",
diff --git a/tensorflow/core/kernels/conv_ops_3d.h b/tensorflow/core/kernels/conv_ops_3d.h
index 9dcdea5b18f10b..8073ca5a9dfdce 100644
--- a/tensorflow/core/kernels/conv_ops_3d.h
+++ b/tensorflow/core/kernels/conv_ops_3d.h
@@ -56,6 +56,11 @@ struct LaunchConvOp {
errors::InvalidArgument("CPU implementation of Conv3D "
"currently only supports dilated rates "
"of 1."));
+ OP_REQUIRES(context, filter.dim_size(3) == input.dim_size(input.dims() - 1),
+ errors::InvalidArgument(
+ "Number of channels in filter (", filter.dim_size(3),
+ ") must match last dimension of input (",
+ input.dim_size(input.dims() - 1), ")"));
functor::CuboidConvolution()(
context->template eigen_device(), output->tensor(),
input.tensor(), filter.tensor(), strides[2], strides[1],
@@ -135,6 +140,8 @@ class Conv3DOp : public BinaryOpBase {
const int64 filter_depth = filter.dim_size(3);
const int64 out_depth = filter.dim_size(4);
+ OP_REQUIRES(context, filter_depth != 0,
+ errors::InvalidArgument("filter_depth must be non-zero"));
OP_REQUIRES(context, in_depth % filter_depth == 0,
errors::InvalidArgument(
"Input depth must be evenly divisible by filter depth: ",
diff --git a/tensorflow/core/kernels/count_ops.cc b/tensorflow/core/kernels/count_ops.cc
index 087deef0812f00..40aa1fe458c1ee 100644
--- a/tensorflow/core/kernels/count_ops.cc
+++ b/tensorflow/core/kernels/count_ops.cc
@@ -122,6 +122,9 @@ class DenseCount : public OpKernel {
int num_batch_elements = 1;
for (int i = 0; i < num_batch_dimensions; ++i) {
+ OP_REQUIRES(context, data.shape().dim_size(i) != 0,
+ errors::InvalidArgument(
+ "Invalid input: Shapes dimension cannot be 0."));
num_batch_elements *= data.shape().dim_size(i);
}
int num_value_elements = data.shape().num_elements() / num_batch_elements;
@@ -192,10 +195,22 @@ class SparseCount : public OpKernel {
"; values shape: ", values.shape().DebugString()));
}
+ OP_REQUIRES(context, shape.NumElements() != 0,
+ errors::InvalidArgument(
+ "The shape argument requires at least one element."));
+
bool is_1d = shape.NumElements() == 1;
- int num_batches = is_1d ? 1 : shape.flat()(0);
+ auto shape_vector = shape.flat();
+ int num_batches = is_1d ? 1 : shape_vector(0);
int num_values = values.NumElements();
+ for (int b = 0; b < shape_vector.size(); b++) {
+ OP_REQUIRES(context, shape_vector(b) >= 0,
+ errors::InvalidArgument(
+ "Elements in dense_shape must be >= 0. Instead got: ",
+ shape.DebugString()));
+ }
+
OP_REQUIRES(context, num_values == indices.shape().dim_size(0),
errors::InvalidArgument(
"Number of values must match first dimension of indices.",
@@ -212,6 +227,14 @@ class SparseCount : public OpKernel {
for (int idx = 0; idx < num_values; ++idx) {
int batch = is_1d ? 0 : indices_values(idx, 0);
+ if (batch >= num_batches) {
+ OP_REQUIRES(context, batch < num_batches,
+ errors::InvalidArgument(
+ "Indices value along the first dimension must be ",
+ "lower than the first dimension of the shape. ", "Got ",
+ batch, " as batch and ", num_batches,
+ " as the first dimension of the shape."));
+ }
const auto& value = values_values(idx);
if (value >= 0 && (maxlength_ <= 0 || value < maxlength_)) {
if (binary_output_) {
diff --git a/tensorflow/core/kernels/ctc_decoder_ops.cc b/tensorflow/core/kernels/ctc_decoder_ops.cc
index d62aef2d03b988..9efdac60e369c2 100644
--- a/tensorflow/core/kernels/ctc_decoder_ops.cc
+++ b/tensorflow/core/kernels/ctc_decoder_ops.cc
@@ -70,6 +70,9 @@ class CTCDecodeHelper {
if (inputs_shape.dims() != 3) {
return errors::InvalidArgument("inputs is not a 3-Tensor");
}
+ if (inputs_shape.num_elements() == 0) {
+ return errors::InvalidArgument("inputs must not be empty");
+ }
const int64 max_time = inputs_shape.dim_size(0);
const int64 batch_size = inputs_shape.dim_size(1);
@@ -232,6 +235,8 @@ class CTCGreedyDecoderOp : public OpKernel {
int prev_indices = -1;
for (int t = 0; t < seq_len_t(b); ++t) {
int max_class_indices;
+ OP_REQUIRES(ctx, input_list_t[t].dimension(1) > 0,
+ errors::InvalidArgument("Invalid input dimensions."));
log_prob_t(b, 0) +=
-RowMax(input_list_t[t], b, &max_class_indices);
if (max_class_indices != blank_index &&
diff --git a/tensorflow/core/kernels/ctc_loss_op.cc b/tensorflow/core/kernels/ctc_loss_op.cc
index 6358e82fdda853..ca505e1db93145 100644
--- a/tensorflow/core/kernels/ctc_loss_op.cc
+++ b/tensorflow/core/kernels/ctc_loss_op.cc
@@ -100,11 +100,18 @@ class CTCLossOp : public OpKernel {
errors::InvalidArgument("sequence_length is not a vector"));
OP_REQUIRES(ctx, TensorShapeUtils::IsMatrix(labels_indices->shape()),
errors::InvalidArgument("labels_indices is not a matrix"));
+ OP_REQUIRES(ctx, labels_indices->dim_size(1) > 1,
+ errors::InvalidArgument(
+ "labels_indices second dimension must be > 1. Received ",
+ labels_indices->dim_size(1)));
OP_REQUIRES(ctx, TensorShapeUtils::IsVector(labels_values->shape()),
errors::InvalidArgument("labels_values is not a vector"));
const TensorShape& inputs_shape = inputs->shape();
const int64 max_time = inputs_shape.dim_size(0);
+ OP_REQUIRES(ctx, max_time != 0,
+ errors::InvalidArgument(
+ "Max time or first dimension of input cannot be 0."));
const int64 batch_size = inputs_shape.dim_size(1);
const int64 num_classes_raw = inputs_shape.dim_size(2);
OP_REQUIRES(
diff --git a/tensorflow/core/kernels/cwise_ops_common.h b/tensorflow/core/kernels/cwise_ops_common.h
index c0aee43d26800a..45efaf34892135 100644
--- a/tensorflow/core/kernels/cwise_ops_common.h
+++ b/tensorflow/core/kernels/cwise_ops_common.h
@@ -271,6 +271,11 @@ class SimpleBinaryOp : public OpKernel {
void Compute(OpKernelContext* ctx) override {
const Tensor& in0 = ctx->input(0);
const Tensor& in1 = ctx->input(1);
+ OP_REQUIRES(
+ ctx, in0.NumElements() == in1.NumElements(),
+ errors::InvalidArgument("The two arguments to a cwise op must have "
+ "same number of elements, got ",
+ in0.NumElements(), " and ", in1.NumElements()));
auto in0_flat = in0.flat();
auto in1_flat = in1.flat();
const Device& eigen_device = ctx->eigen_device();
diff --git a/tensorflow/core/kernels/data/experimental/compression_ops.cc b/tensorflow/core/kernels/data/experimental/compression_ops.cc
index efa7018acb6293..8cc214671bd742 100644
--- a/tensorflow/core/kernels/data/experimental/compression_ops.cc
+++ b/tensorflow/core/kernels/data/experimental/compression_ops.cc
@@ -48,6 +48,11 @@ void UncompressElementOp::Compute(OpKernelContext* ctx) {
Tensor tensor = ctx->input(0);
const Variant& variant = tensor.scalar<Variant>()();
const CompressedElement* compressed = variant.get<CompressedElement>();
+ OP_REQUIRES(
+ ctx, compressed != nullptr,
+ errors::InvalidArgument(
+ "Input does not contain a compressed element. Instead got tensor ",
+ tensor.DebugString()));
std::vector<Tensor> components;
OP_REQUIRES_OK(ctx, UncompressElement(*compressed, &components));
diff --git a/tensorflow/core/kernels/data/experimental/to_tf_record_op.cc b/tensorflow/core/kernels/data/experimental/to_tf_record_op.cc
index bfa894cd473b40..56401bb91f5753 100644
--- a/tensorflow/core/kernels/data/experimental/to_tf_record_op.cc
+++ b/tensorflow/core/kernels/data/experimental/to_tf_record_op.cc
@@ -16,6 +16,7 @@ limitations under the License.
#include "tensorflow/core/framework/function_handle_cache.h"
#include "tensorflow/core/framework/op_kernel.h"
#include "tensorflow/core/framework/resource_mgr.h"
+#include "tensorflow/core/framework/types.h"
#include "tensorflow/core/kernels/data/dataset_utils.h"
#include "tensorflow/core/kernels/ops_util.h"
#include "tensorflow/core/lib/core/threadpool.h"
@@ -87,8 +88,20 @@ class ToTFRecordOp : public AsyncOpKernel {
TF_RETURN_IF_ERROR(dataset->MakeIterator(
&iter_ctx, /*parent=*/nullptr, "ToTFRecordOpIterator", &iterator));
+ const int num_output_dtypes = dataset->output_dtypes().size();
+ if (num_output_dtypes != 1) {
+ return errors::InvalidArgument(
+ "ToTFRecordOp currently only support datasets of 1 single column, ",
+ "but got ", num_output_dtypes);
+ }
+ const DataType dt = dataset->output_dtypes()[0];
+ if (dt != DT_STRING) {
+ return errors::InvalidArgument(
+ "ToTFRecordOp currently only supports DT_STRING dataypes, but got ",
+ DataTypeString(dt));
+ }
std::vector<Tensor> components;
- components.reserve(dataset->output_dtypes().size());
+ components.reserve(num_output_dtypes);
bool end_of_sequence;
do {
TF_RETURN_IF_ERROR(
diff --git a/tensorflow/core/kernels/data/sparse_tensor_slice_dataset_op.cc b/tensorflow/core/kernels/data/sparse_tensor_slice_dataset_op.cc
index 1e3ed53d6c6d6e..212c2b4e96715f 100644
--- a/tensorflow/core/kernels/data/sparse_tensor_slice_dataset_op.cc
+++ b/tensorflow/core/kernels/data/sparse_tensor_slice_dataset_op.cc
@@ -237,6 +237,17 @@ class SparseTensorSliceDatasetOp : public DatasetOpKernel {
errors::InvalidArgument(
"Input indices should be a matrix but received shape ",
indices->shape().DebugString()));
+
+ const auto num_indices = indices->NumElements();
+ const auto num_values = values->NumElements();
+ if (num_indices == 0 || num_values == 0) {
+ OP_REQUIRES(ctx, num_indices == num_values,
+ errors::InvalidArgument(
+ "If indices or values are empty, the other one must also "
+ "be. Got indices of shape ",
+ indices->shape().DebugString(), " and values of shape ",
+ values->shape().DebugString()));
+ }
OP_REQUIRES(ctx, TensorShapeUtils::IsVector(values->shape()),
errors::InvalidArgument(
"Input values should be a vector but received shape ",
diff --git a/tensorflow/core/kernels/data_format_ops.cc b/tensorflow/core/kernels/data_format_ops.cc
index 181aa1b8a2cab2..771986f2ee84d4 100644
--- a/tensorflow/core/kernels/data_format_ops.cc
+++ b/tensorflow/core/kernels/data_format_ops.cc
@@ -18,16 +18,52 @@ limitations under the License.
#define EIGEN_USE_THREADS
#include "tensorflow/core/kernels/data_format_ops.h"
+
+#include <map>