Skip to content

Commit 21716d8

Browse files
martinwicke and tensorflower-gardener
authored and committed
Merge changes from github.
Change: 128401884
1 parent ed28197 commit 21716d8

105 files changed

Lines changed: 2585 additions & 1153 deletions

File tree

Some content is hidden

Large commits have some content hidden by default. Use the search box below for content that may be hidden.

ISSUE_TEMPLATE.md

Lines changed: 4 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -18,7 +18,10 @@ If installed from binary pip package, provide:
1818
1. Which pip package you installed.
1919
2. The output from `python -c "import tensorflow; print(tensorflow.__version__)"`.
2020

21-
If installed from sources, provide the commit hash:
21+
If installed from source, provide
22+
23+
1. The commit hash (`git rev-parse HEAD`)
24+
2. The output of `bazel version`
2225

2326
### Steps to reproduce
2427
1.

eigen.BUILD

Lines changed: 2 additions & 3 deletions
Original file line numberDiff line numberDiff line change
@@ -1,9 +1,8 @@
11
package(default_visibility = ["//visibility:public"])
22

3-
archive_dir = "eigen-eigen-b4fa9622b809"
43
cc_library(
54
name = "eigen",
6-
hdrs = glob([archive_dir+"/**/*.h", archive_dir+"/unsupported/Eigen/*", archive_dir+"/unsupported/Eigen/CXX11/*", archive_dir+"/Eigen/*"]),
7-
includes = [ archive_dir ],
5+
hdrs = glob(["**/*.h", "unsupported/Eigen/*", "unsupported/Eigen/CXX11/*", "Eigen/*"]),
6+
includes = [ '.' ],
87
visibility = ["//visibility:public"],
98
)

gif.BUILD

Lines changed: 23 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,23 @@
1+
SOURCES = [
2+
"dgif_lib.c",
3+
"egif_lib.c",
4+
"gif_font.c",
5+
"gif_hash.c",
6+
"gifalloc.c",
7+
"openbsd-reallocarray.c",
8+
"gif_err.c",
9+
"quantize.c",
10+
]
11+
12+
prefix_dir = "giflib-5.1.4/lib"
13+
14+
cc_library(
15+
name = "gif",
16+
srcs = [prefix_dir + "/" + source for source in SOURCES],
17+
hdrs = [prefix_dir + "/gif_lib.h"],
18+
includes = [prefix_dir],
19+
defines = [
20+
"HAVE_CONFIG_H",
21+
],
22+
visibility = ["//visibility:public"],
23+
)

tensorflow/contrib/cmake/external/eigen.cmake

Lines changed: 19 additions & 5 deletions
Original file line numberDiff line numberDiff line change
@@ -7,16 +7,30 @@
77

88
include (ExternalProject)
99

10-
set(eigen_archive_hash "b4fa9622b809")
10+
# We parse the current Eigen version and archive hash from the bazel configuration
11+
file(STRINGS ${PROJECT_SOURCE_DIR}/../../workspace.bzl workspace_contents)
12+
foreach(line ${workspace_contents})
13+
string(REGEX MATCH ".*eigen_version.*=.*\"(.*)\"" has_version ${line})
14+
if(has_version)
15+
set(eigen_version ${CMAKE_MATCH_1})
16+
break()
17+
endif()
18+
endforeach()
19+
foreach(line ${workspace_contents})
20+
string(REGEX MATCH ".*eigen_sha256.*=.*\"(.*)\"" has_hash ${line})
21+
if(has_hash)
22+
set(eigen_hash ${CMAKE_MATCH_1})
23+
break()
24+
endif()
25+
endforeach()
1126

1227
set(eigen_INCLUDE_DIRS
1328
${CMAKE_CURRENT_BINARY_DIR}
1429
${CMAKE_CURRENT_BINARY_DIR}/external/eigen_archive
15-
${CMAKE_CURRENT_BINARY_DIR}/external/eigen_archive/eigen-eigen-${eigen_archive_hash}
1630
${tensorflow_source_dir}/third_party/eigen3
1731
)
18-
set(eigen_URL https://bitbucket.org/eigen/eigen/get/${eigen_archive_hash}.tar.gz)
19-
set(eigen_HASH SHA256=2862840c2de9c0473a4ef20f8678949ae89ab25965352ee53329e63ba46cec62)
32+
set(eigen_URL https://bitbucket.org/eigen/eigen/get/${eigen_version}.tar.gz)
33+
set(eigen_HASH SHA256=${eigen_hash})
2034
set(eigen_BUILD ${CMAKE_CURRENT_BINARY_DIR}/eigen/src/eigen)
2135
set(eigen_INSTALL ${CMAKE_CURRENT_BINARY_DIR}/eigen/install)
2236

@@ -30,5 +44,5 @@ ExternalProject_Add(eigen
3044
-DCMAKE_BUILD_TYPE:STRING=Release
3145
-DCMAKE_VERBOSE_MAKEFILE:BOOL=OFF
3246
-DCMAKE_INSTALL_PREFIX:STRING=${eigen_INSTALL}
33-
-DINCLUDE_INSTALL_DIR:STRING=${CMAKE_CURRENT_BINARY_DIR}/external/eigen_archive/eigen-eigen-${eigen_archive_hash}
47+
-DINCLUDE_INSTALL_DIR:STRING=${CMAKE_CURRENT_BINARY_DIR}/external/eigen_archive
3448
)

tensorflow/contrib/factorization/python/ops/kmeans.py

Lines changed: 7 additions & 18 deletions
Original file line numberDiff line numberDiff line change
@@ -55,12 +55,8 @@ def __init__(self,
5555
distance_metric=clustering_ops.SQUARED_EUCLIDEAN_DISTANCE,
5656
random_seed=0,
5757
use_mini_batch=True,
58-
batch_size=128,
59-
steps=10,
6058
kmeans_plus_plus_num_retries=2,
61-
continue_training=False,
62-
config=None,
63-
verbose=1):
59+
config=None):
6460
"""Creates a model for running KMeans training and inference.
6561
6662
Args:
@@ -73,25 +69,17 @@ def __init__(self,
7369
random_seed: Python integer. Seed for PRNG used to initialize centers.
7470
use_mini_batch: If true, use the mini-batch k-means algorithm. Else assume
7571
full batch.
76-
batch_size: See TensorFlowEstimator
77-
steps: See TensorFlowEstimator
7872
kmeans_plus_plus_num_retries: For each point that is sampled during
7973
kmeans++ initialization, this parameter specifies the number of
8074
additional points to draw from the current distribution before selecting
8175
the best. If a negative value is specified, a heuristic is used to
8276
sample O(log(num_to_sample)) additional points.
83-
continue_training: See TensorFlowEstimator
84-
config: See TensorFlowEstimator
85-
verbose: See TensorFlowEstimator
77+
config: See Estimator
8678
"""
8779
super(KMeansClustering, self).__init__(
8880
model_dir=model_dir,
8981
config=config)
90-
self.batch_size = batch_size
91-
self.steps = steps
9282
self.kmeans_plus_plus_num_retries = kmeans_plus_plus_num_retries
93-
self.continue_training = continue_training
94-
self.verbose = verbose
9583
self._num_clusters = num_clusters
9684
self._training_initial_clusters = initial_clusters
9785
self._training_graph = None
@@ -135,11 +123,11 @@ def step_end(self, step, output):
135123
return relative_change < self._tolerance
136124
# pylint: enable=protected-access
137125

138-
def fit(self, x, y=None, monitors=None, logdir=None, steps=None,
126+
def fit(self, x, y=None, monitors=None, logdir=None, steps=None, batch_size=128,
139127
relative_tolerance=None):
140128
"""Trains a k-means clustering on x.
141129
142-
Note: See TensorFlowEstimator for logic for continuous training and graph
130+
Note: See Estimator for logic for continuous training and graph
143131
construction across multiple calls to fit.
144132
145133
Args:
@@ -151,6 +139,7 @@ def fit(self, x, y=None, monitors=None, logdir=None, steps=None,
151139
visualization.
152140
steps: number of training steps. If not None, overrides the value passed
153141
in constructor.
142+
batch_size: mini-batch size to use. Requires `use_mini_batch=True`.
154143
relative_tolerance: A relative tolerance of change in the loss between
155144
iterations. Stops learning if the loss changes less than this amount.
156145
Note that this may not work correctly if use_mini_batch=True.
@@ -162,7 +151,7 @@ def fit(self, x, y=None, monitors=None, logdir=None, steps=None,
162151
if logdir is not None:
163152
self._model_dir = logdir
164153
self._data_feeder = data_feeder.setup_train_data_feeder(
165-
x, None, self._num_clusters, self.batch_size)
154+
x, None, self._num_clusters, batch_size if self._use_mini_batch else None)
166155
if relative_tolerance is not None:
167156
if monitors is not None:
168157
monitors += [self._StopWhenConverged(relative_tolerance)]
@@ -173,7 +162,7 @@ def fit(self, x, y=None, monitors=None, logdir=None, steps=None,
173162
or (self.steps is not None))
174163
self._train_model(input_fn=self._data_feeder.input_builder,
175164
feed_fn=self._data_feeder.get_feed_dict_fn(),
176-
steps=steps or self.steps,
165+
steps=steps,
177166
monitors=monitors,
178167
init_feed_fn=self._data_feeder.get_feed_dict_fn())
179168
return self

tensorflow/contrib/factorization/python/ops/kmeans_test.py

Lines changed: 29 additions & 39 deletions
Original file line numberDiff line numberDiff line change
@@ -53,13 +53,14 @@ def setUp(self):
5353

5454
self.kmeans = KMeans(self.num_centers,
5555
initial_clusters=kmeans_ops.RANDOM_INIT,
56-
batch_size=self.batch_size,
5756
use_mini_batch=self.use_mini_batch,
58-
steps=30,
59-
continue_training=True,
60-
config=run_config.RunConfig(tf_random_seed=14),
57+
config=self.config(14),
6158
random_seed=12)
6259

60+
@staticmethod
61+
def config(tf_random_seed):
62+
return run_config.RunConfig(tf_random_seed=tf_random_seed)
63+
6364
@property
6465
def batch_size(self):
6566
return self.num_points
@@ -86,7 +87,7 @@ def make_random_points(centers, num_points, max_offset=20):
8687

8788
def test_clusters(self):
8889
kmeans = self.kmeans
89-
kmeans.fit(x=self.points, steps=0)
90+
kmeans.fit(x=self.points, steps=1, batch_size=8)
9091
clusters = kmeans.clusters()
9192
self.assertAllEqual(list(clusters.shape),
9293
[self.num_centers, self.num_dims])
@@ -97,10 +98,11 @@ def test_fit(self):
9798
return
9899
kmeans = self.kmeans
99100
kmeans.fit(x=self.points,
100-
steps=1)
101+
steps=1, batch_size=self.batch_size)
101102
score1 = kmeans.score(x=self.points)
102103
kmeans.fit(x=self.points,
103-
steps=15 * self.num_points // self.batch_size)
104+
steps=15 * self.num_points // self.batch_size,
105+
batch_size=self.batch_size)
104106
score2 = kmeans.score(x=self.points)
105107
self.assertTrue(score1 > score2)
106108
self.assertNear(self.true_score, score2, self.true_score * 0.05)
@@ -111,39 +113,36 @@ def test_monitor(self):
111113
return
112114
kmeans = KMeans(self.num_centers,
113115
initial_clusters=kmeans_ops.RANDOM_INIT,
114-
batch_size=self.batch_size,
115116
use_mini_batch=self.use_mini_batch,
116-
# Force it to train forever until the monitor stops it.
117-
steps=None,
118-
continue_training=True,
119117
config=run_config.RunConfig(tf_random_seed=14),
120118
random_seed=12)
121119

122120
kmeans.fit(x=self.points,
123121
# Force it to train forever until the monitor stops it.
124122
steps=None,
123+
batch_size=self.batch_size,
125124
relative_tolerance=1e-4)
126125
score = kmeans.score(x=self.points)
127126
self.assertNear(self.true_score, score, self.true_score * 0.005)
128127

129128
def test_infer(self):
130129
kmeans = self.kmeans
131-
kmeans.fit(x=self.points)
130+
kmeans.fit(x=self.points, steps=10, batch_size=128)
132131
clusters = kmeans.clusters()
133132

134133
# Make a small test set
135134
points, true_assignments, true_offsets = self.make_random_points(clusters,
136135
10)
137136
# Test predict
138-
assignments = kmeans.predict(points)
137+
assignments = kmeans.predict(points, batch_size=self.batch_size)
139138
self.assertAllEqual(assignments, true_assignments)
140139

141140
# Test score
142-
score = kmeans.score(points)
141+
score = kmeans.score(points, batch_size=128)
143142
self.assertNear(score, np.sum(true_offsets), 0.01 * score)
144143

145144
# Test transform
146-
transform = kmeans.transform(points)
145+
transform = kmeans.transform(points, batch_size=128)
147146
true_transform = np.maximum(
148147
0,
149148
np.sum(np.square(points), axis=1, keepdims=True) -
@@ -161,12 +160,9 @@ def test_fit_with_cosine_distance(self):
161160
initial_clusters=kmeans_ops.RANDOM_INIT,
162161
distance_metric=kmeans_ops.COSINE_DISTANCE,
163162
use_mini_batch=self.use_mini_batch,
164-
batch_size=4,
165-
steps=30,
166-
continue_training=True,
167-
config=run_config.RunConfig(tf_random_seed=2),
163+
config=self.config(2),
168164
random_seed=12)
169-
kmeans.fit(x=points)
165+
kmeans.fit(x=points, steps=10, batch_size=4)
170166
centers = normalize(kmeans.clusters())
171167
self.assertAllClose(np.sort(centers, axis=0),
172168
np.sort(true_centers, axis=0))
@@ -184,18 +180,16 @@ def test_transform_with_cosine_distance(self):
184180
initial_clusters=kmeans_ops.RANDOM_INIT,
185181
distance_metric=kmeans_ops.COSINE_DISTANCE,
186182
use_mini_batch=self.use_mini_batch,
187-
batch_size=8,
188-
continue_training=True,
189-
config=run_config.RunConfig(tf_random_seed=3))
190-
kmeans.fit(x=points, steps=30)
183+
config=self.config(3))
184+
kmeans.fit(x=points, steps=30, batch_size=8)
191185

192186
centers = normalize(kmeans.clusters())
193187
self.assertAllClose(np.sort(centers, axis=0),
194188
np.sort(true_centers, axis=0),
195189
atol=1e-2)
196190

197191
true_transform = 1 - cosine_similarity(points, centers)
198-
transform = kmeans.transform(points)
192+
transform = kmeans.transform(points, batch_size=8)
199193
self.assertAllClose(transform, true_transform, atol=1e-3)
200194

201195
def test_predict_with_cosine_distance(self):
@@ -217,20 +211,18 @@ def test_predict_with_cosine_distance(self):
217211
initial_clusters=kmeans_ops.RANDOM_INIT,
218212
distance_metric=kmeans_ops.COSINE_DISTANCE,
219213
use_mini_batch=self.use_mini_batch,
220-
batch_size=8,
221-
continue_training=True,
222-
config=run_config.RunConfig(tf_random_seed=3))
223-
kmeans.fit(x=points, steps=30)
214+
config=self.config(3))
215+
kmeans.fit(x=points, steps=30, batch_size=8)
224216

225217
centers = normalize(kmeans.clusters())
226218
self.assertAllClose(np.sort(centers, axis=0),
227219
np.sort(true_centers, axis=0), atol=1e-2)
228220

229-
assignments = kmeans.predict(points)
221+
assignments = kmeans.predict(points, batch_size=8)
230222
self.assertAllClose(centers[assignments],
231223
true_centers[true_assignments], atol=1e-2)
232224

233-
score = kmeans.score(points)
225+
score = kmeans.score(points, batch_size=8)
234226
self.assertAllClose(score, true_score, atol=1e-2)
235227

236228
def test_predict_with_cosine_distance_and_kmeans_plus_plus(self):
@@ -254,29 +246,27 @@ def test_predict_with_cosine_distance_and_kmeans_plus_plus(self):
254246
initial_clusters=kmeans_ops.KMEANS_PLUS_PLUS_INIT,
255247
distance_metric=kmeans_ops.COSINE_DISTANCE,
256248
use_mini_batch=self.use_mini_batch,
257-
batch_size=12,
258-
continue_training=True,
259-
config=run_config.RunConfig(tf_random_seed=3))
260-
kmeans.fit(x=points, steps=30)
249+
config=self.config(3))
250+
kmeans.fit(x=points, steps=30, batch_size=12)
261251

262252
centers = normalize(kmeans.clusters())
263253
self.assertAllClose(sorted(centers.tolist()),
264254
sorted(true_centers.tolist()),
265255
atol=1e-2)
266256

267-
assignments = kmeans.predict(points)
257+
assignments = kmeans.predict(points, batch_size=12)
268258
self.assertAllClose(centers[assignments],
269259
true_centers[true_assignments], atol=1e-2)
270260

271-
score = kmeans.score(points)
261+
score = kmeans.score(points, batch_size=12)
272262
self.assertAllClose(score, true_score, atol=1e-2)
273263

274264
def test_fit_raise_if_num_clusters_larger_than_num_points_random_init(self):
275265
points = np.array([[2.0, 3.0], [1.6, 8.2]])
276266

277267
with self.assertRaisesOpError('less'):
278268
kmeans = KMeans(num_clusters=3, initial_clusters=kmeans_ops.RANDOM_INIT)
279-
kmeans.fit(x=points)
269+
kmeans.fit(x=points, steps=10, batch_size=8)
280270

281271
def test_fit_raise_if_num_clusters_larger_than_num_points_kmeans_plus_plus(
282272
self):
@@ -285,7 +275,7 @@ def test_fit_raise_if_num_clusters_larger_than_num_points_kmeans_plus_plus(
285275
with self.assertRaisesOpError(AssertionError):
286276
kmeans = KMeans(num_clusters=3,
287277
initial_clusters=kmeans_ops.KMEANS_PLUS_PLUS_INIT)
288-
kmeans.fit(x=points)
278+
kmeans.fit(x=points, steps=10, batch_size=8)
289279

290280

291281
class MiniBatchKMeansTest(KMeansTest):

tensorflow/contrib/ios_examples/README.md

Lines changed: 9 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -72,5 +72,14 @@ rundown:
7272
unused because no other code references the variables, but in fact their
7373
constructors have the important side effect of registering the class.
7474

75+
- C++11 support (or later) should be enabled by setting `C++ Language Dialect` to
76+
`GNU++11` (or `GNU++14`), and `C++ Standard Library` to `libc++`.
77+
7578
- The library doesn't currently support bitcode, so you'll need to disable that
7679
in your project settings.
80+
81+
- Remove any use of the `-all_load` flag in your project. The protocol buffers
82+
libraries (full and lite versions) contain duplicate symbols, and the `-all_load`
83+
flag will cause these duplicates to become link errors. If you were using
84+
`-all_load` to avoid issues with Objective-C categories in static libraries,
85+
you may be able to replace it with the `-ObjC` flag.

tensorflow/contrib/learn/python/learn/estimators/classifier.py

Lines changed: 3 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -47,7 +47,9 @@ def __init__(self, model_fn, n_classes, model_dir=None, config=None):
4747
Args:
4848
model_fn: (targets, predictions, mode) -> logits, loss, train_op
4949
n_classes: Number of classes
50-
model_dir: Base directory for output data
50+
model_dir: Directory to save model parameters, graph and etc. This can also
51+
be used to load checkpoints from the directory into a estimator to continue
52+
training a previously saved model.
5153
config: Configuration object (optional)
5254
"""
5355
self._n_classes = n_classes

0 commit comments

Comments
 (0)