Skip to content

Commit 78fe194

Browse files
committed
Merge commit for internal changes
2 parents 7fa0cf3 + 697f34c commit 78fe194

File tree

129 files changed

+8279
-1675
lines changed

Some content is hidden

Large commits have some content hidden by default. Use the search box below to find content that may be hidden.

129 files changed

+8279
-1675
lines changed

tensorflow/BUILD

+1
Original file line numberDiff line numberDiff line change
@@ -342,6 +342,7 @@ filegroup(
342342
"//tensorflow/tensorboard/components/tf_globals:all_files",
343343
"//tensorflow/tensorboard/components/tf_globals_d3v4:all_files",
344344
"//tensorflow/tensorboard/components/tf_graph_common:all_files",
345+
"//tensorflow/tensorboard/components/tf_graph_loader:all_files",
345346
"//tensorflow/tensorboard/components/tf_histogram_dashboard:all_files",
346347
"//tensorflow/tensorboard/components/tf_histogram_dashboard/demo:all_files",
347348
"//tensorflow/tensorboard/components/tf_image_dashboard:all_files",

tensorflow/compiler/aot/BUILD

+1-1
Original file line numberDiff line numberDiff line change
@@ -73,7 +73,7 @@ cc_library(
7373
"//tensorflow/compiler/xla:util",
7474
"//tensorflow/compiler/xla:xla_data_proto",
7575
"//tensorflow/compiler/xla/client:client_library",
76-
"//tensorflow/compiler/xla/client:local_client",
76+
"//tensorflow/compiler/xla/client:compile_only_client",
7777
"//tensorflow/compiler/xla/service:compiler",
7878
"//tensorflow/compiler/xla/service/cpu:cpu_compiler",
7979
"//tensorflow/core:core_cpu",

tensorflow/compiler/aot/compile.cc

+10-7
Original file line numberDiff line numberDiff line change
@@ -27,7 +27,7 @@ limitations under the License.
2727
#include "tensorflow/compiler/tf2xla/xla_compiler.h"
2828
#include "tensorflow/compiler/tf2xla/xla_op_registry.h"
2929
#include "tensorflow/compiler/xla/client/client_library.h"
30-
#include "tensorflow/compiler/xla/client/local_client.h"
30+
#include "tensorflow/compiler/xla/client/compile_only_client.h"
3131
#include "tensorflow/compiler/xla/service/compiler.h"
3232
#include "tensorflow/compiler/xla/service/cpu/cpu_compiler.h"
3333
#include "tensorflow/compiler/xla/shape_util.h"
@@ -274,7 +274,8 @@ Status CreateXlaArgs(const Graph& graph,
274274

275275
// Converts the TensorFlow graph into an XLA computation, by executing the
276276
// graph symbolically, with each op building up the XLA HLO.
277-
Status ConvertGraphToXla(xla::LocalClient* client, std::unique_ptr<Graph> graph,
277+
Status ConvertGraphToXla(xla::CompileOnlyClient* client,
278+
std::unique_ptr<Graph> graph,
278279
xla::Computation* computation, bool* has_context_arg) {
279280
// Create a device and context to convert the graph into an XLA computation.
280281
XlaOpRegistry::RegisterCompilationKernels();
@@ -333,7 +334,8 @@ Status ConvertGraphToXla(xla::LocalClient* client, std::unique_ptr<Graph> graph,
333334
}
334335

335336
// Compiles the XLA computation into executable code.
336-
Status CompileXla(xla::LocalClient* client, const xla::Computation& computation,
337+
Status CompileXla(xla::CompileOnlyClient* client,
338+
const xla::Computation& computation,
337339
const xla::cpu::CpuAotCompilationOptions& aot_opts,
338340
CompileResult* compile_result) {
339341
// Retrieves arg and result layouts from the computation.
@@ -350,7 +352,7 @@ Status CompileXla(xla::LocalClient* client, const xla::Computation& computation,
350352
for (int i = 0; i < pshape->parameters_size(); ++i) {
351353
arg_layouts.push_back(pshape->mutable_parameters(i));
352354
}
353-
xla::LocalClient::AheadOfTimeComputationInstance instance;
355+
xla::CompileOnlyClient::AotComputationInstance instance;
354356
instance.computation = &computation;
355357
instance.argument_layouts = std::move(arg_layouts);
356358
instance.result_layout = &pshape->result();
@@ -365,7 +367,7 @@ Status CompileXla(xla::LocalClient* client, const xla::Computation& computation,
365367
std::move(aot_or.ValueOrDie().back()));
366368
compile_result->entry_point = aot_opts.entry_point_name();
367369
compile_result->pointer_size =
368-
xla::LocalClient::PointerSizeForTriple(aot_opts.triple());
370+
xla::CompileOnlyClient::PointerSizeForTriple(aot_opts.triple());
369371
return Status::OK();
370372
}
371373

@@ -394,8 +396,9 @@ Status CompileGraph(std::unique_ptr<Graph> graph, const MainFlags& flags,
394396
namespace gpu = perftools::gputools;
395397
gpu::Platform* cpu_platform =
396398
gpu::MultiPlatformManager::PlatformWithName("Host").ValueOrDie();
397-
xla::LocalClient* client =
398-
xla::ClientLibrary::GetOrCreateLocalClient(cpu_platform).ValueOrDie();
399+
xla::CompileOnlyClient* client =
400+
xla::ClientLibrary::GetOrCreateCompileOnlyClient(cpu_platform)
401+
.ValueOrDie();
399402
xla::Computation computation;
400403
TF_RETURN_IF_ERROR(ConvertGraphToXla(client, std::move(graph), &computation,
401404
&compile_result->has_context_arg));

tensorflow/compiler/xla/client/BUILD

+22
Original file line numberDiff line numberDiff line change
@@ -99,19 +99,41 @@ cc_library(
9999
],
100100
)
101101

102+
cc_library(
103+
name = "compile_only_client",
104+
srcs = ["compile_only_client.cc"],
105+
hdrs = ["compile_only_client.h"],
106+
deps = [
107+
":client",
108+
":computation",
109+
"//tensorflow/compiler/xla:status_macros",
110+
"//tensorflow/compiler/xla:statusor",
111+
"//tensorflow/compiler/xla:util",
112+
"//tensorflow/compiler/xla:xla_data_proto",
113+
"//tensorflow/compiler/xla/service:compile_only_service",
114+
"//tensorflow/compiler/xla/service:compiler",
115+
"//tensorflow/compiler/xla/service/llvm_ir:llvm_util",
116+
"//tensorflow/core:lib",
117+
"//tensorflow/core:stream_executor_no_cuda",
118+
"@llvm//:support",
119+
],
120+
)
121+
102122
# This target is used to instantiate the XLA service in-process and create
103123
# a client for it.
104124
cc_library(
105125
name = "client_library",
106126
srcs = ["client_library.cc"],
107127
hdrs = ["client_library.h"],
108128
deps = [
129+
":compile_only_client",
109130
":local_client",
110131
"//tensorflow/compiler/xla:status_macros",
111132
"//tensorflow/compiler/xla:statusor",
112133
"//tensorflow/compiler/xla:types",
113134
"//tensorflow/compiler/xla:util",
114135
"//tensorflow/compiler/xla/service:backend",
136+
"//tensorflow/compiler/xla/service:compile_only_service",
115137
"//tensorflow/compiler/xla/service:device_memory_allocator",
116138
"//tensorflow/compiler/xla/service:local_service",
117139
"//tensorflow/compiler/xla/service:platform_util",

tensorflow/compiler/xla/client/client_library.cc

+32-6
Original file line numberDiff line numberDiff line change
@@ -69,22 +69,22 @@ ClientLibrary::~ClientLibrary() = default;
6969
TF_ASSIGN_OR_RETURN(platform, PlatformUtil::GetDefaultPlatform());
7070
}
7171

72-
auto it = client_library.instances_.find(platform->id());
73-
if (it != client_library.instances_.end()) {
72+
auto it = client_library.local_instances_.find(platform->id());
73+
if (it != client_library.local_instances_.end()) {
7474
return it->second->client.get();
7575
}
7676

7777
ServiceOptions service_options;
7878
service_options.set_platform(platform);
7979
service_options.set_number_of_replicas(replica_count);
8080

81-
std::unique_ptr<LocalInstance> instance = MakeUnique<LocalInstance>();
81+
auto instance = MakeUnique<LocalInstance>();
8282
TF_ASSIGN_OR_RETURN(instance->service,
8383
LocalService::NewService(service_options));
8484
instance->client = MakeUnique<LocalClient>(instance->service.get());
8585
LocalClient* cl = instance->client.get();
8686

87-
client_library.instances_.insert(
87+
client_library.local_instances_.insert(
8888
std::make_pair(platform->id(), std::move(instance)));
8989
return cl;
9090
}
@@ -99,9 +99,35 @@ ClientLibrary::~ClientLibrary() = default;
9999
perftools::gputools::Platform* platform) {
100100
ClientLibrary& client_library = Singleton();
101101
tensorflow::mutex_lock lock(client_library.service_mutex_);
102-
auto it = client_library.instances_.find(platform->id());
103-
CHECK(it != client_library.instances_.end());
102+
auto it = client_library.local_instances_.find(platform->id());
103+
CHECK(it != client_library.local_instances_.end());
104104
return it->second->service.get();
105105
}
106106

107+
/* static */ StatusOr<CompileOnlyClient*>
108+
ClientLibrary::GetOrCreateCompileOnlyClient(
109+
perftools::gputools::Platform* platform) {
110+
ClientLibrary& client_library = Singleton();
111+
tensorflow::mutex_lock lock(client_library.service_mutex_);
112+
113+
if (platform == nullptr) {
114+
TF_ASSIGN_OR_RETURN(platform, PlatformUtil::GetDefaultPlatform());
115+
}
116+
117+
auto it = client_library.compile_only_instances_.find(platform->id());
118+
if (it != client_library.compile_only_instances_.end()) {
119+
return it->second->client.get();
120+
}
121+
122+
auto instance = MakeUnique<CompileOnlyInstance>();
123+
TF_ASSIGN_OR_RETURN(instance->service,
124+
CompileOnlyService::NewService(platform));
125+
instance->client = MakeUnique<CompileOnlyClient>(instance->service.get());
126+
CompileOnlyClient* cl = instance->client.get();
127+
128+
client_library.compile_only_instances_.insert(
129+
std::make_pair(platform->id(), std::move(instance)));
130+
return cl;
131+
}
132+
107133
} // namespace xla

tensorflow/compiler/xla/client/client_library.h

+21-1
Original file line numberDiff line numberDiff line change
@@ -26,7 +26,9 @@ limitations under the License.
2626
#include <string>
2727
#include <vector>
2828

29+
#include "tensorflow/compiler/xla/client/compile_only_client.h"
2930
#include "tensorflow/compiler/xla/client/local_client.h"
31+
#include "tensorflow/compiler/xla/service/compile_only_service.h"
3032
#include "tensorflow/compiler/xla/service/device_memory_allocator.h"
3133
#include "tensorflow/compiler/xla/service/local_service.h"
3234
#include "tensorflow/compiler/xla/statusor.h"
@@ -76,6 +78,13 @@ class ClientLibrary {
7678
// access user computations from client.
7779
static LocalService* GetXlaService(perftools::gputools::Platform* platform);
7880

81+
// Singleton constructor-or-accessor for compile-only clients. Arguments:
82+
//
83+
// platform : The platform the underlying XLA service should target. If
84+
// null then default platform is used.
85+
static StatusOr<CompileOnlyClient*> GetOrCreateCompileOnlyClient(
86+
perftools::gputools::Platform* platform = nullptr);
87+
7988
private:
8089
// Returns the singleton instance of ClientLibrary.
8190
static ClientLibrary& Singleton();
@@ -90,10 +99,21 @@ class ClientLibrary {
9099
std::unique_ptr<LocalClient> client;
91100
};
92101

102+
struct CompileOnlyInstance {
103+
// Service that is wrapped by the singleton client object.
104+
std::unique_ptr<CompileOnlyService> service;
105+
// Singleton client object.
106+
std::unique_ptr<CompileOnlyClient> client;
107+
};
108+
93109
tensorflow::mutex service_mutex_; // Guards the singleton creation state.
94110
std::unordered_map<perftools::gputools::Platform::Id,
95111
std::unique_ptr<LocalInstance>>
96-
instances_ GUARDED_BY(service_mutex_);
112+
local_instances_ GUARDED_BY(service_mutex_);
113+
114+
std::unordered_map<perftools::gputools::Platform::Id,
115+
std::unique_ptr<CompileOnlyInstance>>
116+
compile_only_instances_ GUARDED_BY(service_mutex_);
97117

98118
TF_DISALLOW_COPY_AND_ASSIGN(ClientLibrary);
99119
};
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,59 @@
1+
/* Copyright 2017 The TensorFlow Authors. All Rights Reserved.
2+
3+
Licensed under the Apache License, Version 2.0 (the "License");
4+
you may not use this file except in compliance with the License.
5+
You may obtain a copy of the License at
6+
7+
http://www.apache.org/licenses/LICENSE-2.0
8+
9+
Unless required by applicable law or agreed to in writing, software
10+
distributed under the License is distributed on an "AS IS" BASIS,
11+
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12+
See the License for the specific language governing permissions and
13+
limitations under the License.
14+
==============================================================================*/
15+
16+
#include "tensorflow/compiler/xla/client/compile_only_client.h"
17+
18+
#include "external/llvm/include/llvm/ADT/Triple.h"
19+
#include "tensorflow/compiler/xla/ptr_util.h"
20+
#include "tensorflow/compiler/xla/service/llvm_ir/llvm_util.h"
21+
#include "tensorflow/compiler/xla/status_macros.h"
22+
23+
namespace se = ::perftools::gputools;
24+
25+
namespace xla {
26+
27+
StatusOr<std::vector<std::unique_ptr<AotCompilationResult>>>
28+
CompileOnlyClient::CompileAheadOfTime(
29+
const tensorflow::gtl::ArraySlice<AotComputationInstance> computations,
30+
const AotCompilationOptions& options) {
31+
std::vector<CompileOnlyService::AotComputationInstance> service_instances;
32+
service_instances.reserve(computations.size());
33+
for (const AotComputationInstance& instance : computations) {
34+
service_instances.push_back({});
35+
CompileOnlyService::AotComputationInstance& service_instance =
36+
service_instances.back();
37+
TF_RET_CHECK(instance.computation != nullptr);
38+
service_instance.computation = instance.computation->handle();
39+
service_instance.argument_layouts = instance.argument_layouts;
40+
service_instance.result_layout = instance.result_layout;
41+
}
42+
return compiler_service_->CompileAheadOfTime(service_instances, options);
43+
}
44+
45+
int64 CompileOnlyClient::PointerSizeForTriple(
46+
tensorflow::StringPiece target_triple) {
47+
llvm::Triple triple(
48+
llvm::Triple::normalize(llvm_ir::AsStringRef(target_triple)));
49+
if (triple.isArch64Bit()) {
50+
return 8;
51+
} else if (triple.isArch32Bit()) {
52+
return 4;
53+
} else {
54+
CHECK(triple.isArch16Bit());
55+
return 2;
56+
}
57+
}
58+
59+
} // namespace xla
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,66 @@
1+
/* Copyright 2017 The TensorFlow Authors. All Rights Reserved.
2+
3+
Licensed under the Apache License, Version 2.0 (the "License");
4+
you may not use this file except in compliance with the License.
5+
You may obtain a copy of the License at
6+
7+
http://www.apache.org/licenses/LICENSE-2.0
8+
9+
Unless required by applicable law or agreed to in writing, software
10+
distributed under the License is distributed on an "AS IS" BASIS,
11+
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12+
See the License for the specific language governing permissions and
13+
limitations under the License.
14+
==============================================================================*/
15+
16+
#ifndef TENSORFLOW_COMPILER_XLA_CLIENT_COMPILE_ONLY_CLIENT_H_
17+
#define TENSORFLOW_COMPILER_XLA_CLIENT_COMPILE_ONLY_CLIENT_H_
18+
19+
#include "tensorflow/compiler/xla/client/client.h"
20+
#include "tensorflow/compiler/xla/client/computation.h"
21+
#include "tensorflow/compiler/xla/service/compile_only_service.h"
22+
#include "tensorflow/compiler/xla/service/compiler.h"
23+
#include "tensorflow/compiler/xla/statusor.h"
24+
#include "tensorflow/compiler/xla/xla_data.pb.h"
25+
#include "tensorflow/core/platform/stream_executor_no_cuda.h"
26+
27+
namespace xla {
28+
29+
// An XLA Client specialization for doing ahead-of-time compilation. This does
30+
// not require (or attempt to instantiate) an execution-capable backend for the
31+
// relevant platform.
32+
class CompileOnlyClient : public Client {
33+
public:
34+
explicit CompileOnlyClient(CompileOnlyService* service)
35+
: Client(service), compiler_service_(service) {}
36+
37+
CompileOnlyClient(const CompileOnlyClient&) = delete;
38+
void operator=(const CompileOnlyClient&) = delete;
39+
40+
// A description of a computation to compile using CompileAheadOfTime.
41+
struct AotComputationInstance {
42+
const Computation* computation;
43+
// Inform the compiler of the expected layout for arguments.
44+
std::vector<const Shape*> argument_layouts;
45+
// Specifies the expected result layout.
46+
const Shape* result_layout;
47+
};
48+
49+
// Compiles a list of computations for ahead-of-time execution. This is
50+
// intended for use in static compilation. The |options| parameter describes
51+
// the target for which the compiler should emit code.
52+
StatusOr<std::vector<std::unique_ptr<AotCompilationResult>>>
53+
CompileAheadOfTime(
54+
const tensorflow::gtl::ArraySlice<AotComputationInstance> computations,
55+
const AotCompilationOptions& options);
56+
57+
// Returns the size of a pointer in bytes for a given triple.
58+
static int64 PointerSizeForTriple(tensorflow::StringPiece triple);
59+
60+
private:
61+
CompileOnlyService* compiler_service_;
62+
};
63+
64+
} // namespace xla
65+
66+
#endif // TENSORFLOW_COMPILER_XLA_CLIENT_COMPILE_ONLY_CLIENT_H_

tensorflow/compiler/xla/client/global_data.h

+4-2
Original file line numberDiff line numberDiff line change
@@ -23,13 +23,15 @@ limitations under the License.
2323

2424
namespace xla {
2525

26-
// Wraps a GlobalDataHandle with a lifetime.
26+
// A GlobalData object represents a globally-accessible allocation of
27+
// data in the associated XLA service.
2728
class GlobalData {
2829
public:
2930
// Gives ownership of the global data handle to this object.
3031
GlobalData(ServiceInterface* parent, GlobalDataHandle handle);
3132

32-
// Unregisters the wrapped handle.
33+
// Unregisters the wrapped handle, which causes the service to
34+
// deallocate the associated data.
3335
~GlobalData();
3436

3537
const GlobalDataHandle& handle() const { return handle_; }

0 commit comments

Comments
 (0)