# Copyright 2020 The Fuchsia Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Recipe for training ML inliner model for Clang."""

from google.protobuf import json_format as jsonpb

from PB.go.chromium.org.luci.buildbucket.proto import build as build_pb2
from PB.go.chromium.org.luci.buildbucket.proto import common as common_pb2
from PB.recipes.fuchsia.contrib.clang_ml_training import InputProperties

PYTHON_VERSION_COMPATIBILITY = "PY3"

DEPS = [
    "fuchsia/build",
    "fuchsia/buildbucket_util",
    "fuchsia/checkout",
    "fuchsia/cipd_util",
    "fuchsia/git",
    "fuchsia/jiri",
    "fuchsia/sso",
    "recipe_engine/buildbucket",
    "recipe_engine/cas",
    "recipe_engine/cipd",
    "recipe_engine/context",
    "recipe_engine/file",
    "recipe_engine/json",
    "recipe_engine/path",
    "recipe_engine/properties",
    "recipe_engine/raw_io",
    "recipe_engine/scheduler",
    "recipe_engine/step",
]

PROPERTIES = InputProperties

PLATFORM_TO_TARGET = {
    "linux-amd64": "x86_64-unknown-linux-gnu",
    "linux-arm64": "aarch64-unknown-linux-gnu",
    "mac-amd64": "x86_64-apple-darwin",
    "mac-arm64": "arm64-apple-darwin",
    "windows-amd64": "x86_64-pc-windows-msvc",
    "windows-arm64": "aarch64-pc-windows-msvc",
}


def RunSteps(api, props):
    props.step_iterations = props.step_iterations or 1000
    props.total_iterations = props.total_iterations or 5000

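    # Check out the ml-compiler-opt project at a pinned revision; it provides
    # the corpus extraction and training scripts invoked throughout this recipe.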
    with api.step.nest("git"), api.context(infra_steps=True):
        ml_compiler_opt_dir, _ = api.git.checkout(
            "https://github.com/google/ml-compiler-opt",
            ref="0d8aee2f84166fc1e84474bcda89cb65b6e9c8a8",
        )
    compiler_opt_dir = ml_compiler_opt_dir.join("compiler_opt")

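    # Drop the checked-in vocabulary; a fresh one is generated from the default
    # trace during bootstrap (or downloaded from CAS on later iterations).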
    vocab_dir = compiler_opt_dir.join("rl", "inlining", "vocab")
    api.file.rmcontents("remove default vocab", vocab_dir)

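    # If a previous iteration already produced the intermediate artifacts,
    # fetch them from CAS instead of rebuilding them.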
    if (
        props.clang_cas_digest
        and props.corpus_cas_digest
        and props.vocab_cas_digest
        and props.warmstart_cas_digest
    ):
        clang_dir = api.path["start_dir"].join("clang")
        api.cas.download("download clang", props.clang_cas_digest, clang_dir)
        corpus_dir = api.path["start_dir"].join("corpus")
        api.cas.download("download corpus", props.corpus_cas_digest, corpus_dir)
        api.cas.download("download vocab", props.vocab_cas_digest, vocab_dir)
        warmstart_dir = api.path["start_dir"].join("warmstart")
        api.cas.download(
            "download warmstart", props.warmstart_cas_digest, warmstart_dir
        )
    else:
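        # Bootstrap: pin the checkout to the canary release branch and build
        # Clang, the training corpus, and the warmstart model from scratch.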
        props.revision = props.revision or api.git.get_remote_branch_head(
            api.sso.sso_to_https(props.remote),
            "refs/heads/releases/canary",
        )
        checkout = api.checkout.fuchsia_with_options(
            build_input=build_pb2.Build.Input(
                gitiles_commit=common_pb2.GitilesCommit(
                    id=props.revision,
                    project=props.project,
                ),
            ),
            project=props.project,
            manifest=props.manifest,
            remote=props.remote,
        )

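        # Record the exact Fuchsia revision and the pinned Clang version from
        # the jiri manifests so later builds in the chain stay reproducible.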
        with api.context(cwd=checkout.root_dir):
            project_data = api.jiri.project(["fuchsia"]).json.output
            assert len(project_data) == 1
            props.fuchsia_git_revision = project_data[0]["revision"]

            package_data = api.jiri.package(
                ["fuchsia/third_party/clang/${platform}"],
                test_data=[
                    {
                        "name": "fuchsia/third_party/clang/${platform}",
                        "manifest": str(
                            checkout.root_dir.join("integration", "prebuilts")
                        ),
                        "path": str(
                            checkout.root_dir.join(
                                "prebuilt", "third_party", "clang", "linux-x64"
                            )
                        ),
                        "platforms": [
                            "linux-amd64",
                            "linux-arm64",
                            "mac-amd64",
                            "windows-amd64",
                        ],
                        "version": "git_revision:f52666985d7011b539f26f54e09a5c89b62dad56",
                    }
                ],
            ).json.output
            assert len(package_data) == 1
            clang_version = package_data[0]["version"].split(":", 1)[1]

        with api.step.nest("llvm"):
            with api.step.nest("git"), api.context(infra_steps=True):
                llvm_project_dir, _ = api.git.checkout(
                    "https://llvm.googlesource.com/llvm-project", ref=clang_version
                )

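            # Fetch build dependencies: the Fuchsia SDK and Linux sysroot,
            # prebuilt libtensorflow for the model runtime, plus CMake and Ninja.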
            cipd_dir = api.path["start_dir"].join("cipd")
            pkgs = api.cipd.EnsureFile()
            pkgs.add_package("fuchsia/sdk/core/linux-amd64", "latest", "sdk")
            pkgs.add_package(
                "fuchsia/third_party/sysroot/linux",
                "cRfnTYaepVtGDfQL7T9Y-ma4NvFkOY_8Sxs16dzh4_UC",
                "linux",
            )
            pkgs.add_package(
                "fuchsia/third_party/libtensorflow/${platform}",
                "version:1.15.0",
                "libtensorflow",
            )
            pkgs.add_package(
                "fuchsia/third_party/cmake/${platform}",
                "integration",
            )
            pkgs.add_package(
                "fuchsia/third_party/ninja/${platform}",
                "integration",
            )
            api.cipd.ensure(cipd_dir, pkgs)
            sdk_dir = cipd_dir.join("sdk")
            sysroot_dir = cipd_dir.join("linux")

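            # Install TensorFlow into a virtualenv; its proto headers are
            # needed to configure the Clang build below.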
            venv_dir = api.path["start_dir"].join("tensorflow-venv")
            tensorflow_path = api.step(
                "get tensorflow",
                cmd=[
                    "python3",
                    "-u",
                    api.resource("get_tensorflow.py"),
                    "-vpython-root",
                    venv_dir,
                ],
                stdout=api.raw_io.output_text(
                    name="tensorflow-path", add_output_log=True
                ),
                step_test_data=lambda: api.raw_io.test_api.stream_output_text(
                    str(
                        venv_dir.join("lib", "python3.8", "site-packages", "tensorflow")
                    )
                ),
            ).stdout.strip()

            # Run CMake to configure Clang build.
            build_dir = api.path["start_dir"].join("llvm_build")
            api.step(
                "configure",
                [
                    cipd_dir.join("bin", "cmake"),
                    "-G",
                    "Ninja",
                    "-DCMAKE_MAKE_PROGRAM=%s" % cipd_dir.join("ninja"),
                    "-DCMAKE_INSTALL_PREFIX=",
                    "-DLLVM_ENABLE_LTO=OFF",
                    "-DCMAKE_TOOLCHAIN_FILE=%s"
                    % checkout.root_dir.join("scripts", "clang", "ToolChain.cmake"),
                    "-DLINUX_x86_64-unknown-linux-gnu_SYSROOT=%s" % sysroot_dir,
                    "-DLINUX_aarch64-unknown-linux-gnu_SYSROOT=%s" % sysroot_dir,
                    "-DFUCHSIA_SDK=%s" % cipd_dir.join("sdk"),
                    "-DTENSORFLOW_C_LIB_PATH=%s" % cipd_dir.join("libtensorflow"),
                    "-DTF_PROTO_HEADERS=%s" % (tensorflow_path + "/include"),
                    "-DCMAKE_INSTALL_RPATH_USE_LINK_PATH=ON",
                    "-DCMAKE_FIND_ROOT_PATH=%s" % cipd_dir.join("libtensorflow"),
                    "-DLLVM_STATIC_LINK_CXX_STDLIB=OFF",
                    "-DLLVM_ENABLE_LIBCXX=OFF",
                    "-DCMAKE_CXX_FLAGS=-stdlib=libstdc++",
                    "-DCMAKE_SHARED_LINKER_FLAGS=-stdlib=libstdc++",
                    "-DCMAKE_MODULE_LINKER_FLAGS=-stdlib=libstdc++",
                    "-DCMAKE_EXE_LINKER_FLAGS=-stdlib=libstdc++",
                    "-DCMAKE_SYSROOT=%s" % sysroot_dir,
                    "-C",
                    llvm_project_dir.join(
                        "clang",
                        "cmake",
                        "caches",
                        "Fuchsia-stage2.cmake",
                    ),
                    "-S",
                    llvm_project_dir.join("llvm"),
                    "-B",
                    build_dir,
                ],
            )

            # Build the Clang distribution.
            api.step(
                "build",
                [
                    cipd_dir.join("ninja"),
                    "-C",
                    build_dir,
                    "distribution",
                ],
            )

            # Create the Clang toolchain.
            clang_dir = api.path["start_dir"].join("clang")
            with api.context(env={"DESTDIR": clang_dir}):
                api.step(
                    "install",
                    [
                        cipd_dir.join("ninja"),
                        "-C",
                        build_dir,
                        "install-distribution",
                    ],
                )
            api.step(
                "generate runtimes.json",
                cmd=[
                    "vpython3",
                    "-vpython-spec",
                    api.resource("tensorflow.vpython"),
                    "-u",
                    checkout.root_dir.join("scripts", "clang", "generate_runtimes.py"),
                    "--clang-prefix=%s" % clang_dir,
                    "--sdk-dir=%s" % sdk_dir,
                    "--build-id-dir=%s" % clang_dir.join("lib", ".build-id"),
                ],
                stdout=api.raw_io.output_text(
                    leak_to=clang_dir.join("lib", "runtime.json"), add_output_log=True
                ),
            )
            for f in api.file.listdir(
                "tensorflow libraries",
                cipd_dir.join("libtensorflow", "lib"),
                test_data=["libtensorflow.so"],
            ):
                api.file.copy("copy %s" % f, f, clang_dir.join("lib"))

            # Use the just-built Clang toolchain to build Fuchsia.
            api.build.clang_toolchain_dir = clang_dir

            # Upload the Clang binaries to CAS.
            props.clang_cas_digest = api.cas.archive("archive clang", clang_dir)

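        # Build Fuchsia with bitcode embedded and extract the IR that forms the
        # training corpus.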
        with api.step.nest("corpus"):
            corpus_dir = api.path["start_dir"].join("corpus")

            for target_arch, fint_params_path in props.fint_params_paths.items():
                with api.step.nest(target_arch):
                    fint_params = api.file.read_text(
                        "read fint params",
                        checkout.root_dir.join(fint_params_path),
                        test_data='field: "value"',
                    )
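                    # Embed bitcode so IR can be extracted from the objects,
                    # and disable the ML inliner so the corpus reflects the
                    # default heuristic.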
                    fint_params += 'gn_args: "clang_embed_bitcode = true"\n'
                    fint_params += 'gn_args: "clang_ml_inliner = false"\n'
                    api.file.write_text(
                        "write fint params",
                        checkout.root_dir.join(fint_params_path),
                        fint_params,
                    )

                    # Build Fuchsia for each target architecture...
                    build_result = api.build.with_options(
                        checkout=checkout,
                        fint_params_path=fint_params_path,
                        build_dir=checkout.root_dir.join("out", target_arch),
                        clean_check=False,
                    )

                    with api.step.nest("check binary sizes") as presentation:
                        presentation.logs["binary sizes JSON output"] = api.json.dumps(
                            build_result.binary_sizes, indent=2
                        )

                    # ...and extract IR from the generated object files.
                    api.step(
                        "extract ir",
                        cmd=[
                            "vpython3",
                            "-vpython-spec",
                            api.resource("tensorflow.vpython"),
                            "-u",
                            compiler_opt_dir.join("tools", "extract_ir.py"),
                            "--cmd_filter=^-O2|-Os|-Oz$",
                            "--input=%s" % build_result.compdb_path,
                            "--input_type=json",
                            "--llvm_objcopy_path=%s"
                            % build_result.tool("llvm-objcopy"),
                            "--output_dir=%s" % corpus_dir.join(target_arch),
                        ],
                    )

            # Combine the per-architecture IR into a single training corpus.
            api.step(
                "combine training corpus",
                cmd=[
                    "vpython3",
                    "-vpython-spec",
                    api.resource("tensorflow.vpython"),
                    "-u",
                    compiler_opt_dir.join("tools", "combine_training_corpus.py"),
                    "--root_dir=%s" % corpus_dir,
                ],
            )

            # Upload the corpus to CAS.
            props.corpus_cas_digest = api.cas.archive("archive corpus", corpus_dir)

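        # Bootstrap the training inputs: trace the default heuristic, derive a
        # feature vocabulary, and behavior-clone a 'warmstart' model.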
        with api.step.nest("train"), api.context(
            cwd=ml_compiler_opt_dir, env_prefixes={"PYTHONPATH": [ml_compiler_opt_dir]}
        ):
            default_trace_dir = api.path["start_dir"].join("default_trace")

            # Collect traces from the default heuristic, to kick off the training
            # process.
            api.step(
                "generate default trace",
                cmd=[
                    "vpython3",
                    "-vpython-spec",
                    api.resource("tensorflow.vpython"),
                    "-u",
                    compiler_opt_dir.join("tools", "generate_default_trace.py"),
                    "--data_path=%s" % corpus_dir,
                    "--output_path=%s" % default_trace_dir,
                    "--compile_task=inlining",
                    "--clang_path=%s" % clang_dir.join("bin", "clang"),
                    "--llvm_size_path=%s" % clang_dir.join("bin", "llvm-size"),
                    "--sampling_rate=0.2",
                ],
            )

            default_trace_cas_digest = api.cas.archive(
                "archive default trace", default_trace_dir
            )

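            # Derive the sparse feature vocabulary from the default trace.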
            api.step(
                "generate default vocab",
                cmd=[
                    "vpython3",
                    "-vpython-spec",
                    api.resource("tensorflow.vpython"),
                    "-u",
                    compiler_opt_dir.join("tools", "sparse_bucket_generator.py"),
                    "--input=%s" % default_trace_dir,
                    "--output_dir=%s" % vocab_dir,
                ],
            )

            props.vocab_cas_digest = api.cas.archive("archive default vocab", vocab_dir)

            warmstart_dir = api.path["start_dir"].join("warmstart")

            # Train a behavioral cloning model on the above trace that mimics
            # the default inlining behavior. This is the 'warmstart' model.
            api.step(
                "train bc",
                cmd=[
                    "vpython3",
                    "-vpython-spec",
                    api.resource("tensorflow.vpython"),
                    "-u",
                    compiler_opt_dir.join("rl", "train_bc.py"),
                    "--root_dir=%s" % warmstart_dir,
                    "--data_path=%s" % default_trace_dir,
                    "--gin_files=%s"
                    % compiler_opt_dir.join(
                        "rl",
                        "inlining",
                        "gin_configs",
                        "behavioral_cloning_nn_agent.gin",
                    ),
                ],
            )

            # Upload the warmstart model to CAS.
            props.warmstart_cas_digest = api.cas.archive(
                "archive warmstart", warmstart_dir
            )

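        # Re-trigger this builder with the CAS digests of the artifacts built
        # above; the next build resumes from them and starts training.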
        api.scheduler.emit_trigger(
            api.scheduler.BuildbucketTrigger(
                properties=jsonpb.MessageToDict(
                    props, preserving_proto_field_name=True
                ),
            ),
            project=api.buildbucket.build.builder.project,
            jobs=[
                "prod-clang-ml-training"
                if api.buildbucket.builder_id.bucket == "prod"
                else "toolchain.ci-clang-ml-training"
            ],
        )

        return

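    # Resume from the model checkpoint produced by the previous iteration, if
    # there is one.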
    model_dir = api.path["start_dir"].join("model")
    if props.model_cas_digest:
        api.cas.download("download model", props.model_cas_digest, model_dir)

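    # Train in increments: each build runs step_iterations more policy
    # iterations and re-triggers itself until total_iterations is reached.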
    if props.iterations < props.total_iterations:
        with api.step.nest("train"), api.context(
            cwd=ml_compiler_opt_dir,
            env={"TF_CPP_MIN_LOG_LEVEL": "2"},
            env_prefixes={"PYTHONPATH": [ml_compiler_opt_dir]},
        ):
            props.iterations += props.step_iterations

            # Starting from the 'warmstart' model, train the optimized model.
            api.step(
                "train locally",
                cmd=[
                    "vpython3",
                    "-vpython-spec",
                    api.resource("tensorflow.vpython"),
                    "-u",
                    compiler_opt_dir.join("rl", "train_locally.py"),
                    "--root_dir=%s" % model_dir,
                    "--data_path=%s" % corpus_dir,
                    "--clang_path=%s" % clang_dir.join("bin", "clang"),
                    "--llvm_size_path=%s" % clang_dir.join("bin", "llvm-size"),
                    "--num_modules=100",
                    "--gin_files=%s"
                    % compiler_opt_dir.join(
                        "rl", "inlining", "gin_configs", "ppo_nn_agent.gin"
                    ),
                    '--gin_bindings=train_eval.warmstart_policy_dir="%s"'
                    % warmstart_dir.join("saved_policy"),
                    "--gin_bindings=train_eval.num_policy_iterations=%d"
                    % props.iterations,
                    # TODO(phosek): Re-enable this once the issue we've seen on bots is addressed.
                    "--gin_bindings=train_eval.use_random_network_distillation=False",
                ],
            )

            # Upload the model to CAS and re-trigger to continue training.
            props.model_cas_digest = api.cas.archive("archive model", model_dir)

        api.scheduler.emit_trigger(
            api.scheduler.BuildbucketTrigger(
                properties=jsonpb.MessageToDict(
                    props, preserving_proto_field_name=True
                ),
            ),
            project=api.buildbucket.build.builder.project,
            jobs=[
                "prod-clang-ml-training"
                if api.buildbucket.builder_id.bucket == "prod"
                else "toolchain.ci-clang-ml-training"
            ],
        )
        return

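    # Training is complete: publish the final policy and AOT-compile it for
    # every supported host platform.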
    if api.buildbucket.builder_id.bucket == "prod":
        saved_policy_dir = model_dir.join("saved_policy")
        api.cipd_util.upload_package(
            "fuchsia/model/inlining",
            saved_policy_dir,
            search_tag={"git_revision": props.fuchsia_git_revision},
        )

    venv_dir = api.path["start_dir"].join("tensorflow-venv")
    tensorflow_aot_path = api.step(
        "get tensorflow",
        cmd=[
            "python3",
            "-u",
            api.resource("get_tensorflow.py"),
            "-vpython-root",
            venv_dir,
        ],
        stdout=api.raw_io.output_text(name="tensorflow-path", add_output_log=True),
        step_test_data=lambda: api.raw_io.test_api.stream_output_text(
            "%s" % venv_dir.join("lib", "python3.8", "site-packages", "tensorflow")
        ),
    ).stdout.strip()
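    # saved_model_cli lives in the venv's bin/ directory, four levels up from
    # the tensorflow package path reported above.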
    tensorflow_aot_compiler = tensorflow_aot_path + "/../../../../bin/saved_model_cli"

    # TODO(phosek): We would ideally read the targets from the Jiri manifest as:
    # for platform in package_data[0]["platforms"]:
    #     if not platform in PLATFORM_TO_TARGET:
    #         continue
    with api.step.nest("generate"):
        for platform in sorted(PLATFORM_TO_TARGET.keys()):
            with api.step.nest(platform):
                inliner_model_dir = api.path["start_dir"].join(
                    "inliner_model", platform
                )
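                # AOT-compile the saved policy into a native object file and
                # header that LLVM can link directly, one per target triple.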
                api.step(
                    "aot compile model",
                    [
                        tensorflow_aot_compiler,
                        "aot_compile_cpu",
                        "--multithreading=false",
                        "--dir=%s" % model_dir.join("saved_policy"),
                        "--tag_set=serve",
                        "--signature_def_key=action",
                        "--output_prefix=%s"
                        % inliner_model_dir.join("InlinerSizeModel"),
                        "--cpp_class=llvm::InlinerSizeModel",
                        "--target_triple=%s" % PLATFORM_TO_TARGET[platform],
                    ],
                )

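                # Bundle the TensorFlow headers and XLA AOT runtime sources
                # needed to compile against the generated model.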
                tensorflow_aot_dir = api.path.abs_to_path(tensorflow_aot_path)
                api.file.copytree(
                    "copy tensorflow headers",
                    tensorflow_aot_dir.join("include"),
                    inliner_model_dir.join("tensorflow", "include"),
                )
                api.file.copytree(
                    "copy xla_aot_runtime_src",
                    tensorflow_aot_dir.join("xla_aot_runtime_src"),
                    inliner_model_dir.join("tensorflow", "xla_aot_runtime_src"),
                )

                api.cas.archive("archive aot model", inliner_model_dir)

                if api.buildbucket.builder_id.bucket == "prod":
                    api.cipd_util.upload_package(
                        "fuchsia/model/inlining/%s" % platform,
                        inliner_model_dir,
                        search_tag={"git_revision": props.fuchsia_git_revision},
                    )

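    # Sanity-check the generated model: rebuild Clang with the AOT-compiled
    # model embedded and use it to build Fuchsia.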
    with api.step.nest("check"):
        # TODO(phosek): This logic is largely the same as during the corpus
        # extraction; we should deduplicate it by extracting it into a subroutine.
        checkout = api.checkout.fuchsia_with_options(
            build_input=build_pb2.Build.Input(
                gitiles_commit=common_pb2.GitilesCommit(
                    id=props.revision,
                    project=props.project,
                ),
            ),
            project=props.project,
            manifest=props.manifest,
            remote=props.remote,
        )

        with api.context(cwd=checkout.root_dir):
            project_data = api.jiri.project(["fuchsia"]).json.output
            assert len(project_data) == 1
            props.fuchsia_git_revision = project_data[0]["revision"]

            package_data = api.jiri.package(
                ["fuchsia/third_party/clang/${platform}"],
                test_data=[
                    {
                        "name": "fuchsia/third_party/clang/${platform}",
                        "manifest": str(
                            checkout.root_dir.join("integration", "prebuilts")
                        ),
                        "path": str(
                            checkout.root_dir.join(
                                "prebuilt", "third_party", "clang", "linux-x64"
                            )
                        ),
                        "platforms": [
                            "linux-amd64",
                            "linux-arm64",
                            "mac-amd64",
                            "windows-amd64",
                        ],
                        "version": "git_revision:f52666985d7011b539f26f54e09a5c89b62dad56",
                    }
                ],
            ).json.output
            assert len(package_data) == 1
            clang_version = package_data[0]["version"].split(":", 1)[1]

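        # Rebuild Clang at the same revision, this time embedding the
        # AOT-compiled inliner model instead of linking the TensorFlow runtime.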
        with api.step.nest("llvm"):
            with api.step.nest("git"), api.context(infra_steps=True):
                llvm_project_dir, _ = api.git.checkout(
                    "https://llvm.googlesource.com/llvm-project", ref=clang_version
                )

            cipd_dir = api.path["start_dir"].join("cipd")
            pkgs = api.cipd.EnsureFile()
            pkgs.add_package("fuchsia/sdk/core/linux-amd64", "latest", "sdk")
            pkgs.add_package(
                "fuchsia/third_party/sysroot/linux",
                "cRfnTYaepVtGDfQL7T9Y-ma4NvFkOY_8Sxs16dzh4_UC",
                "linux",
            )
            pkgs.add_package(
                "fuchsia/third_party/cmake/${platform}",
                "integration",
            )
            pkgs.add_package(
                "fuchsia/third_party/ninja/${platform}",
                "integration",
            )
            api.cipd.ensure(cipd_dir, pkgs)
            sdk_dir = cipd_dir.join("sdk")
            sysroot_dir = cipd_dir.join("linux")

            inliner_model_dir = api.path["start_dir"].join(
                "inliner_model", "linux-amd64"
            )

            # Run CMake to configure Clang build.
            build_dir = api.path["start_dir"].join("llvm_build")
            api.step(
                "configure",
                [
                    cipd_dir.join("bin", "cmake"),
                    "-G",
                    "Ninja",
                    "-DCMAKE_MAKE_PROGRAM=%s" % cipd_dir.join("ninja"),
                    "-DCMAKE_INSTALL_PREFIX=",
                    "-DLLVM_ENABLE_LTO=OFF",
                    "-DCMAKE_TOOLCHAIN_FILE=%s"
                    % checkout.root_dir.join("scripts", "clang", "ToolChain.cmake"),
                    "-DLINUX_x86_64-unknown-linux-gnu_SYSROOT=%s" % sysroot_dir,
                    "-DLINUX_aarch64-unknown-linux-gnu_SYSROOT=%s" % sysroot_dir,
                    "-DFUCHSIA_SDK=%s" % cipd_dir.join("sdk"),
                    "-DTENSORFLOW_AOT_PATH=%s" % inliner_model_dir.join("tensorflow"),
                    "-DLLVM_OVERRIDE_MODEL_HEADER_INLINERSIZEMODEL=%s"
                    % inliner_model_dir.join("InlinerSizeModel.h"),
                    "-DLLVM_OVERRIDE_MODEL_OBJECT_INLINERSIZEMODEL=%s"
                    % inliner_model_dir.join("InlinerSizeModel.o"),
                    "-DLLVM_RAEVICT_MODEL_PATH=none",
                    "-DCMAKE_INSTALL_RPATH_USE_LINK_PATH=ON",
                    "-DCMAKE_SYSROOT=%s" % sysroot_dir,
                    "-C",
                    llvm_project_dir.join(
                        "clang",
                        "cmake",
                        "caches",
                        "Fuchsia-stage2.cmake",
                    ),
                    "-S",
                    llvm_project_dir.join("llvm"),
                    "-B",
                    build_dir,
                ],
            )

            # Build the Clang distribution.
            api.step(
                "build",
                [
                    cipd_dir.join("ninja"),
                    "-C",
                    build_dir,
                    "distribution",
                ],
            )

            # Create the Clang toolchain.
            clang_dir = api.path["start_dir"].join("clang")
            with api.context(env={"DESTDIR": clang_dir}):
                api.step(
                    "install",
                    [
                        cipd_dir.join("ninja"),
                        "-C",
                        build_dir,
                        "install-distribution",
                    ],
                )
            api.step(
                "generate runtimes.json",
                cmd=[
                    "vpython3",
                    "-vpython-spec",
                    api.resource("tensorflow.vpython"),
                    "-u",
                    checkout.root_dir.join("scripts", "clang", "generate_runtimes.py"),
                    "--clang-prefix=%s" % clang_dir,
                    "--sdk-dir=%s" % sdk_dir,
                    "--build-id-dir=%s" % clang_dir.join("lib", ".build-id"),
                ],
                stdout=api.raw_io.output_text(
                    leak_to=clang_dir.join("lib", "runtime.json"), add_output_log=True
                ),
            )

            # Use the just-built Clang toolchain to build Fuchsia.
            api.build.clang_toolchain_dir = clang_dir

            # Upload the Clang binaries to CAS.
            api.cas.archive("archive clang", clang_dir)

        with api.step.nest("fuchsia"):
            corpus_dir = api.path["start_dir"].join("corpus")

            for target_arch, fint_params_path in props.fint_params_paths.items():
                with api.step.nest(target_arch):
                    # Build Fuchsia for each target architecture.
                    build_result = api.build.with_options(
                        checkout=checkout,
                        fint_params_path=fint_params_path,
                        build_dir=checkout.root_dir.join("out", target_arch),
                        clean_check=False,
                    )

                    with api.step.nest("check binary sizes") as presentation:
                        presentation.logs["binary sizes JSON output"] = api.json.dumps(
                            build_result.binary_sizes, indent=2
                        )


def GenTests(api):
    yield (
        api.buildbucket_util.test("bootstrap", bucket="prod")
        + api.properties(
            manifest="fuchsia",
            remote="https://fuchsia.googlesource.com/fuchsia",
            fint_params_paths={
                "arm64": "specs/clang-ml-training-arm64.fint.textproto",
                "x64": "specs/clang-ml-training-x64.fint.textproto",
            },
        )
    )

    yield (
        api.buildbucket_util.test("first", bucket="prod")
        + api.properties(
            manifest="fuchsia",
            remote="https://fuchsia.googlesource.com/fuchsia",
            fint_params_paths={
                "arm64": "specs/clang-ml-training-arm64.fint.textproto",
                "x64": "specs/clang-ml-training-x64.fint.textproto",
            },
            fuchsia_git_revision="a" * 40,
            clang_cas_digest="e753d52dc007dca2c5cc6db29fa8e261bb62003a962c222e74c6a092926fc368/4750",
            corpus_cas_digest="e753d52dc007dca2c5cc6db29fa8e261bb62003a962c222e74c6a092926fc368/4750",
            vocab_cas_digest="e753d52dc007dca2c5cc6db29fa8e261bb62003a962c222e74c6a092926fc368/4750",
            warmstart_cas_digest="e753d52dc007dca2c5cc6db29fa8e261bb62003a962c222e74c6a092926fc368/4750",
            step_iterations=100,
            total_iterations=300,
        )
    )

    yield (
        api.buildbucket_util.test("last", bucket="prod")
        + api.properties(
            manifest="fuchsia",
            remote="https://fuchsia.googlesource.com/fuchsia",
            fint_params_paths={
                "arm64": "fint_params/clang-ml-training-arm64.textproto",
                "x64": "fint_params/clang-ml-training-x64.textproto",
            },
            fuchsia_git_revision="a" * 40,
            clang_cas_digest="e753d52dc007dca2c5cc6db29fa8e261bb62003a962c222e74c6a092926fc368/4750",
            corpus_cas_digest="e753d52dc007dca2c5cc6db29fa8e261bb62003a962c222e74c6a092926fc368/4750",
            vocab_cas_digest="e753d52dc007dca2c5cc6db29fa8e261bb62003a962c222e74c6a092926fc368/4750",
            warmstart_cas_digest="e753d52dc007dca2c5cc6db29fa8e261bb62003a962c222e74c6a092926fc368/4750",
            model_cas_digest="e753d52dc007dca2c5cc6db29fa8e261bb62003a962c222e74c6a092926fc368/4750",
            iterations=200,
            step_iterations=100,
            total_iterations=300,
        )
    )

    yield (
        api.buildbucket_util.test("generate", bucket="prod")
        + api.properties(
            manifest="fuchsia",
            remote="https://fuchsia.googlesource.com/fuchsia",
            fint_params_paths={
                "arm64": "fint_params/clang-ml-training-arm64.textproto",
                "x64": "fint_params/clang-ml-training-x64.textproto",
            },
            fuchsia_git_revision="a" * 40,
            clang_cas_digest="e753d52dc007dca2c5cc6db29fa8e261bb62003a962c222e74c6a092926fc368/4750",
            corpus_cas_digest="e753d52dc007dca2c5cc6db29fa8e261bb62003a962c222e74c6a092926fc368/4750",
            vocab_cas_digest="e753d52dc007dca2c5cc6db29fa8e261bb62003a962c222e74c6a092926fc368/4750",
            warmstart_cas_digest="e753d52dc007dca2c5cc6db29fa8e261bb62003a962c222e74c6a092926fc368/4750",
            model_cas_digest="e753d52dc007dca2c5cc6db29fa8e261bb62003a962c222e74c6a092926fc368/4750",
            iterations=300,
            step_iterations=100,
            total_iterations=300,
        )
    )