Add build for Allwinner H616 quad-core 2GB RAM SoC WiFi (Blueberry)
69
lib/functions/cli/cli-artifact.sh
Normal file
@@ -0,0 +1,69 @@
#!/usr/bin/env bash
#
# SPDX-License-Identifier: GPL-2.0
#
# Copyright (c) 2013-2023 Igor Pecovnik, igor@armbian.com
#
# This file is a part of the Armbian Build Framework
# https://github.com/armbian/build/

function cli_artifact_pre_run() {
	case "${ARMBIAN_COMMAND}" in
		download-artifact)
			display_alert "download-only mode:" "won't build '${WHAT}'" "info"
			declare -g DONT_BUILD_ARTIFACTS="${WHAT}"
			declare -g KEEP_HASHED_DEB_ARTIFACTS="yes"
			;;
	esac

	initialize_artifact "${WHAT}"
	# Run the pre run adapter
	artifact_cli_adapter_pre_run
}

function cli_artifact_run() {
	: "${chosen_artifact:?chosen_artifact is not set}"
	: "${chosen_artifact_impl:?chosen_artifact_impl is not set}"

	if [[ "${CONFIG_DEFS_ONLY}" != "yes" ]]; then
		# Make sure ORAS tooling is installed before starting.
		run_tool_oras
	fi

	display_alert "artifact" "${chosen_artifact}" "debug"
	display_alert "artifact" "${chosen_artifact} :: ${chosen_artifact_impl}()" "debug"
	declare -g artifact_version_requires_aggregation="no" # marker
	artifact_cli_adapter_config_prep # only if in cli.

	# if asked by _config_prep to aggregate, and HOSTRELEASE is not set, obtain it.
	if [[ "${artifact_version_requires_aggregation}" == "yes" ]] && [[ -z "${HOSTRELEASE}" ]]; then
		obtain_hostrelease_only # Sets HOSTRELEASE
	fi

	declare deploy_to_remote="no"

	case "${ARMBIAN_COMMAND}" in
		download-artifact)
			display_alert "Running in download-artifact mode" "download-artifact" "ext"
			;;
		*)
			# Warn of deprecation...
			if [[ "${ARTIFACT_USE_CACHE}" == "yes" ]]; then
				display_alert "deprecated!" "ARTIFACT_USE_CACHE=yes is deprecated, its behaviour is now the default." "warn"
			fi

			# If UPLOAD_TO_OCI_ONLY=yes is explicitly set; deploy to remote.
			if [[ "${UPLOAD_TO_OCI_ONLY}" == "yes" ]]; then
				display_alert "UPLOAD_TO_OCI_ONLY=yes is set" "UPLOAD_TO_OCI_ONLY=yes; ignoring local cache and deploying to remote" "info"
				deploy_to_remote="yes"
			fi
			;;
	esac

	if [[ "${ARTIFACT_BUILD_INTERACTIVE}" == "yes" ]]; then # Set by `kernel-config`, `kernel-patch`, `uboot-config`, `uboot-patch`, etc.
		display_alert "Running artifact build in interactive mode" "log file will be incomplete" "info"
		do_with_default_build obtain_complete_artifact
	else
		do_with_default_build obtain_complete_artifact < /dev/null
	fi
}
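A rough usage sketch for the download-only path above, assuming the usual compile.sh entrypoint and KEY=VALUE parameter parsing registered in commands.sh further down; board/branch values are placeholders, not taken from this diff:

# 'download-artifact' sets DONT_BUILD_ARTIFACTS="${WHAT}" in cli_artifact_pre_run,
# so the kernel artifact is fetched from the remote OCI cache instead of being built:
./compile.sh download-artifact WHAT=kernel BOARD=odroidc4 BRANCH=current

# 'kernel' is a shortcut command that pre-sets WHAT='kernel' (see commands.sh below),
# so it should reach the same cli_artifact_run path in build mode:
./compile.sh kernel BOARD=odroidc4 BRANCH=current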
27
lib/functions/cli/cli-build.sh
Normal file
@@ -0,0 +1,27 @@
#!/usr/bin/env bash
#
# SPDX-License-Identifier: GPL-2.0
#
# Copyright (c) 2013-2023 Igor Pecovnik, igor@armbian.com
#
# This file is a part of the Armbian Build Framework
# https://github.com/armbian/build/

function cli_standard_build_pre_run() {
	declare -g ARMBIAN_COMMAND_REQUIRE_BASIC_DEPS="yes" # Require prepare_host_basic to run before the command.

	# "gimme root on a Linux machine"
	cli_standard_relaunch_docker_or_sudo
}

function cli_standard_build_run() {
	declare -g -r BUILDING_IMAGE=yes # Marker; meaning "we are building an image, not just an artifact"
	declare -g -r NEEDS_BINFMT="yes" # Marker; make sure binfmts are installed during prepare_host_interactive

	# configuration etc - it initializes the extension manager; handles its own logging sections
	prep_conf_main_build_single

	# the full build. It has its own logging sections.
	do_with_default_build full_build_packages_rootfs_and_image

}
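A minimal sketch of a full image build driven by the handler above, assuming the standard Armbian BOARD/BRANCH/RELEASE parameters (values are placeholders):

# 'build' maps to cli_standard_build_pre_run/_run; the pre-run relaunches under sudo
# or Docker as needed, then prep_conf_main_build_single and
# full_build_packages_rootfs_and_image do the actual work:
./compile.sh build BOARD=uefi-arm64 BRANCH=current RELEASE=jammy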
66
lib/functions/cli/cli-configdump.sh
Normal file
@@ -0,0 +1,66 @@
#!/usr/bin/env bash
#
# SPDX-License-Identifier: GPL-2.0
#
# Copyright (c) 2013-2023 Igor Pecovnik, igor@armbian.com
#
# This file is a part of the Armbian Build Framework
# https://github.com/armbian/build/

function cli_config_dump_json_pre_run() {
	declare -g -r CONFIG_DEFS_ONLY='yes' # @TODO: This is actually too late (early optimizations in logging etc), so callers should also set it in the environment when using CLI. sorry.
}

function cli_config_dump_json_run() {
	# configuration etc - it initializes the extension manager
	do_capturing_defs config_board_and_remove_useless < /dev/null # this sets CAPTURED_VARS_NAMES and CAPTURED_VARS_ARRAY; the < /dev/null takes the terminal away from stdin

	if [[ "${ARMBIAN_COMMAND}" == "config-dump-no-json" ]]; then
		# for debugging the bash-declare-to-JSON parser
		echo "${CAPTURED_VARS_ARRAY[@]}"
		return 0
	fi

	# convert to JSON, using python helper; each var is passed via a command line argument; that way we avoid newline/nul-char separation issues
	display_alert "Dumping JSON" "for ${#CAPTURED_VARS_ARRAY[@]} variables" "ext"
	python3 "${SRC}/lib/tools/configdump2json.py" "--args" "${CAPTURED_VARS_ARRAY[@]}" # to stdout

	return 0
}

function config_board_and_remove_useless() {
	skip_host_config=yes use_board=yes skip_kernel=no do_logging=no prep_conf_main_minimal_ni # avoid logging during configdump; it's useless; skip host config
	determine_artifacts_needed_and_its_inputs_for_configdump

	# Remove unwanted variables from the config dump JSON.
	unset FINALDEST
	unset DEB_STORAGE
	unset ROOTPWD
}

function determine_artifacts_needed_and_its_inputs_for_configdump() {
	# Determine which artifacts to build.
	declare -a artifacts_to_build=()
	determine_artifacts_to_build_for_image
	display_alert "Artifacts to build:" "${artifacts_to_build[*]}" "info"

	# For each artifact, get the input variables from each.
	declare -a all_wanted_artifact_names=() all_wanted_artifact_vars=()
	declare one_artifact one_artifact_package
	for one_artifact in "${artifacts_to_build[@]}"; do
		declare -g artifact_input_vars

		WHAT="${one_artifact}" dump_artifact_config

		declare WHAT_UPPERCASE="${one_artifact^^}"
		declare WHAT_UPPERCASE_REPLACED="${WHAT_UPPERCASE//[-.]/_}"

		all_wanted_artifact_names+=("${one_artifact}")
		all_wanted_artifact_vars+=("${WHAT_UPPERCASE_REPLACED}")

		eval "declare -r -g WANT_ARTIFACT_${WHAT_UPPERCASE_REPLACED}_INPUTS_ARRAY=\"${artifact_input_vars}\""
	done

	declare -r -g WANT_ARTIFACT_ALL_NAMES_ARRAY="${all_wanted_artifact_names[*]}"
	declare -r -g WANT_ARTIFACT_ALL_ARRAY="${all_wanted_artifact_vars[*]}"
}
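The actual conversion to JSON is done by lib/tools/configdump2json.py, which is not part of this diff. Purely as an illustration of the same idea (turning NAME=VALUE pairs, one per argument, into a flat JSON object), a jq-based sketch under the assumption that each element of CAPTURED_VARS_ARRAY looks like NAME=VALUE; this is not the framework's helper:

# Illustrative only; requires jq on the host.
declare -a CAPTURED_VARS_ARRAY=("BOARD=uefi-arm64" "BRANCH=current" "LINUXFAMILY=uefi-arm64")
jq -n '[$ARGS.positional[] | capture("^(?<k>[^=]+)=(?<v>.*)$")] | from_entries' \
	--args "${CAPTURED_VARS_ARRAY[@]}"
# -> {"BOARD":"uefi-arm64","BRANCH":"current","LINUXFAMILY":"uefi-arm64"}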
42
lib/functions/cli/cli-distccd.sh
Normal file
@@ -0,0 +1,42 @@
#!/usr/bin/env bash
#
# SPDX-License-Identifier: GPL-2.0
#
# Copyright (c) 2013-2023 Igor Pecovnik, igor@armbian.com
#
# This file is a part of the Armbian Build Framework
# https://github.com/armbian/build/

function cli_distccd_pre_run() {
	: <<- 'HEADER'
		Sets up an inline extension to include distccd in dependencies.
	HEADER
	display_alert "cli_distccd_pre_run" "func cli_distccd_run :: ${ARMBIAN_COMMAND}" "warn"

	# When being relaunched in Docker, I wanna add port-forwardings to the distccd ports.
	declare -g DOCKER_EXTRA_ARGS+=("-p" "3632:3632" "-p" "3633:3633")

	# "gimme root on a Linux machine"
	cli_standard_relaunch_docker_or_sudo
}

function cli_distccd_run() {
	: <<- 'HEADER'
		Runs distccd in the foreground.
	HEADER

	# Initialize the extension manager. distccd has no boards/etc.
	initialize_extension_manager

	# Install hostdeps. Call directly the requirements cli command, they know what they're doing.
	cli_requirements_run

	display_alert "cli_distccd_run" "func cli_distccd_run" "warn"

	# remove all bash traps
	trap - INT TERM EXIT

	# @TODO: --zeroconf (not if under Docker)
	# @TODO: --jobs (CPU count!)
	display_alert "Run it yourself" "distccd --allow-private --verbose --no-detach --daemon --stats --log-level info --log-stderr --listen 0.0.0.0 --zeroconf" "warn"
}
92
lib/functions/cli/cli-docker.sh
Normal file
@@ -0,0 +1,92 @@
#!/usr/bin/env bash
#
# SPDX-License-Identifier: GPL-2.0
#
# Copyright (c) 2013-2023 Igor Pecovnik, igor@armbian.com
#
# This file is a part of the Armbian Build Framework
# https://github.com/armbian/build/

function cli_docker_pre_run() {
	if [[ "${DOCKERFILE_GENERATE_ONLY}" == "yes" ]]; then
		display_alert "Dockerfile generation only" "func cli_docker_pre_run" "debug"
		return 0
	fi

	case "${DOCKER_SUBCMD}" in
		shell)
			# inside-function-function: a dynamic hook, only triggered if this CLI runs.
			function add_host_dependencies__ssh_client_for_docker_shell_over_ssh() {
				declare -g EXTRA_BUILD_DEPS="${EXTRA_BUILD_DEPS} openssh-client"
			}
			declare -g DOCKER_PASS_SSH_AGENT="yes" # Pass SSH agent to docker
			;;
	esac

	# make sure we're not _ALREADY_ running under docker... otherwise eternal loop?
	if [[ "${ARMBIAN_RUNNING_IN_CONTAINER}" == "yes" ]]; then
		exit_with_error "asking for docker... inside docker. how did this happen? Tip: you don't need 'docker' to run armbian-next inside Docker; it's automatically detected and used when appropriate."
	fi
}

function cli_docker_run() {
	# Docker won't have ${SRC}/.git, so precalculate the git-info header so it can be included in the inside-Docker logs.
	# It's gonna be picked up by export_ansi_logs() and included in the final log, if it exists.
	declare -g GIT_INFO_ANSI
	GIT_INFO_ANSI="$(prepare_ansi_git_info_log_header)"

	# Same stuff for BUILD_REPOSITORY_URL and BUILD_REPOSITORY_COMMIT.
	if [[ -d "${SRC}/.git" && "${CONFIG_DEFS_ONLY}" != "yes" ]]; then # don't waste time if only gathering config defs
		set_git_build_repo_url_and_commit_vars "docker launcher"
	fi

	LOG_SECTION="docker_cli_prepare" do_with_logging docker_cli_prepare

	# @TODO: it can very well be said that in CI, we always want FAST_DOCKER=yes, unless we're building the Docker image itself.
	if [[ "${FAST_DOCKER:-"no"}" != "yes" ]]; then # "no, I want *slow* docker" -- no one, ever
		LOG_SECTION="docker_cli_prepare_dockerfile" do_with_logging docker_cli_prepare_dockerfile

		if [[ "${DOCKERFILE_GENERATE_ONLY}" == "yes" ]]; then
			display_alert "Dockerfile generated" "exiting" "info"
			exit 0
		fi

		LOG_SECTION="docker_cli_build_dockerfile" do_with_logging docker_cli_build_dockerfile
	fi

	LOG_SECTION="docker_cli_prepare_launch" do_with_logging docker_cli_prepare_launch

	ARMBIAN_CLI_RELAUNCH_PARAMS+=(["SET_OWNER_TO_UID"]="${EUID}") # fix the owner of files to our UID
	ARMBIAN_CLI_RELAUNCH_PARAMS+=(["ARMBIAN_BUILD_UUID"]="${ARMBIAN_BUILD_UUID}") # pass down our uuid to the docker instance
	ARMBIAN_CLI_RELAUNCH_PARAMS+=(["SKIP_LOG_ARCHIVE"]="yes") # launched docker instance will not cleanup logs.

	case "${DOCKER_SUBCMD}" in
		shell)
			display_alert "Launching Docker shell" "docker-shell" "info"
			docker run -it "${DOCKER_ARGS[@]}" "${DOCKER_ARMBIAN_INITIAL_IMAGE_TAG}" /bin/bash
			;;

		purge)
			display_alert "Purging unused Docker volumes" "docker-purge" "info"
			docker_purge_deprecated_volumes
			;;

		*)
			# this does NOT exit with the same exit code as the docker instance.
			# instead, it sets the docker_exit_code variable.
			declare -i docker_exit_code docker_produced_logs=0
			docker_cli_launch # MARK: this "re-launches"

			# Set globals to avoid:
			# 1) showing the controlling host's log; we only want to show a ref to the Docker logfile, unless it didn't produce one.
			# If it did produce one, its link is already shown above.
			if [[ $docker_produced_logs -gt 0 ]]; then
				declare -g show_message_after_export="skip" # handled by export_ansi_logs()
			fi
			# 2) actually exiting with the same error code as the docker instance, but without triggering an error.
			declare -g -i global_final_exit_code=$docker_exit_code # handled by .... @TODO
			;;

	esac

}
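docker_cli_prepare_launch and docker_cli_launch are defined elsewhere in the framework and are not shown in this diff. Purely as an illustration of how the ARMBIAN_CLI_RELAUNCH_PARAMS dictionary built above could be flattened back into KEY=VALUE arguments for the compile.sh instance launched inside the container (an assumption, not the actual implementation):

# Illustrative sketch only.
declare -a relaunch_args=()
declare param
for param in "${!ARMBIAN_CLI_RELAUNCH_PARAMS[@]}"; do
	relaunch_args+=("${param}=${ARMBIAN_CLI_RELAUNCH_PARAMS[${param}]}")
done
# e.g.: docker run ... "${DOCKER_ARMBIAN_INITIAL_IMAGE_TAG}" ./compile.sh "${ARMBIAN_COMMAND}" "${relaunch_args[@]}"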
45
lib/functions/cli/cli-flash.sh
Normal file
@@ -0,0 +1,45 @@
#!/usr/bin/env bash
#
# SPDX-License-Identifier: GPL-2.0
#
# Copyright (c) 2013-2023 Igor Pecovnik, igor@armbian.com
#
# This file is a part of the Armbian Build Framework
# https://github.com/armbian/build/

function cli_flash_pre_run() {
	display_alert "cli_flash_pre_run" "func cli_flash_run :: ${ARMBIAN_COMMAND}" "warn"

	# "gimme root on a Linux machine"
	cli_standard_relaunch_docker_or_sudo
}

function cli_flash_run() {
	if [[ -n "${BOARD}" ]]; then
		use_board="yes" prep_conf_main_minimal_ni < /dev/null # no stdin for this, so it bombs if it tries to be interactive.
	else
		use_board="no" prep_conf_main_minimal_ni < /dev/null # no stdin for this, so it bombs if it tries to be interactive.
	fi

	# the full build. It has its own logging sections.
	do_with_default_build cli_flash
}

function cli_flash() {
	declare image_file="${IMAGE:-""}"
	# If not set, find the latest .img file in ${SRC}/output/images/
	if [[ -z "${image_file}" ]]; then
		# shellcheck disable=SC2012
		image_file="$(ls -1t "${SRC}/output/images"/*"${BOARD^}_${RELEASE}_${BRANCH}"*.img | head -1)"
		display_alert "cli_flash" "No image file specified. Using latest built image file found: ${image_file}" "info"
	fi
	if [[ ! -f "${image_file}" ]]; then
		exit_with_error "No image file to flash."
	fi
	declare image_file_basename
	image_file_basename="$(basename "${image_file}")"
	display_alert "cli_flash" "Flashing image file: ${image_file_basename}" "info"
	countdown_and_continue_if_not_aborted 3

	write_image_to_device_and_run_hooks "${image_file}"
}
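A rough usage sketch, assuming the usual compile.sh KEY=VALUE parsing; values and the image filename are placeholders. How the target device is chosen is handled inside write_image_to_device_and_run_hooks, which is not part of this diff:

# With IMAGE unset, cli_flash() picks the newest matching image from output/images/:
./compile.sh flash BOARD=odroidc4 BRANCH=current RELEASE=jammy

# Or point it at a specific image file explicitly:
./compile.sh flash IMAGE=output/images/Armbian_23.02.0_Odroidc4_jammy_current.img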
276
lib/functions/cli/cli-jsoninfo.sh
Normal file
@@ -0,0 +1,276 @@
#!/usr/bin/env bash
#
# SPDX-License-Identifier: GPL-2.0
#
# Copyright (c) 2013-2023 Igor Pecovnik, igor@armbian.com
#
# This file is a part of the Armbian Build Framework
# https://github.com/armbian/build/

function cli_json_info_pre_run() {
	# "gimme root on a Linux machine"
	cli_standard_relaunch_docker_or_sudo
}

function cli_json_info_run() {
	display_alert "Generating JSON info" "for all boards; wait" "info"

	prep_conf_main_minimal_ni

	function json_info_logged() { # logging wrapper
		LOG_SECTION="json_info" do_with_logging json_info_only
	}

	function json_info_only() {
		prepare_python_and_pip # requires HOSTRELEASE

		declare INFO_TOOLS_DIR="${SRC}"/lib/tools/info

		display_alert "Here we go" "generating JSON info :: ${ARMBIAN_COMMAND} " "info"

		# Targets inventory. Will do all-by-all if no targets file is provided.
		declare TARGETS_FILE="${TARGETS_FILE-"${USERPATCHES_PATH}/${TARGETS_FILENAME:-"targets.yaml"}"}"

		declare BASE_INFO_OUTPUT_DIR="${SRC}/output/info" # Output dir for info

		if [[ "${CLEAN_INFO:-"yes"}" != "no" ]]; then
			display_alert "Cleaning info output dir" "${BASE_INFO_OUTPUT_DIR}" "info"
			rm -rf "${BASE_INFO_OUTPUT_DIR}"
		fi

		mkdir -p "${BASE_INFO_OUTPUT_DIR}"

		# `gha-template` does not depend on the rest of the info-gatherer, so we can run it first and return.
		if [[ "${ARMBIAN_COMMAND}" == "gha-template" ]]; then
			# If we have userpatches/gha/chunks, run the workflow template utility
			declare user_gha_dir="${USERPATCHES_PATH}/gha"
			declare wf_template_dir="${user_gha_dir}/chunks"
			declare GHA_CONFIG_YAML_FILE="${user_gha_dir}/gha_config.yaml"
			if [[ ! -d "${wf_template_dir}" ]]; then
				exit_with_error "output-gha-workflow-template :: no ${wf_template_dir} directory found"
			fi
			if [[ ! -f "${GHA_CONFIG_YAML_FILE}" ]]; then
				exit_with_error "output-gha-workflow-template :: no ${GHA_CONFIG_YAML_FILE} file found"
			fi

			display_alert "Generating GHA workflow template" "output-gha-workflow-template :: ${wf_template_dir}" "info"
			declare GHA_WORKFLOW_TEMPLATE_OUT_FILE_default="${BASE_INFO_OUTPUT_DIR}/artifact-image-complete-matrix.yml"
			declare GHA_WORKFLOW_TEMPLATE_OUT_FILE="${GHA_WORKFLOW_TEMPLATE_OUT_FILE:-"${GHA_WORKFLOW_TEMPLATE_OUT_FILE_default}"}"
			run_host_command_logged "${PYTHON3_VARS[@]}" "${PYTHON3_INFO[BIN]}" "${INFO_TOOLS_DIR}"/output-gha-workflow-template.py "${GHA_WORKFLOW_TEMPLATE_OUT_FILE}" "${GHA_CONFIG_YAML_FILE}" "${wf_template_dir}" "${MATRIX_ARTIFACT_CHUNKS:-"17"}" "${MATRIX_IMAGE_CHUNKS:-"16"}"

			display_alert "Done with" "gha-template" "info"
			run_tool_batcat "${GHA_WORKFLOW_TEMPLATE_OUT_FILE}"

			display_alert "Templated workflow file" "${GHA_WORKFLOW_TEMPLATE_OUT_FILE}" "ext"

			return 0 # stop here.
		fi

		# debs-to-repo-download is also isolated from the rest. It does depend on the debs-to-repo-info, but that's prepared beforehand in a standard pipeline run.
		if [[ "${ARMBIAN_COMMAND}" == "debs-to-repo-download" ]]; then
			display_alert "Downloading debs" "debs-to-repo-download" "info"
			declare DEBS_TO_REPO_INFO_FILE="${BASE_INFO_OUTPUT_DIR}/debs-to-repo-info.json"
			if [[ ! -f "${DEBS_TO_REPO_INFO_FILE}" ]]; then
				exit_with_error "debs-to-repo-download :: no ${DEBS_TO_REPO_INFO_FILE} file found; did you restore the pipeline artifacts correctly?"
			fi
			declare DEBS_OUTPUT_DIR="${DEB_STORAGE}" # this is different depending on whether BETA=yes (output/debs-beta) or not (output/debs)
			display_alert "Downloading debs to" "${DEBS_OUTPUT_DIR}" "info"
			export PARALLEL_DOWNLOADS_WORKERS="${PARALLEL_DOWNLOADS_WORKERS}"
			run_host_command_logged mkdir -pv "${DEBS_OUTPUT_DIR}"
			run_host_command_logged "${PYTHON3_VARS[@]}" "${PYTHON3_INFO[BIN]}" "${INFO_TOOLS_DIR}"/download-debs.py "${DEBS_TO_REPO_INFO_FILE}" "${DEBS_OUTPUT_DIR}"

			display_alert "Done with" "debs-to-repo-download" "ext"

			return 0 # stop here.
		fi

		# debs-to-repo-reprepro is also isolated from the rest. It does depend on the debs-to-repo-info, but that's prepared beforehand in a standard pipeline run.
		if [[ "${ARMBIAN_COMMAND}" == "debs-to-repo-reprepro" ]]; then
			display_alert "Generating reprepro publishing script" "debs-to-repo-reprepro" "info"
			declare DEBS_TO_REPO_INFO_FILE="${BASE_INFO_OUTPUT_DIR}/debs-to-repo-info.json"
			if [[ ! -f "${DEBS_TO_REPO_INFO_FILE}" ]]; then
				exit_with_error "debs-to-repo-reprepro :: no ${DEBS_TO_REPO_INFO_FILE} file found; did you restore the pipeline artifacts correctly?"
			fi
			declare OUTPUT_INFO_REPREPRO_DIR="${BASE_INFO_OUTPUT_DIR}/reprepro"
			declare OUTPUT_INFO_REPREPRO_CONF_DIR="${OUTPUT_INFO_REPREPRO_DIR}/conf"
			run_host_command_logged mkdir -pv "${OUTPUT_INFO_REPREPRO_DIR}" "${OUTPUT_INFO_REPREPRO_CONF_DIR}"

			# Export params so Python can see them
			export REPO_GPG_KEYID="${REPO_GPG_KEYID}"
			run_host_command_logged "${PYTHON3_VARS[@]}" "${PYTHON3_INFO[BIN]}" "${INFO_TOOLS_DIR}"/repo-reprepro.py "${DEBS_TO_REPO_INFO_FILE}" "${OUTPUT_INFO_REPREPRO_DIR}" "${OUTPUT_INFO_REPREPRO_CONF_DIR}"

			display_alert "Done with" "debs-to-repo-reprepro" "ext"

			return 0 # stop here.
		fi

		### --- inventory --- ###

		declare ALL_USERSPACE_INVENTORY_FILE="${BASE_INFO_OUTPUT_DIR}/all_userspace_inventory.json"
		declare ALL_BOARDS_ALL_BRANCHES_INVENTORY_FILE="${BASE_INFO_OUTPUT_DIR}/all_boards_all_branches.json"
		declare TARGETS_OUTPUT_FILE="${BASE_INFO_OUTPUT_DIR}/all-targets.json"
		declare IMAGE_INFO_FILE="${BASE_INFO_OUTPUT_DIR}/image-info.json"
		declare IMAGE_INFO_CSV_FILE="${BASE_INFO_OUTPUT_DIR}/image-info.csv"
		declare REDUCED_ARTIFACTS_FILE="${BASE_INFO_OUTPUT_DIR}/artifacts-reduced.json"
		declare ARTIFACTS_INFO_FILE="${BASE_INFO_OUTPUT_DIR}/artifacts-info.json"
		declare ARTIFACTS_INFO_UPTODATE_FILE="${BASE_INFO_OUTPUT_DIR}/artifacts-info-uptodate.json"
		declare OUTDATED_ARTIFACTS_IMAGES_FILE="${BASE_INFO_OUTPUT_DIR}/outdated-artifacts-images.json"

		# Userspace inventory: RELEASES, and DESKTOPS and their possible ARCH'es, names, and support status.
		if [[ ! -f "${ALL_USERSPACE_INVENTORY_FILE}" ]]; then
			display_alert "Generating userspace inventory" "all_userspace_inventory.json" "info"
			run_host_command_logged "${PYTHON3_VARS[@]}" "${PYTHON3_INFO[BIN]}" "${INFO_TOOLS_DIR}"/userspace-inventory.py ">" "${ALL_USERSPACE_INVENTORY_FILE}"
		fi

		# Board/branch inventory.
		if [[ ! -f "${ALL_BOARDS_ALL_BRANCHES_INVENTORY_FILE}" ]]; then
			display_alert "Generating board/branch inventory" "all_boards_all_branches.json" "info"
			run_host_command_logged "${PYTHON3_VARS[@]}" "${PYTHON3_INFO[BIN]}" "${INFO_TOOLS_DIR}"/board-inventory.py ">" "${ALL_BOARDS_ALL_BRANCHES_INVENTORY_FILE}"
		fi

		if [[ "${ARMBIAN_COMMAND}" == "inventory" ]]; then
			display_alert "Done with" "inventory" "info"
			return 0
		fi

		# if TARGETS_FILE does not exist, one will be provided for you, from a template.
		if [[ ! -f "${TARGETS_FILE}" ]]; then
			declare TARGETS_TEMPLATE="${TARGETS_TEMPLATE:-"targets-default.yaml"}"
			display_alert "No targets file found" "using default targets template ${TARGETS_TEMPLATE}" "warn"
			TARGETS_FILE="${SRC}/config/templates/${TARGETS_TEMPLATE}"
		else
			display_alert "Using targets file" "${TARGETS_FILE}" "warn"
		fi

		if [[ ! -f "${TARGETS_OUTPUT_FILE}" ]]; then
			display_alert "Generating targets inventory" "targets-compositor" "info"
			export TARGETS_BETA="${BETA}" # Read by the Python script, and injected into every target as "BETA=" param.
			export TARGETS_REVISION="${REVISION}" # Read by the Python script, and injected into every target as "REVISION=" param.
			export TARGETS_FILTER_INCLUDE="${TARGETS_FILTER_INCLUDE}" # Read by the Python script; used to "only include" targets that match the given string.
			run_host_command_logged "${PYTHON3_VARS[@]}" "${PYTHON3_INFO[BIN]}" "${INFO_TOOLS_DIR}"/targets-compositor.py "${ALL_BOARDS_ALL_BRANCHES_INVENTORY_FILE}" "${ALL_USERSPACE_INVENTORY_FILE}" "${TARGETS_FILE}" ">" "${TARGETS_OUTPUT_FILE}"
			unset TARGETS_BETA
			unset TARGETS_REVISION
			unset TARGETS_FILTER_INCLUDE
		fi

		if [[ "${ARMBIAN_COMMAND}" == "targets-composed" ]]; then
			display_alert "Done with" "targets-composed" "info"
			return 0
		fi

		### Images.

		# The image info extractor.
		if [[ ! -f "${IMAGE_INFO_FILE}" ]]; then
			display_alert "Generating image info" "info-gatherer-image" "info"
			run_host_command_logged "${PYTHON3_VARS[@]}" "${PYTHON3_INFO[BIN]}" "${INFO_TOOLS_DIR}"/info-gatherer-image.py "${TARGETS_OUTPUT_FILE}" ">" "${IMAGE_INFO_FILE}"
		fi

		# convert image info output to CSV for easy import into Google Sheets etc
		if [[ ! -f "${IMAGE_INFO_CSV_FILE}" ]]; then
			display_alert "Generating CSV info" "info.csv" "info"
			run_host_command_logged "${PYTHON3_VARS[@]}" "${PYTHON3_INFO[BIN]}" "${INFO_TOOLS_DIR}"/json2csv.py "<" "${IMAGE_INFO_FILE}" ">" "${IMAGE_INFO_CSV_FILE}"
		fi

		if [[ "${ARMBIAN_COMMAND}" == "targets-dashboard" ]]; then
			display_alert "To load the OpenSearch dashboards:" "
				pip3 install opensearch-py # install needed lib to talk to OpenSearch
				sysctl -w vm.max_map_count=262144 # raise limit needed by OpenSearch
				docker-compose --file tools/dashboards/docker-compose-opensearch.yaml up -d # start up OS in docker-compose
				python3 lib/tools/index-opensearch.py < output/info/image-info.json # index the JSON into OpenSearch
				# go check out http://localhost:5601
				docker-compose --file tools/dashboards/docker-compose-opensearch.yaml down # shut down OpenSearch when you're done
			" "info"
			display_alert "Done with" "targets-dashboard" "info"
			return 0
		fi

		### Artifacts.

		# Reducer: artifacts.
		if [[ ! -f "${REDUCED_ARTIFACTS_FILE}" ]]; then
			display_alert "Reducing info into artifacts" "artifact-reducer" "info"
			run_host_command_logged "${PYTHON3_VARS[@]}" "${PYTHON3_INFO[BIN]}" "${INFO_TOOLS_DIR}"/artifact-reducer.py "${IMAGE_INFO_FILE}" ">" "${REDUCED_ARTIFACTS_FILE}"
		fi

		# The artifact info extractor.
		if [[ ! -f "${ARTIFACTS_INFO_FILE}" ]]; then
			display_alert "Generating artifact info" "info-gatherer-artifact" "info"
			run_host_command_logged "${PYTHON3_VARS[@]}" "${PYTHON3_INFO[BIN]}" "${INFO_TOOLS_DIR}"/info-gatherer-artifact.py "${REDUCED_ARTIFACTS_FILE}" ">" "${ARTIFACTS_INFO_FILE}"
		fi

		# Now a mapper, check each OCI coordinate to see if it's up-to-date or not. _cache_ (eternally) the positives, but _never_ cache the negatives.
		# This should ideally use the authentication info and other stuff that ORAS.land would.
		# this is controlled by "CHECK_OCI=yes". most people are not interested in what is or isn't in the cache when generating a build plan, and it is slow to do.
		if [[ ! -f "${ARTIFACTS_INFO_UPTODATE_FILE}" ]]; then
			display_alert "Gathering OCI info" "mapper-oci-uptodate :: real lookups (CHECK_OCI): ${CHECK_OCI:-"no"}" "info"
			run_host_command_logged "${PYTHON3_VARS[@]}" "${PYTHON3_INFO[BIN]}" "${INFO_TOOLS_DIR}"/mapper-oci-uptodate.py "${ARTIFACTS_INFO_FILE}" "${CHECK_OCI:-"no"}" ">" "${ARTIFACTS_INFO_UPTODATE_FILE}"
		fi

		# A combinator/reducer: image + artifact; outdated artifacts plus the images that depend on them.
		if [[ ! -f "${OUTDATED_ARTIFACTS_IMAGES_FILE}" ]]; then
			display_alert "Combining image and artifact info" "outdated-artifact-image-reducer" "info"
			run_host_command_logged "${PYTHON3_VARS[@]}" "${PYTHON3_INFO[BIN]}" "${INFO_TOOLS_DIR}"/outdated-artifact-image-reducer.py "${ARTIFACTS_INFO_UPTODATE_FILE}" "${IMAGE_INFO_FILE}" ">" "${OUTDATED_ARTIFACTS_IMAGES_FILE}"
		fi

		if [[ "${ARMBIAN_COMMAND}" == "targets" ]]; then
			display_alert "Done with" "targets" "info"
			return 0
		fi

		### CI/CD Outputs.

		# output stage: deploy debs to repo.
		# Artifacts-to-repo output. Takes all artifacts, and produces info necessary for:
		# 1) getting the artifact from OCI only (not building it)
		# 2) getting the list of .deb's to be published to the repo for that artifact
		display_alert "Generating deb-to-repo JSON output" "output-debs-to-repo-json" "info"
		# This produces debs-to-repo-info.json
		run_host_command_logged "${PYTHON3_VARS[@]}" "${PYTHON3_INFO[BIN]}" "${INFO_TOOLS_DIR}"/output-debs-to-repo-json.py "${BASE_INFO_OUTPUT_DIR}" "${OUTDATED_ARTIFACTS_IMAGES_FILE}"
		if [[ "${ARMBIAN_COMMAND}" == "debs-to-repo-json" ]]; then
			display_alert "Done with" "output-debs-to-repo-json" "ext"
			return 0
		fi

		# Output stage: GHA simplest possible two-matrix workflow.
		# A prepare job running this prepares two matrices:
		# One for artifacts. One for images.
		# If the image or artifact is up-to-date, it is still included in the matrix, but the job is skipped.
		# If any of the matrices is bigger than 255 items, an error is generated.
		if [[ "${ARMBIAN_COMMAND}" == "gha-matrix" ]]; then
			if [[ "${CLEAN_MATRIX:-"yes"}" != "no" ]]; then
				display_alert "Cleaning GHA matrix output" "clean-matrix" "info"
				run_host_command_logged rm -fv "${BASE_INFO_OUTPUT_DIR}"/gha-*-matrix.json
			fi

			display_alert "Generating GHA matrix for artifacts" "output-gha-matrix :: artifacts" "info"
			declare GHA_ALL_ARTIFACTS_JSON_MATRIX_FILE="${BASE_INFO_OUTPUT_DIR}/gha-all-artifacts-matrix.json"
			if [[ ! -f "${GHA_ALL_ARTIFACTS_JSON_MATRIX_FILE}" ]]; then
				run_host_command_logged "${PYTHON3_VARS[@]}" "${PYTHON3_INFO[BIN]}" "${INFO_TOOLS_DIR}"/output-gha-matrix.py artifacts "${OUTDATED_ARTIFACTS_IMAGES_FILE}" "${MATRIX_ARTIFACT_CHUNKS}" ">" "${GHA_ALL_ARTIFACTS_JSON_MATRIX_FILE}"
			fi
			github_actions_add_output "artifact-matrix" "$(cat "${GHA_ALL_ARTIFACTS_JSON_MATRIX_FILE}")"

			display_alert "Generating GHA matrix for images" "output-gha-matrix :: images" "info"
			declare GHA_ALL_IMAGES_JSON_MATRIX_FILE="${BASE_INFO_OUTPUT_DIR}/gha-all-images-matrix.json"
			if [[ ! -f "${GHA_ALL_IMAGES_JSON_MATRIX_FILE}" ]]; then
				# export env vars used by the Python script.
				export SKIP_IMAGES="${SKIP_IMAGES:-"no"}"
				export IMAGES_ONLY_OUTDATED_ARTIFACTS="${IMAGES_ONLY_OUTDATED_ARTIFACTS:-"no"}"
				run_host_command_logged "${PYTHON3_VARS[@]}" "${PYTHON3_INFO[BIN]}" "${INFO_TOOLS_DIR}"/output-gha-matrix.py images "${OUTDATED_ARTIFACTS_IMAGES_FILE}" "${MATRIX_IMAGE_CHUNKS}" ">" "${GHA_ALL_IMAGES_JSON_MATRIX_FILE}"
			fi
			github_actions_add_output "image-matrix" "$(cat "${GHA_ALL_IMAGES_JSON_MATRIX_FILE}")"
		fi

		### a secondary stage, which only makes sense to be run inside GHA, and as such should be split in a different CLI or under a flag.
		if [[ "${ARMBIAN_COMMAND}" == "gha-workflow" ]]; then
			# GHA Workflow output. A delusion. Maybe.
			display_alert "Generating GHA workflow" "output-gha-workflow :: complete" "info"
			declare GHA_WORKFLOW_FILE="${BASE_INFO_OUTPUT_DIR}/gha-workflow.yaml"
			run_host_command_logged "${PYTHON3_VARS[@]}" "${PYTHON3_INFO[BIN]}" "${INFO_TOOLS_DIR}"/output-gha-workflow.py "${OUTDATED_ARTIFACTS_IMAGES_FILE}" "${GHA_WORKFLOW_FILE}"
		fi

	}

	do_with_default_build json_info_logged

}
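The pipeline above is incremental: each stage writes a JSON file under output/info and is skipped when that file already exists, and a single invocation runs every stage up to the gate matching ARMBIAN_COMMAND. A rough usage sketch inferred from those gates (command names are the ones registered in commands.sh below; values are placeholders):

# Runs inventories, targets composition, image/artifact info and the OCI up-to-date
# mapping, then stops at the "targets" gate:
./compile.sh targets CHECK_OCI=yes TARGETS_FILE=userpatches/targets.yaml

# Re-running a later stage without wiping the intermediate files. CLEAN_INFO defaults
# to "yes", which removes output/info/ at the start of the run, so disable it here:
./compile.sh gha-matrix CLEAN_INFO=no MATRIX_ARTIFACT_CHUNKS=17 MATRIX_IMAGE_CHUNKS=16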
39
lib/functions/cli/cli-oras.sh
Normal file
@@ -0,0 +1,39 @@
#!/usr/bin/env bash
#
# SPDX-License-Identifier: GPL-2.0
#
# Copyright (c) 2013-2023 Igor Pecovnik, igor@armbian.com
#
# This file is a part of the Armbian Build Framework
# https://github.com/armbian/build/

function cli_oras_pre_run() {
	: # Empty, no need to do anything.
}

function cli_oras_run() {
	case "${ORAS_OPERATION}" in
		upload)
			display_alert "Uploading using ORAS" "OCI_TARGET: '${OCI_TARGET}' UPLOAD_FILE='${UPLOAD_FILE}'" "info"
			display_alert "OCI_TARGET" "${OCI_TARGET}" "info"
			display_alert "UPLOAD_FILE" "${UPLOAD_FILE}" "info"
			# if OCI_TARGET is not set, exit_with_error
			if [[ -z "${OCI_TARGET}" ]]; then
				exit_with_error "OCI_TARGET is not set"
			fi
			if [[ -z "${UPLOAD_FILE}" ]]; then
				exit_with_error "UPLOAD_FILE is not set"
			fi
			if [[ ! -f "${UPLOAD_FILE}" ]]; then
				exit_with_error "File to upload not found '${UPLOAD_FILE}'"
			fi
			# This will download & install ORAS and run it.
			oras_push_artifact_file "${OCI_TARGET}" "${UPLOAD_FILE}" "uploaded from command line - this is NOT a Docker image"
			;;

		*)
			exit_with_error "Unknown ORAS_OPERATION '${ORAS_OPERATION}'"
			;;
	esac
	return 0
}
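Usage sketch: commands.sh below registers "oras-upload" with ORAS_OPERATION='upload', and the code above requires OCI_TARGET and UPLOAD_FILE to be set. The registry reference and file path here are placeholders:

./compile.sh oras-upload OCI_TARGET=ghcr.io/example/some-artifact:latest UPLOAD_FILE=output/debs/some-package.deb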
81
lib/functions/cli/cli-patch.sh
Normal file
@@ -0,0 +1,81 @@
#!/usr/bin/env bash
#
# SPDX-License-Identifier: GPL-2.0
#
# Copyright (c) 2013-2023 Igor Pecovnik, igor@armbian.com
#
# This file is a part of the Armbian Build Framework
# https://github.com/armbian/build/

function cli_patch_kernel_pre_run() {
	declare -g ARMBIAN_COMMAND_REQUIRE_BASIC_DEPS="yes" # Require prepare_host_basic to run before the command.
	declare -g DOCKER_PASS_SSH_AGENT="yes" # Pass SSH agent to docker
	declare -g DOCKER_PASS_GIT="yes" # mount .git dir to docker; for archeology

	# inside-function-function: a dynamic hook, only triggered if this CLI runs.
	# install openssh-client, we'll need it to push the patched tree.
	function add_host_dependencies__ssh_client_for_patch_pushing_over_ssh() {
		declare -g EXTRA_BUILD_DEPS="${EXTRA_BUILD_DEPS} openssh-client"
	}

	# "gimme root on a Linux machine"
	cli_standard_relaunch_docker_or_sudo
}

function cli_patch_kernel_run() {
	display_alert "Patching kernel" "$BRANCH" "info"
	declare -g SYNC_CLOCK=no # don't waste time syncing the clock
	declare -g JUST_KERNEL=yes # only for kernel.
	declare -g PATCHES_TO_GIT=yes # commit to git.
	declare -g PATCH_ONLY=yes # stop after patching.
	declare -g DEBUG_PATCHING=yes # debug patching.
	declare -g GIT_ARCHEOLOGY=yes # do archeology
	declare -g FAST_ARCHEOLOGY=yes # do archeology, but only for the exact path we need.
	#declare -g REWRITE_PATCHES=yes # rewrite the patches after git committing. Very cheap compared to the rest.
	declare -g KERNEL_CONFIGURE=no # no menuconfig
	declare -g RELEASE=jammy # or whatever, not relevant, just fool the configuration
	declare -g SHOW_LOG=yes # show the log
	prep_conf_main_build_single

	declare ymd vendor_lc target_repo_url summary_url
	ymd="$(date +%Y%m%d)"
	# lowercase ${VENDOR} and replace spaces with underscores
	vendor_lc="$(tr '[:upper:]' '[:lower:]' <<< "${VENDOR}" | tr ' ' '_')-next"
	target_branch="${vendor_lc}-${LINUXFAMILY}-${KERNEL_MAJOR_MINOR}-${ymd}${PUSH_BRANCH_POSTFIX:-""}"
	target_repo_url="git@github.com:${PUSH_TO_REPO:-"${PUSH_TO_USER:-"rpardini"}/${PUSH_TO_REPO:-"linux"}"}.git"
	summary_url="https://${PUSH_TO_USER:-"rpardini"}.github.io/${PUSH_TO_REPO:-"linux"}/${target_branch}.html"

	declare -a push_command
	push_command=(git -C "${SRC}/cache/git-bare/kernel" push "--force" "--verbose"
		"${target_repo_url}"
		"kernel-${LINUXFAMILY}-${KERNEL_MAJOR_MINOR}:${target_branch}")

	# Prepare the host and build kernel; without using standard build
	prepare_host # This handles its own logging sections, and is possibly interactive.
	compile_kernel # This handles its own logging sections.

	display_alert "Done patching kernel" "${BRANCH} - ${LINUXFAMILY} - ${KERNEL_MAJOR_MINOR}" "cachehit"

	declare do_push="no"
	if git -C "${SRC}" remote get-url origin &> /dev/null; then
		declare src_origin_url
		src_origin_url="$(git -C "${SRC}" remote get-url origin | xargs echo -n)"

		declare prefix="git@github.com:${PUSH_TO_USER:-"rpardini"}/" # @TODO refactor var
		# if the src_origin_url begins with the prefix
		if [[ "${src_origin_url}" == "${prefix}"* ]]; then
			do_push="yes"
		fi
	fi

	display_alert "Git push command: " "${push_command[*]}" "info"
	if [[ "${do_push}" == "yes" ]]; then
		display_alert "Pushing to ${target_branch}" "${target_repo_url}" "info"
		git_ensure_safe_directory "${SRC}/cache/git-bare/kernel"
		# @TODO: do NOT allow shallow trees here, we need the full history to be able to push
		GIT_SSH_COMMAND="ssh -o GlobalKnownHostsFile=/dev/null -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no" "${push_command[@]}"
		display_alert "Done pushing to ${target_branch}" "${summary_url}" "info"
	fi

	display_alert "Summary URL (after push & gh-pages deploy): " "${summary_url}" "info"
}
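For concreteness, a worked example of the naming scheme above (all values are illustrative placeholders): with VENDOR='Armbian', LINUXFAMILY='sunxi64', KERNEL_MAJOR_MINOR='6.1', defaults for PUSH_TO_USER/PUSH_TO_REPO, and a run date of 2023-02-20, the code yields:

# vendor_lc:     "armbian-next"                      (lowercased VENDOR, spaces to underscores, plus "-next")
# target_branch: "armbian-next-sunxi64-6.1-20230220" (plus PUSH_BRANCH_POSTFIX, if set)
# push:          from cache/git-bare/kernel, local branch "kernel-sunxi64-6.1" to that target_branch
# summary_url:   https://rpardini.github.io/linux/armbian-next-sunxi64-6.1-20230220.html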
50
lib/functions/cli/cli-requirements.sh
Normal file
@@ -0,0 +1,50 @@
#!/usr/bin/env bash
#
# SPDX-License-Identifier: GPL-2.0
#
# Copyright (c) 2013-2023 Igor Pecovnik, igor@armbian.com
#
# This file is a part of the Armbian Build Framework
# https://github.com/armbian/build/

function cli_requirements_pre_run() {
	declare -g ARMBIAN_COMMAND_REQUIRE_BASIC_DEPS="yes" # Require prepare_host_basic to run before the command.

	if [[ "$(uname)" != "Linux" ]]; then
		display_alert "Not running on Linux" "refusing to run 'requirements'" "err"
		exit 1
	fi

	if [[ "${EUID}" == "0" ]]; then # we're already root. Either running as real root, or already sudo'ed.
		display_alert "Already running as root" "great" "debug"
	else
		# Fail, installing requirements is not allowed as non-root.
		exit_with_error "This command requires root privileges - refusing to run"
	fi
}

function cli_requirements_run() {
	initialize_extension_manager # initialize the extension manager.
	declare -a -g host_dependencies=()

	obtain_and_check_host_release_and_arch # Sets HOSTRELEASE & validates it for sanity; also HOSTARCH
	host_release="${HOSTRELEASE}" host_arch="${HOSTARCH}" early_prepare_host_dependencies

	LOG_SECTION="install_host_dependencies" do_with_logging install_host_dependencies "for requirements command"
	declare -i -g -r prepare_host_has_already_run=1 # global, readonly. fool the rest of the script into thinking we've already run prepare_host.

	if [[ "${ARMBIAN_INSIDE_DOCKERFILE_BUILD}" == "yes" ]]; then
		# Include python/pip packages in the Dockerfile build.
		deploy_to_non_cache_dir="yes" prepare_python_and_pip

		# During the Dockerfile build, we want to pre-download ORAS/shellcheck/shfmt so it's included in the image.
		# We need to change the deployment directory to something not in ./cache, so it's baked into the image.
		deploy_to_non_cache_dir="yes" run_tool_oras # download-only, to non-cache dir.
		deploy_to_non_cache_dir="yes" run_tool_shellcheck # download-only, to non-cache dir.
		deploy_to_non_cache_dir="yes" run_tool_batcat # download-only, to non-cache dir.

		# @TODO: shfmt
	fi

	display_alert "Done with" "@host dependencies" "cachehit"
}
18
lib/functions/cli/cli-undecided.sh
Normal file
@@ -0,0 +1,18 @@
#!/usr/bin/env bash
#
# SPDX-License-Identifier: GPL-2.0
#
# Copyright (c) 2013-2023 Igor Pecovnik, igor@armbian.com
#
# This file is a part of the Armbian Build Framework
# https://github.com/armbian/build/

function cli_undecided_pre_run() {
	# If undecided, run the 'build' command.
	display_alert "cli_undecided_pre_run" "func cli_undecided_pre_run go to build" "debug"
	ARMBIAN_CHANGE_COMMAND_TO="build"
}

function cli_undecided_run() {
	exit_with_error "Should never run the undecided command. How did this happen?"
}
143
lib/functions/cli/commands.sh
Normal file
@@ -0,0 +1,143 @@
#!/usr/bin/env bash
#
# SPDX-License-Identifier: GPL-2.0
#
# Copyright (c) 2013-2023 Igor Pecovnik, igor@armbian.com
#
# This file is a part of the Armbian Build Framework
# https://github.com/armbian/build/

function armbian_register_commands() {
	# More than one command can map to the same handler. In that case, use ARMBIAN_COMMANDS_TO_VARS_DICT for specific vars.
	declare -g -A ARMBIAN_COMMANDS_TO_HANDLERS_DICT=(
		["docker"]="docker" # thus requires cli_docker_pre_run and cli_docker_run
		["docker-purge"]="docker"
		["dockerpurge"]="docker"
		["docker-shell"]="docker"
		["dockershell"]="docker"
		["generate-dockerfile"]="docker"

		["requirements"]="requirements" # implemented in cli_requirements_pre_run and cli_requirements_run

		# Given a board/config/exts, dump out the (non-userspace) JSON of configuration
		["configdump"]="config_dump_json" # implemented in cli_config_dump_json_pre_run and cli_config_dump_json_run
		["config-dump"]="config_dump_json" # implemented in cli_config_dump_json_pre_run and cli_config_dump_json_run
		["config-dump-json"]="config_dump_json" # implemented in cli_config_dump_json_pre_run and cli_config_dump_json_run
		["config-dump-no-json"]="config_dump_json" # implemented in cli_config_dump_json_pre_run and cli_config_dump_json_run

		["inventory"]="json_info" # implemented in cli_json_info_pre_run and cli_json_info_run
		["targets"]="json_info" # implemented in cli_json_info_pre_run and cli_json_info_run
		["targets-dashboard"]="json_info" # implemented in cli_json_info_pre_run and cli_json_info_run
		["targets-composed"]="json_info" # implemented in cli_json_info_pre_run and cli_json_info_run
		["debs-to-repo-json"]="json_info" # implemented in cli_json_info_pre_run and cli_json_info_run
		["gha-matrix"]="json_info" # implemented in cli_json_info_pre_run and cli_json_info_run
		["gha-workflow"]="json_info" # implemented in cli_json_info_pre_run and cli_json_info_run
		["gha-template"]="json_info" # implemented in cli_json_info_pre_run and cli_json_info_run

		# These probably should be in their own separate CLI commands file, but for now they're together in jsoninfo.
		["debs-to-repo-download"]="json_info" # implemented in cli_json_info_pre_run and cli_json_info_run
		["debs-to-repo-reprepro"]="json_info" # implemented in cli_json_info_pre_run and cli_json_info_run

		["kernel-patches-to-git"]="patch_kernel" # implemented in cli_patch_kernel_pre_run and cli_patch_kernel_run

		["build"]="standard_build" # implemented in cli_standard_build_pre_run and cli_standard_build_run
		["distccd"]="distccd" # implemented in cli_distccd_pre_run and cli_distccd_run
		["flash"]="flash" # implemented in cli_flash_pre_run and cli_flash_run

		# external tooling, made easy.
		["oras-upload"]="oras" # implemented in cli_oras_pre_run and cli_oras_run; up/down/info are the same, see vars below

		# all-around artifact wrapper
		["artifact"]="artifact" # implemented in cli_artifact_pre_run and cli_artifact_run
		["artifact-config-dump-json"]="artifact" # implemented in cli_artifact_pre_run and cli_artifact_run
		["download-artifact"]="artifact" # implemented in cli_artifact_pre_run and cli_artifact_run

		# shortcuts, see vars set below. They use the legacy single build, and try to control it via variables
		["rootfs"]="artifact"

		["kernel"]="artifact"
		["kernel-patch"]="artifact"
		["kernel-config"]="artifact"

		["uboot"]="artifact"
		["uboot-patch"]="artifact"
		["atf-patch"]="artifact"
		["crust-patch"]="artifact"
		["uboot-config"]="artifact"

		["firmware"]="artifact"
		["firmware-full"]="artifact"
		["armbian-config"]="artifact"
		["armbian-zsh"]="artifact"
		["armbian-plymouth-theme"]="artifact"
		["fake-ubuntu-advantage-tools"]="artifact"

		["armbian-base-files"]="artifact"
		["armbian-bsp-cli"]="artifact"
		["armbian-bsp-desktop"]="artifact"
		["armbian-desktop"]="artifact"

		["undecided"]="undecided" # implemented in cli_undecided_pre_run and cli_undecided_run - relaunches either build or docker
	)

	# common for all CLI-based artifact shortcuts
	declare common_cli_artifact_vars=""

	# common for interactive artifact shortcuts (configure, patch, etc)
	declare common_cli_artifact_interactive_vars="ARTIFACT_WILL_NOT_BUILD='yes' ARTIFACT_BUILD_INTERACTIVE='yes' ARTIFACT_IGNORE_CACHE='yes'"

	# Vars to be set for each command. Optional.
	declare -g -A ARMBIAN_COMMANDS_TO_VARS_DICT=(
		["docker-purge"]="DOCKER_SUBCMD='purge'"
		["dockerpurge"]="DOCKER_SUBCMD='purge'"
		["docker-shell"]="DOCKER_SUBCMD='shell'"
		["dockershell"]="DOCKER_SUBCMD='shell'"

		["generate-dockerfile"]="DOCKERFILE_GENERATE_ONLY='yes'"

		["artifact-config-dump-json"]='CONFIG_DEFS_ONLY="yes"'

		# repo pipeline stuff is usually run on saved/restored artifacts for output/info, so don't clean them by default
		["debs-to-repo-download"]="CLEAN_MATRIX='no' CLEAN_INFO='no'"
		["debs-to-repo-reprepro"]="CLEAN_MATRIX='no' CLEAN_INFO='no'"

		# artifact shortcuts
		["rootfs"]="WHAT='rootfs' ${common_cli_artifact_vars}"

		["kernel"]="WHAT='kernel' ${common_cli_artifact_vars}"
		["kernel-config"]="WHAT='kernel' KERNEL_CONFIGURE='yes' ${common_cli_artifact_interactive_vars} ${common_cli_artifact_vars}"
		["kernel-patch"]="WHAT='kernel' CREATE_PATCHES='yes' ${common_cli_artifact_interactive_vars} ${common_cli_artifact_vars}"

		["uboot"]="WHAT='uboot' ${common_cli_artifact_vars}"
		["uboot-config"]="WHAT='uboot' UBOOT_CONFIGURE='yes' ${common_cli_artifact_interactive_vars} ${common_cli_artifact_vars}"
		["uboot-patch"]="WHAT='uboot' CREATE_PATCHES='yes' ${common_cli_artifact_interactive_vars} ${common_cli_artifact_vars}"
		["atf-patch"]="WHAT='uboot' CREATE_PATCHES_ATF='yes' ${common_cli_artifact_interactive_vars} ${common_cli_artifact_vars}"
		["crust-patch"]="WHAT='uboot' CREATE_PATCHES_CRUST='yes' ${common_cli_artifact_interactive_vars} ${common_cli_artifact_vars}"

		["firmware"]="WHAT='firmware' ${common_cli_artifact_vars}"
		["firmware-full"]="WHAT='full_firmware' ${common_cli_artifact_vars}"
		["armbian-config"]="WHAT='armbian-config' ${common_cli_artifact_vars}"
		["armbian-zsh"]="WHAT='armbian-zsh' ${common_cli_artifact_vars}"
		["armbian-plymouth-theme"]="WHAT='armbian-plymouth-theme' ${common_cli_artifact_vars}"
		["fake-ubuntu-advantage-tools"]="WHAT='fake_ubuntu_advantage_tools' ${common_cli_artifact_vars}"

		["armbian-base-files"]="WHAT='armbian-base-files' ${common_cli_artifact_vars}"
		["armbian-bsp-cli"]="WHAT='armbian-bsp-cli' ${common_cli_artifact_vars}"
		["armbian-bsp-desktop"]="WHAT='armbian-bsp-desktop' BUILD_DESKTOP='yes' ${common_cli_artifact_vars}"
		["armbian-desktop"]="WHAT='armbian-desktop' BUILD_DESKTOP='yes' ${common_cli_artifact_vars}"

		["oras-upload"]="ORAS_OPERATION='upload'"

		["undecided"]="UNDECIDED='yes'"
	)
	# Override the LOG_CLI_ID to change the log file name.
	# Will be set to ARMBIAN_COMMAND if not set after all pre-runs done.
	declare -g ARMBIAN_LOG_CLI_ID

	# Keep a running dict of params/variables. Can't repeat stuff here. Dict.
	declare -g -A ARMBIAN_CLI_RELAUNCH_PARAMS=(["ARMBIAN_RELAUNCHED"]="yes")
	declare -g -A ARMBIAN_CLI_RELAUNCH_ENVS=(["ARMBIAN_RELAUNCHED"]="yes")

	# Keep a running array of config files needed for relaunch.
	declare -g -a ARMBIAN_CLI_RELAUNCH_CONFIGS=()
}
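The dispatch glue itself (armbian_prepare_cli_command_to_run, armbian_cli_pre_run_command, armbian_cli_run_command, used by entrypoint.sh below) is defined elsewhere in the framework and is not part of this diff. Based on the comments above ("thus requires cli_docker_pre_run and cli_docker_run"), the naming convention appears to be roughly the following sketch, which is illustrative only and not the framework's actual dispatcher:

function sketch_run_command() {
	declare command="${1}"
	declare handler="${ARMBIAN_COMMANDS_TO_HANDLERS_DICT[${command}]:-}"
	declare vars="${ARMBIAN_COMMANDS_TO_VARS_DICT[${command}]:-}"
	if [[ -z "${handler}" ]]; then
		echo "Unknown command '${command}'" >&2
		return 1
	fi
	if [[ -n "${vars}" ]]; then
		eval "declare -g ${vars}" # e.g. WHAT='kernel' KERNEL_CONFIGURE='yes' for 'kernel-config'
	fi
	"cli_${handler}_pre_run" # e.g. cli_artifact_pre_run
	"cli_${handler}_run"     # e.g. cli_artifact_run
}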
182
lib/functions/cli/entrypoint.sh
Normal file
@@ -0,0 +1,182 @@
|
||||
#!/usr/bin/env bash
|
||||
#
|
||||
# SPDX-License-Identifier: GPL-2.0
|
||||
#
|
||||
# Copyright (c) 2013-2023 Igor Pecovnik, igor@armbian.com
|
||||
#
|
||||
# This file is a part of the Armbian Build Framework
|
||||
# https://github.com/armbian/build/
|
||||
|
||||
function cli_entrypoint() {
|
||||
# array, readonly, global, for future reference, "exported" to shutup shellcheck
|
||||
declare -rg -x -a ARMBIAN_ORIGINAL_ARGV=("${@}")
|
||||
|
||||
if [[ "${ARMBIAN_ENABLE_CALL_TRACING}" == "yes" ]]; then
|
||||
set -T # inherit return/debug traps
|
||||
mkdir -p "${SRC}"/output/call-traces
|
||||
echo -n "" > "${SRC}"/output/call-traces/calls.txt
|
||||
# See https://www.gnu.org/software/bash/manual/html_node/Bash-Variables.html
|
||||
trap 'echo "${FUNCNAME[*]}|${BASH_LINENO[*]}|${BASH_SOURCE[*]}|${LINENO}" >> ${SRC}/output/call-traces/calls.txt ;' RETURN
|
||||
fi
|
||||
|
||||
# @TODO: allow for a super-early userpatches/config-000.custom.conf.sh to be loaded, before anything else.
|
||||
# This would allow for custom commands and interceptors.
|
||||
|
||||
# Decide what we're gonna do. We've a few hardcoded, 1st-argument "commands".
|
||||
declare -g -A ARMBIAN_COMMANDS_TO_HANDLERS_DICT ARMBIAN_COMMANDS_TO_VARS_DICT
|
||||
armbian_register_commands # this defines the above two dictionaries
|
||||
|
||||
# Process the command line, separating params (XX=YY) from non-params arguments.
|
||||
# That way they can be set in any order.
|
||||
declare -A -g ARMBIAN_PARSED_CMDLINE_PARAMS=() # A dict of PARAM=VALUE
|
||||
declare -a -g ARMBIAN_NON_PARAM_ARGS=() # An array of all non-param arguments
|
||||
parse_cmdline_params "${@}" # which fills the above vars.
|
||||
|
||||
# Now load the key=value pairs from cmdline into environment, before loading config or executing commands.
|
||||
# This will be done _again_ later, to make sure cmdline params override config et al.
|
||||
apply_cmdline_params_to_env "early" # which uses ARMBIAN_PARSED_CMDLINE_PARAMS
|
||||
# From here on, no more ${1} or stuff. We've parsed it all into ARMBIAN_PARSED_CMDLINE_PARAMS or ARMBIAN_NON_PARAM_ARGS and ARMBIAN_COMMAND.
|
||||
|
||||
# Re-initialize logging, to take into account the new environment after parsing cmdline params.
|
||||
logging_init
|
||||
|
||||
declare -a -g ARMBIAN_CONFIG_FILES=() # fully validated, complete paths to config files.
|
||||
declare -g ARMBIAN_COMMAND_HANDLER="" ARMBIAN_COMMAND="" ARMBIAN_COMMAND_VARS="" # only valid command and handler will ever be set here.
|
||||
declare -g ARMBIAN_HAS_UNKNOWN_ARG="no" # if any unknown params, bomb.
|
||||
for argument in "${ARMBIAN_NON_PARAM_ARGS[@]}"; do # loop over all non-param arguments, find commands and configs.
|
||||
parse_each_cmdline_arg_as_command_param_or_config "${argument}" # sets all the vars above
|
||||
done
|
||||
|
||||
# More sanity checks.
|
||||
# If unknowns, bail.
|
||||
if [[ "${ARMBIAN_HAS_UNKNOWN_ARG}" == "yes" ]]; then
|
||||
exit_with_error "Unknown arguments found. Please check the output above and fix them."
|
||||
fi
|
||||
|
||||
# @TODO: Have a config that is always included? "${SRC}/userpatches/config-default.conf" ?
|
||||
|
||||
# If we don't have a command decided yet, use the undecided command.
|
||||
if [[ "${ARMBIAN_COMMAND}" == "" ]]; then
|
||||
display_alert "No command found, using default" "undecided" "debug"
|
||||
ARMBIAN_COMMAND="undecided"
|
||||
fi
|
||||
|
||||
# If we don't have a command at this stage, we should default either to 'build' or 'docker', depending on OS.
|
||||
# Give the chosen command a chance to refuse running, or, even, change the final command to run.
|
||||
# This allows for example the 'build' command to auto-launch under docker, even without specifying it.
|
||||
# Also allows for launchers to keep themselves when re-launched, yet do something diferent. (eg: docker under docker does build).
|
||||
# Or: build under Darwin does docker...
|
||||
# each _pre_run can change the command and vars to run too, so do it in a loop until it stops changing.
|
||||
declare -g ARMBIAN_CHANGE_COMMAND_TO="${ARMBIAN_COMMAND}"
|
||||
while [[ "${ARMBIAN_CHANGE_COMMAND_TO}" != "" ]]; do
|
||||
display_alert "Still a command to pre-run, this time:" "${ARMBIAN_CHANGE_COMMAND_TO}" "debug"
|
||||
|
||||
declare -g ARMBIAN_COMMAND_REQUIRE_BASIC_DEPS="no" # reset this before every pre_run, so only the last one wins.
|
||||
ARMBIAN_COMMAND="${ARMBIAN_CHANGE_COMMAND_TO}"
|
||||
armbian_prepare_cli_command_to_run "${ARMBIAN_COMMAND}"
|
||||
|
||||
ARMBIAN_CHANGE_COMMAND_TO=""
|
||||
armbian_cli_pre_run_command
|
||||
done
|
||||
|
||||
# IMPORTANT!!!: it is INVALID to relaunch compile.sh from here. It will cause logging mistakes.
|
||||
# So the last possible moment to relaunch is in xxxxx_pre_run!
|
||||
# Also form here, UUID will be generated, output created, logging enabled, etc.
|
||||
|
||||
# Init basic dirs.
|
||||
declare -g -r DEST="${SRC}/output" USERPATCHES_PATH="${SRC}"/userpatches # DEST is the main output dir, and USERPATCHES_PATH is the userpatches dir. read-only.
|
||||
mkdir -p "${DEST}" "${USERPATCHES_PATH}" # Create output and userpatches directory if not already there
|
||||
display_alert "Output directory created! DEST:" "${DEST}" "debug"
|
||||
|
||||
# set unique mounting directory for this execution.
|
||||
# basic deps, which include "uuidgen", will be installed _after_ this, so we gotta tolerate it not being there yet.
|
||||
declare -g ARMBIAN_BUILD_UUID
|
||||
if [[ "${ARMBIAN_BUILD_UUID}" != "" ]]; then
|
||||
display_alert "Using passed-in ARMBIAN_BUILD_UUID" "${ARMBIAN_BUILD_UUID}" "debug"
|
||||
else
|
||||
if command -v uuidgen 1> /dev/null; then
|
||||
ARMBIAN_BUILD_UUID="$(uuidgen)"
|
||||
else
|
||||
display_alert "uuidgen not found" "uuidgen not installed yet" "info"
|
||||
ARMBIAN_BUILD_UUID="no-uuidgen-yet-${RANDOM}-$((1 + $RANDOM % 10))$((1 + $RANDOM % 10))$((1 + $RANDOM % 10))$((1 + $RANDOM % 10))"
|
||||
fi
|
||||
display_alert "Generated ARMBIAN_BUILD_UUID" "${ARMBIAN_BUILD_UUID}" "debug"
|
||||
fi
|
||||
declare -g -r ARMBIAN_BUILD_UUID="${ARMBIAN_BUILD_UUID}" # Make read-only
|
||||
display_alert "Build UUID:" "${ARMBIAN_BUILD_UUID}" "debug"
|
||||
|
||||
# Super-global variables, used everywhere. The directories are NOT _created_ here, since this very early stage. They are all readonly, for sanity.
|
||||
declare -g -r WORKDIR_BASE_TMP="${SRC}/.tmp" # a.k.a. ".tmp" dir. it is a shared base dir for all builds, but each build gets its own WORKDIR/TMPDIR.
|
||||
|
||||
declare -g -r WORKDIR="${WORKDIR_BASE_TMP}/work-${ARMBIAN_BUILD_UUID}" # WORKDIR at this stage. It will become TMPDIR later. It has special significance to `mktemp` and others!
|
||||
declare -g -r LOGDIR="${WORKDIR_BASE_TMP}/logs-${ARMBIAN_BUILD_UUID}" # Will be initialized very soon, literally, below.
|
||||
declare -g -r EXTENSION_MANAGER_TMP_DIR="${WORKDIR_BASE_TMP}/extensions-${ARMBIAN_BUILD_UUID}" # EXTENSION_MANAGER_TMP_DIR used to store extension-composed functions
|
||||
|
||||
# @TODO: These are used only by rootfs/image actual build, move there...
|
||||
declare -g -r SDCARD="${WORKDIR_BASE_TMP}/rootfs-${ARMBIAN_BUILD_UUID}" # SDCARD (which is NOT an sdcard, but will be, maybe, one day) is where we work the rootfs before final imaging. "rootfs" stage.
|
||||
declare -g -r MOUNT="${WORKDIR_BASE_TMP}/mount-${ARMBIAN_BUILD_UUID}" # MOUNT ("mounted on the loop") is the mounted root on final image (via loop). "image" stage
|
||||
declare -g -r DESTIMG="${WORKDIR_BASE_TMP}/image-${ARMBIAN_BUILD_UUID}" # DESTIMG is where the backing image (raw, huge, sparse file) is kept (not the final destination)

# Make sure ARMBIAN_LOG_CLI_ID is set, and unique, and readonly.
# Pre-runs might change it before this, but if not set, default to ARMBIAN_COMMAND.
declare -r -g ARMBIAN_LOG_CLI_ID="${ARMBIAN_LOG_CLI_ID:-${ARMBIAN_COMMAND}}"

# If we're on Linux & root, mount tmpfs on LOGDIR. This has its own cleanup handler.
# It also _creates_ the LOGDIR, and the cleanup handler will delete it.
prepare_tmpfs_for "LOGDIR" "${LOGDIR}"

LOG_SECTION="entrypoint" start_logging_section # This will create LOGDIR if it does not exist. @TODO: also maybe causes a spurious group to be created in the log file
add_cleanup_handler trap_handler_cleanup_logging # cleanup handler for logs; it rolls it up from LOGDIR into DEST/logs
add_cleanup_handler trap_handler_reset_output_owner # make sure output folder is owned by pre-sudo/pre-Docker user if that's the case

# @TODO: So gigantic contention point here about logging the basic deps installation.
if [[ "${ARMBIAN_COMMAND_REQUIRE_BASIC_DEPS}" == "yes" ]]; then
if [[ "${OFFLINE_WORK}" == "yes" ]]; then
display_alert "* " "You are working offline!"
display_alert "* " "Sources, time and host will not be checked"
else
# check and install the basic utilities;
LOG_SECTION="prepare_host_basic" do_with_logging prepare_host_basic # This includes the 'docker' case.
fi
fi

# Legacy. We used to source the extension manager here, but now it's included in the library.
# @TODO: a quick check on the globals in extensions.sh would get rid of this.
extension_manager_declare_globals

# Loop over the ARMBIAN_CONFIG_FILES array and source each. The order is important.
for config_file in "${ARMBIAN_CONFIG_FILES[@]}"; do
local config_filename="${config_file##*/}" config_dir="${config_file%/*}"
display_alert "Sourcing config file" "${config_filename}" "debug"

# use pushd/popd to change directory to the config file's directory, so that relative paths in the config file work.
pushd "${config_dir}" > /dev/null || exit_with_error "Failed to pushd to ${config_dir}"

# shellcheck source=/dev/null
LOG_SECTION="userpatches_config:${config_filename}" do_with_logging source "${config_file}"

# reset completely after sourcing config file
set -e
#set -o pipefail # trace ERR through pipes - will be enabled "soon"
#set -o nounset ## set -u : exit the script if you try to use an uninitialised variable - one day will be enabled
set -o errtrace # trace ERR through - enabled
set -o errexit ## set -e : exit the script if any statement returns a non-true return value - enabled

popd > /dev/null || exit_with_error "Failed to popd from ${config_dir}"

# Apply the params received from the command line _again_ after running the config.
# This ensures that params take precedence over stuff possibly defined in the config.
apply_cmdline_params_to_env "after config '${config_filename}'" # which uses ARMBIAN_PARSED_CMDLINE_PARAMS
done
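
# Illustrative example (assumption, not part of the framework): a hypothetical
# userpatches/config-myboard.conf.sh sourced by the loop above might contain:
#   BOARD="bananapim5"
#   BRANCH="edge"
#   RELEASE="bookworm"
# Anything it sets can still be overridden from the command line, since
# apply_cmdline_params_to_env is re-applied right after each config file is sourced.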

# Early check for deprecations
error_if_lib_tag_set # make sure users are not thrown off by using old parameter which does nothing anymore; explain

display_alert "Executing final CLI command" "${ARMBIAN_COMMAND}" "debug"
armbian_cli_run_command
display_alert "Done Executing final CLI command" "${ARMBIAN_COMMAND}" "debug"

# Build done, run the cleanup handlers explicitly.
# This zeroes out the list of cleanups, so it's not done again when the main script exits normally and the EXIT (0) trap runs.
run_cleanup_handlers
}
280
lib/functions/cli/utils-cli.sh
Normal file
280
lib/functions/cli/utils-cli.sh
Normal file
@@ -0,0 +1,280 @@
#!/usr/bin/env bash
#
# SPDX-License-Identifier: GPL-2.0
#
# Copyright (c) 2013-2023 Igor Pecovnik, igor@armbian.com
#
# This file is a part of the Armbian Build Framework
# https://github.com/armbian/build/

# This is called like this:
# declare -A -g ARMBIAN_PARSED_CMDLINE_PARAMS=()
# declare -a -g ARMBIAN_NON_PARAM_ARGS=()
# parse_cmdline_params "${@}" # which fills the vars above, being global.
function parse_cmdline_params() {
declare -A -g ARMBIAN_PARSED_CMDLINE_PARAMS=()
declare -a -g ARMBIAN_NON_PARAM_ARGS=()

# loop over the arguments and parse them out
local arg
for arg in "${@}"; do
if [[ "${arg}" == *=* ]]; then # contains an equal sign. it's a param.
local param_name param_value param_value_desc
param_name=${arg%%=*}
param_value=${arg##*=}
param_value_desc="${param_value:-(empty)}"
# Sanity check for the param name; it must be a valid bash variable name.
if [[ "${param_name}" =~ ^[a-zA-Z_][a-zA-Z0-9_]*$ ]]; then
ARMBIAN_PARSED_CMDLINE_PARAMS["${param_name}"]="${param_value}" # For current run.
ARMBIAN_CLI_RELAUNCH_PARAMS["${param_name}"]="${param_value}" # For relaunch.
display_alert "Command line: parsed parameter '$param_name' to" "${param_value_desc}" "debug"
else
exit_with_error "Invalid cmdline param '${param_name}=${param_value_desc}'"
fi
elif [[ "x${arg}x" != "xx" ]]; then # not a param, not empty, store it in the non-param array for later usage
local non_param_value="${arg}"
local non_param_value_desc="${non_param_value:-(empty)}"
display_alert "Command line: storing non-param argument" "${non_param_value_desc}" "debug"
ARMBIAN_NON_PARAM_ARGS+=("${non_param_value}")
fi
done
}
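
# Illustrative example (assumption): an invocation such as
#   ./compile.sh build BOARD=bananapim5 BRANCH=current
# would leave ARMBIAN_PARSED_CMDLINE_PARAMS with BOARD and BRANCH set, and "build" stored in
# ARMBIAN_NON_PARAM_ARGS. Note that param_value=${arg##*=} keeps only the text after the
# last '=' in the argument, so values that themselves contain '=' are truncated.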

# This can be called early on, or later after having sourced the config. Show what is happening.
# This is called:
# apply_cmdline_params_to_env "reason" # reads from global ARMBIAN_PARSED_CMDLINE_PARAMS
function apply_cmdline_params_to_env() {
declare -A -g ARMBIAN_PARSED_CMDLINE_PARAMS # Hopefully this has values
declare __my_reason="${1}"
shift

# Loop over the dictionary and apply the values to the environment.
for param_name in "${!ARMBIAN_PARSED_CMDLINE_PARAMS[@]}"; do
local param_value param_value_desc current_env_value
# get the current value from the environment
current_env_value="${!param_name}"
current_env_value_desc="${!param_name-(unset)}"
current_env_value_desc="${current_env_value_desc:-(empty)}"
# get the new value from the dictionary
param_value="${ARMBIAN_PARSED_CMDLINE_PARAMS[${param_name}]}"
param_value_desc="${param_value:-(empty)}"

# Compare, log, and apply.
if [[ -z "${!param_name+x}" ]] || [[ "${current_env_value}" != "${param_value}" ]]; then
display_alert "Applying cmdline param" "'$param_name': '${current_env_value_desc}' --> '${param_value_desc}' ${__my_reason}" "cmdline"
# use `declare -g` to make it global, we're in a function.
eval "declare -g $param_name=\"$param_value\""
else
# rpardini: strategic amount of spacing in log files shows the kinda neuroticism that drives me.
display_alert "Skip cmdline param" "'$param_name': already set to '${param_value_desc}' ${__my_reason}" "info"
fi
done
}

function armbian_prepare_cli_command_to_run() {
local command_id="${1}"
display_alert "Preparing to run command" "${command_id}" "debug"
ARMBIAN_COMMAND="${command_id}"
ARMBIAN_COMMAND_HANDLER="${ARMBIAN_COMMANDS_TO_HANDLERS_DICT[${command_id}]}"
ARMBIAN_COMMAND_VARS="${ARMBIAN_COMMANDS_TO_VARS_DICT[${command_id}]}"
# @TODO: actually set the vars...

local set_vars_for_command=""
if [[ "x${ARMBIAN_COMMAND_VARS}x" != "xx" ]]; then
# Loop over them, expanding...
for var_piece in ${ARMBIAN_COMMAND_VARS}; do
local var_decl="declare -g ${var_piece};"
display_alert "Command handler: setting variable" "${var_decl}" "debug"
set_vars_for_command+=" ${var_decl}"
done
fi

local pre_run_function_name="cli_${ARMBIAN_COMMAND_HANDLER}_pre_run"
local run_function_name="cli_${ARMBIAN_COMMAND_HANDLER}_run"

# Reset the functions.
function armbian_cli_pre_run_command() {
display_alert "No pre-run function for command" "${ARMBIAN_COMMAND}" "warn"
}
function armbian_cli_run_command() {
display_alert "No run function for command" "${ARMBIAN_COMMAND}" "warn"
}

# Materialize functions to call that specific command.
if [[ $(type -t "${pre_run_function_name}" || true) == function ]]; then
eval "$(
cat <<- EOF
display_alert "Setting up pre-run function for command" "${ARMBIAN_COMMAND}: ${pre_run_function_name}" "debug"
function armbian_cli_pre_run_command() {
# Set the variables defined in ARMBIAN_COMMAND_VARS
${set_vars_for_command}
display_alert "Calling pre-run function for command" "${ARMBIAN_COMMAND}: ${pre_run_function_name}" "debug"
${pre_run_function_name}
}
EOF
)"
fi

if [[ $(type -t "${run_function_name}" || true) == function ]]; then
eval "$(
cat <<- EOF
display_alert "Setting up run function for command" "${ARMBIAN_COMMAND}: ${run_function_name}" "debug"
function armbian_cli_run_command() {
# Set the variables defined in ARMBIAN_COMMAND_VARS
${set_vars_for_command}
display_alert "Calling run function for command" "${ARMBIAN_COMMAND}: ${run_function_name}" "debug"
${run_function_name}
}
EOF
)"
fi
}
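
# Illustrative sketch (assumption; actual dict contents live elsewhere): for a hypothetical
# command "foo" registered as ARMBIAN_COMMANDS_TO_HANDLERS_DICT[foo]="foo" with
# ARMBIAN_COMMANDS_TO_VARS_DICT[foo]="FOO_ONLY='yes'", the eval above would materialize roughly:
#   function armbian_cli_run_command() {
#       declare -g FOO_ONLY='yes';
#       cli_foo_run
#   }
# plus the matching pre-run wrapper, each preceded by debug alerts.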

function parse_each_cmdline_arg_as_command_param_or_config() {
local is_command="no" is_config="no" command_handler conf_path conf_sh_path config_file=""
local argument="${1}"

# lookup if it is a command.
if [[ -n "${ARMBIAN_COMMANDS_TO_HANDLERS_DICT[${argument}]}" ]]; then
is_command="yes"
command_handler="${ARMBIAN_COMMANDS_TO_HANDLERS_DICT[${argument}]}"
display_alert "Found command!" "${argument} is handled by '${command_handler}'" "debug"
fi

# see if we can find config file in userpatches. can be either config-${argument}.conf or config-${argument}.conf.sh
conf_path="${SRC}/userpatches/config-${argument}.conf"
conf_sh_path="${SRC}/userpatches/config-${argument}.conf.sh"

# early safety net: immediately bomb if we find both forms of config. it's too confusing. choose one.
if [[ -f ${conf_path} && -f ${conf_sh_path} ]]; then
exit_with_error "Found both config-${argument}.conf and config-${argument}.conf.sh in userpatches. Please remove one."
exit 1
elif [[ -f ${conf_sh_path} ]]; then
config_file="${conf_sh_path}"
is_config="yes"
elif [[ -f ${conf_path} ]]; then
config_file="${conf_path}"
is_config="yes"
fi

# Sanity check. If we have both a command and a config, bomb.
if [[ "${is_command}" == "yes" && "${is_config}" == "yes" ]]; then
exit_with_error "You cannot have a configuration file named '${config_file}'. '${argument}' is a command name and is reserved for internal Armbian usage. Sorry. Please rename your config file and pass its name as an argument, and I'll use it. PS: You don't need a config file for 'docker' anymore, Docker is all managed by Armbian now."
elif [[ "${is_config}" == "yes" ]]; then # we have a config only
display_alert "Adding config file to list" "${config_file}" "debug"
ARMBIAN_CONFIG_FILES+=("${config_file}") # full path to be sourced
ARMBIAN_CLI_RELAUNCH_CONFIGS+=("${argument}") # name reference to be relaunched
elif [[ "${is_command}" == "yes" ]]; then # we have a command, only.
# sanity check. we can't have more than one command. decide!
if [[ -n "${ARMBIAN_COMMAND}" ]]; then
exit_with_error "You cannot specify more than one command. You have '${ARMBIAN_COMMAND}' and '${argument}'. Please decide which one you want to run and pass only that one."
exit 1
fi
ARMBIAN_COMMAND="${argument}" # too early for armbian_prepare_cli_command_to_run "${argument}"
else
# We've an unknown argument. Alert now, bomb later.
ARMBIAN_HAS_UNKNOWN_ARG="yes"
display_alert "Unknown argument" "${argument}" "err"
fi
}
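
# Illustrative example (assumption): given
#   ./compile.sh myconfig build
# this function treats "build" as a command and, for "myconfig", looks for
# userpatches/config-myconfig.conf or userpatches/config-myconfig.conf.sh;
# either form is accepted, but having both at once is a fatal error.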

# Produce relaunch parameters. Add the running configs, arguments, and command.
# Declare and use ARMBIAN_CLI_FINAL_RELAUNCH_ARGS as "${ARMBIAN_CLI_FINAL_RELAUNCH_ARGS[@]}"
# Also ARMBIAN_CLI_FINAL_RELAUNCH_ENVS as "${ARMBIAN_CLI_FINAL_RELAUNCH_ENVS[@]}"
function produce_relaunch_parameters() {
declare -g -a ARMBIAN_CLI_FINAL_RELAUNCH_ARGS=()
declare -g -a ARMBIAN_CLI_FINAL_RELAUNCH_ENVS=()

declare hide_repeat_params=()

# add the running parameters from ARMBIAN_CLI_RELAUNCH_PARAMS dict
for param in "${!ARMBIAN_CLI_RELAUNCH_PARAMS[@]}"; do
ARMBIAN_CLI_FINAL_RELAUNCH_ARGS+=("${param}=${ARMBIAN_CLI_RELAUNCH_PARAMS[${param}]}")
# If the param is not a key of ARMBIAN_PARSED_CMDLINE_PARAMS (e.g. it was added for re-launching), add it to the hide list
if [[ -z "${ARMBIAN_PARSED_CMDLINE_PARAMS[${param}]}" ]]; then
hide_repeat_params+=("${param}")
fi
done
# add the running configs
for config in "${ARMBIAN_CLI_RELAUNCH_CONFIGS[@]}"; do
ARMBIAN_CLI_FINAL_RELAUNCH_ARGS+=("${config}")
done
# add the command; defaults to the last command, but can be changed by the last pre-run.
if [[ -n "${ARMBIAN_CLI_RELAUNCH_COMMAND}" ]]; then
ARMBIAN_CLI_FINAL_RELAUNCH_ARGS+=("${ARMBIAN_CLI_RELAUNCH_COMMAND}")
else
ARMBIAN_CLI_FINAL_RELAUNCH_ARGS+=("${ARMBIAN_COMMAND}")
fi

# These two envs are always included.
ARMBIAN_CLI_FINAL_RELAUNCH_ENVS+=("ARMBIAN_ORIGINAL_BUILD_UUID=${ARMBIAN_BUILD_UUID}")
ARMBIAN_CLI_FINAL_RELAUNCH_ENVS+=("ARMBIAN_HIDE_REPEAT_PARAMS=${hide_repeat_params[*]}")

# Add all values from ARMBIAN_CLI_RELAUNCH_ENVS dict
for env in "${!ARMBIAN_CLI_RELAUNCH_ENVS[@]}"; do
ARMBIAN_CLI_FINAL_RELAUNCH_ENVS+=("${env}=${ARMBIAN_CLI_RELAUNCH_ENVS[${env}]}")
done

display_alert "Produced relaunch args:" "ARMBIAN_CLI_FINAL_RELAUNCH_ARGS: ${ARMBIAN_CLI_FINAL_RELAUNCH_ARGS[*]}" "debug"
display_alert "Produced relaunch envs:" "ARMBIAN_CLI_FINAL_RELAUNCH_ENVS: ${ARMBIAN_CLI_FINAL_RELAUNCH_ENVS[*]}" "debug"
}
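
# Illustrative example (assumption): for an original invocation like
#   ./compile.sh myconfig build BOARD=bananapim5
# that is being relaunched, ARMBIAN_CLI_FINAL_RELAUNCH_ARGS could end up roughly as
#   ("BOARD=bananapim5" "SET_OWNER_TO_UID=1000" "PREFER_DOCKER=no" "myconfig" "build")
# (the order of dict-derived params is not guaranteed), while ARMBIAN_CLI_FINAL_RELAUNCH_ENVS
# always carries ARMBIAN_ORIGINAL_BUILD_UUID and ARMBIAN_HIDE_REPEAT_PARAMS.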

function cli_standard_relaunch_docker_or_sudo() {
display_alert "Gonna relaunch" "EUID: ${EUID} -- PREFER_DOCKER:${PREFER_DOCKER}" "debug"
if [[ "${EUID}" == "0" ]]; then # we're already root. Either running as real root, or already sudo'ed.
if [[ "${ARMBIAN_RELAUNCHED}" != "yes" && "${ALLOW_ROOT}" != "yes" ]]; then
display_alert "PROBLEM: don't run ./compile.sh as root or with sudo" "PROBLEM: don't run ./compile.sh as root or with sudo" "err"
if [[ -t 0 ]]; then # so... non-interactive builds *can* run as root. It's not supported, you'll get an error, but we'll proceed.
exit_if_countdown_not_aborted 10 "directly called as root"
fi
fi
display_alert "Already running as root" "great, running '${ARMBIAN_COMMAND}' normally" "debug"
else # not root.
# add params when relaunched under docker or sudo
ARMBIAN_CLI_RELAUNCH_PARAMS+=(["SET_OWNER_TO_UID"]="${EUID}") # Pass the current UID to any further relaunchings
ARMBIAN_CLI_RELAUNCH_PARAMS+=(["PREFER_DOCKER"]="no") # make sure we don't loop forever when relaunching.

# We've a few options.
# 1) We could check if Docker is working, and do everything under Docker. Users who can use Docker, can "become" root inside a container.
# 2) We could ask for sudo (which _might_ require a password)...
# @TODO: GitHub actions can do both. Sudo without password _and_ Docker; should we prefer Docker? Might have unintended consequences...

get_docker_info_once # Get Docker info once, and cache it; calling "docker info" is expensive

if [[ "${DOCKER_INFO_OK}" == "yes" ]]; then
if [[ "${PREFER_DOCKER:-yes}" == "yes" ]]; then
display_alert "not root, but Docker is ready to go" "delegating to Docker" "debug"
ARMBIAN_CHANGE_COMMAND_TO="docker"
ARMBIAN_CLI_RELAUNCH_COMMAND="${ARMBIAN_COMMAND}" # add params when relaunched under docker
return 0
else
display_alert "not root, but Docker is ready to go" "but PREFER_DOCKER is set to 'no', so can't use it" "warn"
fi
else
if [[ "${DOCKER_IN_PATH:-no}" == "yes" ]]; then
if [[ "${PREFER_DOCKER:-yes}" == "no" ]]; then
: # congrats, we don't have it, and didn't want it.
else
display_alert "Docker is installed, but not usable" "can't use Docker; check your Docker config / groups / etc" "warn"
exit_if_countdown_not_aborted 10 "Docker installed but not usable"
fi
fi
fi

# check if we're on Linux via uname. if not, refuse to do anything.
if [[ "$(uname)" != "Linux" ]]; then
display_alert "Not running on Linux; Docker is not available" "refusing to run" "err"
exit 1
fi

display_alert "This script requires root privileges; Docker is unavailable" "trying to use sudo" "wrn"
declare -g ARMBIAN_CLI_FINAL_RELAUNCH_ARGS=()
declare -g ARMBIAN_CLI_FINAL_RELAUNCH_ENVS=()
produce_relaunch_parameters # produces ARMBIAN_CLI_FINAL_RELAUNCH_ARGS and ARMBIAN_CLI_FINAL_RELAUNCH_ENVS
# shellcheck disable=SC2093 # re-launching under sudo: replace the current shell, and never return.
exec sudo --preserve-env "${ARMBIAN_CLI_FINAL_RELAUNCH_ENVS[@]}" bash "${SRC}/compile.sh" "${ARMBIAN_CLI_FINAL_RELAUNCH_ARGS[@]}" # MARK: relaunch done here!
display_alert "AFTER SUDO!!!" "AFTER SUDO!!!" "warn" # This should _never_ happen
fi
}
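
# Illustrative usage (assumption): a non-root user who does not want the Docker path can set
# PREFER_DOCKER=no (in the environment or as a command-line param), e.g.
#   PREFER_DOCKER=no ./compile.sh build
# which makes the logic above skip the Docker delegation and fall through to the sudo relaunch.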