Merge "Revert "Build: Fix test data no present in test suite zip files.""
diff --git a/CleanSpec.mk b/CleanSpec.mk
index cbfca3e..284ef39 100644
--- a/CleanSpec.mk
+++ b/CleanSpec.mk
@@ -614,6 +614,10 @@
# Clean up old location of system_other.avbpubkey
$(call add-clean-step, rm -rf $(PRODUCT_OUT)/system/etc/security/avb/)
+# Clean up bufferhub files
+$(call add-clean-step, rm -rf $(PRODUCT_OUT)/system/bin/hw/android.frameworks.bufferhub@1.0-service)
+$(call add-clean-step, rm -rf $(PRODUCT_OUT)/system/etc/init/android.frameworks.bufferhub@1.0-service.rc)
+
$(call add-clean-step, rm -rf $(PRODUCT_OUT)/super.img)
$(call add-clean-step, find $(PRODUCT_OUT) -type f -name "generated_*_image_info.txt" -print0 | xargs -0 rm -f)
@@ -635,9 +639,33 @@
$(call add-clean-step, rm -rf $(PRODUCT_OUT)/system/lib*/libc_malloc*)
+# Clean up old location of soft OMX plugins
+$(call add-clean-step, rm -rf $(PRODUCT_OUT)/system/lib*/libstagefright_soft*)
+
# Move odm build.prop to /odm/etc/.
$(call add-clean-step, rm -rf $(PRODUCT_OUT)/odm/build.prop)
$(call add-clean-step, rm -rf $(PRODUCT_OUT)/vendor/odm/build.prop)
+
+# Move product and system_ext to root for emulators
+$(call add-clean-step, rm -rf $(OUT_DIR)/target/product/generic*/*/product)
+$(call add-clean-step, rm -rf $(OUT_DIR)/target/product/generic*/*/system_ext)
+
+# link_type and jni_link_type files are no longer needed
+$(call add-clean-step, find $(OUT_DIR) -type f -name "*link_type" -print0 | xargs -0 rm -f)
+
+# import_includes and export_includes files are no longer needed
+$(call add-clean-step, find $(OUT_DIR) -type f -name "import_includes" -o -name "export_includes" -print0 | xargs -0 rm -f)
+
+# Recreate product and system_ext partitions for emulator
+$(call add-clean-step, rm -rf $(OUT_DIR)/target/product/generic*/*product*)
+$(call add-clean-step, rm -rf $(OUT_DIR)/target/product/generic*/*system_ext*)
+$(call add-clean-step, rm -rf $(OUT_DIR)/target/product/generic*/*/product)
+$(call add-clean-step, rm -rf $(OUT_DIR)/target/product/generic*/*/system_ext)
+
+# Move GSI-specific files from /system to /system/system_ext
+$(call add-clean-step, rm -rf $(PRODUCT_OUT)/system/etc/init/init.gsi.rc)
+$(call add-clean-step, rm -rf $(PRODUCT_OUT)/system/etc/init/config/)
+
# ************************************************
# NEWER CLEAN STEPS MUST BE AT THE END OF THE LIST
# ************************************************
diff --git a/Usage.txt b/Usage.txt
index 558329b..ea4788a 100644
--- a/Usage.txt
+++ b/Usage.txt
@@ -26,12 +26,6 @@
If no targets are specified, the build system will build the images
for the configured product and variant.
- An alternative to setting $TARGET_PRODUCT and $TARGET_BUILD_VARIANT,
- which you may see in build servers, is to execute:
-
- m PRODUCT-<product>-<variant>
-
-
A target may be a file path. For example, out/host/linux-x86/bin/adb .
Note that when giving a relative file path as a target, that path is
interpreted relative to the root of the source tree (rather than relative
diff --git a/common/core.mk b/common/core.mk
index e5264b0..7d505c0 100644
--- a/common/core.mk
+++ b/common/core.mk
@@ -42,6 +42,9 @@
backslash := \a
backslash := $(patsubst %a,%,$(backslash))
+TOP :=$= .
+TOPDIR :=$=
+
# Prevent accidentally changing these variables
.KATI_READONLY := SHELL empty space comma newline pound backslash
diff --git a/core/Makefile b/core/Makefile
index 838a96c..6c90b51 100644
--- a/core/Makefile
+++ b/core/Makefile
@@ -502,6 +502,7 @@
$(hide) echo ro.vendor.product.cpu.abilist64="$(TARGET_CPU_ABI_LIST_64_BIT)">>$@
$(hide) echo ro.product.board="$(TARGET_BOOTLOADER_BOARD_NAME)">>$@
$(hide) echo ro.board.platform="$(TARGET_BOARD_PLATFORM)">>$@
+ $(hide) echo ro.hwui.use_vulkan="$(TARGET_USES_VULKAN)">>$@
ifdef TARGET_SCREEN_DENSITY
$(hide) echo ro.sf.lcd_density="$(TARGET_SCREEN_DENSITY)">>$@
endif
@@ -592,33 +593,42 @@
$(hide) $(POST_PROCESS_PROPS) $@
# -----------------------------------------------------------------
-# product_services build.prop (unless it's merged into /product)
-ifdef MERGE_PRODUCT_SERVICES_INTO_PRODUCT
- ifneq (,$(PRODUCT_PRODUCT_SERVICES_PROPERTIES))
- $(error PRODUCT_PRODUCT_SERVICES_PROPERTIES is not supported in this build.)
- endif
-else
-INSTALLED_PRODUCT_SERVICES_BUILD_PROP_TARGET := $(TARGET_OUT_PRODUCT_SERVICES)/build.prop
-ALL_DEFAULT_INSTALLED_MODULES += $(INSTALLED_PRODUCT_SERVICES_BUILD_PROP_TARGET)
+# system_ext build.prop
+INSTALLED_SYSTEM_EXT_BUILD_PROP_TARGET := $(TARGET_OUT_SYSTEM_EXT)/build.prop
+ALL_DEFAULT_INSTALLED_MODULES += $(INSTALLED_SYSTEM_EXT_BUILD_PROP_TARGET)
-FINAL_PRODUCT_SERVICES_PROPERTIES += \
- $(call collapse-pairs, $(PRODUCT_PRODUCT_SERVICES_PROPERTIES))
-FINAL_PRODUCT_SERVICES_PROPERTIES := $(call uniq-pairs-by-first-component, \
- $(FINAL_PRODUCT_SERVICES_PROPERTIES),=)
-$(INSTALLED_PRODUCT_SERVICES_BUILD_PROP_TARGET): $(BUILDINFO_COMMON_SH) $(POST_PROCESS_PROPS)
- @echo Target product_services buildinfo: $@
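+# TARGET_SYSTEM_EXT_PROP lets a board name an explicit prop file; otherwise
+# fall back to system_ext.prop in $(TARGET_DEVICE_DIR), if present.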
+ifdef TARGET_SYSTEM_EXT_PROP
+system_ext_prop_files := $(TARGET_SYSTEM_EXT_PROP)
+else
+system_ext_prop_files := $(wildcard $(TARGET_DEVICE_DIR)/system_ext.prop)
+endif
+
+FINAL_SYSTEM_EXT_PROPERTIES += \
+ $(call collapse-pairs, $(PRODUCT_SYSTEM_EXT_PROPERTIES))
+FINAL_SYSTEM_EXT_PROPERTIES := $(call uniq-pairs-by-first-component, \
+ $(FINAL_SYSTEM_EXT_PROPERTIES),=)
+
+$(INSTALLED_SYSTEM_EXT_BUILD_PROP_TARGET): $(BUILDINFO_COMMON_SH) $(POST_PROCESS_PROPS) $(system_ext_prop_files)
+ @echo Target system_ext buildinfo: $@
@mkdir -p $(dir $@)
$(hide) echo > $@
-ifdef BOARD_USES_PRODUCT_SERVICESIMAGE
- $(hide) $(call generate-common-build-props,product_services,$@)
-endif # BOARD_USES_PRODUCT_SERVICESIMAGE
+ $(hide) $(call generate-common-build-props,system_ext,$@)
+ $(hide) $(foreach file,$(system_ext_prop_files), \
+ if [ -f "$(file)" ]; then \
+ echo Target system_ext properties from: "$(file)"; \
+ echo "" >> $@; \
+ echo "#" >> $@; \
+ echo "# from $(file)" >> $@; \
+ echo "#" >> $@; \
+ cat $(file) >> $@; \
+ echo "# end of $(file)" >> $@; \
+ fi;)
$(hide) echo "#" >> $@; \
- echo "# ADDITIONAL PRODUCT_SERVICES PROPERTIES" >> $@; \
+ echo "# ADDITIONAL SYSTEM_EXT BUILD PROPERTIES" >> $@; \
echo "#" >> $@;
- $(hide) $(foreach line,$(FINAL_PRODUCT_SERVICES_PROPERTIES), \
+ $(hide) $(foreach line,$(FINAL_SYSTEM_EXT_PROPERTIES), \
echo "$(line)" >> $@;)
$(hide) $(POST_PROCESS_PROPS) $@
-endif # MERGE_PRODUCT_SERVICES_INTO_PRODUCT
# ----------------------------------------------------------------
@@ -666,6 +676,7 @@
$(eval $(call build-image-kernel-modules-depmod,$(1),$(3),$(4),$(5),$(6))) \
$(4)/$(DEPMOD_STAGING_SUBDIR)/modules.dep:$(2)/lib/modules/modules.dep \
$(4)/$(DEPMOD_STAGING_SUBDIR)/modules.alias:$(2)/lib/modules/modules.alias \
+ $(4)/$(DEPMOD_STAGING_SUBDIR)/modules.softdep:$(2)/lib/modules/modules.softdep \
$(4)/$(DEPMOD_STAGING_SUBDIR)/$(6):$(2)/lib/modules/$(6)
endef
@@ -675,7 +686,7 @@
# $(4): module load list
# $(5): module load list filename
define build-image-kernel-modules-depmod
-$(3)/$(DEPMOD_STAGING_SUBDIR)/modules.dep: .KATI_IMPLICIT_OUTPUTS := $(3)/$(DEPMOD_STAGING_SUBDIR)/modules.alias $(3)/$(DEPMOD_STAGING_SUBDIR)/$(5)
+$(3)/$(DEPMOD_STAGING_SUBDIR)/modules.dep: .KATI_IMPLICIT_OUTPUTS := $(3)/$(DEPMOD_STAGING_SUBDIR)/modules.alias $(3)/$(DEPMOD_STAGING_SUBDIR)/modules.softdep $(3)/$(DEPMOD_STAGING_SUBDIR)/$(5)
$(3)/$(DEPMOD_STAGING_SUBDIR)/modules.dep: $(DEPMOD)
$(3)/$(DEPMOD_STAGING_SUBDIR)/modules.dep: PRIVATE_MODULES := $(1)
$(3)/$(DEPMOD_STAGING_SUBDIR)/modules.dep: PRIVATE_MOUNT_POINT := $(2)
@@ -716,32 +727,40 @@
(for MODULE in $$(PRIVATE_LOAD_MODULES); do basename $$$$MODULE >> $$@; done)
endef
+# Until support for a vendor-boot/vendor-ramdisk is added, store vendor ramdisk
+# kernel modules on the generic ramdisk as a stopgap.
+ifneq ($(BOARD_VENDOR_RAMDISK_KERNEL_MODULES),)
+ BOARD_GENERIC_RAMDISK_KERNEL_MODULES += $(BOARD_VENDOR_RAMDISK_KERNEL_MODULES)
+endif
+ifneq ($(BOARD_VENDOR_RAMDISK_KERNEL_MODULES_LOAD),)
+ BOARD_GENERIC_RAMDISK_KERNEL_MODULES_LOAD += $(BOARD_VENDOR_RAMDISK_KERNEL_MODULES_LOAD)
+endif
ifeq ($(BOARD_RECOVERY_KERNEL_MODULES_LOAD),)
BOARD_RECOVERY_KERNEL_MODULES_LOAD := $(BOARD_RECOVERY_KERNEL_MODULES)
endif
-ifeq ($(BOARD_RAMDISK_KERNEL_MODULES_LOAD),)
- BOARD_RAMDISK_KERNEL_MODULES_LOAD := $(BOARD_RAMDISK_KERNEL_MODULES)
+ifeq ($(BOARD_GENERIC_RAMDISK_KERNEL_MODULES_LOAD),)
+ BOARD_GENERIC_RAMDISK_KERNEL_MODULES_LOAD := $(BOARD_GENERIC_RAMDISK_KERNEL_MODULES)
endif
-ifdef BOARD_RAMDISK_KERNEL_MODULES
+ifdef BOARD_GENERIC_RAMDISK_KERNEL_MODULES
ifeq ($(BOARD_USES_RECOVERY_AS_BOOT), true)
- BOARD_RECOVERY_KERNEL_MODULES += $(BOARD_RAMDISK_KERNEL_MODULES)
+ BOARD_RECOVERY_KERNEL_MODULES += $(BOARD_GENERIC_RAMDISK_KERNEL_MODULES)
endif
endif
ifdef BOARD_RECOVERY_KERNEL_MODULES
ifeq ($(BOARD_USES_RECOVERY_AS_BOOT), true)
- ifdef BOARD_RAMDISK_KERNEL_MODULES_LOAD
- ALL_DEFAULT_INSTALLED_MODULES += $(call copy-many-files,$(call module-load-list-copy-paths,$(call intermediates-dir-for,PACKAGING,ramdisk_modules),$(BOARD_RAMDISK_KERNEL_MODULES_LOAD),modules.load,$(TARGET_RECOVERY_ROOT_OUT)))
+ ifdef BOARD_GENERIC_RAMDISK_KERNEL_MODULES_LOAD
+ ALL_DEFAULT_INSTALLED_MODULES += $(call copy-many-files,$(call module-load-list-copy-paths,$(call intermediates-dir-for,PACKAGING,ramdisk_modules),$(BOARD_GENERIC_RAMDISK_KERNEL_MODULES_LOAD),modules.load,$(TARGET_RECOVERY_ROOT_OUT)))
endif
endif
ALL_DEFAULT_INSTALLED_MODULES += $(call copy-many-files,$(call build-image-kernel-modules,$(BOARD_RECOVERY_KERNEL_MODULES),$(TARGET_RECOVERY_ROOT_OUT),,$(call intermediates-dir-for,PACKAGING,depmod_recovery),$(BOARD_RECOVERY_KERNEL_MODULES_LOAD),modules.load.recovery))
endif
ifneq ($(BOARD_USES_RECOVERY_AS_BOOT), true)
- ifdef BOARD_RAMDISK_KERNEL_MODULES
- ALL_DEFAULT_INSTALLED_MODULES += $(call copy-many-files,$(call build-image-kernel-modules,$(BOARD_RAMDISK_KERNEL_MODULES),$(TARGET_RAMDISK_OUT),,$(call intermediates-dir-for,PACKAGING,depmod_ramdisk),$(BOARD_RAMDISK_KERNEL_MODULES_LOAD),modules.load))
+ ifdef BOARD_GENERIC_RAMDISK_KERNEL_MODULES
+ ALL_DEFAULT_INSTALLED_MODULES += $(call copy-many-files,$(call build-image-kernel-modules,$(BOARD_GENERIC_RAMDISK_KERNEL_MODULES),$(TARGET_RAMDISK_OUT),,$(call intermediates-dir-for,PACKAGING,depmod_ramdisk),$(BOARD_GENERIC_RAMDISK_KERNEL_MODULES_LOAD),modules.load))
endif
endif
@@ -760,30 +779,6 @@
endif
# -----------------------------------------------------------------
-# package stats
-ifdef BUILDING_SYSTEM_IMAGE
-
-PACKAGE_STATS_FILE := $(PRODUCT_OUT)/package-stats.txt
-PACKAGES_TO_STAT := \
- $(sort $(filter $(TARGET_OUT)/% $(TARGET_OUT_DATA)/%, \
- $(filter %.jar %.apk, $(ALL_DEFAULT_INSTALLED_MODULES))))
-$(PACKAGE_STATS_FILE): $(PACKAGES_TO_STAT)
- @echo Package stats: $@
- @mkdir -p $(dir $@)
- $(hide) rm -f $@
-ifeq ($(PACKAGES_TO_STAT),)
-# Create empty package stats file if target builds no jar(s) or apk(s).
- $(hide) touch $@
-else
- $(hide) build/make/tools/dump-package-stats $^ > $@
-endif
-
-.PHONY: package-stats
-package-stats: $(PACKAGE_STATS_FILE)
-
-endif # BUILDING_SYSTEM_IMAGE
-
-# -----------------------------------------------------------------
# Cert-to-package mapping. Used by the post-build signing tools.
# Use a macro to add newline to each echo command
define _apkcerts_write_line
@@ -1170,7 +1165,8 @@
.PHONY: notice_files
# Create the rule to combine the files into text and html/xml forms
-# $(1) - xml_excluded_vendor_product|xml_vendor|xml_product|html
+# $(1) - xml_excluded_system_product_odm|xml_excluded_vendor_product_odm|
+#        xml_product|xml_odm|xml_system_ext|xml_system|html
# $(2) - Plain text output file
# $(3) - HTML/XML output file
# $(4) - File title
@@ -1195,11 +1191,13 @@
$(2) : $(3)
$(3) : $(6) $(BUILD_SYSTEM)/Makefile build/make/tools/generate-notice-files.py
build/make/tools/generate-notice-files.py --text-output $(2) \
- $(if $(filter $(1),xml_excluded_extra_partitions),-e vendor -e product -e product_services --xml-output, \
- $(if $(filter $(1),xml_vendor),-i vendor --xml-output, \
+ $(if $(filter $(1),xml_excluded_vendor_product_odm),-e vendor -e product -e system_ext -e odm --xml-output, \
+ $(if $(filter $(1),xml_excluded_system_product_odm),-e system -e product -e system_ext -e odm --xml-output, \
$(if $(filter $(1),xml_product),-i product --xml-output, \
- $(if $(filter $(1),xml_product_services),-i product_services --xml-output, \
- --html-output)))) $(3) \
+ $(if $(filter $(1),xml_system_ext),-i system_ext --xml-output, \
+ $(if $(filter $(1),xml_system),-i system --xml-output, \
+ $(if $(filter $(1),xml_odm),-i odm --xml-output, \
+ --html-output)))))) $(3) \
-t $$(PRIVATE_MESSAGE) -s $$(PRIVATE_DIR)/src
notice_files: $(2) $(3)
endef
@@ -1247,10 +1245,15 @@
target_product_notice_file_xml_gz := $(TARGET_OUT_INTERMEDIATES)/NOTICE_PRODUCT.xml.gz
installed_product_notice_xml_gz := $(TARGET_OUT_PRODUCT)/etc/NOTICE.xml.gz
-target_product_services_notice_file_txt := $(TARGET_OUT_INTERMEDIATES)/NOTICE_PRODUCT_SERVICES.txt
-target_product_services_notice_file_xml := $(TARGET_OUT_INTERMEDIATES)/NOTICE_PRODUCT_SERVICES.xml
-target_product_services_notice_file_xml_gz := $(TARGET_OUT_INTERMEDIATES)/NOTICE_PRODUCT_SERVICES.xml.gz
-installed_product_services_notice_xml_gz := $(TARGET_OUT_PRODUCT_SERVICES)/etc/NOTICE.xml.gz
+target_system_ext_notice_file_txt := $(TARGET_OUT_INTERMEDIATES)/NOTICE_SYSTEM_EXT.txt
+target_system_ext_notice_file_xml := $(TARGET_OUT_INTERMEDIATES)/NOTICE_SYSTEM_EXT.xml
+target_system_ext_notice_file_xml_gz := $(TARGET_OUT_INTERMEDIATES)/NOTICE_SYSTEM_EXT.xml.gz
+installed_system_ext_notice_xml_gz := $(TARGET_OUT_SYSTEM_EXT)/etc/NOTICE.xml.gz
+
+target_odm_notice_file_txt := $(TARGET_OUT_INTERMEDIATES)/NOTICE_ODM.txt
+target_odm_notice_file_xml := $(TARGET_OUT_INTERMEDIATES)/NOTICE_ODM.xml
+target_odm_notice_file_xml_gz := $(TARGET_OUT_INTERMEDIATES)/NOTICE_ODM.xml.gz
+installed_odm_notice_xml_gz := $(TARGET_OUT_ODM)/etc/NOTICE.xml.gz
# Notice files are copied to TARGET_OUT_NOTICE_FILES as a side-effect of their module
# being built. A notice xml file must depend on all modules that could potentially
@@ -1260,22 +1263,40 @@
license_modules := $(filter-out $(TARGET_OUT_FAKE)/%,$(license_modules))
# testcases are not relevant to the system image.
license_modules := $(filter-out $(TARGET_OUT_TESTCASES)/%,$(license_modules))
+license_modules_system := $(filter $(TARGET_OUT)/%,$(license_modules))
license_modules_vendor := $(filter $(TARGET_OUT_VENDOR)/%,$(license_modules))
license_modules_product := $(filter $(TARGET_OUT_PRODUCT)/%,$(license_modules))
-license_modules_product_services := $(filter $(TARGET_OUT_PRODUCT_SERVICES)/%,$(license_modules))
-license_modules_agg := $(license_modules_vendor) $(license_modules_product) $(license_modules_product_services)
+license_modules_system_ext := $(filter $(TARGET_OUT_SYSTEM_EXT)/%,$(license_modules))
+license_modules_odm := $(filter $(TARGET_OUT_ODM)/%,$(license_modules))
+license_modules_agg := $(license_modules_system) \
+ $(license_modules_vendor) \
+ $(license_modules_product) \
+ $(license_modules_system_ext) \
+ $(license_modules_odm)
license_modules_rest := $(filter-out $(license_modules_agg),$(license_modules))
-$(eval $(call combine-notice-files, xml_excluded_extra_partitions, \
+# If we are building in a configuration that includes a prebuilt vendor.img, we can't
+# update its notice file, so include those notices in the system partition instead
+ifdef BOARD_PREBUILT_VENDORIMAGE
+license_modules_system += $(license_modules_rest)
+system_xml_directories := xml_excluded_vendor_product_odm
+system_notice_file_message := "Notices for files contained in all filesystem images except vendor/system_ext/product/odm in this directory:"
+else
+license_modules_vendor += $(license_modules_rest)
+system_xml_directories := xml_system
+system_notice_file_message := "Notices for files contained in the system filesystem image in this directory:"
+endif
+
+$(eval $(call combine-notice-files, $(system_xml_directories), \
$(target_notice_file_txt), \
$(target_notice_file_xml), \
- "Notices for files contained in the filesystem images in this directory:", \
+ $(system_notice_file_message), \
$(TARGET_OUT_NOTICE_FILES), \
- $(license_modules_rest)))
-$(eval $(call combine-notice-files, xml_vendor, \
+ $(license_modules_system)))
+$(eval $(call combine-notice-files, xml_excluded_system_product_odm, \
$(target_vendor_notice_file_txt), \
$(target_vendor_notice_file_xml), \
- "Notices for files contained in the vendor filesystem image in this directory:", \
+ "Notices for files contained in all filesystem images except system/system_ext/product/odm in this directory:", \
$(TARGET_OUT_NOTICE_FILES), \
$(license_modules_vendor)))
$(eval $(call combine-notice-files, xml_product, \
@@ -1284,12 +1305,18 @@
"Notices for files contained in the product filesystem image in this directory:", \
$(TARGET_OUT_NOTICE_FILES), \
$(license_modules_product)))
-$(eval $(call combine-notice-files, xml_product_services, \
- $(target_product_services_notice_file_txt), \
- $(target_product_services_notice_file_xml), \
- "Notices for files contained in the product_services filesystem image in this directory:", \
+$(eval $(call combine-notice-files, xml_system_ext, \
+ $(target_system_ext_notice_file_txt), \
+ $(target_system_ext_notice_file_xml), \
+ "Notices for files contained in the system_ext filesystem image in this directory:", \
$(TARGET_OUT_NOTICE_FILES), \
- $(license_modules_product_services)))
+ $(license_modules_system_ext)))
+$(eval $(call combine-notice-files, xml_odm, \
+ $(target_odm_notice_file_txt), \
+ $(target_odm_notice_file_xml), \
+ "Notices for files contained in the odm filesystem image in this directory:", \
+ $(TARGET_OUT_NOTICE_FILES), \
+ $(license_modules_odm)))
$(target_notice_file_xml_gz): $(target_notice_file_xml) | $(MINIGZIP)
$(hide) $(MINIGZIP) -9 < $< > $@
@@ -1297,7 +1324,9 @@
$(hide) $(MINIGZIP) -9 < $< > $@
$(target_product_notice_file_xml_gz): $(target_product_notice_file_xml) | $(MINIGZIP)
$(hide) $(MINIGZIP) -9 < $< > $@
-$(target_product_services_notice_file_xml_gz): $(target_product_services_notice_file_xml) | $(MINIGZIP)
+$(target_system_ext_notice_file_xml_gz): $(target_system_ext_notice_file_xml) | $(MINIGZIP)
+ $(hide) $(MINIGZIP) -9 < $< > $@
+$(target_odm_notice_file_xml_gz): $(target_odm_notice_file_xml) | $(MINIGZIP)
$(hide) $(MINIGZIP) -9 < $< > $@
$(installed_notice_html_or_xml_gz): $(target_notice_file_xml_gz)
$(copy-file-to-target)
@@ -1305,26 +1334,19 @@
$(copy-file-to-target)
$(installed_product_notice_xml_gz): $(target_product_notice_file_xml_gz)
$(copy-file-to-target)
-
-# No notice file for product_services if its contents are merged into /product.
-# The notices will be part of the /product notice file.
-ifndef MERGE_PRODUCT_SERVICES_INTO_PRODUCT
-$(installed_product_services_notice_xml_gz): $(target_product_services_notice_file_xml_gz)
+$(installed_system_ext_notice_xml_gz): $(target_system_ext_notice_file_xml_gz)
$(copy-file-to-target)
-endif
+$(installed_odm_notice_xml_gz): $(target_odm_notice_file_xml_gz)
+ $(copy-file-to-target)
-# if we've been run my mm, mmm, etc, don't reinstall this every time
-ifeq ($(ONE_SHOT_MAKEFILE),)
- ALL_DEFAULT_INSTALLED_MODULES += $(installed_notice_html_or_xml_gz)
- ALL_DEFAULT_INSTALLED_MODULES += $(installed_vendor_notice_xml_gz)
- ALL_DEFAULT_INSTALLED_MODULES += $(installed_product_notice_xml_gz)
- ALL_DEFAULT_INSTALLED_MODULES += $(installed_product_services_notice_xml_gz)
-endif
+ALL_DEFAULT_INSTALLED_MODULES += $(installed_notice_html_or_xml_gz)
+ALL_DEFAULT_INSTALLED_MODULES += $(installed_vendor_notice_xml_gz)
+ALL_DEFAULT_INSTALLED_MODULES += $(installed_product_notice_xml_gz)
+ALL_DEFAULT_INSTALLED_MODULES += $(installed_system_ext_notice_xml_gz)
+ALL_DEFAULT_INSTALLED_MODULES += $(installed_odm_notice_xml_gz)
endif # PRODUCT_NOTICE_SPLIT
-ifeq ($(ONE_SHOT_MAKEFILE),)
- ALL_DEFAULT_INSTALLED_MODULES += $(installed_notice_html_or_xml_gz)
-endif
+ALL_DEFAULT_INSTALLED_MODULES += $(installed_notice_html_or_xml_gz)
$(eval $(call combine-notice-files, html, \
$(tools_notice_file_txt), \
@@ -1376,18 +1398,15 @@
endif
INTERNAL_USERIMAGES_DEPS := \
- $(BLK_ALLOC_TO_BASE_FS) \
- $(E2FSCK) \
+ $(BUILD_IMAGE) \
$(MKE2FS_CONF) \
- $(MKEXTUSERIMG) \
- $(SIMG2IMG) \
- $(TUNE2FS)
+ $(MKEXTUSERIMG)
ifeq ($(TARGET_USERIMAGES_USE_F2FS),true)
INTERNAL_USERIMAGES_DEPS += $(MKF2FSUSERIMG)
endif
-ifneq ($(filter $(BOARD_PRODUCTIMAGE_FILE_SYSTEM_TYPE) $(BOARD_PRODUCT_SERVICESIMAGE_FILE_SYSTEM_TYPE) $(BOARD_ODMIMAGE_FILE_SYSTEM_TYPE) $(BOARD_VENDORIMAGE_FILE_SYSTEM_TYPE) $(BOARD_SYSTEMIMAGE_FILE_SYSTEM_TYPE),squashfs),)
+ifneq ($(filter $(BOARD_PRODUCTIMAGE_FILE_SYSTEM_TYPE) $(BOARD_SYSTEM_EXTIMAGE_FILE_SYSTEM_TYPE) $(BOARD_ODMIMAGE_FILE_SYSTEM_TYPE) $(BOARD_VENDORIMAGE_FILE_SYSTEM_TYPE) $(BOARD_SYSTEMIMAGE_FILE_SYSTEM_TYPE),squashfs),)
INTERNAL_USERIMAGES_DEPS += $(MKSQUASHFSUSERIMG)
endif
@@ -1414,13 +1433,10 @@
$(error vboot 1.0 doesn't support logical partition)
endif
-# TODO(b/80195851): Should not define BOARD_AVB_SYSTEM_KEY_PATH without
-# BOARD_AVB_SYSTEM_DETACHED_VBMETA.
-
endif # PRODUCT_USE_DYNAMIC_PARTITIONS
# $(1): the path of the output dictionary file
-# $(2): a subset of "system vendor cache userdata product product_services oem odm"
+# $(2): a subset of "system vendor cache userdata product system_ext oem odm"
# $(3): additional "key=value" pairs to append to the dictionary file.
define generate-image-prop-dictionary
$(if $(filter $(2),system),\
@@ -1477,18 +1493,18 @@
$(if $(BOARD_PRODUCTIMAGE_PARTITION_RESERVED_SIZE),$(hide) echo "product_reserved_size=$(BOARD_PRODUCTIMAGE_PARTITION_RESERVED_SIZE)" >> $(1))
$(hide) echo "product_selinux_fc=$(SELINUX_FC)" >> $(1)
)
-$(if $(filter $(2),product_services),\
- $(if $(BOARD_PRODUCT_SERVICESIMAGE_FILE_SYSTEM_TYPE),$(hide) echo "product_services_fs_type=$(BOARD_PRODUCT_SERVICESIMAGE_FILE_SYSTEM_TYPE)" >> $(1))
- $(if $(BOARD_PRODUCT_SERVICESIMAGE_EXTFS_INODE_COUNT),$(hide) echo "product_services_extfs_inode_count=$(BOARD_PRODUCT_SERVICESIMAGE_EXTFS_INODE_COUNT)" >> $(1))
- $(if $(BOARD_PRODUCT_SERVICESIMAGE_EXTFS_RSV_PCT),$(hide) echo "product_services_extfs_rsv_pct=$(BOARD_PRODUCT_SERVICESIMAGE_EXTFS_RSV_PCT)" >> $(1))
- $(if $(BOARD_PRODUCT_SERVICESIMAGE_PARTITION_SIZE),$(hide) echo "product_services_size=$(BOARD_PRODUCT_SERVICESIMAGE_PARTITION_SIZE)" >> $(1))
- $(if $(BOARD_PRODUCT_SERVICESIMAGE_JOURNAL_SIZE),$(hide) echo "product_services_journal_size=$(BOARD_PRODUCT_SERVICESIMAGE_JOURNAL_SIZE)" >> $(1))
- $(if $(BOARD_PRODUCT_SERVICESIMAGE_SQUASHFS_COMPRESSOR),$(hide) echo "product_services_squashfs_compressor=$(BOARD_PRODUCT_SERVICESIMAGE_SQUASHFS_COMPRESSOR)" >> $(1))
- $(if $(BOARD_PRODUCT_SERVICESIMAGE_SQUASHFS_COMPRESSOR_OPT),$(hide) echo "product_services_squashfs_compressor_opt=$(BOARD_PRODUCT_SERVICESIMAGE_SQUASHFS_COMPRESSOR_OPT)" >> $(1))
- $(if $(BOARD_PRODUCT_SERVICESIMAGE_SQUASHFS_BLOCK_SIZE),$(hide) echo "product_services_squashfs_block_size=$(BOARD_PRODUCT_SERVICESIMAGE_SQUASHFS_BLOCK_SIZE)" >> $(1))
- $(if $(BOARD_PRODUCT_SERVICESIMAGE_SQUASHFS_DISABLE_4K_ALIGN),$(hide) echo "product_services_squashfs_disable_4k_align=$(BOARD_PRODUCT_SERVICESIMAGE_SQUASHFS_DISABLE_4K_ALIGN)" >> $(1))
- $(if $(BOARD_PRODUCT_SERVICESIMAGE_PARTITION_RESERVED_SIZE),$(hide) echo "product_services_reserved_size=$(BOARD_PRODUCT_SERVICESIMAGE_PARTITION_RESERVED_SIZE)" >> $(1))
- $(hide) echo "product_services_selinux_fc=$(SELINUX_FC)" >> $(1)
+$(if $(filter $(2),system_ext),\
+ $(if $(BOARD_SYSTEM_EXTIMAGE_FILE_SYSTEM_TYPE),$(hide) echo "system_ext_fs_type=$(BOARD_SYSTEM_EXTIMAGE_FILE_SYSTEM_TYPE)" >> $(1))
+ $(if $(BOARD_SYSTEM_EXTIMAGE_EXTFS_INODE_COUNT),$(hide) echo "system_ext_extfs_inode_count=$(BOARD_SYSTEM_EXTIMAGE_EXTFS_INODE_COUNT)" >> $(1))
+ $(if $(BOARD_SYSTEM_EXTIMAGE_EXTFS_RSV_PCT),$(hide) echo "system_ext_extfs_rsv_pct=$(BOARD_SYSTEM_EXTIMAGE_EXTFS_RSV_PCT)" >> $(1))
+ $(if $(BOARD_SYSTEM_EXTIMAGE_PARTITION_SIZE),$(hide) echo "system_ext_size=$(BOARD_SYSTEM_EXTIMAGE_PARTITION_SIZE)" >> $(1))
+ $(if $(BOARD_SYSTEM_EXTIMAGE_JOURNAL_SIZE),$(hide) echo "system_ext_journal_size=$(BOARD_SYSTEM_EXTIMAGE_JOURNAL_SIZE)" >> $(1))
+ $(if $(BOARD_SYSTEM_EXTIMAGE_SQUASHFS_COMPRESSOR),$(hide) echo "system_ext_squashfs_compressor=$(BOARD_SYSTEM_EXTIMAGE_SQUASHFS_COMPRESSOR)" >> $(1))
+ $(if $(BOARD_SYSTEM_EXTIMAGE_SQUASHFS_COMPRESSOR_OPT),$(hide) echo "system_ext_squashfs_compressor_opt=$(BOARD_SYSTEM_EXTIMAGE_SQUASHFS_COMPRESSOR_OPT)" >> $(1))
+ $(if $(BOARD_SYSTEM_EXTIMAGE_SQUASHFS_BLOCK_SIZE),$(hide) echo "system_ext_squashfs_block_size=$(BOARD_SYSTEM_EXTIMAGE_SQUASHFS_BLOCK_SIZE)" >> $(1))
+ $(if $(BOARD_SYSTEM_EXTIMAGE_SQUASHFS_DISABLE_4K_ALIGN),$(hide) echo "system_ext_squashfs_disable_4k_align=$(BOARD_SYSTEM_EXTIMAGE_SQUASHFS_DISABLE_4K_ALIGN)" >> $(1))
+ $(if $(BOARD_SYSTEM_EXTIMAGE_PARTITION_RESERVED_SIZE),$(hide) echo "system_ext_reserved_size=$(BOARD_SYSTEM_EXTIMAGE_PARTITION_RESERVED_SIZE)" >> $(1))
+ $(hide) echo "system_ext_selinux_fc=$(SELINUX_FC)" >> $(1)
)
$(if $(filter $(2),odm),\
$(if $(BOARD_ODMIMAGE_FILE_SYSTEM_TYPE),$(hide) echo "odm_fs_type=$(BOARD_ODMIMAGE_FILE_SYSTEM_TYPE)" >> $(1))
@@ -1528,7 +1544,7 @@
$(if $(PRODUCT_SYSTEM_VERITY_PARTITION),$(hide) echo "system_verity_block_device=$(PRODUCT_SYSTEM_VERITY_PARTITION)" >> $(1))
$(if $(PRODUCT_VENDOR_VERITY_PARTITION),$(hide) echo "vendor_verity_block_device=$(PRODUCT_VENDOR_VERITY_PARTITION)" >> $(1))
$(if $(PRODUCT_PRODUCT_VERITY_PARTITION),$(hide) echo "product_verity_block_device=$(PRODUCT_PRODUCT_VERITY_PARTITION)" >> $(1))
-$(if $(PRODUCT_PRODUCT_SERVICES_VERITY_PARTITION),$(hide) echo "product_services_verity_block_device=$(PRODUCT_PRODUCT_SERVICES_VERITY_PARTITION)" >> $(1))
+$(if $(PRODUCT_SYSTEM_EXT_VERITY_PARTITION),$(hide) echo "system_ext_verity_block_device=$(PRODUCT_SYSTEM_EXT_VERITY_PARTITION)" >> $(1))
$(if $(PRODUCT_SUPPORTS_VBOOT),$(hide) echo "vboot=$(PRODUCT_SUPPORTS_VBOOT)" >> $(1))
$(if $(PRODUCT_SUPPORTS_VBOOT),$(hide) echo "vboot_key=$(PRODUCT_VBOOT_SIGNING_KEY)" >> $(1))
$(if $(PRODUCT_SUPPORTS_VBOOT),$(hide) echo "vboot_subkey=$(PRODUCT_VBOOT_SIGNING_SUBKEY)" >> $(1))
@@ -1562,14 +1578,14 @@
$(hide) echo "avb_product_key_path=$(BOARD_AVB_PRODUCT_KEY_PATH)" >> $(1)
$(hide) echo "avb_product_algorithm=$(BOARD_AVB_PRODUCT_ALGORITHM)" >> $(1)
$(hide) echo "avb_product_rollback_index_location=$(BOARD_AVB_PRODUCT_ROLLBACK_INDEX_LOCATION)" >> $(1)))
-$(if $(BOARD_AVB_ENABLE),$(hide) echo "avb_product_services_hashtree_enable=$(BOARD_AVB_ENABLE)" >> $(1))
+$(if $(BOARD_AVB_ENABLE),$(hide) echo "avb_system_ext_hashtree_enable=$(BOARD_AVB_ENABLE)" >> $(1))
$(if $(BOARD_AVB_ENABLE),\
- $(hide) echo "avb_product_services_add_hashtree_footer_args=$(BOARD_AVB_PRODUCT_SERVICES_ADD_HASHTREE_FOOTER_ARGS)" >> $(1))
+ $(hide) echo "avb_system_ext_add_hashtree_footer_args=$(BOARD_AVB_SYSTEM_EXT_ADD_HASHTREE_FOOTER_ARGS)" >> $(1))
$(if $(BOARD_AVB_ENABLE),\
- $(if $(BOARD_AVB_PRODUCT_SERVICES_KEY_PATH),\
- $(hide) echo "avb_product_services_key_path=$(BOARD_AVB_PRODUCT_SERVICES_KEY_PATH)" >> $(1)
- $(hide) echo "avb_product_services_algorithm=$(BOARD_AVB_PRODUCT_SERVICES_ALGORITHM)" >> $(1)
- $(hide) echo "avb_product_services_rollback_index_location=$(BOARD_AVB_PRODUCT_SERVICES_ROLLBACK_INDEX_LOCATION)" >> $(1)))
+ $(if $(BOARD_AVB_SYSTEM_EXT_KEY_PATH),\
+ $(hide) echo "avb_system_ext_key_path=$(BOARD_AVB_SYSTEM_EXT_KEY_PATH)" >> $(1)
+ $(hide) echo "avb_system_ext_algorithm=$(BOARD_AVB_SYSTEM_EXT_ALGORITHM)" >> $(1)
+ $(hide) echo "avb_system_ext_rollback_index_location=$(BOARD_AVB_SYSTEM_EXT_ROLLBACK_INDEX_LOCATION)" >> $(1)))
$(if $(BOARD_AVB_ENABLE),$(hide) echo "avb_odm_hashtree_enable=$(BOARD_AVB_ENABLE)" >> $(1))
$(if $(BOARD_AVB_ENABLE),$(hide) echo "avb_odm_add_hashtree_footer_args=$(BOARD_AVB_ODM_ADD_HASHTREE_FOOTER_ARGS)" >> $(1))
$(if $(BOARD_AVB_ENABLE),\
@@ -1604,8 +1620,8 @@
ifdef BUILDING_PRODUCT_IMAGE
PROP_DICTIONARY_IMAGES += product
endif
-ifdef BUILDING_PRODUCT_SERVICES_IMAGE
- PROP_DICTIONARY_IMAGES += product_services
+ifdef BUILDING_SYSTEM_EXT_IMAGE
+ PROP_DICTIONARY_IMAGES += system_ext
endif
ifdef BUILDING_ODM_IMAGE
PROP_DICTIONARY_IMAGES += odm
@@ -1881,7 +1897,7 @@
$(INSTALLED_VENDOR_BUILD_PROP_TARGET) \
$(INSTALLED_ODM_BUILD_PROP_TARGET) \
$(INSTALLED_PRODUCT_BUILD_PROP_TARGET) \
- $(INSTALLED_PRODUCT_SERVICES_BUILD_PROP_TARGET)
+ $(INSTALLED_SYSTEM_EXT_BUILD_PROP_TARGET)
@echo "Target recovery buildinfo: $@"
$(hide) mkdir -p $(dir $@)
$(hide) rm -f $@
@@ -1891,7 +1907,7 @@
$(hide) cat $(INSTALLED_VENDOR_BUILD_PROP_TARGET) >> $@
$(hide) cat $(INSTALLED_ODM_BUILD_PROP_TARGET) >> $@
$(hide) cat $(INSTALLED_PRODUCT_BUILD_PROP_TARGET) >> $@
- $(hide) cat $(INSTALLED_PRODUCT_SERVICES_BUILD_PROP_TARGET) >> $@
+ $(hide) cat $(INSTALLED_SYSTEM_EXT_BUILD_PROP_TARGET) >> $@
$(call append-recovery-ui-properties,$(PRIVATE_RECOVERY_UI_PROPERTIES),$@)
INTERNAL_RECOVERYIMAGE_ARGS := \
@@ -2174,7 +2190,8 @@
$(AVBTOOL) add_hash_footer \
--image $@ \
--partition_size $(BOARD_BOOTIMAGE_PARTITION_SIZE) \
- --partition_name boot $(PRIVATE_AVB_DEBUG_BOOT_SIGNING_ARGS), \
+ --partition_name boot $(PRIVATE_AVB_DEBUG_BOOT_SIGNING_ARGS) \
+ $(BOARD_AVB_BOOT_ADD_HASH_FOOTER_ARGS), \
$(call assert-max-image-size,$@,$(BOARD_BOOTIMAGE_PARTITION_SIZE)))
.PHONY: bootimage_debug-nodeps
@@ -2186,7 +2203,8 @@
$(AVBTOOL) add_hash_footer \
--image $(INSTALLED_DEBUG_BOOTIMAGE_TARGET) \
--partition_size $(BOARD_BOOTIMAGE_PARTITION_SIZE) \
- --partition_name boot $(PRIVATE_AVB_DEBUG_BOOT_SIGNING_ARGS), \
+ --partition_name boot $(PRIVATE_AVB_DEBUG_BOOT_SIGNING_ARGS) \
+ $(BOARD_AVB_BOOT_ADD_HASH_FOOTER_ARGS), \
$(call assert-max-image-size,$(INSTALLED_DEBUG_BOOTIMAGE_TARGET),$(BOARD_BOOTIMAGE_PARTITION_SIZE)))
endif # TARGET_NO_KERNEL
@@ -2275,33 +2293,18 @@
endef
endif
-# Create symlink /system/product_services to /product_services if necessary.
-ifdef BOARD_USES_PRODUCT_SERVICESIMAGE
-define create-system-product_services-symlink
-$(hide) if [ -d $(TARGET_OUT)/product_services ] && [ ! -h $(TARGET_OUT)/product_services ]; then \
- echo 'Non-symlink $(TARGET_OUT)/product_services detected!' 1>&2; \
- echo 'You cannot install files to $(TARGET_OUT)/product_services while building a separate product_services.img!' 1>&2; \
+# Create symlink /system/system_ext to /system_ext if necessary.
+ifdef BOARD_USES_SYSTEM_EXTIMAGE
+define create-system-system_ext-symlink
+$(hide) if [ -d $(TARGET_OUT)/system_ext ] && [ ! -h $(TARGET_OUT)/system_ext ]; then \
+ echo 'Non-symlink $(TARGET_OUT)/system_ext detected!' 1>&2; \
+ echo 'You cannot install files to $(TARGET_OUT)/system_ext while building a separate system_ext.img!' 1>&2; \
exit 1; \
fi
-$(hide) ln -sf /product_services $(TARGET_OUT)/product_services
+$(hide) ln -sf /system_ext $(TARGET_OUT)/system_ext
endef
else
-define create-system-product_services-symlink
-endef
-endif
-
-# Create symlink /vendor/odm to /odm if necessary.
-ifdef BOARD_USES_ODMIMAGE
-define create-vendor-odm-symlink
-$(hide) if [ -d $(TARGET_OUT_VENDOR)/odm ] && [ ! -h $(TARGET_OUT_VENDOR)/odm ]; then \
- echo 'Non-symlink $(TARGET_OUT_VENDOR)/odm detected!' 1>&2; \
- echo 'You cannot install files to $(TARGET_OUT_VENDOR)/odm while building a separate odm.img!' 1>&2; \
- exit 1; \
-fi
-$(hide) ln -sf /odm $(TARGET_OUT_VENDOR)/odm
-endef
-else
-define create-vendor-odm-symlink
+define create-system-system_ext-symlink
endef
endif
@@ -2310,22 +2313,23 @@
@echo "Target system fs image: $(1)"
$(call create-system-vendor-symlink)
$(call create-system-product-symlink)
- $(call create-system-product_services-symlink)
+ $(call create-system-system_ext-symlink)
$(call check-apex-libs-absence-on-disk)
@mkdir -p $(dir $(1)) $(systemimage_intermediates) && rm -rf $(systemimage_intermediates)/system_image_info.txt
$(call generate-image-prop-dictionary, $(systemimage_intermediates)/system_image_info.txt,system, \
skip_fsck=true)
PATH=$(INTERNAL_USERIMAGES_BINARY_PATHS):$$PATH \
- build/make/tools/releasetools/build_image.py \
- $(TARGET_OUT) $(systemimage_intermediates)/system_image_info.txt $(1) $(TARGET_OUT) \
- || ( mkdir -p $${DIST_DIR}; cp $(INSTALLED_FILES_FILE) $${DIST_DIR}/installed-files-rescued.txt; \
- exit 1 )
+ $(BUILD_IMAGE) \
+ $(TARGET_OUT) $(systemimage_intermediates)/system_image_info.txt $(1) $(TARGET_OUT) \
+ || ( mkdir -p $${DIST_DIR}; \
+ cp $(INSTALLED_FILES_FILE) $${DIST_DIR}/installed-files-rescued.txt; \
+ exit 1 )
endef
ifeq ($(BOARD_AVB_ENABLE),true)
$(BUILT_SYSTEMIMAGE): $(BOARD_AVB_SYSTEM_KEY_PATH)
endif
-$(BUILT_SYSTEMIMAGE): $(FULL_SYSTEMIMAGE_DEPS) $(INSTALLED_FILES_FILE) $(BUILD_IMAGE_SRCS)
+$(BUILT_SYSTEMIMAGE): $(FULL_SYSTEMIMAGE_DEPS) $(INSTALLED_FILES_FILE)
$(call build-systemimage-target,$@)
INSTALLED_SYSTEMIMAGE_TARGET := $(PRODUCT_OUT)/system.img
@@ -2389,36 +2393,6 @@
.PHONY: sync syncsys
sync syncsys: $(INTERNAL_SYSTEMIMAGE_FILES)
-#######
-## system tarball
-define build-systemtarball-target
- $(call pretty,"Target system fs tarball: $(INSTALLED_SYSTEMTARBALL_TARGET)")
- $(call create-system-vendor-symlink)
- $(call create-system-product-symlink)
- $(call create-system-product_services-symlink)
- $(MKTARBALL) $(FS_GET_STATS) \
- $(PRODUCT_OUT) system $(PRIVATE_SYSTEM_TAR) \
- $(INSTALLED_SYSTEMTARBALL_TARGET) $(TARGET_OUT)
-endef
-
-ifndef SYSTEM_TARBALL_FORMAT
- SYSTEM_TARBALL_FORMAT := bz2
-endif
-
-system_tar := $(PRODUCT_OUT)/system.tar
-INSTALLED_SYSTEMTARBALL_TARGET := $(system_tar).$(SYSTEM_TARBALL_FORMAT)
-$(INSTALLED_SYSTEMTARBALL_TARGET): PRIVATE_SYSTEM_TAR := $(system_tar)
-$(INSTALLED_SYSTEMTARBALL_TARGET): $(FS_GET_STATS) $(INTERNAL_SYSTEMIMAGE_FILES)
- $(build-systemtarball-target)
-
-.PHONY: systemtarball-nodeps
-systemtarball-nodeps: $(FS_GET_STATS) \
- $(filter-out systemtarball-nodeps stnod,$(MAKECMDGOALS))
- $(build-systemtarball-target)
-
-.PHONY: stnod
-stnod: systemtarball-nodeps
-
# -----------------------------------------------------------------
## platform.zip: system, plus other files to be used in PDK fusion build,
## in a zip file
@@ -2482,8 +2456,8 @@
ifdef BUILDING_PRODUCT_IMAGE
echo "-D $(TARGET_OUT_PRODUCT)" >> $@.lst
endif
-ifdef BUILDING_PRODUCT_SERVICES_IMAGE
- echo "-D $(TARGET_OUT_PRODUCT_SERVICES)" >> $@.lst
+ifdef BUILDING_SYSTEM_EXT_IMAGE
+ echo "-D $(TARGET_OUT_SYSTEM_EXT)" >> $@.lst
endif
ifdef BUILDING_ODM_IMAGE
echo "-D $(TARGET_OUT_ODM)" >> $@.lst
@@ -2521,34 +2495,6 @@
endif # BUILD_PLATFORM_ZIP
# -----------------------------------------------------------------
-## boot tarball
-define build-boottarball-target
- $(hide) echo "Target boot fs tarball: $(INSTALLED_BOOTTARBALL_TARGET)"
- $(hide) mkdir -p $(PRODUCT_OUT)/boot
- $(hide) cp -f $(INTERNAL_BOOTIMAGE_FILES) $(PRODUCT_OUT)/boot/.
- $(hide) echo $(INTERNAL_KERNEL_CMDLINE) > $(PRODUCT_OUT)/boot/cmdline
- $(hide) $(MKTARBALL) $(FS_GET_STATS) \
- $(PRODUCT_OUT) boot $(PRIVATE_BOOT_TAR) \
- $(INSTALLED_BOOTTARBALL_TARGET) $(TARGET_OUT)
-endef
-
-ifndef BOOT_TARBALL_FORMAT
- BOOT_TARBALL_FORMAT := bz2
-endif
-
-boot_tar := $(PRODUCT_OUT)/boot.tar
-INSTALLED_BOOTTARBALL_TARGET := $(boot_tar).$(BOOT_TARBALL_FORMAT)
-$(INSTALLED_BOOTTARBALL_TARGET): PRIVATE_BOOT_TAR := $(boot_tar)
-$(INSTALLED_BOOTTARBALL_TARGET): $(FS_GET_STATS) $(INTERNAL_BOOTIMAGE_FILES)
- $(build-boottarball-target)
-
-.PHONY: boottarball-nodeps btnod
-boottarball-nodeps btnod: $(FS_GET_STATS) \
- $(filter-out boottarball-nodeps btnod,$(MAKECMDGOALS))
- $(build-boottarball-target)
-
-
-# -----------------------------------------------------------------
# data partition image
INTERNAL_USERDATAIMAGE_FILES := \
$(filter $(TARGET_OUT_DATA)/%,$(ALL_DEFAULT_INSTALLED_MODULES))
@@ -2564,17 +2510,17 @@
@mkdir -p $(userdataimage_intermediates) && rm -rf $(userdataimage_intermediates)/userdata_image_info.txt
$(call generate-image-prop-dictionary, $(userdataimage_intermediates)/userdata_image_info.txt,userdata,skip_fsck=true)
PATH=$(INTERNAL_USERIMAGES_BINARY_PATHS):$$PATH \
- build/make/tools/releasetools/build_image.py \
- $(TARGET_OUT_DATA) $(userdataimage_intermediates)/userdata_image_info.txt $(INSTALLED_USERDATAIMAGE_TARGET) $(TARGET_OUT)
- $(hide) $(call assert-max-image-size,$(INSTALLED_USERDATAIMAGE_TARGET),$(BOARD_USERDATAIMAGE_PARTITION_SIZE))
+ $(BUILD_IMAGE) \
+ $(TARGET_OUT_DATA) $(userdataimage_intermediates)/userdata_image_info.txt \
+ $(INSTALLED_USERDATAIMAGE_TARGET) $(TARGET_OUT)
+ $(call assert-max-image-size,$(INSTALLED_USERDATAIMAGE_TARGET),$(BOARD_USERDATAIMAGE_PARTITION_SIZE))
endef
# We just build this directly to the install location.
INSTALLED_USERDATAIMAGE_TARGET := $(BUILT_USERDATAIMAGE_TARGET)
INSTALLED_USERDATAIMAGE_TARGET_DEPS := \
$(INTERNAL_USERIMAGES_DEPS) \
- $(INTERNAL_USERDATAIMAGE_FILES) \
- $(BUILD_IMAGE_SRCS)
+ $(INTERNAL_USERDATAIMAGE_FILES)
$(INSTALLED_USERDATAIMAGE_TARGET): $(INSTALLED_USERDATAIMAGE_TARGET_DEPS)
$(build-userdataimage-target)
@@ -2597,29 +2543,6 @@
$(ASAN_IN_SYSTEM_INSTALLED): $(INSTALLED_USERDATAIMAGE_TARGET_DEPS)
tar cfj $(ASAN_IN_SYSTEM_INSTALLED) $(ASAN_SYSTEM_INSTALL_OPTIONS) -C $(TARGET_OUT_DATA)/.. $(ASAN_OUT_DIRS_FOR_SYSTEM_INSTALL) >/dev/null
-#######
-## data partition tarball
-define build-userdatatarball-target
- $(call pretty,"Target userdata fs tarball: " \
- "$(INSTALLED_USERDATATARBALL_TARGET)")
- $(MKTARBALL) $(FS_GET_STATS) \
- $(PRODUCT_OUT) data $(PRIVATE_USERDATA_TAR) \
- $(INSTALLED_USERDATATARBALL_TARGET) $(TARGET_OUT)
-endef
-
-userdata_tar := $(PRODUCT_OUT)/userdata.tar
-INSTALLED_USERDATATARBALL_TARGET := $(userdata_tar).bz2
-$(INSTALLED_USERDATATARBALL_TARGET): PRIVATE_USERDATA_TAR := $(userdata_tar)
-$(INSTALLED_USERDATATARBALL_TARGET): $(FS_GET_STATS) $(INTERNAL_USERDATAIMAGE_FILES)
- $(build-userdatatarball-target)
-
-$(call dist-for-goals,userdatatarball,$(INSTALLED_USERDATATARBALL_TARGET))
-
-.PHONY: userdatatarball-nodeps
-userdatatarball-nodeps: $(FS_GET_STATS)
- $(build-userdatatarball-target)
-
-
# -----------------------------------------------------------------
# partition table image
ifdef BOARD_BPT_INPUT_FILES
@@ -2670,14 +2593,15 @@
@mkdir -p $(cacheimage_intermediates) && rm -rf $(cacheimage_intermediates)/cache_image_info.txt
$(call generate-image-prop-dictionary, $(cacheimage_intermediates)/cache_image_info.txt,cache,skip_fsck=true)
PATH=$(INTERNAL_USERIMAGES_BINARY_PATHS):$$PATH \
- build/make/tools/releasetools/build_image.py \
- $(TARGET_OUT_CACHE) $(cacheimage_intermediates)/cache_image_info.txt $(INSTALLED_CACHEIMAGE_TARGET) $(TARGET_OUT)
- $(hide) $(call assert-max-image-size,$(INSTALLED_CACHEIMAGE_TARGET),$(BOARD_CACHEIMAGE_PARTITION_SIZE))
+ $(BUILD_IMAGE) \
+ $(TARGET_OUT_CACHE) $(cacheimage_intermediates)/cache_image_info.txt \
+ $(INSTALLED_CACHEIMAGE_TARGET) $(TARGET_OUT)
+ $(call assert-max-image-size,$(INSTALLED_CACHEIMAGE_TARGET),$(BOARD_CACHEIMAGE_PARTITION_SIZE))
endef
# We just build this directly to the install location.
INSTALLED_CACHEIMAGE_TARGET := $(BUILT_CACHEIMAGE_TARGET)
-$(INSTALLED_CACHEIMAGE_TARGET): $(INTERNAL_USERIMAGES_DEPS) $(INTERNAL_CACHEIMAGE_FILES) $(BUILD_IMAGE_SRCS)
+$(INSTALLED_CACHEIMAGE_TARGET): $(INTERNAL_USERIMAGES_DEPS) $(INTERNAL_CACHEIMAGE_FILES)
$(build-cacheimage-target)
.PHONY: cacheimage-nodeps
@@ -2741,9 +2665,10 @@
@mkdir -p $(systemotherimage_intermediates) && rm -rf $(systemotherimage_intermediates)/system_other_image_info.txt
$(call generate-image-prop-dictionary, $(systemotherimage_intermediates)/system_other_image_info.txt,system,skip_fsck=true)
PATH=$(INTERNAL_USERIMAGES_BINARY_PATHS):$$PATH \
- build/make/tools/releasetools/build_image.py \
- $(TARGET_OUT_SYSTEM_OTHER) $(systemotherimage_intermediates)/system_other_image_info.txt $(INSTALLED_SYSTEMOTHERIMAGE_TARGET) $(TARGET_OUT)
- $(hide) $(call assert-max-image-size,$(INSTALLED_SYSTEMOTHERIMAGE_TARGET),$(BOARD_SYSTEMIMAGE_PARTITION_SIZE))
+ $(BUILD_IMAGE) \
+ $(TARGET_OUT_SYSTEM_OTHER) $(systemotherimage_intermediates)/system_other_image_info.txt \
+ $(INSTALLED_SYSTEMOTHERIMAGE_TARGET) $(TARGET_OUT)
+ $(call assert-max-image-size,$(INSTALLED_SYSTEMOTHERIMAGE_TARGET),$(BOARD_SYSTEMIMAGE_PARTITION_SIZE))
endef
# We just build this directly to the install location.
@@ -2790,12 +2715,19 @@
# -- Kernel version and configurations.
ifeq ($(PRODUCT_OTA_ENFORCE_VINTF_KERNEL_REQUIREMENTS),true)
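+# Staging location for the kernel version and config files, either copied from
+# the board-provided values or extracted from INSTALLED_KERNEL_TARGET; the
+# assembled vendor manifest rule consumes them via --kernel.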
+intermediates := $(call intermediates-dir-for,ETC,$(notdir $(BUILT_ASSEMBLED_VENDOR_MANIFEST)))
+BUILT_KERNEL_CONFIGS_FILE := $(intermediates)/kernel_configs.txt
+BUILT_KERNEL_VERSION_FILE := $(intermediates)/kernel_version.txt
+
# BOARD_KERNEL_CONFIG_FILE and BOARD_KERNEL_VERSION can be used to override the values extracted
# from INSTALLED_KERNEL_TARGET.
ifdef BOARD_KERNEL_CONFIG_FILE
ifdef BOARD_KERNEL_VERSION
-$(BUILT_ASSEMBLED_VENDOR_MANIFEST): $(BOARD_KERNEL_CONFIG_FILE)
-$(BUILT_ASSEMBLED_VENDOR_MANIFEST): PRIVATE_FLAGS += --kernel $(BOARD_KERNEL_VERSION):$(BOARD_KERNEL_CONFIG_FILE)
+$(BUILT_KERNEL_CONFIGS_FILE): $(BOARD_KERNEL_CONFIG_FILE)
+ cp $< $@
+$(BUILT_KERNEL_VERSION_FILE):
+ echo $(BOARD_KERNEL_VERSION) > $@
+
my_board_extracted_kernel := true
endif # BOARD_KERNEL_VERSION
endif # BOARD_KERNEL_CONFIG_FILE
@@ -2810,7 +2742,6 @@
BOARD_KERNEL_VERSION manually; or (3) unsetting PRODUCT_OTA_ENFORCE_VINTF_KERNEL_REQUIREMENTS \
manually.)
else
-intermediates := $(call intermediates-dir-for,ETC,$(notdir $(BUILT_ASSEMBLED_VENDOR_MANIFEST)))
# Tools for decompression that is not in PATH.
# Check $(EXTRACT_KERNEL) for decompression algorithms supported by the script.
@@ -2818,29 +2749,25 @@
my_decompress_tools := \
lz4:$(HOST_OUT_EXECUTABLES)/lz4 \
-my_kernel_configs := $(intermediates)/kernel_configs.txt
-my_kernel_version := $(intermediates)/kernel_version.txt
-$(my_kernel_configs): .KATI_IMPLICIT_OUTPUTS := $(my_kernel_version)
-$(my_kernel_configs): PRIVATE_KERNEL_VERSION_FILE := $(my_kernel_version)
-$(my_kernel_configs): PRIVATE_DECOMPRESS_TOOLS := $(my_decompress_tools)
-$(my_kernel_configs): $(foreach pair,$(my_decompress_tools),$(call word-colon,2,$(pair)))
-$(my_kernel_configs): $(EXTRACT_KERNEL) $(INSTALLED_KERNEL_TARGET)
+$(BUILT_KERNEL_CONFIGS_FILE): .KATI_IMPLICIT_OUTPUTS := $(BUILT_KERNEL_VERSION_FILE)
+$(BUILT_KERNEL_CONFIGS_FILE): PRIVATE_DECOMPRESS_TOOLS := $(my_decompress_tools)
+$(BUILT_KERNEL_CONFIGS_FILE): $(foreach pair,$(my_decompress_tools),$(call word-colon,2,$(pair)))
+$(BUILT_KERNEL_CONFIGS_FILE): $(EXTRACT_KERNEL) $(INSTALLED_KERNEL_TARGET)
$< --tools $(PRIVATE_DECOMPRESS_TOOLS) --input $(INSTALLED_KERNEL_TARGET) \
--output-configs $@ \
- --output-version $(PRIVATE_KERNEL_VERSION_FILE)
-
-$(BUILT_ASSEMBLED_VENDOR_MANIFEST): $(my_kernel_configs) $(my_kernel_version)
-$(BUILT_ASSEMBLED_VENDOR_MANIFEST): PRIVATE_FLAGS += --kernel $$(cat $(my_kernel_version)):$(my_kernel_configs)
+ --output-version $(BUILT_KERNEL_VERSION_FILE)
intermediates :=
-my_kernel_configs :=
-my_kernel_version :=
my_decompress_tools :=
endif # my_board_extracted_kernel
my_board_extracted_kernel :=
endif # INSTALLED_KERNEL_TARGET
+
+$(BUILT_ASSEMBLED_VENDOR_MANIFEST): $(BUILT_KERNEL_CONFIGS_FILE) $(BUILT_KERNEL_VERSION_FILE)
+$(BUILT_ASSEMBLED_VENDOR_MANIFEST): PRIVATE_FLAGS += --kernel $$(cat $(BUILT_KERNEL_VERSION_FILE)):$(BUILT_KERNEL_CONFIGS_FILE)
+
endif # PRODUCT_OTA_ENFORCE_VINTF_KERNEL_REQUIREMENTS
$(BUILT_ASSEMBLED_VENDOR_MANIFEST):
@@ -2869,6 +2796,21 @@
$(hide) $(FILESLIST) $(TARGET_OUT_VENDOR) > $(@:.txt=.json)
$(hide) $(FILESLIST_UTIL) -c $(@:.txt=.json) > $@
+# Create symlink /vendor/odm to /odm if necessary.
+ifdef BOARD_USES_ODMIMAGE
+define create-vendor-odm-symlink
+$(hide) if [ -d $(TARGET_OUT_VENDOR)/odm ] && [ ! -h $(TARGET_OUT_VENDOR)/odm ]; then \
+ echo 'Non-symlink $(TARGET_OUT_VENDOR)/odm detected!' 1>&2; \
+ echo 'You cannot install files to $(TARGET_OUT_VENDOR)/odm while building a separate odm.img!' 1>&2; \
+ exit 1; \
+fi
+$(hide) ln -sf /odm $(TARGET_OUT_VENDOR)/odm
+endef
+else
+define create-vendor-odm-symlink
+endef
+endif
+
vendorimage_intermediates := \
$(call intermediates-dir-for,PACKAGING,vendor)
BUILT_VENDORIMAGE_TARGET := $(PRODUCT_OUT)/vendor.img
@@ -2879,9 +2821,10 @@
@mkdir -p $(vendorimage_intermediates) && rm -rf $(vendorimage_intermediates)/vendor_image_info.txt
$(call generate-image-prop-dictionary, $(vendorimage_intermediates)/vendor_image_info.txt,vendor,skip_fsck=true)
PATH=$(INTERNAL_USERIMAGES_BINARY_PATHS):$$PATH \
- build/make/tools/releasetools/build_image.py \
- $(TARGET_OUT_VENDOR) $(vendorimage_intermediates)/vendor_image_info.txt $(INSTALLED_VENDORIMAGE_TARGET) $(TARGET_OUT)
- $(hide) $(call assert-max-image-size,$(INSTALLED_VENDORIMAGE_TARGET),$(BOARD_VENDORIMAGE_PARTITION_SIZE))
+ $(BUILD_IMAGE) \
+ $(TARGET_OUT_VENDOR) $(vendorimage_intermediates)/vendor_image_info.txt \
+ $(INSTALLED_VENDORIMAGE_TARGET) $(TARGET_OUT)
+ $(call assert-max-image-size,$(INSTALLED_VENDORIMAGE_TARGET),$(BOARD_VENDORIMAGE_PARTITION_SIZE))
endef
# We just build this directly to the install location.
@@ -2889,7 +2832,10 @@
ifdef BUILT_VENDOR_MANIFEST
$(INSTALLED_VENDORIMAGE_TARGET): $(BUILT_ASSEMBLED_VENDOR_MANIFEST)
endif
-$(INSTALLED_VENDORIMAGE_TARGET): $(INTERNAL_USERIMAGES_DEPS) $(INTERNAL_VENDORIMAGE_FILES) $(INSTALLED_FILES_FILE_VENDOR) $(BUILD_IMAGE_SRCS)
+$(INSTALLED_VENDORIMAGE_TARGET): \
+ $(INTERNAL_USERIMAGES_DEPS) \
+ $(INTERNAL_VENDORIMAGE_FILES) \
+ $(INSTALLED_FILES_FILE_VENDOR)
$(build-vendorimage-target)
.PHONY: vendorimage-nodeps vnod
@@ -2934,14 +2880,18 @@
@mkdir -p $(productimage_intermediates) && rm -rf $(productimage_intermediates)/product_image_info.txt
$(call generate-image-prop-dictionary, $(productimage_intermediates)/product_image_info.txt,product,skip_fsck=true)
PATH=$(INTERNAL_USERIMAGES_BINARY_PATHS):$$PATH \
- ./build/tools/releasetools/build_image.py \
- $(TARGET_OUT_PRODUCT) $(productimage_intermediates)/product_image_info.txt $(INSTALLED_PRODUCTIMAGE_TARGET) $(TARGET_OUT)
- $(hide) $(call assert-max-image-size,$(INSTALLED_PRODUCTIMAGE_TARGET),$(BOARD_PRODUCTIMAGE_PARTITION_SIZE))
+ $(BUILD_IMAGE) \
+ $(TARGET_OUT_PRODUCT) $(productimage_intermediates)/product_image_info.txt \
+ $(INSTALLED_PRODUCTIMAGE_TARGET) $(TARGET_OUT)
+ $(call assert-max-image-size,$(INSTALLED_PRODUCTIMAGE_TARGET),$(BOARD_PRODUCTIMAGE_PARTITION_SIZE))
endef
# We just build this directly to the install location.
INSTALLED_PRODUCTIMAGE_TARGET := $(BUILT_PRODUCTIMAGE_TARGET)
-$(INSTALLED_PRODUCTIMAGE_TARGET): $(INTERNAL_USERIMAGES_DEPS) $(INTERNAL_PRODUCTIMAGE_FILES) $(INSTALLED_FILES_FILE_PRODUCT) $(BUILD_IMAGE_SRCS)
+$(INSTALLED_PRODUCTIMAGE_TARGET): \
+ $(INTERNAL_USERIMAGES_DEPS) \
+ $(INTERNAL_PRODUCTIMAGE_FILES) \
+ $(INSTALLED_FILES_FILE_PRODUCT)
$(build-productimage-target)
.PHONY: productimage-nodeps pnod
@@ -3002,55 +2952,61 @@
endif # BUILDING_SYSTEM_IMAGE
# -----------------------------------------------------------------
-# product_services partition image
-ifdef BUILDING_PRODUCT_SERVICES_IMAGE
-INTERNAL_PRODUCT_SERVICESIMAGE_FILES := \
- $(filter $(TARGET_OUT_PRODUCT_SERVICES)/%,\
+# system_ext partition image
+ifdef BUILDING_SYSTEM_EXT_IMAGE
+INTERNAL_SYSTEM_EXTIMAGE_FILES := \
+ $(filter $(TARGET_OUT_SYSTEM_EXT)/%,\
$(ALL_DEFAULT_INSTALLED_MODULES)\
$(ALL_PDK_FUSION_FILES)) \
$(PDK_FUSION_SYMLINK_STAMP)
-# platform.zip depends on $(INTERNAL_PRODUCT_SERVICESIMAGE_FILES).
-$(INSTALLED_PLATFORM_ZIP) : $(INTERNAL_PRODUCT_SERVICESIMAGE_FILES)
+# platform.zip depends on $(INTERNAL_SYSTEM_EXTIMAGE_FILES).
+$(INSTALLED_PLATFORM_ZIP) : $(INTERNAL_SYSTEM_EXTIMAGE_FILES)
-INSTALLED_FILES_FILE_PRODUCT_SERVICES := $(PRODUCT_OUT)/installed-files-product_services.txt
-INSTALLED_FILES_JSON_PRODUCT_SERVICES := $(INSTALLED_FILES_FILE_PRODUCT_SERVICES:.txt=.json)
-$(INSTALLED_FILES_FILE_PRODUCT_SERVICES): .KATI_IMPLICIT_OUTPUTS := $(INSTALLED_FILES_JSON_PRODUCT_SERVICES)
-$(INSTALLED_FILES_FILE_PRODUCT_SERVICES) : $(INTERNAL_PRODUCT_SERVICESIMAGE_FILES) $(FILESLIST) $(FILESLIST_UTIL)
+INSTALLED_FILES_FILE_SYSTEM_EXT := $(PRODUCT_OUT)/installed-files-system_ext.txt
+INSTALLED_FILES_JSON_SYSTEM_EXT := $(INSTALLED_FILES_FILE_SYSTEM_EXT:.txt=.json)
+$(INSTALLED_FILES_FILE_SYSTEM_EXT): .KATI_IMPLICIT_OUTPUTS := $(INSTALLED_FILES_JSON_SYSTEM_EXT)
+$(INSTALLED_FILES_FILE_SYSTEM_EXT) : $(INTERNAL_SYSTEM_EXTIMAGE_FILES) $(FILESLIST) $(FILESLIST_UTIL)
@echo Installed file list: $@
@mkdir -p $(dir $@)
@rm -f $@
- $(hide) $(FILESLIST) $(TARGET_OUT_PRODUCT_SERVICES) > $(@:.txt=.json)
+ $(hide) $(FILESLIST) $(TARGET_OUT_SYSTEM_EXT) > $(@:.txt=.json)
$(hide) $(FILESLIST_UTIL) -c $(@:.txt=.json) > $@
-product_servicesimage_intermediates := \
- $(call intermediates-dir-for,PACKAGING,product_services)
-BUILT_PRODUCT_SERVICESIMAGE_TARGET := $(PRODUCT_OUT)/product_services.img
-define build-product_servicesimage-target
- $(call pretty,"Target product_services fs image: $(INSTALLED_PRODUCT_SERVICESIMAGE_TARGET)")
- @mkdir -p $(TARGET_OUT_PRODUCT_SERVICES)
- @mkdir -p $(product_servicesimage_intermediates) && rm -rf $(product_servicesimage_intermediates)/product_services_image_info.txt
- $(call generate-image-prop-dictionary, $(product_servicesimage_intermediates)/product_services_image_info.txt,product_services, skip_fsck=true)
+system_extimage_intermediates := \
+ $(call intermediates-dir-for,PACKAGING,system_ext)
+BUILT_SYSTEM_EXTIMAGE_TARGET := $(PRODUCT_OUT)/system_ext.img
+define build-system_extimage-target
+ $(call pretty,"Target system_ext fs image: $(INSTALLED_SYSTEM_EXTIMAGE_TARGET)")
+ @mkdir -p $(TARGET_OUT_SYSTEM_EXT)
+ @mkdir -p $(system_extimage_intermediates) && rm -rf $(system_extimage_intermediates)/system_ext_image_info.txt
+ $(call generate-image-prop-dictionary, $(system_extimage_intermediates)/system_ext_image_info.txt,system_ext, skip_fsck=true)
PATH=$(INTERNAL_USERIMAGES_BINARY_PATHS):$$PATH \
- ./build/tools/releasetools/build_image.py \
- $(TARGET_OUT_PRODUCT_SERVICES) $(product_servicesimage_intermediates)/product_services_image_info.txt $(INSTALLED_PRODUCT_SERVICESIMAGE_TARGET) $(TARGET_OUT)
- $(hide) $(call assert-max-image-size,$(INSTALLED_PRODUCT_SERVICESIMAGE_TARGET),$(BOARD_PRODUCT_SERVICESIMAGE_PARTITION_SIZE))
+ $(BUILD_IMAGE) \
+ $(TARGET_OUT_SYSTEM_EXT) \
+ $(system_extimage_intermediates)/system_ext_image_info.txt \
+ $(INSTALLED_SYSTEM_EXTIMAGE_TARGET) \
+ $(TARGET_OUT)
+ $(call assert-max-image-size,$(INSTALLED_SYSTEM_EXTIMAGE_TARGET),$(BOARD_SYSTEM_EXTIMAGE_PARTITION_SIZE))
endef
# We just build this directly to the install location.
-INSTALLED_PRODUCT_SERVICESIMAGE_TARGET := $(BUILT_PRODUCT_SERVICESIMAGE_TARGET)
-$(INSTALLED_PRODUCT_SERVICESIMAGE_TARGET): $(INTERNAL_USERIMAGES_DEPS) $(INTERNAL_PRODUCT_SERVICESIMAGE_FILES) $(INSTALLED_FILES_FILE_PRODUCT_SERVICES) $(BUILD_IMAGE_SRCS)
- $(build-product_servicesimage-target)
+INSTALLED_SYSTEM_EXTIMAGE_TARGET := $(BUILT_SYSTEM_EXTIMAGE_TARGET)
+$(INSTALLED_SYSTEM_EXTIMAGE_TARGET): \
+ $(INTERNAL_USERIMAGES_DEPS) \
+ $(INTERNAL_SYSTEM_EXTIMAGE_FILES) \
+ $(INSTALLED_FILES_FILE_SYSTEM_EXT)
+ $(build-system_extimage-target)
-.PHONY: productservicesimage-nodeps psnod
-productservicesimage-nodeps psnod: | $(INTERNAL_USERIMAGES_DEPS)
- $(build-product_servicesimage-target)
+.PHONY: systemextimage-nodeps senod
+systemextimage-nodeps senod: | $(INTERNAL_USERIMAGES_DEPS)
+ $(build-system_extimage-target)
-sync: $(INTERNAL_PRODUCT_SERVICESIMAGE_FILES)
+sync: $(INTERNAL_SYSTEM_EXTIMAGE_FILES)
-else ifdef BOARD_PREBUILT_PRODUCT_SERVICESIMAGE
-INSTALLED_PRODUCT_SERVICESIMAGE_TARGET := $(PRODUCT_OUT)/product_services.img
-$(eval $(call copy-one-file,$(BOARD_PREBUILT_PRODUCT_SERVICESIMAGE),$(INSTALLED_PRODUCT_SERVICESIMAGE_TARGET)))
+else ifdef BOARD_PREBUILT_SYSTEM_EXTIMAGE
+INSTALLED_SYSTEM_EXTIMAGE_TARGET := $(PRODUCT_OUT)/system_ext.img
+$(eval $(call copy-one-file,$(BOARD_PREBUILT_SYSTEM_EXTIMAGE),$(INSTALLED_SYSTEM_EXTIMAGE_TARGET)))
endif
# -----------------------------------------------------------------
@@ -3083,14 +3039,18 @@
@mkdir -p $(odmimage_intermediates) && rm -rf $(odmimage_intermediates)/odm_image_info.txt
$(call generate-userimage-prop-dictionary, $(odmimage_intermediates)/odm_image_info.txt, skip_fsck=true)
PATH=$(INTERNAL_USERIMAGES_BINARY_PATHS):$$PATH \
- ./build/tools/releasetools/build_image.py \
- $(TARGET_OUT_ODM) $(odmimage_intermediates)/odm_image_info.txt $(INSTALLED_ODMIMAGE_TARGET) $(TARGET_OUT)
- $(hide) $(call assert-max-image-size,$(INSTALLED_ODMIMAGE_TARGET),$(BOARD_ODMIMAGE_PARTITION_SIZE))
+ $(BUILD_IMAGE) \
+ $(TARGET_OUT_ODM) $(odmimage_intermediates)/odm_image_info.txt \
+ $(INSTALLED_ODMIMAGE_TARGET) $(TARGET_OUT)
+ $(call assert-max-image-size,$(INSTALLED_ODMIMAGE_TARGET),$(BOARD_ODMIMAGE_PARTITION_SIZE))
endef
# We just build this directly to the install location.
INSTALLED_ODMIMAGE_TARGET := $(BUILT_ODMIMAGE_TARGET)
-$(INSTALLED_ODMIMAGE_TARGET): $(INTERNAL_USERIMAGES_DEPS) $(INTERNAL_ODMIMAGE_FILES) $(INSTALLED_FILES_FILE_ODM) $(BUILD_IMAGE_SRCS)
+$(INSTALLED_ODMIMAGE_TARGET): \
+ $(INTERNAL_USERIMAGES_DEPS) \
+ $(INTERNAL_ODMIMAGE_FILES) \
+ $(INSTALLED_FILES_FILE_ODM)
$(build-odmimage-target)
.PHONY: odmimage-nodeps onod
@@ -3127,7 +3087,7 @@
# Returns a list of image targets corresponding to the given list of partitions. For example, it
# returns "$(INSTALLED_PRODUCTIMAGE_TARGET)" for "product", or "$(INSTALLED_SYSTEMIMAGE_TARGET)
# $(INSTALLED_VENDORIMAGE_TARGET)" for "system vendor".
-# (1): list of partitions like "system", "vendor" or "system product product_services".
+# (1): list of partitions like "system", "vendor" or "system product system_ext".
define images-for-partitions
$(strip $(foreach item,$(1),$(INSTALLED_$(call to-upper,$(item))IMAGE_TARGET)))
endef
@@ -3189,9 +3149,9 @@
--prop com.android.build.product.os_version:$(PLATFORM_VERSION) \
--prop com.android.build.product.security_patch:$(PLATFORM_SECURITY_PATCH)
-BOARD_AVB_PRODUCT_SERVICES_ADD_HASHTREE_FOOTER_ARGS += \
- --prop com.android.build.product_services.os_version:$(PLATFORM_VERSION) \
- --prop com.android.build.product_services.security_patch:$(PLATFORM_SECURITY_PATCH)
+BOARD_AVB_SYSTEM_EXT_ADD_HASHTREE_FOOTER_ARGS += \
+ --prop com.android.build.system_ext.os_version:$(PLATFORM_VERSION) \
+ --prop com.android.build.system_ext.security_patch:$(PLATFORM_SECURITY_PATCH)
BOARD_AVB_BOOT_ADD_HASH_FOOTER_ARGS += \
--prop com.android.build.boot.os_version:$(PLATFORM_VERSION)
@@ -3224,7 +3184,7 @@
VENDOR_FOOTER_ARGS := BOARD_AVB_VENDOR_ADD_HASHTREE_FOOTER_ARGS
RECOVERY_FOOTER_ARGS := BOARD_AVB_RECOVERY_ADD_HASH_FOOTER_ARGS
PRODUCT_FOOTER_ARGS := BOARD_AVB_PRODUCT_ADD_HASHTREE_FOOTER_ARGS
-PRODUCT_SERVICES_FOOTER_ARGS := BOARD_AVB_PRODUCT_SERVICES_ADD_HASHTREE_FOOTER_ARGS
+SYSTEM_EXT_FOOTER_ARGS := BOARD_AVB_SYSTEM_EXT_ADD_HASHTREE_FOOTER_ARGS
ODM_FOOTER_ARGS := BOARD_AVB_ODM_ADD_HASHTREE_FOOTER_ARGS
# Helper function that checks and sets required build variables for an AVB chained partition.
@@ -3287,8 +3247,8 @@
$(eval $(call check-and-set-avb-args,product))
endif
-ifdef INSTALLED_PRODUCT_SERVICESIMAGE_TARGET
-$(eval $(call check-and-set-avb-args,product_services))
+ifdef INSTALLED_SYSTEM_EXTIMAGE_TARGET
+$(eval $(call check-and-set-avb-args,system_ext))
endif
ifdef INSTALLED_ODMIMAGE_TARGET
@@ -3361,9 +3321,9 @@
$(if $(BOARD_AVB_PRODUCT_KEY_PATH),\
$(hide) $(AVBTOOL) extract_public_key --key $(BOARD_AVB_PRODUCT_KEY_PATH) \
--output $(1)/product.avbpubkey)
- $(if $(BOARD_AVB_PRODUCT_SERVICES_KEY_PATH),\
- $(hide) $(AVBTOOL) extract_public_key --key $(BOARD_AVB_PRODUCT_SERVICES_KEY_PATH) \
- --output $(1)/product_services.avbpubkey)
+ $(if $(BOARD_AVB_SYSTEM_EXT_KEY_PATH),\
+ $(hide) $(AVBTOOL) extract_public_key --key $(BOARD_AVB_SYSTEM_EXT_KEY_PATH) \
+ --output $(1)/system_ext.avbpubkey)
$(if $(BOARD_AVB_ODM_KEY_PATH),\
$(hide) $(AVBTOOL) extract_public_key --key $(BOARD_AVB_ODM_KEY_PATH) \
--output $(1)/odm.avbpubkey)
@@ -3384,8 +3344,8 @@
# Builds a chained VBMeta image. This VBMeta image will contain the descriptors for the partitions
# specified in BOARD_AVB_VBMETA_<NAME>. The built VBMeta image will be included into the top-level
# vbmeta image as a chained partition. For example, if a target defines `BOARD_AVB_VBMETA_SYSTEM
-# := system product_services`, `vbmeta_system.img` will be created that includes the descriptors
-# for `system.img` and `product_services.img`. `vbmeta_system.img` itself will be included into
+# := system system_ext`, `vbmeta_system.img` will be created that includes the descriptors for
+# `system.img` and `system_ext.img`. `vbmeta_system.img` itself will be included into
# `vbmeta.img` as a chained partition.
# $(1): VBMeta image name, such as "vbmeta_system", "vbmeta_vendor" etc.
# $(2): Output filename.
@@ -3431,6 +3391,7 @@
$(hide) rm -rf $(AVB_CHAIN_KEY_DIR)
endef
+ifdef BUILDING_VBMETA_IMAGE
INSTALLED_VBMETAIMAGE_TARGET := $(BUILT_VBMETAIMAGE_TARGET)
$(INSTALLED_VBMETAIMAGE_TARGET): PRIVATE_AVB_VBMETA_SIGNING_ARGS := \
--algorithm $(BOARD_AVB_ALGORITHM) --key $(BOARD_AVB_KEY_PATH)
@@ -3441,7 +3402,7 @@
$(INSTALLED_SYSTEMIMAGE_TARGET) \
$(INSTALLED_VENDORIMAGE_TARGET) \
$(INSTALLED_PRODUCTIMAGE_TARGET) \
- $(INSTALLED_PRODUCT_SERVICESIMAGE_TARGET) \
+ $(INSTALLED_SYSTEM_EXTIMAGE_TARGET) \
$(INSTALLED_ODMIMAGE_TARGET) \
$(INSTALLED_DTBOIMAGE_TARGET) \
$(INSTALLED_RECOVERYIMAGE_TARGET) \
@@ -3455,6 +3416,7 @@
.PHONY: vbmetaimage-nodeps
vbmetaimage-nodeps:
$(build-vbmetaimage-target)
+endif # BUILDING_VBMETA_IMAGE
endif # BOARD_AVB_ENABLE
@@ -3466,11 +3428,10 @@
ifeq (true,$(PRODUCT_BUILD_SUPER_PARTITION))
-# (1): list of items like "system", "vendor", "product", "product_services"
+# (1): list of items like "system", "vendor", "product", "system_ext"
# return: map each item into a command ( wrapped in $$() ) that reads the size
define read-size-of-partitions
-$(foreach image,$(call images-for-partitions,$(1)),$$( \
- build/make/tools/releasetools/sparse_img.py --get_partition_size $(image)))
+$(foreach image,$(call images-for-partitions,$(1)),$$($(SPARSE_IMG) --get_partition_size $(image)))
endef
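A usage sketch (hypothetical rule, not part of this change); because the macro emits shell command substitutions, the sizes are read when the recipe runs rather than at parse time:

# Hypothetical example: print the sizes of system.img and vendor.img.
report-partition-sizes:
	echo "system and vendor image sizes:" $(call read-size-of-partitions,system vendor)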
# round result to BOARD_SUPER_PARTITION_ALIGNMENT
@@ -3507,7 +3468,7 @@
# Add image dependencies so that generated_*_image_info.txt are written before checking.
$(check_all_partition_sizes_file): \
- build/make/tools/releasetools/sparse_img.py \
+ $(SPARSE_IMG) \
$(call images-for-partitions,$(BOARD_SUPER_PARTITION_PARTITION_LIST))
ifeq ($(PRODUCT_RETROFIT_DYNAMIC_PARTITIONS),true)
@@ -3656,7 +3617,8 @@
ifeq ($(build_otatools_package),true)
INTERNAL_OTATOOLS_MODULES := \
- aapt \
+ aapt2 \
+ add_img_to_target_files \
append2simg \
avbtool \
blk_alloc_to_base_fs \
@@ -3664,9 +3626,14 @@
brillo_update_payload \
brotli \
bsdiff \
+ build_image \
+ build_super_image \
build_verity_metadata \
build_verity_tree \
care_map_generator \
+ check_ota_package_signature \
+ check_target_files_signatures \
+ check_target_files_vintf \
checkvintf \
delta_generator \
e2fsck \
@@ -3676,12 +3643,15 @@
fs_config \
generate_verity_key \
img2simg \
+ img_from_target_files \
imgdiff \
libconscrypt_openjdk_jni \
lpmake \
lpunpack \
make_f2fs \
+ merge_target_files \
minigzip \
+ mk_combined_img \
mkbootfs \
mkbootimg \
mke2fs \
@@ -3690,12 +3660,18 @@
mksquashfs \
mksquashfsimage.sh \
mkuserimg_mke2fs \
+ ota_from_target_files \
sefcontext_compile \
+ sgdisk \
shflags \
+ sign_apex \
+ sign_target_files_apks \
signapk \
simg2img \
sload_f2fs \
tune2fs \
+ update_host_simulator \
+ validate_target_files \
verity_signer \
verity_verifier \
zipalign \
@@ -3735,10 +3711,6 @@
$(sort $(shell find external/avb/test/data -type f -name "testkey_*.pem" -o \
-name "atx_metadata.bin"))
endif
-ifneq (,$(wildcard system/update_engine))
-INTERNAL_OTATOOLS_PACKAGE_FILES += \
- $(sort $(shell find system/update_engine/scripts -name "*.pyc" -prune -o -type f -print))
-endif
ifeq (true,$(PRODUCT_SUPPORTS_VBOOT))
INTERNAL_OTATOOLS_PACKAGE_FILES += \
$(sort $(shell find external/vboot_reference/tests/devkeys -type f))
@@ -3753,13 +3725,13 @@
$(BUILT_OTATOOLS_PACKAGE): PRIVATE_OTATOOLS_PACKAGE_FILES := $(INTERNAL_OTATOOLS_PACKAGE_FILES)
$(BUILT_OTATOOLS_PACKAGE): PRIVATE_OTATOOLS_RELEASETOOLS := $(INTERNAL_OTATOOLS_RELEASETOOLS)
$(BUILT_OTATOOLS_PACKAGE): $(INTERNAL_OTATOOLS_PACKAGE_FILES) $(INTERNAL_OTATOOLS_RELEASETOOLS)
-$(BUILT_OTATOOLS_PACKAGE): $(SOONG_ZIP)
+$(BUILT_OTATOOLS_PACKAGE): $(SOONG_ZIP) $(ZIP2ZIP)
@echo "Package OTA tools: $@"
rm -rf $@ $(PRIVATE_ZIP_ROOT)
mkdir -p $(dir $@)
$(call copy-files-with-structure,$(PRIVATE_OTATOOLS_PACKAGE_FILES),$(HOST_OUT)/,$(PRIVATE_ZIP_ROOT))
$(call copy-files-with-structure,$(PRIVATE_OTATOOLS_RELEASETOOLS),build/make/tools/,$(PRIVATE_ZIP_ROOT))
- cp $(SOONG_ZIP) $(PRIVATE_ZIP_ROOT)/bin/
+ cp $(SOONG_ZIP) $(ZIP2ZIP) $(PRIVATE_ZIP_ROOT)/bin/
$(SOONG_ZIP) -o $@ -C $(PRIVATE_ZIP_ROOT) -D $(PRIVATE_ZIP_ROOT)
.PHONY: otatools-package
@@ -3768,6 +3740,146 @@
endif # build_otatools_package
# -----------------------------------------------------------------
+# misc_info.txt
+
+INSTALLED_MISC_INFO_TARGET := $(PRODUCT_OUT)/misc_info.txt
+
+ifeq ($(TARGET_RELEASETOOLS_EXTENSIONS),)
+# default to common dir for device vendor
+tool_extensions := $(TARGET_DEVICE_DIR)/../common
+else
+tool_extensions := $(TARGET_RELEASETOOLS_EXTENSIONS)
+endif
+.KATI_READONLY := tool_extensions
+
+$(INSTALLED_MISC_INFO_TARGET):
+ rm -f $@
+ $(call pretty,"Target misc_info.txt: $@")
+ $(hide) echo "recovery_api_version=$(RECOVERY_API_VERSION)" >> $@
+ $(hide) echo "fstab_version=$(RECOVERY_FSTAB_VERSION)" >> $@
+ifdef BOARD_FLASH_BLOCK_SIZE
+ $(hide) echo "blocksize=$(BOARD_FLASH_BLOCK_SIZE)" >> $@
+endif
+ifdef BOARD_BOOTIMAGE_PARTITION_SIZE
+ $(hide) echo "boot_size=$(BOARD_BOOTIMAGE_PARTITION_SIZE)" >> $@
+endif
+ifeq ($(INSTALLED_BOOTIMAGE_TARGET),)
+ $(hide) echo "no_boot=true" >> $@
+endif
+ifeq ($(INSTALLED_RECOVERYIMAGE_TARGET),)
+ $(hide) echo "no_recovery=true" >> $@
+endif
+ifdef BOARD_INCLUDE_RECOVERY_DTBO
+ $(hide) echo "include_recovery_dtbo=true" >> $@
+endif
+ifdef BOARD_INCLUDE_RECOVERY_ACPIO
+ $(hide) echo "include_recovery_acpio=true" >> $@
+endif
+ifdef BOARD_RECOVERYIMAGE_PARTITION_SIZE
+ $(hide) echo "recovery_size=$(BOARD_RECOVERYIMAGE_PARTITION_SIZE)" >> $@
+endif
+ifdef TARGET_RECOVERY_FSTYPE_MOUNT_OPTIONS
+ @# TARGET_RECOVERY_FSTYPE_MOUNT_OPTIONS can be empty to indicate that nothing but defaults should be used.
+ $(hide) echo "recovery_mount_options=$(TARGET_RECOVERY_FSTYPE_MOUNT_OPTIONS)" >> $@
+else
+ $(hide) echo "recovery_mount_options=$(DEFAULT_TARGET_RECOVERY_FSTYPE_MOUNT_OPTIONS)" >> $@
+endif
+ $(hide) echo "tool_extensions=$(tool_extensions)" >> $@
+ $(hide) echo "default_system_dev_certificate=$(DEFAULT_SYSTEM_DEV_CERTIFICATE)" >> $@
+ifdef PRODUCT_EXTRA_RECOVERY_KEYS
+ $(hide) echo "extra_recovery_keys=$(PRODUCT_EXTRA_RECOVERY_KEYS)" >> $@
+endif
+ $(hide) echo 'mkbootimg_args=$(BOARD_MKBOOTIMG_ARGS)' >> $@
+ $(hide) echo 'mkbootimg_version_args=$(INTERNAL_MKBOOTIMG_VERSION_ARGS)' >> $@
+ $(hide) echo "multistage_support=1" >> $@
+ $(hide) echo "blockimgdiff_versions=3,4" >> $@
+ifeq ($(PRODUCT_BUILD_GENERIC_OTA_PACKAGE),true)
+ $(hide) echo "build_generic_ota_package=true" >> $@
+endif
+ifneq ($(OEM_THUMBPRINT_PROPERTIES),)
+ # OTA scripts are only interested in fingerprint related properties
+ $(hide) echo "oem_fingerprint_properties=$(OEM_THUMBPRINT_PROPERTIES)" >> $@
+endif
+ifneq (,$(filter address, $(SANITIZE_TARGET)))
+ # We need to create userdata.img with real data because the instrumented libraries are in userdata.img.
+ $(hide) echo "userdata_img_with_data=true" >> $@
+endif
+ifeq ($(BOARD_USES_FULL_RECOVERY_IMAGE),true)
+ $(hide) echo "full_recovery_image=true" >> $@
+endif
+ifeq ($(BOARD_AVB_ENABLE),true)
+ $(hide) echo "avb_enable=true" >> $@
+ $(hide) echo "avb_vbmeta_key_path=$(BOARD_AVB_KEY_PATH)" >> $@
+ $(hide) echo "avb_vbmeta_algorithm=$(BOARD_AVB_ALGORITHM)" >> $@
+ $(hide) echo "avb_vbmeta_args=$(BOARD_AVB_MAKE_VBMETA_IMAGE_ARGS)" >> $@
+ $(hide) echo "avb_boot_add_hash_footer_args=$(BOARD_AVB_BOOT_ADD_HASH_FOOTER_ARGS)" >> $@
+ifdef BOARD_AVB_BOOT_KEY_PATH
+ $(hide) echo "avb_boot_key_path=$(BOARD_AVB_BOOT_KEY_PATH)" >> $@
+ $(hide) echo "avb_boot_algorithm=$(BOARD_AVB_BOOT_ALGORITHM)" >> $@
+ $(hide) echo "avb_boot_rollback_index_location=$(BOARD_AVB_BOOT_ROLLBACK_INDEX_LOCATION)" >> $@
+endif # BOARD_AVB_BOOT_KEY_PATH
+ $(hide) echo "avb_recovery_add_hash_footer_args=$(BOARD_AVB_RECOVERY_ADD_HASH_FOOTER_ARGS)" >> $@
+ifdef BOARD_AVB_RECOVERY_KEY_PATH
+ $(hide) echo "avb_recovery_key_path=$(BOARD_AVB_RECOVERY_KEY_PATH)" >> $@
+ $(hide) echo "avb_recovery_algorithm=$(BOARD_AVB_RECOVERY_ALGORITHM)" >> $@
+ $(hide) echo "avb_recovery_rollback_index_location=$(BOARD_AVB_RECOVERY_ROLLBACK_INDEX_LOCATION)" >> $@
+endif # BOARD_AVB_RECOVERY_KEY_PATH
+ifneq (,$(strip $(BOARD_AVB_VBMETA_SYSTEM)))
+ $(hide) echo "avb_vbmeta_system=$(BOARD_AVB_VBMETA_SYSTEM)" >> $@
+ $(hide) echo "avb_vbmeta_system_args=$(BOARD_AVB_MAKE_VBMETA_SYSTEM_IMAGE_ARGS)" >> $@
+ $(hide) echo "avb_vbmeta_system_key_path=$(BOARD_AVB_VBMETA_SYSTEM_KEY_PATH)" >> $@
+ $(hide) echo "avb_vbmeta_system_algorithm=$(BOARD_AVB_VBMETA_SYSTEM_ALGORITHM)" >> $@
+ $(hide) echo "avb_vbmeta_system_rollback_index_location=$(BOARD_AVB_VBMETA_SYSTEM_ROLLBACK_INDEX_LOCATION)" >> $@
+endif # BOARD_AVB_VBMETA_SYSTEM
+ifneq (,$(strip $(BOARD_AVB_VBMETA_VENDOR)))
+ $(hide) echo "avb_vbmeta_vendor=$(BOARD_AVB_VBMETA_VENDOR)" >> $@
+ $(hide) echo "avb_vbmeta_vendor_args=$(BOARD_AVB_MAKE_VBMETA_SYSTEM_IMAGE_ARGS)" >> $@
+ $(hide) echo "avb_vbmeta_vendor_key_path=$(BOARD_AVB_VBMETA_VENDOR_KEY_PATH)" >> $@
+ $(hide) echo "avb_vbmeta_vendor_algorithm=$(BOARD_AVB_VBMETA_VENDOR_ALGORITHM)" >> $@
+ $(hide) echo "avb_vbmeta_vendor_rollback_index_location=$(BOARD_AVB_VBMETA_VENDOR_ROLLBACK_INDEX_LOCATION)" >> $@
+endif # BOARD_AVB_VBMETA_VENDOR_KEY_PATH
+endif # BOARD_AVB_ENABLE
+ifdef BOARD_BPT_INPUT_FILES
+ $(hide) echo "board_bpt_enable=true" >> $@
+ $(hide) echo "board_bpt_make_table_args=$(BOARD_BPT_MAKE_TABLE_ARGS)" >> $@
+ $(hide) echo "board_bpt_input_files=$(BOARD_BPT_INPUT_FILES)" >> $@
+endif
+ifdef BOARD_BPT_DISK_SIZE
+ $(hide) echo "board_bpt_disk_size=$(BOARD_BPT_DISK_SIZE)" >> $@
+endif
+ $(call generate-userimage-prop-dictionary, $@)
+ifeq ($(AB_OTA_UPDATER),true)
+ @# Include the build type in META/misc_info.txt so the server can easily differentiate production builds.
+ $(hide) echo "build_type=$(TARGET_BUILD_VARIANT)" >> $@
+ $(hide) echo "ab_update=true" >> $@
+endif
+ifdef BOARD_PREBUILT_DTBOIMAGE
+ $(hide) echo "has_dtbo=true" >> $@
+ifeq ($(BOARD_AVB_ENABLE),true)
+ $(hide) echo "dtbo_size=$(BOARD_DTBOIMG_PARTITION_SIZE)" >> $@
+ $(hide) echo "avb_dtbo_add_hash_footer_args=$(BOARD_AVB_DTBO_ADD_HASH_FOOTER_ARGS)" >> $@
+ifdef BOARD_AVB_DTBO_KEY_PATH
+ $(hide) echo "avb_dtbo_key_path=$(BOARD_AVB_DTBO_KEY_PATH)" >> $@
+ $(hide) echo "avb_dtbo_algorithm=$(BOARD_AVB_DTBO_ALGORITHM)" >> $@
+ $(hide) echo "avb_dtbo_rollback_index_location=$(BOARD_AVB_DTBO_ROLLBACK_INDEX_LOCATION)" >> $@
+endif # BOARD_AVB_DTBO_KEY_PATH
+endif # BOARD_AVB_ENABLE
+endif # BOARD_PREBUILT_DTBOIMAGE
+ $(call dump-dynamic-partitions-info,$@)
+ @# VINTF checks
+ifeq ($(PRODUCT_ENFORCE_VINTF_MANIFEST),true)
+ $(hide) echo "vintf_enforce=true" >> $@
+endif
+ifdef ODM_MANIFEST_SKUS
+ $(hide) echo "vintf_odm_manifest_skus=$(ODM_MANIFEST_SKUS)" >> $@
+endif
+
+.PHONY: misc_info
+misc_info: $(INSTALLED_MISC_INFO_TARGET)
+
+droidcore: $(INSTALLED_MISC_INFO_TARGET)
+
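An illustrative excerpt of the generated $(PRODUCT_OUT)/misc_info.txt (keys follow the recipe above; the values shown are typical examples, not output of this change):

    recovery_api_version=3
    fstab_version=2
    blocksize=131072
    tool_extensions=device/<vendor>/common
    default_system_dev_certificate=build/make/target/product/security/testkey
    multistage_support=1
    blockimgdiff_versions=3,4
    ab_update=true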
+# -----------------------------------------------------------------
# A zip of the directories that map to the target filesystem.
# This zip can be used to create an OTA package or filesystem image
# as a post-build step.
@@ -3804,17 +3916,7 @@
$(BUILT_TARGET_FILES_PACKAGE): PRIVATE_OTA_TOOLS := $(built_ota_tools)
-$(BUILT_TARGET_FILES_PACKAGE): PRIVATE_RECOVERY_API_VERSION := $(RECOVERY_API_VERSION)
-$(BUILT_TARGET_FILES_PACKAGE): PRIVATE_RECOVERY_FSTAB_VERSION := $(RECOVERY_FSTAB_VERSION)
-
-ifeq ($(TARGET_RELEASETOOLS_EXTENSIONS),)
-# default to common dir for device vendor
-tool_extensions := $(TARGET_DEVICE_DIR)/../common
-else
-tool_extensions := $(TARGET_RELEASETOOLS_EXTENSIONS)
-endif
tool_extension := $(wildcard $(tool_extensions)/releasetools.py)
-$(BUILT_TARGET_FILES_PACKAGE): PRIVATE_TOOL_EXTENSIONS := $(tool_extensions)
$(BUILT_TARGET_FILES_PACKAGE): PRIVATE_TOOL_EXTENSION := $(tool_extension)
ifeq ($(AB_OTA_UPDATER),true)
@@ -3884,6 +3986,11 @@
$(BUILT_TARGET_FILES_PACKAGE): $(FULL_SYSTEMIMAGE_DEPS)
endif
+ifeq ($(BUILD_QEMU_IMAGES),true)
+MK_VBMETA_BOOT_KERNEL_CMDLINE_SH := device/generic/goldfish/tools/mk_vbmeta_boot_params.sh
+$(BUILT_TARGET_FILES_PACKAGE): $(MK_VBMETA_BOOT_KERNEL_CMDLINE_SH)
+endif
+
# Depending on the various images guarantees that the underlying
# directories are up-to-date.
$(BUILT_TARGET_FILES_PACKAGE): \
@@ -3895,7 +4002,7 @@
$(INSTALLED_CACHEIMAGE_TARGET) \
$(INSTALLED_VENDORIMAGE_TARGET) \
$(INSTALLED_PRODUCTIMAGE_TARGET) \
- $(INSTALLED_PRODUCT_SERVICESIMAGE_TARGET) \
+ $(INSTALLED_SYSTEM_EXTIMAGE_TARGET) \
$(INSTALLED_VBMETAIMAGE_TARGET) \
$(INSTALLED_ODMIMAGE_TARGET) \
$(INSTALLED_DTBOIMAGE_TARGET) \
@@ -3910,27 +4017,28 @@
$(PRODUCT_SYSTEM_BASE_FS_PATH) \
$(PRODUCT_VENDOR_BASE_FS_PATH) \
$(PRODUCT_PRODUCT_BASE_FS_PATH) \
- $(PRODUCT_PRODUCT_SERVICES_BASE_FS_PATH) \
+ $(PRODUCT_SYSTEM_EXT_BASE_FS_PATH) \
$(PRODUCT_ODM_BASE_FS_PATH) \
$(LPMAKE) \
$(SELINUX_FC) \
+ $(INSTALLED_MISC_INFO_TARGET) \
$(APKCERTS_FILE) \
$(SOONG_APEX_KEYS_FILE) \
$(SOONG_ZIP) \
$(HOST_OUT_EXECUTABLES)/fs_config \
- $(HOST_OUT_EXECUTABLES)/imgdiff \
- $(HOST_OUT_EXECUTABLES)/bsdiff \
- $(HOST_OUT_EXECUTABLES)/care_map_generator \
- $(BUILD_IMAGE_SRCS) \
+ $(ADD_IMG_TO_TARGET_FILES) \
+ $(MAKE_RECOVERY_PATCH) \
$(BUILT_ASSEMBLED_FRAMEWORK_MANIFEST) \
$(BUILT_ASSEMBLED_VENDOR_MANIFEST) \
$(BUILT_SYSTEM_MATRIX) \
$(BUILT_VENDOR_MATRIX) \
+ $(BUILT_KERNEL_CONFIGS_FILE) \
+ $(BUILT_KERNEL_VERSION_FILE) \
| $(ACP)
@echo "Package target files: $@"
$(call create-system-vendor-symlink)
$(call create-system-product-symlink)
- $(call create-system-product_services-symlink)
+ $(call create-system-system_ext-symlink)
$(call create-vendor-odm-symlink)
$(hide) rm -rf $@ $@.list $(zip_root)
$(hide) mkdir -p $(dir $@) $(zip_root)
@@ -4021,10 +4129,10 @@
$(hide) $(call package_files-copy-root, \
$(TARGET_OUT_PRODUCT),$(zip_root)/PRODUCT)
endif
-ifdef BUILDING_PRODUCT_SERVICES_IMAGE
- @# Contents of the product_services image
+ifdef BUILDING_SYSTEM_EXT_IMAGE
+ @# Contents of the system_ext image
$(hide) $(call package_files-copy-root, \
- $(TARGET_OUT_PRODUCT_SERVICES),$(zip_root)/PRODUCT_SERVICES)
+ $(TARGET_OUT_SYSTEM_EXT),$(zip_root)/SYSTEM_EXT)
endif
ifdef BUILDING_ODM_IMAGE
@# Contents of the odm image
@@ -4055,51 +4163,7 @@
endif
$(hide) echo "$(PRODUCT_OTA_PUBLIC_KEYS)" > $(zip_root)/META/otakeys.txt
$(hide) cp $(SELINUX_FC) $(zip_root)/META/file_contexts.bin
- $(hide) echo "recovery_api_version=$(PRIVATE_RECOVERY_API_VERSION)" > $(zip_root)/META/misc_info.txt
- $(hide) echo "fstab_version=$(PRIVATE_RECOVERY_FSTAB_VERSION)" >> $(zip_root)/META/misc_info.txt
-ifdef BOARD_FLASH_BLOCK_SIZE
- $(hide) echo "blocksize=$(BOARD_FLASH_BLOCK_SIZE)" >> $(zip_root)/META/misc_info.txt
-endif
-ifdef BOARD_BOOTIMAGE_PARTITION_SIZE
- $(hide) echo "boot_size=$(BOARD_BOOTIMAGE_PARTITION_SIZE)" >> $(zip_root)/META/misc_info.txt
-endif
-ifeq ($(INSTALLED_BOOTIMAGE_TARGET),)
- $(hide) echo "no_boot=true" >> $(zip_root)/META/misc_info.txt
-endif
-ifeq ($(INSTALLED_RECOVERYIMAGE_TARGET),)
- $(hide) echo "no_recovery=true" >> $(zip_root)/META/misc_info.txt
-endif
-ifdef BOARD_INCLUDE_RECOVERY_DTBO
- $(hide) echo "include_recovery_dtbo=true" >> $(zip_root)/META/misc_info.txt
-endif
-ifdef BOARD_INCLUDE_RECOVERY_ACPIO
- $(hide) echo "include_recovery_acpio=true" >> $(zip_root)/META/misc_info.txt
-endif
-ifdef BOARD_RECOVERYIMAGE_PARTITION_SIZE
- $(hide) echo "recovery_size=$(BOARD_RECOVERYIMAGE_PARTITION_SIZE)" >> $(zip_root)/META/misc_info.txt
-endif
-ifdef TARGET_RECOVERY_FSTYPE_MOUNT_OPTIONS
- @# TARGET_RECOVERY_FSTYPE_MOUNT_OPTIONS can be empty to indicate that nothing but defaults should be used.
- $(hide) echo "recovery_mount_options=$(TARGET_RECOVERY_FSTYPE_MOUNT_OPTIONS)" >> $(zip_root)/META/misc_info.txt
-else
- $(hide) echo "recovery_mount_options=$(DEFAULT_TARGET_RECOVERY_FSTYPE_MOUNT_OPTIONS)" >> $(zip_root)/META/misc_info.txt
-endif
- $(hide) echo "tool_extensions=$(PRIVATE_TOOL_EXTENSIONS)" >> $(zip_root)/META/misc_info.txt
- $(hide) echo "default_system_dev_certificate=$(DEFAULT_SYSTEM_DEV_CERTIFICATE)" >> $(zip_root)/META/misc_info.txt
-ifdef PRODUCT_EXTRA_RECOVERY_KEYS
- $(hide) echo "extra_recovery_keys=$(PRODUCT_EXTRA_RECOVERY_KEYS)" >> $(zip_root)/META/misc_info.txt
-endif
- $(hide) echo 'mkbootimg_args=$(BOARD_MKBOOTIMG_ARGS)' >> $(zip_root)/META/misc_info.txt
- $(hide) echo 'mkbootimg_version_args=$(INTERNAL_MKBOOTIMG_VERSION_ARGS)' >> $(zip_root)/META/misc_info.txt
- $(hide) echo "multistage_support=1" >> $(zip_root)/META/misc_info.txt
- $(hide) echo "blockimgdiff_versions=3,4" >> $(zip_root)/META/misc_info.txt
-ifeq ($(PRODUCT_BUILD_GENERIC_OTA_PACKAGE),true)
- $(hide) echo "build_generic_ota_package=true" >> $(zip_root)/META/misc_info.txt
-endif
-ifneq ($(OEM_THUMBPRINT_PROPERTIES),)
- # OTA scripts are only interested in fingerprint related properties
- $(hide) echo "oem_fingerprint_properties=$(OEM_THUMBPRINT_PROPERTIES)" >> $(zip_root)/META/misc_info.txt
-endif
+ $(hide) cp $(INSTALLED_MISC_INFO_TARGET) $(zip_root)/META/misc_info.txt

ifneq ($(PRODUCT_SYSTEM_BASE_FS_PATH),)
$(hide) cp $(PRODUCT_SYSTEM_BASE_FS_PATH) \
$(zip_root)/META/$(notdir $(PRODUCT_SYSTEM_BASE_FS_PATH))
@@ -4112,66 +4176,18 @@
$(hide) cp $(PRODUCT_PRODUCT_BASE_FS_PATH) \
$(zip_root)/META/$(notdir $(PRODUCT_PRODUCT_BASE_FS_PATH))
endif
-ifneq ($(PRODUCT_PRODUCT_SERVICES_BASE_FS_PATH),)
- $(hide) cp $(PRODUCT_PRODUCT_SERVICES_BASE_FS_PATH) \
- $(zip_root)/META/$(notdir $(PRODUCT_PRODUCT_SERVICES_BASE_FS_PATH))
+ifneq ($(PRODUCT_SYSTEM_EXT_BASE_FS_PATH),)
+ $(hide) cp $(PRODUCT_SYSTEM_EXT_BASE_FS_PATH) \
+ $(zip_root)/META/$(notdir $(PRODUCT_SYSTEM_EXT_BASE_FS_PATH))
endif
ifneq ($(PRODUCT_ODM_BASE_FS_PATH),)
$(hide) cp $(PRODUCT_ODM_BASE_FS_PATH) \
$(zip_root)/META/$(notdir $(PRODUCT_ODM_BASE_FS_PATH))
endif
-ifneq (,$(filter address, $(SANITIZE_TARGET)))
- # We need to create userdata.img with real data because the instrumented libraries are in userdata.img.
- $(hide) echo "userdata_img_with_data=true" >> $(zip_root)/META/misc_info.txt
-endif
-ifeq ($(BOARD_USES_FULL_RECOVERY_IMAGE),true)
- $(hide) echo "full_recovery_image=true" >> $(zip_root)/META/misc_info.txt
-endif
-ifeq ($(BOARD_AVB_ENABLE),true)
- $(hide) echo "avb_enable=true" >> $(zip_root)/META/misc_info.txt
- $(hide) echo "avb_vbmeta_key_path=$(BOARD_AVB_KEY_PATH)" >> $(zip_root)/META/misc_info.txt
- $(hide) echo "avb_vbmeta_algorithm=$(BOARD_AVB_ALGORITHM)" >> $(zip_root)/META/misc_info.txt
- $(hide) echo "avb_vbmeta_args=$(BOARD_AVB_MAKE_VBMETA_IMAGE_ARGS)" >> $(zip_root)/META/misc_info.txt
- $(hide) echo "avb_boot_add_hash_footer_args=$(BOARD_AVB_BOOT_ADD_HASH_FOOTER_ARGS)" >> $(zip_root)/META/misc_info.txt
-ifdef BOARD_AVB_BOOT_KEY_PATH
- $(hide) echo "avb_boot_key_path=$(BOARD_AVB_BOOT_KEY_PATH)" >> $(zip_root)/META/misc_info.txt
- $(hide) echo "avb_boot_algorithm=$(BOARD_AVB_BOOT_ALGORITHM)" >> $(zip_root)/META/misc_info.txt
- $(hide) echo "avb_boot_rollback_index_location=$(BOARD_AVB_BOOT_ROLLBACK_INDEX_LOCATION)" >> $(zip_root)/META/misc_info.txt
-endif # BOARD_AVB_BOOT_KEY_PATH
- $(hide) echo "avb_recovery_add_hash_footer_args=$(BOARD_AVB_RECOVERY_ADD_HASH_FOOTER_ARGS)" >> $(zip_root)/META/misc_info.txt
-ifdef BOARD_AVB_RECOVERY_KEY_PATH
- $(hide) echo "avb_recovery_key_path=$(BOARD_AVB_RECOVERY_KEY_PATH)" >> $(zip_root)/META/misc_info.txt
- $(hide) echo "avb_recovery_algorithm=$(BOARD_AVB_RECOVERY_ALGORITHM)" >> $(zip_root)/META/misc_info.txt
- $(hide) echo "avb_recovery_rollback_index_location=$(BOARD_AVB_RECOVERY_ROLLBACK_INDEX_LOCATION)" >> $(zip_root)/META/misc_info.txt
-endif # BOARD_AVB_RECOVERY_KEY_PATH
-ifneq (,$(strip $(BOARD_AVB_VBMETA_SYSTEM)))
- $(hide) echo "avb_vbmeta_system=$(BOARD_AVB_VBMETA_SYSTEM)" >> $(zip_root)/META/misc_info.txt
- $(hide) echo "avb_vbmeta_system_args=$(BOARD_AVB_MAKE_VBMETA_SYSTEM_IMAGE_ARGS)" >> $(zip_root)/META/misc_info.txt
- $(hide) echo "avb_vbmeta_system_key_path=$(BOARD_AVB_VBMETA_SYSTEM_KEY_PATH)" >> $(zip_root)/META/misc_info.txt
- $(hide) echo "avb_vbmeta_system_algorithm=$(BOARD_AVB_VBMETA_SYSTEM_ALGORITHM)" >> $(zip_root)/META/misc_info.txt
- $(hide) echo "avb_vbmeta_system_rollback_index_location=$(BOARD_AVB_VBMETA_SYSTEM_ROLLBACK_INDEX_LOCATION)" >> $(zip_root)/META/misc_info.txt
-endif # BOARD_AVB_VBMETA_SYSTEM
-ifneq (,$(strip $(BOARD_AVB_VBMETA_VENDOR)))
- $(hide) echo "avb_vbmeta_vendor=$(BOARD_AVB_VBMETA_VENDOR)" >> $(zip_root)/META/misc_info.txt
- $(hide) echo "avb_vbmeta_vendor_args=$(BOARD_AVB_MAKE_VBMETA_SYSTEM_IMAGE_ARGS)" >> $(zip_root)/META/misc_info.txt
- $(hide) echo "avb_vbmeta_vendor_key_path=$(BOARD_AVB_VBMETA_VENDOR_KEY_PATH)" >> $(zip_root)/META/misc_info.txt
- $(hide) echo "avb_vbmeta_vendor_algorithm=$(BOARD_AVB_VBMETA_VENDOR_ALGORITHM)" >> $(zip_root)/META/misc_info.txt
- $(hide) echo "avb_vbmeta_vendor_rollback_index_location=$(BOARD_AVB_VBMETA_VENDOR_ROLLBACK_INDEX_LOCATION)" >> $(zip_root)/META/misc_info.txt
-endif # BOARD_AVB_VBMETA_VENDOR_KEY_PATH
-endif # BOARD_AVB_ENABLE
-ifdef BOARD_BPT_INPUT_FILES
- $(hide) echo "board_bpt_enable=true" >> $(zip_root)/META/misc_info.txt
- $(hide) echo "board_bpt_make_table_args=$(BOARD_BPT_MAKE_TABLE_ARGS)" >> $(zip_root)/META/misc_info.txt
- $(hide) echo "board_bpt_input_files=$(BOARD_BPT_INPUT_FILES)" >> $(zip_root)/META/misc_info.txt
-endif
-ifdef BOARD_BPT_DISK_SIZE
- $(hide) echo "board_bpt_disk_size=$(BOARD_BPT_DISK_SIZE)" >> $(zip_root)/META/misc_info.txt
-endif
- $(call generate-userimage-prop-dictionary, $(zip_root)/META/misc_info.txt)
ifneq ($(INSTALLED_RECOVERYIMAGE_TARGET),)
ifdef BUILDING_SYSTEM_IMAGE
$(hide) PATH=$(INTERNAL_USERIMAGES_BINARY_PATHS):$$PATH MKBOOTIMG=$(MKBOOTIMG) \
- build/make/tools/releasetools/make_recovery_patch $(zip_root) $(zip_root)
+ $(MAKE_RECOVERY_PATCH) $(zip_root) $(zip_root)
endif # BUILDING_SYSTEM_IMAGE
endif
ifeq ($(AB_OTA_UPDATER),true)
@@ -4183,9 +4199,6 @@
$(hide) for conf in $(AB_OTA_POSTINSTALL_CONFIG); do \
echo "$${conf}" >> $(zip_root)/META/postinstall_config.txt; \
done
- @# Include the build type in META/misc_info.txt so the server can easily differentiate production builds.
- $(hide) echo "build_type=$(TARGET_BUILD_VARIANT)" >> $(zip_root)/META/misc_info.txt
- $(hide) echo "ab_update=true" >> $(zip_root)/META/misc_info.txt
ifdef OSRELEASED_DIRECTORY
$(hide) cp $(TARGET_OUT_OEM)/$(OSRELEASED_DIRECTORY)/product_id $(zip_root)/META/product_id.txt
$(hide) cp $(TARGET_OUT_OEM)/$(OSRELEASED_DIRECTORY)/product_version $(zip_root)/META/product_version.txt
@@ -4204,9 +4217,9 @@
$(hide) mkdir -p $(zip_root)/IMAGES
$(hide) cp $(INSTALLED_PRODUCTIMAGE_TARGET) $(zip_root)/IMAGES/
endif
-ifdef BOARD_PREBUILT_PRODUCT_SERVICESIMAGE
+ifdef BOARD_PREBUILT_SYSTEM_EXTIMAGE
$(hide) mkdir -p $(zip_root)/IMAGES
- $(hide) cp $(INSTALLED_PRODUCT_SERVICESIMAGE_TARGET) $(zip_root)/IMAGES/
+ $(hide) cp $(INSTALLED_SYSTEM_EXTIMAGE_TARGET) $(zip_root)/IMAGES/
endif
ifdef BOARD_PREBUILT_BOOTIMAGE
$(hide) mkdir -p $(zip_root)/IMAGES
@@ -4219,19 +4232,7 @@
ifdef BOARD_PREBUILT_DTBOIMAGE
$(hide) mkdir -p $(zip_root)/PREBUILT_IMAGES
$(hide) cp $(INSTALLED_DTBOIMAGE_TARGET) $(zip_root)/PREBUILT_IMAGES/
- $(hide) echo "has_dtbo=true" >> $(zip_root)/META/misc_info.txt
-ifeq ($(BOARD_AVB_ENABLE),true)
- $(hide) echo "dtbo_size=$(BOARD_DTBOIMG_PARTITION_SIZE)" >> $(zip_root)/META/misc_info.txt
- $(hide) echo "avb_dtbo_add_hash_footer_args=$(BOARD_AVB_DTBO_ADD_HASH_FOOTER_ARGS)" >> $(zip_root)/META/misc_info.txt
-ifdef BOARD_AVB_DTBO_KEY_PATH
- $(hide) echo "avb_dtbo_key_path=$(BOARD_AVB_DTBO_KEY_PATH)" >> $(zip_root)/META/misc_info.txt
- $(hide) echo "avb_dtbo_algorithm=$(BOARD_AVB_DTBO_ALGORITHM)" >> $(zip_root)/META/misc_info.txt
- $(hide) echo "avb_dtbo_rollback_index_location=$(BOARD_AVB_DTBO_ROLLBACK_INDEX_LOCATION)" \
- >> $(zip_root)/META/misc_info.txt
-endif # BOARD_AVB_DTBO_KEY_PATH
-endif # BOARD_AVB_ENABLE
endif # BOARD_PREBUILT_DTBOIMAGE
- $(call dump-dynamic-partitions-info,$(zip_root)/META/misc_info.txt)
@# The radio images in BOARD_PACK_RADIOIMAGES will be additionally copied from RADIO/ into
@# IMAGES/, which then will be added into <product>-img.zip. Such images must be listed in
@# INSTALLED_RADIOIMAGE_TARGET.
@@ -4248,8 +4249,8 @@
ifdef BUILDING_PRODUCT_IMAGE
$(hide) $(call fs_config,$(zip_root)/PRODUCT,product/) > $(zip_root)/META/product_filesystem_config.txt
endif
-ifdef BUILDING_PRODUCT_SERVICES_IMAGE
- $(hide) $(call fs_config,$(zip_root)/PRODUCT_SERVICES,product_services/) > $(zip_root)/META/product_services_filesystem_config.txt
+ifdef BUILDING_SYSTEM_EXT_IMAGE
+ $(hide) $(call fs_config,$(zip_root)/SYSTEM_EXT,system_ext/) > $(zip_root)/META/system_ext_filesystem_config.txt
endif
ifdef BUILDING_ODM_IMAGE
$(hide) $(call fs_config,$(zip_root)/ODM,odm/) > $(zip_root)/META/odm_filesystem_config.txt
@@ -4281,6 +4282,12 @@
ifdef BUILT_VENDOR_MATRIX
$(hide) cp $(BUILT_VENDOR_MATRIX) $(zip_root)/META/vendor_matrix.xml
endif
+ifdef BUILT_KERNEL_CONFIGS_FILE
+ $(hide) cp $(BUILT_KERNEL_CONFIGS_FILE) $(zip_root)/META/kernel_configs.txt
+endif
+ifdef BUILT_KERNEL_VERSION_FILE
+ $(hide) cp $(BUILT_KERNEL_VERSION_FILE) $(zip_root)/META/kernel_version.txt
+endif
ifneq ($(BOARD_SUPER_PARTITION_GROUPS),)
$(hide) echo "super_partition_groups=$(BOARD_SUPER_PARTITION_GROUPS)" > $(zip_root)/META/dynamic_partitions_info.txt
@# Remove 'vendor' from the group partition list if the image is not available. This should only
@@ -4295,7 +4302,11 @@
endif # BOARD_SUPER_PARTITION_GROUPS
@# TODO(b/134525174): Remove `-r` after addressing the issue with recovery patch generation.
$(hide) PATH=$(INTERNAL_USERIMAGES_BINARY_PATHS):$$PATH MKBOOTIMG=$(MKBOOTIMG) \
- build/make/tools/releasetools/add_img_to_target_files -a -r -v -p $(HOST_OUT) $(zip_root)
+ $(ADD_IMG_TO_TARGET_FILES) -a -r -v -p $(HOST_OUT) $(zip_root)
+ifeq ($(BUILD_QEMU_IMAGES),true)
+ $(hide) AVBTOOL=$(AVBTOOL) $(MK_VBMETA_BOOT_KERNEL_CMDLINE_SH) $(zip_root)/IMAGES/vbmeta.img \
+ $(zip_root)/IMAGES/system.img $(zip_root)/IMAGES/VerifiedBootParams.textproto
+endif
@# Zip everything up, preserving symlinks and placing META/ files first to
@# help early validation of the .zip file while uploading it.
$(hide) find $(zip_root)/META | sort >$@.list
@@ -4326,13 +4337,13 @@
# $(2): additional args
define build-ota-package-target
PATH=$(INTERNAL_USERIMAGES_BINARY_PATHS):$$PATH \
- build/make/tools/releasetools/ota_from_target_files \
- --verbose \
- --extracted_input_target_files $(patsubst %.zip,%,$(BUILT_TARGET_FILES_PACKAGE)) \
- --path $(HOST_OUT) \
- $(if $(OEM_OTA_CONFIG), --oem_settings $(OEM_OTA_CONFIG)) \
- $(2) \
- $(BUILT_TARGET_FILES_PACKAGE) $(1)
+ $(OTA_FROM_TARGET_FILES) \
+ --verbose \
+ --extracted_input_target_files $(patsubst %.zip,%,$(BUILT_TARGET_FILES_PACKAGE)) \
+ --path $(HOST_OUT) \
+ $(if $(OEM_OTA_CONFIG), --oem_settings $(OEM_OTA_CONFIG)) \
+ $(2) \
+ $(BUILT_TARGET_FILES_PACKAGE) $(1)
endef
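A sketch of the command line this macro produces for the full-OTA rule below, with placeholders standing in for abbreviated paths (not a literal invocation):

    PATH=<image tool dirs>:$PATH ota_from_target_files --verbose \
        --extracted_input_target_files <extracted target-files dir> \
        --path $(HOST_OUT) -k $(DEFAULT_KEY_CERT_PAIR) \
        --output_metadata_path $(PRODUCT_OUT)/ota_metadata \
        <target-files>.zip $(PRODUCT_OUT)/$(TARGET_PRODUCT)-ota-$(FILE_NAME_TAG).zip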
name := $(TARGET_PRODUCT)
@@ -4342,21 +4353,11 @@
name := $(name)-ota-$(FILE_NAME_TAG)
INTERNAL_OTA_PACKAGE_TARGET := $(PRODUCT_OUT)/$(name).zip
-
INTERNAL_OTA_METADATA := $(PRODUCT_OUT)/ota_metadata
$(INTERNAL_OTA_PACKAGE_TARGET): KEY_CERT_PAIR := $(DEFAULT_KEY_CERT_PAIR)
-
-ifeq ($(AB_OTA_UPDATER),true)
-$(INTERNAL_OTA_PACKAGE_TARGET): $(BRILLO_UPDATE_PAYLOAD)
-else
-$(INTERNAL_OTA_PACKAGE_TARGET): $(BROTLI)
-endif
-
$(INTERNAL_OTA_PACKAGE_TARGET): .KATI_IMPLICIT_OUTPUTS := $(INTERNAL_OTA_METADATA)
-
-$(INTERNAL_OTA_PACKAGE_TARGET): $(BUILT_TARGET_FILES_PACKAGE) \
- build/make/tools/releasetools/ota_from_target_files
+$(INTERNAL_OTA_PACKAGE_TARGET): $(BUILT_TARGET_FILES_PACKAGE) $(OTA_FROM_TARGET_FILES)
@echo "Package OTA: $@"
$(call build-ota-package-target,$@,-k $(KEY_CERT_PAIR) --output_metadata_path $(INTERNAL_OTA_METADATA))
@@ -4371,17 +4372,10 @@
name := $(name)-ota-retrofit-$(FILE_NAME_TAG)
INTERNAL_OTA_RETROFIT_DYNAMIC_PARTITIONS_PACKAGE_TARGET := $(PRODUCT_OUT)/$(name).zip
-
$(INTERNAL_OTA_RETROFIT_DYNAMIC_PARTITIONS_PACKAGE_TARGET): KEY_CERT_PAIR := $(DEFAULT_KEY_CERT_PAIR)
-
-ifeq ($(AB_OTA_UPDATER),true)
-$(INTERNAL_OTA_RETROFIT_DYNAMIC_PARTITIONS_PACKAGE_TARGET): $(BRILLO_UPDATE_PAYLOAD)
-else
-$(INTERNAL_OTA_RETROFIT_DYNAMIC_PARTITIONS_PACKAGE_TARGET): $(BROTLI)
-endif
-
-$(INTERNAL_OTA_RETROFIT_DYNAMIC_PARTITIONS_PACKAGE_TARGET): $(BUILT_TARGET_FILES_PACKAGE) \
- build/make/tools/releasetools/ota_from_target_files
+$(INTERNAL_OTA_RETROFIT_DYNAMIC_PARTITIONS_PACKAGE_TARGET): \
+ $(BUILT_TARGET_FILES_PACKAGE) \
+ $(OTA_FROM_TARGET_FILES)
@echo "Package OTA (retrofit dynamic partitions): $@"
$(call build-ota-package-target,$@,-k $(KEY_CERT_PAIR) --retrofit_dynamic_partitions)
@@ -4404,7 +4398,7 @@
$(INSTALLED_USERDATAIMAGE_TARGET) \
$(INSTALLED_VENDORIMAGE_TARGET) \
$(INSTALLED_PRODUCTIMAGE_TARGET) \
- $(INSTALLED_PRODUCT_SERVICESIMAGE_TARGET)
+ $(INSTALLED_SYSTEM_EXTIMAGE_TARGET)
endif
$(APPCOMPAT_ZIP): PRIVATE_LIST_FILE := $(call intermediates-dir-for,PACKAGING,appcompat)/filelist
$(APPCOMPAT_ZIP): $(SOONG_ZIP)
@@ -4433,7 +4427,7 @@
$(INSTALLED_USERDATAIMAGE_TARGET) \
$(INSTALLED_VENDORIMAGE_TARGET) \
$(INSTALLED_PRODUCTIMAGE_TARGET) \
- $(INSTALLED_PRODUCT_SERVICESIMAGE_TARGET) \
+ $(INSTALLED_SYSTEM_EXTIMAGE_TARGET) \
$(INSTALLED_ODMIMAGE_TARGET) \
$(updater_dep)
endif
@@ -4459,7 +4453,7 @@
$(INSTALLED_USERDATAIMAGE_TARGET) \
$(INSTALLED_VENDORIMAGE_TARGET) \
$(INSTALLED_PRODUCTIMAGE_TARGET) \
- $(INSTALLED_PRODUCT_SERVICESIMAGE_TARGET) \
+ $(INSTALLED_SYSTEM_EXTIMAGE_TARGET) \
$(INSTALLED_ODMIMAGE_TARGET)
endif
$(COVERAGE_ZIP): PRIVATE_LIST_FILE := $(call intermediates-dir-for,PACKAGING,coverage)/filelist
@@ -4516,25 +4510,28 @@
#------------------------------------------------------------------
# A zip of Proguard obfuscation dictionary files.
-# Only for apps_only build.
#
-ifdef TARGET_BUILD_APPS
PROGUARD_DICT_ZIP := $(PRODUCT_OUT)/$(TARGET_PRODUCT)-proguard-dict-$(FILE_NAME_TAG).zip
-# the dependency will be set up later in build/make/core/main.mk.
-$(PROGUARD_DICT_ZIP) :
+# For apps_only builds, we'll establish the dependency later in build/make/core/main.mk.
+ifndef TARGET_BUILD_APPS
+$(PROGUARD_DICT_ZIP): \
+ $(INSTALLED_SYSTEMIMAGE_TARGET) \
+ $(INSTALLED_RAMDISK_TARGET) \
+ $(INSTALLED_BOOTIMAGE_TARGET) \
+ $(INSTALLED_USERDATAIMAGE_TARGET) \
+ $(INSTALLED_VENDORIMAGE_TARGET) \
+ $(INSTALLED_PRODUCTIMAGE_TARGET) \
+ $(INSTALLED_SYSTEM_EXTIMAGE_TARGET) \
+ $(INSTALLED_ODMIMAGE_TARGET) \
+ $(updater_dep)
+endif
+$(PROGUARD_DICT_ZIP): PRIVATE_LIST_FILE := $(call intermediates-dir-for,PACKAGING,proguard)/filelist
+$(PROGUARD_DICT_ZIP): $(SOONG_ZIP)
@echo "Packaging Proguard obfuscation dictionary files."
- $(hide) dict_files=`find $(TARGET_OUT_COMMON_INTERMEDIATES)/APPS -name proguard_dictionary`; \
- if [ -n "$$dict_files" ]; then \
- unobfuscated_jars=$${dict_files//proguard_dictionary/classes.jar}; \
- zip -qX $@ $$dict_files $$unobfuscated_jars; \
- else \
- touch $(dir $@)/zipdummy; \
- (cd $(dir $@) && zip -q $(notdir $@) zipdummy); \
- zip -qd $@ zipdummy; \
- rm $(dir $@)/zipdummy; \
- fi
-
-endif # TARGET_BUILD_APPS
+ mkdir -p $(dir $@) $(TARGET_OUT_COMMON_INTERMEDIATES)/APPS $(dir $(PRIVATE_LIST_FILE))
+ find $(TARGET_OUT_COMMON_INTERMEDIATES)/APPS -name proguard_dictionary | \
+ sed -e 's/\(.*\)\/proguard_dictionary/\0\n\1\/classes.jar/' > $(PRIVATE_LIST_FILE)
+ $(SOONG_ZIP) --ignore_missing_files -d -o $@ -C $(OUT_DIR)/.. -l $(PRIVATE_LIST_FILE)
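The sed expression duplicates each dictionary path with its sibling classes.jar, so the list file fed to soong_zip looks roughly like this (module name and path are hypothetical):

    out/target/common/obj/APPS/ExampleApp_intermediates/proguard_dictionary
    out/target/common/obj/APPS/ExampleApp_intermediates/classes.jar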
ifeq (true,$(PRODUCT_USE_DYNAMIC_PARTITIONS))
@@ -4561,7 +4558,6 @@
# For real devices and for dist builds, build super image from target files to an intermediate directory.
INTERNAL_SUPERIMAGE_DIST_TARGET := $(call intermediates-dir-for,PACKAGING,super.img)/super.img
-INTERNAL_SUPERIMAGE_MISC_INFO := $(call intermediates-dir-for,PACKAGING,superimage_debug)/misc_info.txt
$(INTERNAL_SUPERIMAGE_DIST_TARGET): extracted_input_target_files := $(patsubst %.zip,%,$(BUILT_TARGET_FILES_PACKAGE))
$(INTERNAL_SUPERIMAGE_DIST_TARGET): $(LPMAKE) $(BUILT_TARGET_FILES_PACKAGE) $(BUILD_SUPER_IMAGE)
$(call pretty,"Target super fs image from target files: $@")
@@ -4613,13 +4609,15 @@
$(INSTALLED_SUPERIMAGE_TARGET): $(INSTALLED_SUPERIMAGE_DEPENDENCIES)
$(call pretty,"Target super fs image for debug: $@")
$(call build-superimage-target,$(INSTALLED_SUPERIMAGE_TARGET),\
- $(INTERNAL_SUPERIMAGE_MISC_INFO))
+ $(call intermediates-dir-for,PACKAGING,superimage_debug)/misc_info.txt)
droidcore: $(INSTALLED_SUPERIMAGE_TARGET)
# For devices that uses super image directly, the superimage target points to the file in $(PRODUCT_OUT).
.PHONY: superimage
superimage: $(INSTALLED_SUPERIMAGE_TARGET)
+
+$(call dist-for-goals,dist_files,$(INSTALLED_MISC_INFO_TARGET):super_misc_info.txt)
endif # BOARD_BUILD_SUPER_IMAGE_BY_DEFAULT
# Build $(PRODUCT_OUT)/super.img without dependencies.
@@ -4667,37 +4665,16 @@
INTERNAL_UPDATE_PACKAGE_TARGET := $(PRODUCT_OUT)/$(name).zip
-$(INTERNAL_UPDATE_PACKAGE_TARGET): $(BUILT_TARGET_FILES_PACKAGE) $(ZIP2ZIP)
-
-ifeq (true,$(BOARD_SUPER_IMAGE_IN_UPDATE_PACKAGE))
-$(INTERNAL_UPDATE_PACKAGE_TARGET): $(INTERNAL_SUPERIMAGE_DIST_TARGET)
- @echo "Package: $@"
- # Filter out super_empty and images in BOARD_SUPER_PARTITION_PARTITION_LIST.
- # Filter out system_other for launch DAP devices because it is in super image.
- # Include OTA/super_*.img for retrofit devices and super.img for non-retrofit
- # devices.
- $(hide) $(ZIP2ZIP) -i $(BUILT_TARGET_FILES_PACKAGE) -o $@ \
- -x IMAGES/super_empty.img \
- $(foreach partition,$(BOARD_SUPER_PARTITION_PARTITION_LIST), \
- -x IMAGES/$(partition).img) \
- $(if $(filter system, $(BOARD_SUPER_PARTITION_PARTITION_LIST)), \
- $(if $(filter true, $(PRODUCT_RETROFIT_DYNAMIC_PARTITIONS)),, \
- -x IMAGES/system_other.img)) \
- $(if $(filter true,$(PRODUCT_RETROFIT_DYNAMIC_PARTITIONS)), \
- $(foreach device,$(BOARD_SUPER_PARTITION_BLOCK_DEVICES), \
- OTA/super_$(device).img:super_$(device).img)) \
- OTA/android-info.txt:android-info.txt "IMAGES/*.img:."
- $(if $(INTERNAL_SUPERIMAGE_MISC_INFO), zip -q -j -u $@ $(INTERNAL_SUPERIMAGE_MISC_INFO))
- $(if $(INTERNAL_SUPERIMAGE_DIST_TARGET), zip -q -j -u $@ $(INTERNAL_SUPERIMAGE_DIST_TARGET))
-else
-$(INTERNAL_UPDATE_PACKAGE_TARGET):
- @echo "Package: $@"
- $(hide) $(ZIP2ZIP) -i $(BUILT_TARGET_FILES_PACKAGE) -o $@ \
- OTA/android-info.txt:android-info.txt "IMAGES/*.img:."
-endif # BOARD_SUPER_IMAGE_IN_UPDATE_PACKAGE
+$(INTERNAL_UPDATE_PACKAGE_TARGET): $(BUILT_TARGET_FILES_PACKAGE) $(IMG_FROM_TARGET_FILES)
+ $(call pretty,"Package: $@")
+ PATH=$(INTERNAL_USERIMAGES_BINARY_PATHS):$(dir $(ZIP2ZIP)):$$PATH \
+ $(IMG_FROM_TARGET_FILES) \
+ --additional IMAGES/VerifiedBootParams.textproto:VerifiedBootParams.textproto \
+ $(BUILT_TARGET_FILES_PACKAGE) $@
.PHONY: updatepackage
updatepackage: $(INTERNAL_UPDATE_PACKAGE_TARGET)
+$(call dist-for-goals,updatepackage,$(INTERNAL_UPDATE_PACKAGE_TARGET))
# -----------------------------------------------------------------
@@ -4707,7 +4684,7 @@
ifeq ($(BUILD_QEMU_IMAGES),true)
MK_QEMU_IMAGE_SH := device/generic/goldfish/tools/mk_qemu_image.sh
-MK_COMBINE_QEMU_IMAGE_SH := device/generic/goldfish/tools/mk_combined_img.py
+MK_COMBINE_QEMU_IMAGE := $(HOST_OUT_EXECUTABLES)/mk_combined_img
SGDISK_HOST := $(HOST_OUT_EXECUTABLES)/sgdisk
ifdef INSTALLED_SYSTEMIMAGE_TARGET
@@ -4716,11 +4693,11 @@
$(INSTALLED_SYSTEM_QEMU_CONFIG): $(INSTALLED_SUPERIMAGE_TARGET) $(INSTALLED_VBMETAIMAGE_TARGET)
@echo "$(PRODUCT_OUT)/vbmeta.img vbmeta 1" > $@
@echo "$(INSTALLED_SUPERIMAGE_TARGET) super 2" >> $@
-$(INSTALLED_QEMU_SYSTEMIMAGE): $(INSTALLED_VBMETAIMAGE_TARGET) $(MK_COMBINE_QEMU_IMAGE_SH) $(SGDISK_HOST) $(SIMG2IMG) \
+$(INSTALLED_QEMU_SYSTEMIMAGE): $(INSTALLED_VBMETAIMAGE_TARGET) $(MK_COMBINE_QEMU_IMAGE) $(SGDISK_HOST) $(SIMG2IMG) \
$(INSTALLED_SUPERIMAGE_TARGET) $(INSTALLED_SYSTEM_QEMU_CONFIG)
@echo Create system-qemu.img now
(export SGDISK=$(SGDISK_HOST) SIMG2IMG=$(SIMG2IMG); \
- $(MK_COMBINE_QEMU_IMAGE_SH) -i $(INSTALLED_SYSTEM_QEMU_CONFIG) -o $@)
+ $(MK_COMBINE_QEMU_IMAGE) -i $(INSTALLED_SYSTEM_QEMU_CONFIG) -o $@)
systemimage: $(INSTALLED_QEMU_SYSTEMIMAGE)
droidcore: $(INSTALLED_QEMU_SYSTEMIMAGE)
@@ -4743,14 +4720,14 @@
productimage: $(INSTALLED_QEMU_PRODUCTIMAGE)
droidcore: $(INSTALLED_QEMU_PRODUCTIMAGE)
endif
-ifdef INSTALLED_PRODUCT_SERVICESIMAGE_TARGET
-INSTALLED_QEMU_PRODUCT_SERVICESIMAGE := $(PRODUCT_OUT)/product_services-qemu.img
-$(INSTALLED_QEMU_PRODUCT_SERVICESIMAGE): $(INSTALLED_PRODUCT_SERVICESIMAGE_TARGET) $(MK_QEMU_IMAGE_SH) $(SGDISK_HOST) $(SIMG2IMG)
- @echo Create product_services-qemu.img
- (export SGDISK=$(SGDISK_HOST) SIMG2IMG=$(SIMG2IMG); $(MK_QEMU_IMAGE_SH) $(INSTALLED_PRODUCT_SERVICESIMAGE_TARGET))
+ifdef INSTALLED_SYSTEM_EXTIMAGE_TARGET
+INSTALLED_QEMU_SYSTEM_EXTIMAGE := $(PRODUCT_OUT)/system_ext-qemu.img
+$(INSTALLED_QEMU_SYSTEM_EXTIMAGE): $(INSTALLED_SYSTEM_EXTIMAGE_TARGET) $(MK_QEMU_IMAGE_SH) $(SGDISK_HOST) $(SIMG2IMG)
+ @echo Create system_ext-qemu.img
+ (export SGDISK=$(SGDISK_HOST) SIMG2IMG=$(SIMG2IMG); $(MK_QEMU_IMAGE_SH) $(INSTALLED_SYSTEM_EXTIMAGE_TARGET))
-productservicesimage: $(INSTALLED_QEMU_PRODUCT_SERVICESIMAGE)
-droidcore: $(INSTALLED_QEMU_PRODUCT_SERVICESIMAGE)
+systemextimage: $(INSTALLED_QEMU_SYSTEM_EXTIMAGE)
+droidcore: $(INSTALLED_QEMU_SYSTEM_EXTIMAGE)
endif
ifdef INSTALLED_ODMIMAGE_TARGET
INSTALLED_QEMU_ODMIMAGE := $(PRODUCT_OUT)/odm-qemu.img
@@ -4763,7 +4740,6 @@
endif
QEMU_VERIFIED_BOOT_PARAMS := $(PRODUCT_OUT)/VerifiedBootParams.textproto
-MK_VBMETA_BOOT_KERNEL_CMDLINE_SH := device/generic/goldfish/tools/mk_vbmeta_boot_params.sh
$(QEMU_VERIFIED_BOOT_PARAMS): $(INSTALLED_VBMETAIMAGE_TARGET) $(INSTALLED_SYSTEMIMAGE_TARGET) \
$(MK_VBMETA_BOOT_KERNEL_CMDLINE_SH) $(AVBTOOL)
@echo Creating $@
diff --git a/core/app_prebuilt_internal.mk b/core/app_prebuilt_internal.mk
index 69f411c..399d173 100644
--- a/core/app_prebuilt_internal.mk
+++ b/core/app_prebuilt_internal.mk
@@ -41,7 +41,7 @@
include $(BUILD_SYSTEM)/base_rules.mk
built_module := $(LOCAL_BUILT_MODULE)
-# Run veridex on product, product_services and vendor modules.
+# Run veridex on product, system_ext and vendor modules.
# We skip it for unbundled app builds where we cannot build veridex.
module_run_appcompat :=
ifeq (true,$(non_system_module))
diff --git a/core/aux_config.mk b/core/aux_config.mk
index a508a2d..10d2536 100644
--- a/core/aux_config.mk
+++ b/core/aux_config.mk
@@ -32,7 +32,7 @@
# setup AUX globals
AUX_SHLIB_SUFFIX := .so
-AUX_GLOBAL_ARFLAGS := cqsD
+AUX_GLOBAL_ARFLAGS := crsPD
AUX_STATIC_LIB_SUFFIX := .a
# Load ever-lasting "indexed" version of AUX variant environment; it is treated as READ-ONLY from this
@@ -149,6 +149,8 @@
variant_sfx :=_aux_variant_config.mk
os_sfx :=_aux_os_config.mk
+ifdef AUX_OS_VARIANT_LIST
+
config_roots := $(wildcard device vendor)
all_configs :=
ifdef config_roots
@@ -180,4 +182,6 @@
)
endif
+endif # AUX_OS_VARIANT_LIST
+
INSTALLED_AUX_TARGETS :=
diff --git a/core/base_rules.mk b/core/base_rules.mk
index b0e0577..3c973bb 100644
--- a/core/base_rules.mk
+++ b/core/base_rules.mk
@@ -81,11 +81,17 @@
LOCAL_ODM_MODULE := true
else ifneq ($(filter $(TARGET_OUT_PRODUCT)/%,$(_path)),)
LOCAL_PRODUCT_MODULE := true
-else ifneq ($(filter $(TARGET_OUT_PRODUCT_SERVICES)/%,$(_path)),)
-LOCAL_PRODUCT_SERVICES_MODULE := true
+else ifneq ($(filter $(TARGET_OUT_SYSTEM_EXT)/%,$(_path)),)
+LOCAL_SYSTEM_EXT_MODULE := true
endif
_path :=
+# TODO(b/135957588): Remove the following workaround, which maps
+# LOCAL_PRODUCT_SERVICES_MODULE to LOCAL_PRODUCT_MODULE for all Android.mk files.
+ifndef LOCAL_PRODUCT_MODULE
+LOCAL_PRODUCT_MODULE := $(LOCAL_PRODUCT_SERVICES_MODULE)
+endif
+
ifndef LOCAL_PROPRIETARY_MODULE
LOCAL_PROPRIETARY_MODULE := $(LOCAL_VENDOR_MODULE)
endif
@@ -98,7 +104,7 @@
non_system_module := $(filter true, \
$(LOCAL_PRODUCT_MODULE) \
- $(LOCAL_PRODUCT_SERVICES_MODULE) \
+ $(LOCAL_SYSTEM_EXT_MODULE) \
$(LOCAL_VENDOR_MODULE) \
$(LOCAL_PROPRIETARY_MODULE))
@@ -227,8 +233,8 @@
partition_tag := _ODM
else ifeq (true,$(strip $(LOCAL_PRODUCT_MODULE)))
partition_tag := _PRODUCT
-else ifeq (true,$(strip $(LOCAL_PRODUCT_SERVICES_MODULE)))
- partition_tag := _PRODUCT_SERVICES
+else ifeq (true,$(strip $(LOCAL_SYSTEM_EXT_MODULE)))
+ partition_tag := _SYSTEM_EXT
else ifeq (NATIVE_TESTS,$(LOCAL_MODULE_CLASS))
partition_tag := _DATA
else
@@ -518,11 +524,11 @@
# Only set up copy rules once, even if another arch variant shares it
my_vintf_new_pairs := $(filter-out $(ALL_VINTF_MANIFEST_FRAGMENTS_LIST),$(my_vintf_pairs))
-my_vintf_new_installed := $(call copy-many-vintf-manifest-files-checked,$(my_vintf_pairs))
+my_vintf_new_installed := $(call copy-many-vintf-manifest-files-checked,$(my_vintf_new_pairs))
ALL_VINTF_MANIFEST_FRAGMENTS_LIST += $(my_vintf_new_pairs)
-$(my_all_targets) : $(my_vintf_installed)
+$(my_all_targets) : $(my_vintf_new_installed)
endif # LOCAL_VINTF_FRAGMENTS
endif # !LOCAL_IS_HOST_MODULE
endif # !LOCAL_UNINSTALLABLE_MODULE
@@ -755,7 +761,7 @@
## Register with ALL_MODULES
###########################################################
-ifeq ($(filter $(my_register_name),$(ALL_MODULES)),)
+ifndef ALL_MODULES.$(my_register_name).PATH
# These keys are no longer used, they've been replaced by keys that specify
# target/host/host_cross (REQUIRED_FROM_TARGET / REQUIRED_FROM_HOST) and similar.
#
@@ -885,18 +891,23 @@
##########################################################
# Track module-level dependencies.
# Use $(LOCAL_MODULE) instead of $(my_register_name) to ignore module's bitness.
+ifneq (,$(filter deps-license,$(MAKECMDGOALS)))
ALL_DEPS.MODULES := $(ALL_DEPS.MODULES) $(LOCAL_MODULE)
ALL_DEPS.$(LOCAL_MODULE).ALL_DEPS := $(sort \
$(ALL_DEPS.$(LOCAL_MODULE).ALL_DEPS) \
$(LOCAL_STATIC_LIBRARIES) \
$(LOCAL_WHOLE_STATIC_LIBRARIES) \
$(LOCAL_SHARED_LIBRARIES) \
+ $(LOCAL_DYLIB_LIBRARIES) \
+ $(LOCAL_RLIB_LIBRARIES) \
+ $(LOCAL_PROC_MACRO_LIBRARIES) \
$(LOCAL_HEADER_LIBRARIES) \
$(LOCAL_STATIC_JAVA_LIBRARIES) \
$(LOCAL_JAVA_LIBRARIES) \
$(LOCAL_JNI_SHARED_LIBRARIES))
ALL_DEPS.$(LOCAL_MODULE).LICENSE := $(sort $(ALL_DEPS.$(LOCAL_MODULE).LICENSE) $(license_files))
+endif
###########################################################
## Take care of my_module_tags
@@ -906,14 +917,14 @@
ALL_MODULE_TAGS := $(sort $(ALL_MODULE_TAGS) $(my_module_tags))
# Add this module name to the tag list of each specified tag.
-$(foreach tag,$(my_module_tags),\
+$(foreach tag,$(filter-out optional,$(my_module_tags)),\
$(eval ALL_MODULE_NAME_TAGS.$(tag) := $$(ALL_MODULE_NAME_TAGS.$(tag)) $(my_register_name)))
###########################################################
## umbrella targets used to verify builds
###########################################################
j_or_n :=
-ifneq (,$(filter EXECUTABLES SHARED_LIBRARIES STATIC_LIBRARIES HEADER_LIBRARIES NATIVE_TESTS,$(LOCAL_MODULE_CLASS)))
+ifneq (,$(filter EXECUTABLES SHARED_LIBRARIES STATIC_LIBRARIES HEADER_LIBRARIES NATIVE_TESTS RLIB_LIBRARIES DYLIB_LIBRARIES PROC_MACRO_LIBRARIES,$(LOCAL_MODULE_CLASS)))
j_or_n := native
else
ifneq (,$(filter JAVA_LIBRARIES APPS,$(LOCAL_MODULE_CLASS)))
diff --git a/core/binary.mk b/core/binary.mk
index f63e4cd..d9763f9 100644
--- a/core/binary.mk
+++ b/core/binary.mk
@@ -54,6 +54,7 @@
my_cc := $(LOCAL_CC)
my_cc_wrapper := $(CC_WRAPPER)
my_cxx := $(LOCAL_CXX)
+my_cxx_link := $(LOCAL_CXX)
my_cxx_ldlibs :=
my_cxx_wrapper := $(CXX_WRAPPER)
my_c_includes := $(LOCAL_C_INCLUDES)
@@ -112,11 +113,15 @@
my_ndk_sysroot :=
my_ndk_sysroot_include :=
my_ndk_sysroot_lib :=
+my_api_level := 10000
+
ifneq ($(LOCAL_SDK_VERSION),)
ifdef LOCAL_IS_HOST_MODULE
$(error $(LOCAL_PATH): LOCAL_SDK_VERSION cannot be used in host module)
endif
+ my_cflags += -D__ANDROID_NDK__
+
# Make sure we've built the NDK.
my_additional_dependencies += $(SOONG_OUT_DIR)/ndk_base.timestamp
@@ -147,20 +152,14 @@
my_ndk_api := $(call math_max,$(my_ndk_api),$(my_min_sdk_version))
endif
- my_ndk_api_def := $(my_ndk_api)
my_ndk_hist_api := $(my_ndk_api)
ifeq ($(my_ndk_api),current)
- my_ndk_api_def := __ANDROID_API_FUTURE__
# The last API level supported by the old prebuilt NDKs.
my_ndk_hist_api := 24
+ else
+ my_api_level := $(my_ndk_api)
endif
-
- # Traditionally this has come from android/api-level.h, but with the libc
- # headers unified it must be set by the build system since we don't have
- # per-API level copies of that header now.
- my_cflags += -D__ANDROID_API__=$(my_ndk_api_def)
-
my_ndk_source_root := \
$(HISTORICAL_NDK_VERSIONS_ROOT)/$(LOCAL_NDK_VERSION)/sources
my_ndk_sysroot := \
@@ -267,16 +266,30 @@
endif
endif
+ifeq ($(NATIVE_COVERAGE),true)
+ ifndef LOCAL_IS_HOST_MODULE
+ my_ldflags += -Wl,--wrap,getenv
+
+ ifneq ($(LOCAL_MODULE_CLASS),STATIC_LIBRARIES)
+ ifeq ($(LOCAL_SDK_VERSION),)
+ my_whole_static_libraries += libprofile-extras
+ else
+ my_whole_static_libraries += libprofile-extras_ndk
+ endif
+ endif
+ endif
+endif
+
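A sketch of how this path is typically enabled (a build-environment toggle; the value shown is an assumption, not part of this change):

# Hypothetical: build with native coverage instrumentation enabled.
NATIVE_COVERAGE := true
# Device modules then link with -Wl,--wrap,getenv and pull in libprofile-extras
# (or libprofile-extras_ndk when LOCAL_SDK_VERSION is set).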
ifneq ($(LOCAL_USE_VNDK),)
# Required VNDK version for vendor modules is BOARD_VNDK_VERSION.
- my_vndk_version := $(BOARD_VNDK_VERSION)
- ifeq ($(my_vndk_version),current)
+ my_api_level := $(BOARD_VNDK_VERSION)
+ ifeq ($(my_api_level),current)
# Build with current PLATFORM_VNDK_VERSION.
# If PLATFORM_VNDK_VERSION has a CODENAME, it will return
# __ANDROID_API_FUTURE__.
- my_vndk_version := $(call codename-or-sdk-to-sdk,$(PLATFORM_VNDK_VERSION))
+ my_api_level := $(call codename-or-sdk-to-sdk,$(PLATFORM_VNDK_VERSION))
endif
- my_cflags += -D__ANDROID_API__=$(my_vndk_version) -D__ANDROID_VNDK__
+ my_cflags += -D__ANDROID_VNDK__
endif
ifndef LOCAL_IS_HOST_MODULE
@@ -560,7 +573,7 @@
## Compile RenderScript with reflected C++
####################################################
-renderscript_sources := $(filter %.rs %.fs,$(my_src_files))
+renderscript_sources := $(filter %.rscript %.fs,$(my_src_files))
ifneq (,$(renderscript_sources))
my_soong_problems += rs
@@ -604,7 +617,7 @@
endif
bc_dep_files := $(addprefix $(renderscript_intermediate)/, \
- $(patsubst %.fs,%.d, $(patsubst %.rs,%.d, $(notdir $(renderscript_sources)))))
+ $(patsubst %.fs,%.d, $(patsubst %.rscript,%.d, $(notdir $(renderscript_sources)))))
$(RenderScript_file_stamp): PRIVATE_RS_INCLUDES := $(renderscript_includes)
$(RenderScript_file_stamp): PRIVATE_RS_CC := $(LOCAL_RENDERSCRIPT_CC)
@@ -622,7 +635,7 @@
LOCAL_INTERMEDIATE_TARGETS += $(RenderScript_file_stamp)
rs_generated_cpps := $(addprefix \
- $(renderscript_intermediate)/ScriptC_,$(patsubst %.fs,%.cpp, $(patsubst %.rs,%.cpp, \
+ $(renderscript_intermediate)/ScriptC_,$(patsubst %.fs,%.cpp, $(patsubst %.rscript,%.cpp, \
$(notdir $(renderscript_sources)))))
$(call track-src-file-gen,$(renderscript_sources),$(rs_generated_cpps))
@@ -1164,31 +1177,6 @@
####################################################
-## Import includes
-####################################################
-import_includes := $(intermediates)/import_includes
-import_includes_deps := $(strip \
- $(if $(LOCAL_USE_VNDK),\
- $(call intermediates-dir-for,HEADER_LIBRARIES,device_kernel_headers,$(my_kind),,$(LOCAL_2ND_ARCH_VAR_PREFIX),$(my_host_cross))/export_includes) \
- $(foreach l, $(installed_shared_library_module_names), \
- $(call intermediates-dir-for,SHARED_LIBRARIES,$(l),$(my_kind),,$(LOCAL_2ND_ARCH_VAR_PREFIX),$(my_host_cross))/export_includes) \
- $(foreach l, $(my_static_libraries) $(my_whole_static_libraries), \
- $(call intermediates-dir-for,STATIC_LIBRARIES,$(l),$(my_kind),,$(LOCAL_2ND_ARCH_VAR_PREFIX),$(my_host_cross))/export_includes) \
- $(foreach l, $(my_header_libraries), \
- $(call intermediates-dir-for,HEADER_LIBRARIES,$(l),$(my_kind),,$(LOCAL_2ND_ARCH_VAR_PREFIX),$(my_host_cross))/export_includes))
-$(import_includes): PRIVATE_IMPORT_EXPORT_INCLUDES := $(import_includes_deps)
-$(import_includes) : $(import_includes_deps)
- @echo Import includes file: $@
- $(hide) mkdir -p $(dir $@) && rm -f $@
-ifdef import_includes_deps
- $(hide) for f in $(PRIVATE_IMPORT_EXPORT_INCLUDES); do \
- cat $$f >> $@; \
- done
-else
- $(hide) touch $@
-endif
-
-####################################################
## Verify that NDK-built libraries only link against
## other NDK-built libraries
####################################################
@@ -1299,7 +1287,6 @@
# that custom build rules which generate .o files don't consume other generated
# sources as input (or if they do they take care of that dependency themselves).
$(normal_objects) : | $(my_generated_sources)
-$(all_objects) : $(import_includes)
ALL_C_CPP_ETC_OBJECTS += $(all_objects)
@@ -1393,15 +1380,9 @@
# libraries have already been linked into the module at that point.
# We do, however, care about the NOTICE files for any static
# libraries that we use. (see notice_files.mk)
-#
-# Don't do this in mm, since many of the targets won't exist.
-ifeq ($(ONE_SHOT_MAKEFILE),)
installed_static_library_notice_file_targets := \
$(foreach lib,$(my_static_libraries) $(my_whole_static_libraries), \
NOTICE-$(if $(LOCAL_IS_HOST_MODULE),HOST$(if $(my_host_cross),_CROSS,),TARGET)-STATIC_LIBRARIES-$(lib))
-else
-installed_static_library_notice_file_targets :=
-endif
$(notice_target): | $(installed_static_library_notice_file_targets)
$(LOCAL_INSTALLED_MODULE): | $(notice_target)
@@ -1604,13 +1585,25 @@
ifeq ($(my_use_clang_lld),true)
my_target_global_ldflags := $($(LOCAL_2ND_ARCH_VAR_PREFIX)CLANG_$(my_prefix)GLOBAL_LLDFLAGS)
include $(BUILD_SYSTEM)/pack_dyn_relocs_setup.mk
- ifeq ($(my_pack_module_relocations),false)
+ ifeq ($(my_pack_module_relocations),true)
+ my_target_global_ldflags += -Wl,--pack-dyn-relocs=android+relr -Wl,--use-android-relr-tags
+ else
my_target_global_ldflags += -Wl,--pack-dyn-relocs=none
endif
else
my_target_global_ldflags := $($(LOCAL_2ND_ARCH_VAR_PREFIX)CLANG_$(my_prefix)GLOBAL_LDFLAGS)
endif # my_use_clang_lld
+my_target_triple := $($(LOCAL_2ND_ARCH_VAR_PREFIX)CLANG_$(my_prefix)TRIPLE)
+ifndef LOCAL_IS_HOST_MODULE
+ my_target_triple_flag := -target $(my_target_triple)$(my_api_level)
+else
+ my_target_triple_flag := -target $(my_target_triple)
+endif
+my_asflags += $(my_target_triple_flag)
+my_cflags += $(my_target_triple_flag)
+my_ldflags += $(my_target_triple_flag)
+
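Illustrative values (not from this change): an arm64 device module with an effective API level of 29 ends up with

my_target_triple_flag := -target aarch64-linux-android29

while a host module keeps the bare triple with no API suffix, e.g. -target x86_64-linux-gnu.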
$(LOCAL_INTERMEDIATE_TARGETS): PRIVATE_GLOBAL_C_INCLUDES := $(my_target_global_c_includes)
$(LOCAL_INTERMEDIATE_TARGETS): PRIVATE_GLOBAL_C_SYSTEM_INCLUDES := $(my_target_global_c_system_includes)
$(LOCAL_INTERMEDIATE_TARGETS): PRIVATE_TARGET_GLOBAL_CFLAGS := $(my_target_global_cflags)
@@ -1662,6 +1655,22 @@
$(LOCAL_INTERMEDIATE_TARGETS): $(my_coverage_lib)
endif
+####################################################
+## Import includes
+####################################################
+imported_includes := $(strip \
+ $(if $(LOCAL_USE_VNDK),\
+ $(call intermediates-dir-for,HEADER_LIBRARIES,device_kernel_headers,$(my_kind),,$(LOCAL_2ND_ARCH_VAR_PREFIX),$(my_host_cross))) \
+ $(foreach l, $(installed_shared_library_module_names), \
+ $(call intermediates-dir-for,SHARED_LIBRARIES,$(l),$(my_kind),,$(LOCAL_2ND_ARCH_VAR_PREFIX),$(my_host_cross))) \
+ $(foreach l, $(my_static_libraries) $(my_whole_static_libraries), \
+ $(call intermediates-dir-for,STATIC_LIBRARIES,$(l),$(my_kind),,$(LOCAL_2ND_ARCH_VAR_PREFIX),$(my_host_cross))) \
+ $(foreach l, $(my_header_libraries), \
+ $(call intermediates-dir-for,HEADER_LIBRARIES,$(l),$(my_kind),,$(LOCAL_2ND_ARCH_VAR_PREFIX),$(my_host_cross))))
+
+$(foreach dep,$(imported_includes),\
+ $(eval EXPORTS.$$(dep).USERS := $$(EXPORTS.$$(dep).USERS) $$(all_objects)))
+
###########################################################
## Define PRIVATE_ variables used by multiple module types
###########################################################
@@ -1696,13 +1705,20 @@
my_cxx := $(my_cxx_wrapper) $(CLANG_CXX)
endif
+ifeq ($(strip $(my_cxx_link)),)
+ my_cxx_link := $(CLANG_CXX)
+endif
+
ifneq ($(LOCAL_NO_STATIC_ANALYZER),true)
my_cxx := CCC_CXX=$(CLANG_CXX) CLANG_CXX=$(CLANG_CXX) \
$(SYNTAX_TOOLS_PREFIX)/c++-analyzer
+ my_cxx_link := CCC_CXX=$(CLANG_CXX) CLANG_CXX=$(CLANG_CXX) \
+ $(SYNTAX_TOOLS_PREFIX)/c++-analyzer
endif
$(LOCAL_INTERMEDIATE_TARGETS): PRIVATE_LINKER := $(my_linker)
$(LOCAL_INTERMEDIATE_TARGETS): PRIVATE_CXX := $(my_cxx)
+$(LOCAL_INTERMEDIATE_TARGETS): PRIVATE_CXX_LINK := $(my_cxx_link)
$(LOCAL_INTERMEDIATE_TARGETS): PRIVATE_YACCFLAGS := $(LOCAL_YACCFLAGS)
$(LOCAL_INTERMEDIATE_TARGETS): PRIVATE_ASFLAGS := $(my_asflags)
@@ -1714,7 +1730,7 @@
$(LOCAL_INTERMEDIATE_TARGETS): PRIVATE_RTTI_FLAG := $(LOCAL_RTTI_FLAG)
$(LOCAL_INTERMEDIATE_TARGETS): PRIVATE_DEBUG_CFLAGS := $(debug_cflags)
$(LOCAL_INTERMEDIATE_TARGETS): PRIVATE_C_INCLUDES := $(my_c_includes)
-$(LOCAL_INTERMEDIATE_TARGETS): PRIVATE_IMPORT_INCLUDES := $(import_includes)
+$(LOCAL_INTERMEDIATE_TARGETS): PRIVATE_IMPORTED_INCLUDES := $(imported_includes)
$(LOCAL_INTERMEDIATE_TARGETS): PRIVATE_LDFLAGS := $(my_ldflags)
$(LOCAL_INTERMEDIATE_TARGETS): PRIVATE_LDLIBS := $(my_ldlibs)
$(LOCAL_INTERMEDIATE_TARGETS): PRIVATE_TIDY_CHECKS := $(my_tidy_checks)
@@ -1741,51 +1757,30 @@
###########################################################
# Export includes
###########################################################
-export_includes := $(intermediates)/export_includes
-export_cflags := $(foreach d,$(my_export_c_include_dirs),-I $(d))
-$(export_includes): PRIVATE_EXPORT_CFLAGS := $(export_cflags)
+
# Headers exported by whole static libraries are also exported by this library.
export_include_deps := $(strip \
$(foreach l,$(my_whole_static_libraries), \
- $(call intermediates-dir-for,STATIC_LIBRARIES,$(l),$(my_kind),,$(LOCAL_2ND_ARCH_VAR_PREFIX),$(my_host_cross))/export_includes))
+ $(call intermediates-dir-for,STATIC_LIBRARIES,$(l),$(my_kind),,$(LOCAL_2ND_ARCH_VAR_PREFIX),$(my_host_cross))))
# Re-export requested headers from shared libraries.
export_include_deps += $(strip \
$(foreach l,$(LOCAL_EXPORT_SHARED_LIBRARY_HEADERS), \
- $(call intermediates-dir-for,SHARED_LIBRARIES,$(l),$(my_kind),,$(LOCAL_2ND_ARCH_VAR_PREFIX),$(my_host_cross))/export_includes))
+ $(call intermediates-dir-for,SHARED_LIBRARIES,$(l),$(my_kind),,$(LOCAL_2ND_ARCH_VAR_PREFIX),$(my_host_cross))))
# Re-export requested headers from static libraries.
export_include_deps += $(strip \
$(foreach l,$(LOCAL_EXPORT_STATIC_LIBRARY_HEADERS), \
- $(call intermediates-dir-for,STATIC_LIBRARIES,$(l),$(my_kind),,$(LOCAL_2ND_ARCH_VAR_PREFIX),$(my_host_cross))/export_includes))
+ $(call intermediates-dir-for,STATIC_LIBRARIES,$(l),$(my_kind),,$(LOCAL_2ND_ARCH_VAR_PREFIX),$(my_host_cross))))
# Re-export requested headers from header libraries.
export_include_deps += $(strip \
$(foreach l,$(LOCAL_EXPORT_HEADER_LIBRARY_HEADERS), \
- $(call intermediates-dir-for,HEADER_LIBRARIES,$(l),$(my_kind),,$(LOCAL_2ND_ARCH_VAR_PREFIX),$(my_host_cross))/export_includes))
-$(export_includes): PRIVATE_REEXPORTED_INCLUDES := $(export_include_deps)
-# By adding $(my_generated_sources) it makes sure the headers get generated
-# before any dependent source files get compiled.
-$(export_includes) : $(my_export_c_include_deps) $(my_generated_sources) $(export_include_deps) $(LOCAL_EXPORT_C_INCLUDE_DEPS)
- @echo Export includes file: $< -- $@
- $(hide) mkdir -p $(dir $@) && rm -f $@.tmp && touch $@.tmp
-ifdef export_cflags
- $(hide) echo "$(PRIVATE_EXPORT_CFLAGS)" >>$@.tmp
-endif
-ifdef export_include_deps
- $(hide) for f in $(PRIVATE_REEXPORTED_INCLUDES); do \
- cat $$f >> $@.tmp; \
- done
-endif
- $(hide) if cmp -s $@.tmp $@ ; then \
- rm $@.tmp ; \
- else \
- mv $@.tmp $@ ; \
- fi
-export_cflags :=
+ $(call intermediates-dir-for,HEADER_LIBRARIES,$(l),$(my_kind),,$(LOCAL_2ND_ARCH_VAR_PREFIX),$(my_host_cross))))
-# Kati adds restat=1 to ninja. GNU make does nothing for this.
-.KATI_RESTAT: $(export_includes)
-
-# Make sure export_includes gets generated when you are running mm/mmm
-$(LOCAL_BUILT_MODULE) : | $(export_includes)
+ifneq ($(strip $(my_export_c_include_dirs)$(export_include_deps)),)
+ EXPORTS_LIST := $(EXPORTS_LIST) $(intermediates)
+ EXPORTS.$(intermediates).FLAGS := $(foreach d,$(my_export_c_include_dirs),-I $(d))
+ EXPORTS.$(intermediates).REEXPORT := $(export_include_deps)
+ EXPORTS.$(intermediates).DEPS := $(my_export_c_include_deps) $(my_generated_sources) $(LOCAL_EXPORT_C_INCLUDE_DEPS)
+endif
ifneq (,$(filter-out $(LOCAL_PATH)/%,$(my_export_c_include_dirs)))
my_soong_problems += non_local__export_c_include_dirs
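A sketch of the metadata recorded for a hypothetical library that exports one include directory (names and paths are illustrative); consumers pick the flags up through EXPORTS.$(dep).USERS set above, rather than by reading an export_includes file from disk:

EXPORTS_LIST := ... out/.../libexample_intermediates
EXPORTS.out/.../libexample_intermediates.FLAGS := -I external/example/include
EXPORTS.out/.../libexample_intermediates.REEXPORT :=
EXPORTS.out/.../libexample_intermediates.DEPS :=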
diff --git a/core/board_config.mk b/core/board_config.mk
index 9c9e8b1..a6aef87 100644
--- a/core/board_config.mk
+++ b/core/board_config.mk
@@ -67,8 +67,8 @@
BOARD_VENDORIMAGE_FILE_SYSTEM_TYPE \
BOARD_PRODUCTIMAGE_PARTITION_SIZE \
BOARD_PRODUCTIMAGE_FILE_SYSTEM_TYPE \
- BOARD_PRODUCT_SERVICESIMAGE_PARTITION_SIZE \
- BOARD_PRODUCT_SERVICESIMAGE_FILE_SYSTEM_TYPE \
+ BOARD_SYSTEM_EXTIMAGE_PARTITION_SIZE \
+ BOARD_SYSTEM_EXTIMAGE_FILE_SYSTEM_TYPE \
BOARD_ODMIMAGE_PARTITION_SIZE \
BOARD_ODMIMAGE_FILE_SYSTEM_TYPE \
@@ -78,7 +78,7 @@
BOARD_VENDORIMAGE_PARTITION_RESERVED_SIZE \
BOARD_ODMIMAGE_PARTITION_RESERVED_SIZE \
BOARD_PRODUCTIMAGE_PARTITION_RESERVED_SIZE \
- BOARD_PRODUCT_SERVICESIMAGE_PARTITION_RESERVED_SIZE \
+ BOARD_SYSTEM_EXTIMAGE_PARTITION_RESERVED_SIZE \
BOARD_SUPER_PARTITION_SIZE \
BOARD_SUPER_PARTITION_GROUPS \
@@ -193,20 +193,37 @@
# Note that this assumes that the 2ND_CPU_ABI for a 64 bit target
# is always 32 bits. If this isn't the case, these variables should
# be overridden in the board configuration.
+#
+# Similarly, TARGET_NATIVE_BRIDGE_2ND_ABI for a 64 bit target is always
+# 32 bits. Note that all CPU_ABIs are preferred over all NATIVE_BRIDGE_ABIs.
+_target_native_bridge_abi_list_32_bit :=
+_target_native_bridge_abi_list_64_bit :=
+
ifeq (,$(TARGET_CPU_ABI_LIST_64_BIT))
ifeq (true|true,$(TARGET_IS_64_BIT)|$(TARGET_SUPPORTS_64_BIT_APPS))
TARGET_CPU_ABI_LIST_64_BIT := $(TARGET_CPU_ABI) $(TARGET_CPU_ABI2)
+ _target_native_bridge_abi_list_64_bit := $(TARGET_NATIVE_BRIDGE_ABI)
+ endif
+endif
+
+# "arm64-v8a-hwasan", the ABI for libraries compiled with HWASAN, is supported
+# in all builds with SANITIZE_TARGET=hwaddress.
+ifneq ($(filter hwaddress,$(SANITIZE_TARGET)),)
+ ifneq ($(filter arm64-v8a,$(TARGET_CPU_ABI_LIST_64_BIT)),)
+ TARGET_CPU_ABI_LIST_64_BIT := arm64-v8a-hwasan $(TARGET_CPU_ABI_LIST_64_BIT)
endif
endif
ifeq (,$(TARGET_CPU_ABI_LIST_32_BIT))
ifneq (true,$(TARGET_IS_64_BIT))
TARGET_CPU_ABI_LIST_32_BIT := $(TARGET_CPU_ABI) $(TARGET_CPU_ABI2)
+ _target_native_bridge_abi_list_32_bit := $(TARGET_NATIVE_BRIDGE_ABI)
else
ifeq (true,$(TARGET_SUPPORTS_32_BIT_APPS))
# For a 64 bit target, assume that the 2ND_CPU_ABI
# is a 32 bit ABI.
TARGET_CPU_ABI_LIST_32_BIT := $(TARGET_2ND_CPU_ABI) $(TARGET_2ND_CPU_ABI2)
+ _target_native_bridge_abi_list_32_bit := $(TARGET_NATIVE_BRIDGE_2ND_ABI)
endif
endif
endif
@@ -215,14 +232,21 @@
# of preference) that the target supports. If a TARGET_CPU_ABI_LIST
# is specified by the board configuration, we use that. If not, we
# build a list out of the TARGET_CPU_ABIs specified by the config.
+# Add NATIVE_BRIDGE_ABIs at the end to keep order of preference.
ifeq (,$(TARGET_CPU_ABI_LIST))
ifeq ($(TARGET_IS_64_BIT)|$(TARGET_PREFER_32_BIT_APPS),true|true)
- TARGET_CPU_ABI_LIST := $(TARGET_CPU_ABI_LIST_32_BIT) $(TARGET_CPU_ABI_LIST_64_BIT)
+ TARGET_CPU_ABI_LIST := $(TARGET_CPU_ABI_LIST_32_BIT) $(TARGET_CPU_ABI_LIST_64_BIT) \
+ $(_target_native_bridge_abi_list_32_bit) $(_target_native_bridge_abi_list_64_bit)
else
- TARGET_CPU_ABI_LIST := $(TARGET_CPU_ABI_LIST_64_BIT) $(TARGET_CPU_ABI_LIST_32_BIT)
+ TARGET_CPU_ABI_LIST := $(TARGET_CPU_ABI_LIST_64_BIT) $(TARGET_CPU_ABI_LIST_32_BIT) \
+ $(_target_native_bridge_abi_list_64_bit) $(_target_native_bridge_abi_list_32_bit)
endif
endif
+# Add NATIVE_BRIDGE_ABIs at the end of 32 and 64 bit CPU_ABIs to keep order of preference.
+TARGET_CPU_ABI_LIST_32_BIT += $(_target_native_bridge_abi_list_32_bit)
+TARGET_CPU_ABI_LIST_64_BIT += $(_target_native_bridge_abi_list_64_bit)
+
# Strip whitespace from the ABI list string.
TARGET_CPU_ABI_LIST := $(subst $(space),$(comma),$(strip $(TARGET_CPU_ABI_LIST)))
TARGET_CPU_ABI_LIST_32_BIT := $(subst $(space),$(comma),$(strip $(TARGET_CPU_ABI_LIST_32_BIT)))
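
Worked example of the resulting ordering (values are illustrative, assuming the board does not pre-set the lists and supports both 32- and 64-bit apps): on a 64-bit x86 board that also declares native-bridge ABIs for translated ARM code, say

  TARGET_CPU_ABI := x86_64
  TARGET_2ND_CPU_ABI := x86
  TARGET_NATIVE_BRIDGE_ABI := arm64-v8a
  TARGET_NATIVE_BRIDGE_2ND_ABI := armeabi-v7a

the logic above produces

  # TARGET_CPU_ABI_LIST        := x86_64 x86 arm64-v8a armeabi-v7a
  # TARGET_CPU_ABI_LIST_64_BIT := x86_64 arm64-v8a
  # TARGET_CPU_ABI_LIST_32_BIT := x86 armeabi-v7a
  # (each list is then comma-joined by the subst calls above)

i.e. every real CPU ABI is preferred over every native-bridge ABI, and within each group 64-bit comes before 32-bit unless TARGET_PREFER_32_BIT_APPS is set.
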
@@ -334,6 +358,13 @@
endif
.KATI_READONLY := BUILDING_USERDATA_IMAGE
+# Are we building a vbmeta image
+BUILDING_VBMETA_IMAGE := true
+ifeq ($(PRODUCT_BUILD_VBMETA_IMAGE),false)
+ BUILDING_VBMETA_IMAGE :=
+endif
+.KATI_READONLY := BUILDING_VBMETA_IMAGE
+
###########################################
# Now we can substitute with the real value of TARGET_COPY_OUT_VENDOR
ifeq ($(TARGET_COPY_OUT_VENDOR),$(_vendor_path_placeholder))
@@ -413,48 +444,48 @@
.KATI_READONLY := BUILDING_PRODUCT_IMAGE
###########################################
-# Now we can substitute with the real value of TARGET_COPY_OUT_PRODUCT_SERVICES
-MERGE_PRODUCT_SERVICES_INTO_PRODUCT :=
-ifeq ($(TARGET_COPY_OUT_PRODUCT_SERVICES),$(_product_services_path_placeholder))
- TARGET_COPY_OUT_PRODUCT_SERVICES := $(TARGET_COPY_OUT_PRODUCT)
- MERGE_PRODUCT_SERVICES_INTO_PRODUCT := true
-else ifeq ($(TARGET_COPY_OUT_PRODUCT),$(TARGET_COPY_OUT_PRODUCT_SERVICES))
- MERGE_PRODUCT_SERVICES_INTO_PRODUCT := true
-else ifeq ($(filter product_services system/product_services,$(TARGET_COPY_OUT_PRODUCT_SERVICES)),)
- $(error TARGET_COPY_OUT_PRODUCT_SERVICES must be either 'product_services',\
- '$(TARGET_COPY_OUT_PRODUCT)' or 'system/product_services', seeing '$(TARGET_COPY_OUT_PRODUCT_SERVICES)'.)
-endif
-.KATI_READONLY := MERGE_PRODUCT_SERVICES_INTO_PRODUCT
-PRODUCT_COPY_FILES := $(subst $(_product_services_path_placeholder),$(TARGET_COPY_OUT_PRODUCT_SERVICES),$(PRODUCT_COPY_FILES))
+# TODO(b/135957588) TARGET_COPY_OUT_PRODUCT_SERVICES will be set to
+# TARGET_COPY_OUT_PRODUCT as a workaround.
+TARGET_COPY_OUT_PRODUCT_SERVICES := $(TARGET_COPY_OUT_PRODUCT)
-BOARD_USES_PRODUCT_SERVICESIMAGE :=
-ifdef BOARD_PREBUILT_PRODUCT_SERVICESIMAGE
- BOARD_USES_PRODUCT_SERVICESIMAGE := true
+###########################################
+# Now we can substitute with the real value of TARGET_COPY_OUT_SYSTEM_EXT
+ifeq ($(TARGET_COPY_OUT_SYSTEM_EXT),$(_system_ext_path_placeholder))
+TARGET_COPY_OUT_SYSTEM_EXT := system/system_ext
+else ifeq ($(filter system_ext system/system_ext,$(TARGET_COPY_OUT_SYSTEM_EXT)),)
+$(error TARGET_COPY_OUT_SYSTEM_EXT must be either 'system_ext' or 'system/system_ext', seeing '$(TARGET_COPY_OUT_SYSTEM_EXT)'.)
endif
-ifdef BOARD_PRODUCT_SERVICESIMAGE_FILE_SYSTEM_TYPE
- BOARD_USES_PRODUCT_SERVICESIMAGE := true
-endif
-ifeq ($(TARGET_COPY_OUT_PRODUCT_SERVICES),product_services)
- BOARD_USES_PRODUCT_SERVICESIMAGE := true
-else ifdef BOARD_USES_PRODUCT_SERVICESIMAGE
- $(error TARGET_COPY_OUT_PRODUCT_SERVICES must be set to 'product_services' to use a product_services image)
-endif
+PRODUCT_COPY_FILES := $(subst $(_system_ext_path_placeholder),$(TARGET_COPY_OUT_SYSTEM_EXT),$(PRODUCT_COPY_FILES))
-BUILDING_PRODUCT_SERVICES_IMAGE :=
-ifeq ($(PRODUCT_BUILD_PRODUCT_SERVICES_IMAGE),)
- ifdef BOARD_PRODUCT_SERVICESIMAGE_FILE_SYSTEM_TYPE
- BUILDING_PRODUCT_SERVICES_IMAGE := true
+BOARD_USES_SYSTEM_EXTIMAGE :=
+ifdef BOARD_PREBUILT_SYSTEM_EXTIMAGE
+ BOARD_USES_SYSTEM_EXTIMAGE := true
+endif
+ifdef BOARD_SYSTEM_EXTIMAGE_FILE_SYSTEM_TYPE
+ BOARD_USES_SYSTEM_EXTIMAGE := true
+endif
+ifeq ($(TARGET_COPY_OUT_SYSTEM_EXT),system_ext)
+ BOARD_USES_SYSTEM_EXTIMAGE := true
+else ifdef BOARD_USES_SYSTEM_EXTIMAGE
+ $(error TARGET_COPY_OUT_SYSTEM_EXT must be set to 'system_ext' to use a system_ext image)
+endif
+.KATI_READONLY := BOARD_USES_SYSTEM_EXTIMAGE
+
+BUILDING_SYSTEM_EXT_IMAGE :=
+ifeq ($(PRODUCT_BUILD_SYSTEM_EXT_IMAGE),)
+ ifdef BOARD_SYSTEM_EXTIMAGE_FILE_SYSTEM_TYPE
+ BUILDING_SYSTEM_EXT_IMAGE := true
endif
-else ifeq ($(PRODUCT_BUILD_PRODUCT_SERVICES_IMAGE),true)
- BUILDING_PRODUCT_SERVICES_IMAGE := true
- ifndef BOARD_PRODUCT_SERVICESIMAGE_FILE_SYSTEM_TYPE
- $(error PRODUCT_BUILD_PRODUCT_SERVICES_IMAGE set to true, but BOARD_PRODUCT_SERVICESIMAGE_FILE_SYSTEM_TYPE not defined)
+else ifeq ($(PRODUCT_BUILD_SYSTEM_EXT_IMAGE),true)
+ BUILDING_SYSTEM_EXT_IMAGE := true
+ ifndef BOARD_SYSTEM_EXTIMAGE_FILE_SYSTEM_TYPE
+ $(error PRODUCT_BUILD_SYSTEM_EXT_IMAGE set to true, but BOARD_SYSTEM_EXTIMAGE_FILE_SYSTEM_TYPE not defined)
endif
endif
-ifdef BOARD_PREBUILT_PRODUCT_SERVICESIMAGE
- BUILDING_PRODUCT_SERVICES_IMAGE :=
+ifdef BOARD_PREBUILT_SYSTEM_EXTIMAGE
+ BUILDING_SYSTEM_EXT_IMAGE :=
endif
-.KATI_READONLY := BUILDING_PRODUCT_SERVICES_IMAGE
+.KATI_READONLY := BUILDING_SYSTEM_EXT_IMAGE
###########################################
# Now we can substitute with the real value of TARGET_COPY_OUT_ODM
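
In practice the rename means a board opts into a dedicated system_ext partition with the same shape of configuration it previously used for product_services, just under the new names; a minimal, illustrative BoardConfig fragment:

  # Build a separate system_ext image:
  TARGET_COPY_OUT_SYSTEM_EXT := system_ext
  BOARD_SYSTEM_EXTIMAGE_FILE_SYSTEM_TYPE := ext4
  BOARD_SYSTEM_EXTIMAGE_PARTITION_SIZE := 104857600
  # ...or BOARD_SYSTEM_EXTIMAGE_PARTITION_RESERVED_SIZE instead of a fixed size
  # when the partition lives in super (setting both is rejected in core/config.mk).
  # Leaving TARGET_COPY_OUT_SYSTEM_EXT at its default keeps the contents under
  # system/system_ext inside the system image instead.

Separately, BUILDING_VBMETA_IMAGE above defaults to true; a product that must not produce a vbmeta image can now set PRODUCT_BUILD_VBMETA_IMAGE := false.
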
diff --git a/core/build_id.mk b/core/build_id.mk
index bac2f48..2329288 100644
--- a/core/build_id.mk
+++ b/core/build_id.mk
@@ -18,4 +18,4 @@
# (like "CRB01"). It must be a single word, and is
# capitalized by convention.
-BUILD_ID=PI
+BUILD_ID=QT
diff --git a/core/build_rro_package.mk b/core/build_rro_package.mk
index a6921d5..e5d7685 100644
--- a/core/build_rro_package.mk
+++ b/core/build_rro_package.mk
@@ -20,8 +20,8 @@
partition := $(TARGET_OUT_ODM)
else ifeq ($(strip $(LOCAL_VENDOR_MODULE)),true)
partition := $(TARGET_OUT_VENDOR)
-else ifeq ($(strip $(LOCAL_PRODUCT_SERVICES_MODULE)),true)
- partition := $(TARGET_OUT_PRODUCT_SERVICES)
+else ifeq ($(strip $(LOCAL_SYSTEM_EXT_MODULE)),true)
+ partition := $(TARGET_OUT_SYSTEM_EXT)
else
partition := $(TARGET_OUT_PRODUCT)
endif
diff --git a/core/cc_prebuilt_internal.mk b/core/cc_prebuilt_internal.mk
index 2bf4fdc..6313019 100644
--- a/core/cc_prebuilt_internal.mk
+++ b/core/cc_prebuilt_internal.mk
@@ -75,18 +75,9 @@
built_module := $(LOCAL_BUILT_MODULE)
ifdef prebuilt_module_is_a_library
-export_includes := $(intermediates)/export_includes
-export_cflags := $(foreach d,$(LOCAL_EXPORT_C_INCLUDE_DIRS),-I $(d))
-$(export_includes): PRIVATE_EXPORT_CFLAGS := $(export_cflags)
-$(export_includes): $(LOCAL_EXPORT_C_INCLUDE_DEPS)
- @echo Export includes file: $< -- $@
- $(hide) mkdir -p $(dir $@) && rm -f $@
-ifdef export_cflags
- $(hide) echo "$(PRIVATE_EXPORT_CFLAGS)" >$@
-else
- $(hide) touch $@
-endif
-export_cflags :=
+EXPORTS_LIST := $(EXPORTS_LIST) $(intermediates)
+EXPORTS.$(intermediates).FLAGS := $(foreach d,$(LOCAL_EXPORT_C_INCLUDE_DIRS),-I $(d))
+EXPORTS.$(intermediates).DEPS := $(LOCAL_EXPORT_C_INCLUDE_DEPS)
include $(BUILD_SYSTEM)/allowed_ndk_types.mk
@@ -135,21 +126,23 @@
endif
endif
-my_shared_libraries := \
+my_shared_libraries := $(strip \
$(filter-out $(my_system_shared_libraries),$(LOCAL_SHARED_LIBRARIES)) \
- $(my_system_shared_libraries)
+ $(my_system_shared_libraries))
+
+# Extra shared libraries introduced by LOCAL_CXX_STL (may append some libraries to
+# my_shared_libraries).
+include $(BUILD_SYSTEM)/cxx_stl_setup.mk
ifdef my_shared_libraries
-# Extra shared libraries introduced by LOCAL_CXX_STL.
-include $(BUILD_SYSTEM)/cxx_stl_setup.mk
ifdef LOCAL_USE_VNDK
my_shared_libraries := $(foreach l,$(my_shared_libraries),\
$(if $(SPLIT_VENDOR.SHARED_LIBRARIES.$(l)),$(l).vendor,$(l)))
endif
$(LOCAL_2ND_ARCH_VAR_PREFIX)$(my_prefix)DEPENDENCIES_ON_SHARED_LIBRARIES += \
$(my_register_name):$(LOCAL_INSTALLED_MODULE):$(subst $(space),$(comma),$(my_shared_libraries))
-endif
endif # my_shared_libraries
+endif # LOCAL_INSTALLED_MODULE
# We need to enclose the above export_includes and my_built_shared_libraries in
# "my_strip_module not true" because otherwise the rules are defined in dynamic_binary.mk.
@@ -187,15 +180,8 @@
endif
endif
-ifneq ($(filter init%rc,$(notdir $(LOCAL_INSTALLED_MODULE)))$(filter %/etc/init,$(dir $(LOCAL_INSTALLED_MODULE))),)
- $(eval $(call copy-init-script-file-checked,$(my_prebuilt_src_file),$(built_module)))
-else ifneq ($(LOCAL_PREBUILT_STRIP_COMMENTS),)
-$(built_module) : $(my_prebuilt_src_file)
- $(transform-prebuilt-to-target-strip-comments)
-else
$(built_module) : $(my_prebuilt_src_file)
$(transform-prebuilt-to-target)
-endif
ifneq ($(filter EXECUTABLES NATIVE_TESTS,$(LOCAL_MODULE_CLASS)),)
$(hide) chmod +x $@
endif
diff --git a/core/clear_vars.mk b/core/clear_vars.mk
index 5973e0e..3cadcc2 100644
--- a/core/clear_vars.mk
+++ b/core/clear_vars.mk
@@ -80,6 +80,7 @@
LOCAL_DROIDDOC_TEMPLATE_DIR:=
LOCAL_DROIDDOC_USE_STANDARD_DOCLET:=
LOCAL_DX_FLAGS:=
+LOCAL_DYLIB_LIBRARIES:=
LOCAL_EMMA_COVERAGE_FILTER:=
LOCAL_EMMA_INSTRUMENT:=
LOCAL_ENFORCE_USES_LIBRARIES:=
@@ -114,6 +115,7 @@
LOCAL_HOST_PREFIX:=
LOCAL_HOST_REQUIRED_MODULES:=
LOCAL_INIT_RC:=
+LOCAL_INJECT_BSSL_HASH:=
LOCAL_INSTALLED_MODULE:=
LOCAL_INSTALLED_MODULE_STEM:=
LOCAL_INSTRUMENTATION_FOR:=
@@ -214,12 +216,15 @@
LOCAL_PRESUBMIT_DISABLED:=
LOCAL_PRIVATE_PLATFORM_APIS:=
LOCAL_PRIVILEGED_MODULE:=
+LOCAL_PROC_MACRO_LIBRARIES:=
# '',full,custom,disabled,obfuscation,optimization
LOCAL_PRODUCT_MODULE:=
-LOCAL_PRODUCT_SERVICES_MODULE:=
+# TODO(b/135957588) Remove LOCAL_PRODUCT_SERVICES_MODULE
+LOCAL_PRODUCT_SERVICES_MODULE :=
LOCAL_PROGUARD_ENABLED:=
LOCAL_PROGUARD_FLAG_FILES:=
LOCAL_PROGUARD_FLAGS:=
+LOCAL_PROGUARD_FLAGS_DEPS:=
LOCAL_PROPRIETARY_MODULE:=
LOCAL_PROTOC_FLAGS:=
# lite(default),micro,nano,stream,full,nanopb-c,nanopb-c-enable_malloc,nanopb-c-16bit,nanopb-c-enable_malloc-16bit,nanopb-c-32bit,nanopb-c-enable_malloc-32bit
@@ -239,6 +244,7 @@
LOCAL_REQUIRED_MODULES:=
LOCAL_RES_LIBRARIES:=
LOCAL_RESOURCE_DIR:=
+LOCAL_RLIB_LIBRARIES:=
LOCAL_RMTYPEDEFS:=
LOCAL_RRO_THEME:=
LOCAL_RTTI_FLAG:=
@@ -279,6 +285,7 @@
LOCAL_STATIC_JAVA_AAR_LIBRARIES:=
LOCAL_STATIC_JAVA_LIBRARIES:=
LOCAL_STATIC_LIBRARIES:=
+LOCAL_SYSTEM_EXT_MODULE:=
LOCAL_STRIP_MODULE:=
LOCAL_SYSTEM_SHARED_LIBRARIES:=none
LOCAL_TARGET_REQUIRED_MODULES:=
diff --git a/core/combo/HOST_darwin-x86_64.mk b/core/combo/HOST_darwin-x86_64.mk
index 07f8d9f..dac3bbf 100644
--- a/core/combo/HOST_darwin-x86_64.mk
+++ b/core/combo/HOST_darwin-x86_64.mk
@@ -59,8 +59,3 @@
$(PRIVATE_LDFLAGS) \
$(PRIVATE_LDLIBS)
endef
-
-# $(1): The file to check
-define get-file-size
-stat -f "%z" $(1)
-endef
diff --git a/core/combo/HOST_linux-x86.mk b/core/combo/HOST_linux-x86.mk
index deed943..3f4ec0a 100644
--- a/core/combo/HOST_linux-x86.mk
+++ b/core/combo/HOST_linux-x86.mk
@@ -23,8 +23,3 @@
############################################################
## Macros after this line are shared by the 64-bit config.
-
-# $(1): The file to check
-define get-file-size
-stat -c "%s" "$(1)" | tr -d '\n'
-endef
diff --git a/core/combo/TARGET_linux-mips.mk b/core/combo/TARGET_linux-mips.mk
index ba76969..9f14aa2 100644
--- a/core/combo/TARGET_linux-mips.mk
+++ b/core/combo/TARGET_linux-mips.mk
@@ -33,12 +33,6 @@
TARGET_$(combo_2nd_arch_prefix)ARCH_VARIANT := mips32r2-fp
endif
-TARGET_ARCH_SPECIFIC_MAKEFILE := $(BUILD_COMBOS)/arch/$(TARGET_$(combo_2nd_arch_prefix)ARCH)/$(TARGET_$(combo_2nd_arch_prefix)ARCH_VARIANT).mk
-ifeq ($(strip $(wildcard $(TARGET_ARCH_SPECIFIC_MAKEFILE))),)
-$(error Unknown MIPS architecture variant: $(TARGET_$(combo_2nd_arch_prefix)ARCH_VARIANT))
-endif
-
-include $(TARGET_ARCH_SPECIFIC_MAKEFILE)
include $(BUILD_SYSTEM)/combo/fdo.mk
define $(combo_var_prefix)transform-shared-lib-to-toc
diff --git a/core/combo/TARGET_linux-mips64.mk b/core/combo/TARGET_linux-mips64.mk
index b498d1f..ae17e46 100644
--- a/core/combo/TARGET_linux-mips64.mk
+++ b/core/combo/TARGET_linux-mips64.mk
@@ -33,12 +33,6 @@
TARGET_ARCH_VARIANT := mips64r6
endif
-TARGET_ARCH_SPECIFIC_MAKEFILE := $(BUILD_COMBOS)/arch/$(TARGET_ARCH)/$(TARGET_ARCH_VARIANT).mk
-ifeq ($(strip $(wildcard $(TARGET_ARCH_SPECIFIC_MAKEFILE))),)
-$(error Unknown MIPS architecture variant: $(TARGET_ARCH_VARIANT))
-endif
-
-include $(TARGET_ARCH_SPECIFIC_MAKEFILE)
include $(BUILD_SYSTEM)/combo/fdo.mk
define $(combo_var_prefix)transform-shared-lib-to-toc
diff --git a/core/combo/arch/arm/armv7-a-neon.mk b/core/combo/arch/arm/armv7-a-neon.mk
index 01d2235..0c01ac3 100644
--- a/core/combo/arch/arm/armv7-a-neon.mk
+++ b/core/combo/arch/arm/armv7-a-neon.mk
@@ -1,7 +1,6 @@
# Configuration for Linux on ARM.
# Generating binaries for the ARMv7-a architecture and higher with NEON
#
-ARCH_ARM_HAVE_ARMV7A := true
ARCH_ARM_HAVE_VFP := true
ARCH_ARM_HAVE_VFP_D32 := true
ARCH_ARM_HAVE_NEON := true
diff --git a/core/combo/arch/arm/armv8-2a.mk b/core/combo/arch/arm/armv8-2a.mk
index c1d8182..7e2ca18 100644
--- a/core/combo/arch/arm/armv8-2a.mk
+++ b/core/combo/arch/arm/armv8-2a.mk
@@ -3,7 +3,6 @@
#
# Many libraries are not aware of armv8-2a, and AArch32 is (almost) a superset
# of armv7-a-neon. So just let them think we are just like v7.
-ARCH_ARM_HAVE_ARMV7A := true
ARCH_ARM_HAVE_VFP := true
ARCH_ARM_HAVE_VFP_D32 := true
ARCH_ARM_HAVE_NEON := true
diff --git a/core/combo/arch/arm/armv8-a.mk b/core/combo/arch/arm/armv8-a.mk
index 9ef5c49..19bc014 100644
--- a/core/combo/arch/arm/armv8-a.mk
+++ b/core/combo/arch/arm/armv8-a.mk
@@ -3,7 +3,6 @@
#
# Many libraries are not aware of armv8-a, and AArch32 is (almost) a superset
# of armv7-a-neon. So just let them think we are just like v7.
-ARCH_ARM_HAVE_ARMV7A := true
ARCH_ARM_HAVE_VFP := true
ARCH_ARM_HAVE_VFP_D32 := true
ARCH_ARM_HAVE_NEON := true
diff --git a/core/combo/arch/mips/mips32-fp.mk b/core/combo/arch/mips/mips32-fp.mk
deleted file mode 100644
index 4b09bc1..0000000
--- a/core/combo/arch/mips/mips32-fp.mk
+++ /dev/null
@@ -1,5 +0,0 @@
-# Configuration for Android on MIPS.
-# Generating binaries for MIPS32/hard-float/little-endian
-
-ARCH_MIPS_HAS_FPU :=true
-ARCH_HAVE_ALIGNED_DOUBLES :=true
diff --git a/core/combo/arch/mips/mips32r2-fp-xburst.mk b/core/combo/arch/mips/mips32r2-fp-xburst.mk
deleted file mode 100644
index 83fb12e..0000000
--- a/core/combo/arch/mips/mips32r2-fp-xburst.mk
+++ /dev/null
@@ -1,6 +0,0 @@
-# Configuration for Android on Ingenic xb4780/Xburst MIPS CPU.
-# Generating binaries for MIPS32R2/hard-float/little-endian without
-# support for the Madd family of instructions.
-
-ARCH_MIPS_HAS_FPU :=true
-ARCH_HAVE_ALIGNED_DOUBLES :=true
diff --git a/core/combo/arch/mips/mips32r2-fp.mk b/core/combo/arch/mips/mips32r2-fp.mk
deleted file mode 100644
index 97c14c3..0000000
--- a/core/combo/arch/mips/mips32r2-fp.mk
+++ /dev/null
@@ -1,5 +0,0 @@
-# Configuration for Android on MIPS.
-# Generating binaries for MIPS32R2/hard-float/little-endian
-
-ARCH_MIPS_HAS_FPU :=true
-ARCH_HAVE_ALIGNED_DOUBLES :=true
diff --git a/core/combo/arch/mips/mips32r2dsp-fp.mk b/core/combo/arch/mips/mips32r2dsp-fp.mk
deleted file mode 100644
index 522b6b9..0000000
--- a/core/combo/arch/mips/mips32r2dsp-fp.mk
+++ /dev/null
@@ -1,7 +0,0 @@
-# Configuration for Android on MIPS.
-# Generating binaries for MIPS32R2/hard-float/little-endian/dsp
-
-ARCH_MIPS_HAS_DSP :=true
-ARCH_MIPS_DSP_REV :=1
-ARCH_MIPS_HAS_FPU :=true
-ARCH_HAVE_ALIGNED_DOUBLES :=true
diff --git a/core/combo/arch/mips/mips32r2dspr2-fp.mk b/core/combo/arch/mips/mips32r2dspr2-fp.mk
deleted file mode 100644
index 886d378..0000000
--- a/core/combo/arch/mips/mips32r2dspr2-fp.mk
+++ /dev/null
@@ -1,7 +0,0 @@
-# Configuration for Android on MIPS.
-# Generating binaries for MIPS32R2/hard-float/little-endian/dsp
-
-ARCH_MIPS_HAS_DSP :=true
-ARCH_MIPS_DSP_REV :=2
-ARCH_MIPS_HAS_FPU :=true
-ARCH_HAVE_ALIGNED_DOUBLES :=true
diff --git a/core/combo/arch/mips/mips32r6.mk b/core/combo/arch/mips/mips32r6.mk
deleted file mode 100644
index 7bc6cac..0000000
--- a/core/combo/arch/mips/mips32r6.mk
+++ /dev/null
@@ -1,4 +0,0 @@
-# Configuration for Android on MIPS.
-# Generating binaries for MIPS32R6/hard-float/little-endian
-
-ARCH_MIPS_REV6 := true
diff --git a/core/combo/arch/mips64/mips64r2.mk b/core/combo/arch/mips64/mips64r2.mk
deleted file mode 100644
index 54aa387..0000000
--- a/core/combo/arch/mips64/mips64r2.mk
+++ /dev/null
@@ -1,6 +0,0 @@
-# Configuration for Android on mips64r2.
-
-# This target is for temporary use only, until mips64r6 is supported by Android's qemu.
-
-ARCH_MIPS_HAS_FPU :=true
-ARCH_HAVE_ALIGNED_DOUBLES :=true
diff --git a/core/combo/arch/mips64/mips64r6.mk b/core/combo/arch/mips64/mips64r6.mk
deleted file mode 100644
index 42d6c9e..0000000
--- a/core/combo/arch/mips64/mips64r6.mk
+++ /dev/null
@@ -1,3 +0,0 @@
-# Configuration for Android on mips64r6.
-
-ARCH_MIPS64_REV6 := true
diff --git a/core/combo/arch/x86/amberlake.mk b/core/combo/arch/x86/amberlake.mk
index 37100a4..a7ae6ed 100644
--- a/core/combo/arch/x86/amberlake.mk
+++ b/core/combo/arch/x86/amberlake.mk
@@ -3,11 +3,4 @@
# that have AVX2 feature flag
#
-ARCH_X86_HAVE_SSSE3 := true
-ARCH_X86_HAVE_SSE4 := true
ARCH_X86_HAVE_SSE4_1 := true
-ARCH_X86_HAVE_SSE4_2 := true
-ARCH_X86_HAVE_AVX := true
-ARCH_X86_HAVE_AVX2 := true
-ARCH_X86_HAVE_POPCNT := true
-ARCH_X86_HAVE_MOVBE := true
diff --git a/core/combo/arch/x86/atom.mk b/core/combo/arch/x86/atom.mk
index 43a170c..bae7946 100644
--- a/core/combo/arch/x86/atom.mk
+++ b/core/combo/arch/x86/atom.mk
@@ -4,6 +4,3 @@
#
# See build/make/core/combo/arch/x86/x86.mk for differences.
#
-ARCH_X86_HAVE_SSSE3 := true
-ARCH_X86_HAVE_MOVBE := true
-ARCH_X86_HAVE_POPCNT := false # popcnt is not supported by current Atom CPUs
diff --git a/core/combo/arch/x86/broadwell.mk b/core/combo/arch/x86/broadwell.mk
index 37100a4..a7ae6ed 100644
--- a/core/combo/arch/x86/broadwell.mk
+++ b/core/combo/arch/x86/broadwell.mk
@@ -3,11 +3,4 @@
# that have AVX2 feature flag
#
-ARCH_X86_HAVE_SSSE3 := true
-ARCH_X86_HAVE_SSE4 := true
ARCH_X86_HAVE_SSE4_1 := true
-ARCH_X86_HAVE_SSE4_2 := true
-ARCH_X86_HAVE_AVX := true
-ARCH_X86_HAVE_AVX2 := true
-ARCH_X86_HAVE_POPCNT := true
-ARCH_X86_HAVE_MOVBE := true
diff --git a/core/combo/arch/x86/haswell.mk b/core/combo/arch/x86/haswell.mk
index 50c27b4..ffa3bac 100644
--- a/core/combo/arch/x86/haswell.mk
+++ b/core/combo/arch/x86/haswell.mk
@@ -1,11 +1,4 @@
# Configuration for Linux on x86.
# Generating binaries for Haswell processors.
#
-ARCH_X86_HAVE_SSSE3 := true
-ARCH_X86_HAVE_SSE4 := true
ARCH_X86_HAVE_SSE4_1 := true
-ARCH_X86_HAVE_SSE4_2 := true
-ARCH_X86_HAVE_AES_NI := true
-ARCH_X86_HAVE_AVX := true
-ARCH_X86_HAVE_POPCNT := true
-ARCH_X86_HAVE_MOVBE := true
diff --git a/core/combo/arch/x86/icelake.mk b/core/combo/arch/x86/icelake.mk
index 76fe212..a7ae6ed 100644
--- a/core/combo/arch/x86/icelake.mk
+++ b/core/combo/arch/x86/icelake.mk
@@ -3,12 +3,4 @@
# that have AVX2 feature flag
#
-ARCH_X86_HAVE_SSSE3 := true
-ARCH_X86_HAVE_SSE4 := true
ARCH_X86_HAVE_SSE4_1 := true
-ARCH_X86_HAVE_SSE4_2 := true
-ARCH_X86_HAVE_AVX := true
-ARCH_X86_HAVE_AVX2 := true
-ARCH_X86_HAVE_AVX512 := true
-ARCH_X86_HAVE_POPCNT := true
-ARCH_X86_HAVE_MOVBE := true
diff --git a/core/combo/arch/x86/ivybridge.mk b/core/combo/arch/x86/ivybridge.mk
index 44035d8..a1358e6 100644
--- a/core/combo/arch/x86/ivybridge.mk
+++ b/core/combo/arch/x86/ivybridge.mk
@@ -1,11 +1,4 @@
# Configuration for Linux on x86.
# Generating binaries for Ivy Bridge processors.
#
-ARCH_X86_HAVE_SSSE3 := true
-ARCH_X86_HAVE_SSE4 := true
ARCH_X86_HAVE_SSE4_1 := true
-ARCH_X86_HAVE_SSE4_2 := true
-ARCH_X86_HAVE_AES_NI := true
-ARCH_X86_HAVE_AVX := true
-ARCH_X86_HAVE_POPCNT := true
-ARCH_X86_HAVE_MOVBE := false
diff --git a/core/combo/arch/x86/kabylake.mk b/core/combo/arch/x86/kabylake.mk
index 50518d6..9906259 100644
--- a/core/combo/arch/x86/kabylake.mk
+++ b/core/combo/arch/x86/kabylake.mk
@@ -3,11 +3,4 @@
# that support AVX2 feature flag
#
-ARCH_X86_HAVE_SSSE3 := true
-ARCH_X86_HAVE_SSE4 := true
ARCH_X86_HAVE_SSE4_1 := true
-ARCH_X86_HAVE_SSE4_2 := true
-ARCH_X86_HAVE_AVX := true
-ARCH_X86_HAVE_AVX2 := true
-ARCH_X86_HAVE_POPCNT := true
-ARCH_X86_HAVE_MOVBE := true
diff --git a/core/combo/arch/x86/sandybridge.mk b/core/combo/arch/x86/sandybridge.mk
index a4c1bd9..d6552ab 100644
--- a/core/combo/arch/x86/sandybridge.mk
+++ b/core/combo/arch/x86/sandybridge.mk
@@ -1,11 +1,4 @@
# Configuration for Linux on x86.
# Generating binaries for SandyBridge processors.
#
-ARCH_X86_HAVE_SSSE3 := true
-ARCH_X86_HAVE_SSE4 := true
ARCH_X86_HAVE_SSE4_1 := true
-ARCH_X86_HAVE_SSE4_2 := true
-ARCH_X86_HAVE_AES_NI := false
-ARCH_X86_HAVE_AVX := false
-ARCH_X86_HAVE_POPCNT := true
-ARCH_X86_HAVE_MOVBE := false
diff --git a/core/combo/arch/x86/silvermont.mk b/core/combo/arch/x86/silvermont.mk
index cba1079..8ac2b98 100644
--- a/core/combo/arch/x86/silvermont.mk
+++ b/core/combo/arch/x86/silvermont.mk
@@ -4,10 +4,4 @@
# See build/make/core/combo/arch/x86/x86-atom.mk for differences.
#
-ARCH_X86_HAVE_SSSE3 := true
-ARCH_X86_HAVE_SSE4 := true
ARCH_X86_HAVE_SSE4_1 := true
-ARCH_X86_HAVE_SSE4_2 := true
-ARCH_X86_HAVE_AES_NI := true
-ARCH_X86_HAVE_POPCNT := true
-ARCH_X86_HAVE_MOVBE := true
diff --git a/core/combo/arch/x86/skylake.mk b/core/combo/arch/x86/skylake.mk
index 03705c0..9906259 100644
--- a/core/combo/arch/x86/skylake.mk
+++ b/core/combo/arch/x86/skylake.mk
@@ -3,13 +3,4 @@
# that support AVX2 feature flag
#
-ARCH_X86_HAVE_SSSE3 := true
-ARCH_X86_HAVE_SSE4 := true
ARCH_X86_HAVE_SSE4_1 := true
-ARCH_X86_HAVE_SSE4_2 := true
-ARCH_X86_HAVE_AVX := true
-ARCH_X86_HAVE_AVX2 := true
-ARCH_X86_HAVE_AVX512 := true
-ARCH_X86_HAVE_POPCNT := true
-ARCH_X86_HAVE_MOVBE := true
-
diff --git a/core/combo/arch/x86/stoneyridge.mk b/core/combo/arch/x86/stoneyridge.mk
index 30405a1..05ff77a 100644
--- a/core/combo/arch/x86/stoneyridge.mk
+++ b/core/combo/arch/x86/stoneyridge.mk
@@ -1,12 +1,4 @@
# Configuration for Linux on x86.
# Generating binaries for Stoney Ridge processors.
#
-ARCH_X86_HAVE_SSSE3 := true
-ARCH_X86_HAVE_SSE4 := true
ARCH_X86_HAVE_SSE4_1 := true
-ARCH_X86_HAVE_SSE4_2 := true
-ARCH_X86_HAVE_AES_NI := true
-ARCH_X86_HAVE_AVX := true
-ARCH_X86_HAVE_AVX2 := true
-ARCH_X86_HAVE_POPCNT := true
-ARCH_X86_HAVE_MOVBE := true
diff --git a/core/combo/arch/x86/tigerlake.mk b/core/combo/arch/x86/tigerlake.mk
index 76fe212..a7ae6ed 100644
--- a/core/combo/arch/x86/tigerlake.mk
+++ b/core/combo/arch/x86/tigerlake.mk
@@ -3,12 +3,4 @@
# that have AVX2 feature flag
#
-ARCH_X86_HAVE_SSSE3 := true
-ARCH_X86_HAVE_SSE4 := true
ARCH_X86_HAVE_SSE4_1 := true
-ARCH_X86_HAVE_SSE4_2 := true
-ARCH_X86_HAVE_AVX := true
-ARCH_X86_HAVE_AVX2 := true
-ARCH_X86_HAVE_AVX512 := true
-ARCH_X86_HAVE_POPCNT := true
-ARCH_X86_HAVE_MOVBE := true
diff --git a/core/combo/arch/x86/whiskeylake.mk b/core/combo/arch/x86/whiskeylake.mk
index 37100a4..a7ae6ed 100644
--- a/core/combo/arch/x86/whiskeylake.mk
+++ b/core/combo/arch/x86/whiskeylake.mk
@@ -3,11 +3,4 @@
# that have AVX2 feature flag
#
-ARCH_X86_HAVE_SSSE3 := true
-ARCH_X86_HAVE_SSE4 := true
ARCH_X86_HAVE_SSE4_1 := true
-ARCH_X86_HAVE_SSE4_2 := true
-ARCH_X86_HAVE_AVX := true
-ARCH_X86_HAVE_AVX2 := true
-ARCH_X86_HAVE_POPCNT := true
-ARCH_X86_HAVE_MOVBE := true
diff --git a/core/combo/arch/x86/x86.mk b/core/combo/arch/x86/x86.mk
index db55ff8..066f66a 100644
--- a/core/combo/arch/x86/x86.mk
+++ b/core/combo/arch/x86/x86.mk
@@ -8,9 +8,3 @@
# These features are optional and shall not be included in the base platform
# Otherwise, sdk_x86-eng system images might fail to run on some
# developer machines.
-ARCH_X86_HAVE_SSSE3 := false
-ARCH_X86_HAVE_MOVBE := false
-ARCH_X86_HAVE_POPCNT := false
-ARCH_X86_HAVE_AVX := false
-ARCH_X86_HAVE_AVX2 := false
-ARCH_X86_HAVE_AVX512 := false
diff --git a/core/combo/arch/x86/x86_64.mk b/core/combo/arch/x86/x86_64.mk
index fc2a087..eff406b 100644
--- a/core/combo/arch/x86/x86_64.mk
+++ b/core/combo/arch/x86/x86_64.mk
@@ -4,9 +4,4 @@
# The generic 'x86' variant cannot be used, since it resets some flags used
# by the 'x86_64' variant.
-ARCH_X86_HAVE_SSSE3 := true
-ARCH_X86_HAVE_MOVBE := false # Only supported on Atom.
-ARCH_X86_HAVE_POPCNT := true
-ARCH_X86_HAVE_SSE4 := true
ARCH_X86_HAVE_SSE4_1 := true
-ARCH_X86_HAVE_SSE4_2 := true
diff --git a/core/combo/arch/x86_64/amberlake.mk b/core/combo/arch/x86_64/amberlake.mk
index 37100a4..a7ae6ed 100644
--- a/core/combo/arch/x86_64/amberlake.mk
+++ b/core/combo/arch/x86_64/amberlake.mk
@@ -3,11 +3,4 @@
# that have AVX2 feature flag
#
-ARCH_X86_HAVE_SSSE3 := true
-ARCH_X86_HAVE_SSE4 := true
ARCH_X86_HAVE_SSE4_1 := true
-ARCH_X86_HAVE_SSE4_2 := true
-ARCH_X86_HAVE_AVX := true
-ARCH_X86_HAVE_AVX2 := true
-ARCH_X86_HAVE_POPCNT := true
-ARCH_X86_HAVE_MOVBE := true
diff --git a/core/combo/arch/x86_64/broadwell.mk b/core/combo/arch/x86_64/broadwell.mk
index 37100a4..a7ae6ed 100644
--- a/core/combo/arch/x86_64/broadwell.mk
+++ b/core/combo/arch/x86_64/broadwell.mk
@@ -3,11 +3,4 @@
# that have AVX2 feature flag
#
-ARCH_X86_HAVE_SSSE3 := true
-ARCH_X86_HAVE_SSE4 := true
ARCH_X86_HAVE_SSE4_1 := true
-ARCH_X86_HAVE_SSE4_2 := true
-ARCH_X86_HAVE_AVX := true
-ARCH_X86_HAVE_AVX2 := true
-ARCH_X86_HAVE_POPCNT := true
-ARCH_X86_HAVE_MOVBE := true
diff --git a/core/combo/arch/x86_64/haswell.mk b/core/combo/arch/x86_64/haswell.mk
index f9c6ebd..faf12fa 100644
--- a/core/combo/arch/x86_64/haswell.mk
+++ b/core/combo/arch/x86_64/haswell.mk
@@ -1,11 +1,4 @@
# Configuration for Linux on x86_64.
# Generating binaries for Haswell processors.
#
-ARCH_X86_HAVE_SSSE3 := true
-ARCH_X86_HAVE_SSE4 := true
ARCH_X86_HAVE_SSE4_1 := true
-ARCH_X86_HAVE_SSE4_2 := true
-ARCH_X86_HAVE_AES_NI := true
-ARCH_X86_HAVE_AVX := true
-ARCH_X86_HAVE_POPCNT := true
-ARCH_X86_HAVE_MOVBE := true
diff --git a/core/combo/arch/x86_64/icelake.mk b/core/combo/arch/x86_64/icelake.mk
index 76fe212..a7ae6ed 100644
--- a/core/combo/arch/x86_64/icelake.mk
+++ b/core/combo/arch/x86_64/icelake.mk
@@ -3,12 +3,4 @@
# that have AVX2 feature flag
#
-ARCH_X86_HAVE_SSSE3 := true
-ARCH_X86_HAVE_SSE4 := true
ARCH_X86_HAVE_SSE4_1 := true
-ARCH_X86_HAVE_SSE4_2 := true
-ARCH_X86_HAVE_AVX := true
-ARCH_X86_HAVE_AVX2 := true
-ARCH_X86_HAVE_AVX512 := true
-ARCH_X86_HAVE_POPCNT := true
-ARCH_X86_HAVE_MOVBE := true
diff --git a/core/combo/arch/x86_64/ivybridge.mk b/core/combo/arch/x86_64/ivybridge.mk
index 69011d6..464fa98 100644
--- a/core/combo/arch/x86_64/ivybridge.mk
+++ b/core/combo/arch/x86_64/ivybridge.mk
@@ -1,11 +1,4 @@
# Configuration for Linux on x86_64.
# Generating binaries for Ivy Bridge processors.
#
-ARCH_X86_HAVE_SSSE3 := true
-ARCH_X86_HAVE_SSE4 := true
ARCH_X86_HAVE_SSE4_1 := true
-ARCH_X86_HAVE_SSE4_2 := true
-ARCH_X86_HAVE_AES_NI := true
-ARCH_X86_HAVE_AVX := true
-ARCH_X86_HAVE_POPCNT := true
-ARCH_X86_HAVE_MOVBE := false
diff --git a/core/combo/arch/x86_64/kabylake.mk b/core/combo/arch/x86_64/kabylake.mk
index 37100a4..a7ae6ed 100644
--- a/core/combo/arch/x86_64/kabylake.mk
+++ b/core/combo/arch/x86_64/kabylake.mk
@@ -3,11 +3,4 @@
# that have AVX2 feature flag
#
-ARCH_X86_HAVE_SSSE3 := true
-ARCH_X86_HAVE_SSE4 := true
ARCH_X86_HAVE_SSE4_1 := true
-ARCH_X86_HAVE_SSE4_2 := true
-ARCH_X86_HAVE_AVX := true
-ARCH_X86_HAVE_AVX2 := true
-ARCH_X86_HAVE_POPCNT := true
-ARCH_X86_HAVE_MOVBE := true
diff --git a/core/combo/arch/x86_64/sandybridge.mk b/core/combo/arch/x86_64/sandybridge.mk
index 2092d19..a09db2a 100644
--- a/core/combo/arch/x86_64/sandybridge.mk
+++ b/core/combo/arch/x86_64/sandybridge.mk
@@ -1,11 +1,4 @@
# Configuration for Linux on x86_64.
# Generating binaries for SandyBridge processors.
#
-ARCH_X86_HAVE_SSSE3 := true
-ARCH_X86_HAVE_SSE4 := true
ARCH_X86_HAVE_SSE4_1 := true
-ARCH_X86_HAVE_SSE4_2 := true
-ARCH_X86_HAVE_AES_NI := false
-ARCH_X86_HAVE_AVX := false
-ARCH_X86_HAVE_POPCNT := true
-ARCH_X86_HAVE_MOVBE := false
diff --git a/core/combo/arch/x86_64/silvermont.mk b/core/combo/arch/x86_64/silvermont.mk
index cba1079..8ac2b98 100644
--- a/core/combo/arch/x86_64/silvermont.mk
+++ b/core/combo/arch/x86_64/silvermont.mk
@@ -4,10 +4,4 @@
# See build/make/core/combo/arch/x86/x86-atom.mk for differences.
#
-ARCH_X86_HAVE_SSSE3 := true
-ARCH_X86_HAVE_SSE4 := true
ARCH_X86_HAVE_SSE4_1 := true
-ARCH_X86_HAVE_SSE4_2 := true
-ARCH_X86_HAVE_AES_NI := true
-ARCH_X86_HAVE_POPCNT := true
-ARCH_X86_HAVE_MOVBE := true
diff --git a/core/combo/arch/x86_64/skylake.mk b/core/combo/arch/x86_64/skylake.mk
index 76fe212..a7ae6ed 100644
--- a/core/combo/arch/x86_64/skylake.mk
+++ b/core/combo/arch/x86_64/skylake.mk
@@ -3,12 +3,4 @@
# that have AVX2 feature flag
#
-ARCH_X86_HAVE_SSSE3 := true
-ARCH_X86_HAVE_SSE4 := true
ARCH_X86_HAVE_SSE4_1 := true
-ARCH_X86_HAVE_SSE4_2 := true
-ARCH_X86_HAVE_AVX := true
-ARCH_X86_HAVE_AVX2 := true
-ARCH_X86_HAVE_AVX512 := true
-ARCH_X86_HAVE_POPCNT := true
-ARCH_X86_HAVE_MOVBE := true
diff --git a/core/combo/arch/x86_64/stoneyridge.mk b/core/combo/arch/x86_64/stoneyridge.mk
index f7d9583..5950d9a 100644
--- a/core/combo/arch/x86_64/stoneyridge.mk
+++ b/core/combo/arch/x86_64/stoneyridge.mk
@@ -1,12 +1,4 @@
# Configuration for Linux on x86_64.
# Generating binaries for Stoney Ridge processors.
#
-ARCH_X86_HAVE_SSSE3 := true
-ARCH_X86_HAVE_SSE4 := true
ARCH_X86_HAVE_SSE4_1 := true
-ARCH_X86_HAVE_SSE4_2 := true
-ARCH_X86_HAVE_AES_NI := true
-ARCH_X86_HAVE_AVX := true
-ARCH_X86_HAVE_AVX2 := true
-ARCH_X86_HAVE_POPCNT := true
-ARCH_X86_HAVE_MOVBE := true
diff --git a/core/combo/arch/x86_64/tigerlake.mk b/core/combo/arch/x86_64/tigerlake.mk
index 76fe212..a7ae6ed 100644
--- a/core/combo/arch/x86_64/tigerlake.mk
+++ b/core/combo/arch/x86_64/tigerlake.mk
@@ -3,12 +3,4 @@
# that have AVX2 feature flag
#
-ARCH_X86_HAVE_SSSE3 := true
-ARCH_X86_HAVE_SSE4 := true
ARCH_X86_HAVE_SSE4_1 := true
-ARCH_X86_HAVE_SSE4_2 := true
-ARCH_X86_HAVE_AVX := true
-ARCH_X86_HAVE_AVX2 := true
-ARCH_X86_HAVE_AVX512 := true
-ARCH_X86_HAVE_POPCNT := true
-ARCH_X86_HAVE_MOVBE := true
diff --git a/core/combo/arch/x86_64/whiskeylake.mk b/core/combo/arch/x86_64/whiskeylake.mk
index 37100a4..a7ae6ed 100644
--- a/core/combo/arch/x86_64/whiskeylake.mk
+++ b/core/combo/arch/x86_64/whiskeylake.mk
@@ -3,11 +3,4 @@
# that have AVX2 feature flag
#
-ARCH_X86_HAVE_SSSE3 := true
-ARCH_X86_HAVE_SSE4 := true
ARCH_X86_HAVE_SSE4_1 := true
-ARCH_X86_HAVE_SSE4_2 := true
-ARCH_X86_HAVE_AVX := true
-ARCH_X86_HAVE_AVX2 := true
-ARCH_X86_HAVE_POPCNT := true
-ARCH_X86_HAVE_MOVBE := true
diff --git a/core/combo/arch/x86_64/x86_64.mk b/core/combo/arch/x86_64/x86_64.mk
index e7c8928..17413c7 100755
--- a/core/combo/arch/x86_64/x86_64.mk
+++ b/core/combo/arch/x86_64/x86_64.mk
@@ -5,12 +5,4 @@
# that are run in the emulator under KVM emulation (i.e. running directly on
# the host development machine's CPU).
-ARCH_X86_HAVE_SSSE3 := true
-ARCH_X86_HAVE_MOVBE := false # Only supported on Atom.
-ARCH_X86_HAVE_POPCNT := true
-ARCH_X86_HAVE_SSE4 := true
ARCH_X86_HAVE_SSE4_1 := true
-ARCH_X86_HAVE_SSE4_2 := true
-ARCH_X86_HAVE_AVX := false
-ARCH_X86_HAVE_AVX2 := false
-ARCH_X86_HAVE_AVX512 := false
diff --git a/core/combo/select.mk b/core/combo/select.mk
index eab4c72..33c8e6d 100644
--- a/core/combo/select.mk
+++ b/core/combo/select.mk
@@ -28,7 +28,7 @@
# Set reasonable defaults for the various variables
-$(combo_var_prefix)GLOBAL_ARFLAGS := cqsD -format=gnu
+$(combo_var_prefix)GLOBAL_ARFLAGS := crsPD -format=gnu
$(combo_var_prefix)STATIC_LIB_SUFFIX := .a
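
For reference, the new default archiver flags spell out as: c create the archive if it does not exist, r insert or replace members (instead of the old q quick-append), s write a symbol index, P use full path names when matching members, D deterministic output (zeroed timestamps and owners). Roughly the equivalent standalone command, with illustrative inputs:

  # ar crsPD -format=gnu libfoo.a foo.o bar.o
  # r rather than q means re-archiving the same object updates the existing
  # member instead of appending a duplicate entry.
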
diff --git a/core/config.mk b/core/config.mk
index 537dc92..d120d61 100644
--- a/core/config.mk
+++ b/core/config.mk
@@ -101,6 +101,25 @@
$(KATI_obsolete_export It is a global setting. See $(CHANGES_URL)#export_keyword)
$(KATI_obsolete_var BUILD_BROKEN_ANDROIDMK_EXPORTS)
$(KATI_obsolete_var PRODUCT_STATIC_BOOT_CONTROL_HAL,Use shared library module instead. See $(CHANGES_URL)#PRODUCT_STATIC_BOOT_CONTROL_HAL)
+$(KATI_obsolete_var \
+ ARCH_ARM_HAVE_ARMV7A \
+ ARCH_DSP_REV \
+ ARCH_HAVE_ALIGNED_DOUBLES \
+ ARCH_MIPS_HAS_DSP \
+ ARCH_MIPS_HAS_FPU \
+ ARCH_MIPS_REV6 \
+ ARCH_X86_HAVE_AES_NI \
+ ARCH_X86_HAVE_AVX \
+ ARCH_X86_HAVE_AVX2 \
+ ARCH_X86_HAVE_AVX512 \
+ ARCH_X86_HAVE_MOVBE \
+ ARCH_X86_HAVE_POPCNT \
+ ARCH_X86_HAVE_SSE4 \
+ ARCH_X86_HAVE_SSE4_2 \
+ ARCH_X86_HAVE_SSSE3 \
+)
+$(KATI_obsolete_var PRODUCT_IOT)
+$(KATI_obsolete_var MD5SUM)
# Used to force goals to build. Only use for conditionally defined goals.
.PHONY: FORCE
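
This KATI_obsolete_var block is the enforcement side of the arch/*.mk cleanups earlier in the change: those per-CPU feature flags are no longer consumed by the make build (only ARCH_X86_HAVE_SSE4_1 and the ARM VFP/NEON flags survive in the variant makefiles), so any remaining reference or assignment now fails at parse time. For example, a stale BoardConfig line such as

  ARCH_X86_HAVE_SSSE3 := true

would abort the build with an "ARCH_X86_HAVE_SSSE3 is obsolete" style error; device configs should simply drop these assignments.
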
@@ -120,6 +139,9 @@
.KATI_READONLY := TARGET_DEVICE_DIR
endif
+ONE_SHOT_MAKEFILE :=
+.KATI_READONLY := ONE_SHOT_MAKEFILE
+
# Set up efficient math functions which are used in make.
# Here since this file is included by envsetup as well as during build.
include $(BUILD_SYSTEM_COMMON)/math.mk
@@ -312,6 +334,7 @@
ifeq ($(CALLED_FROM_SETUP),true)
include $(BUILD_SYSTEM)/ccache.mk
include $(BUILD_SYSTEM)/goma.mk
+include $(BUILD_SYSTEM)/rbe.mk
endif
ifdef TARGET_PREFER_32_BIT
@@ -451,9 +474,6 @@
ifneq ($(filter true,$(SOONG_ALLOW_MISSING_DEPENDENCIES)),)
ALLOW_MISSING_DEPENDENCIES := true
endif
-ifneq ($(ONE_SHOT_MAKEFILE),)
-ALLOW_MISSING_DEPENDENCIES := true
-endif
.KATI_READONLY := ALLOW_MISSING_DEPENDENCIES
TARGET_BUILD_APPS_USE_PREBUILT_SDK :=
@@ -466,7 +486,6 @@
prebuilt_sdk_tools := prebuilts/sdk/tools
prebuilt_sdk_tools_bin := $(prebuilt_sdk_tools)/$(HOST_OS)/bin
-# Always use prebuilts for ckati and makeparallel
prebuilt_build_tools := prebuilts/build-tools
prebuilt_build_tools_wrappers := prebuilts/build-tools/common/bin
prebuilt_build_tools_jars := prebuilts/build-tools/common/framework
@@ -488,22 +507,12 @@
# Tools that are prebuilts for TARGET_BUILD_APPS
#
ifeq (,$(TARGET_BUILD_APPS)$(filter true,$(TARGET_BUILD_PDK)))
- AIDL := $(HOST_OUT_EXECUTABLES)/aidl
AAPT := $(HOST_OUT_EXECUTABLES)/aapt
- AAPT2 := $(HOST_OUT_EXECUTABLES)/aapt2
MAINDEXCLASSES := $(HOST_OUT_EXECUTABLES)/mainDexClasses
- SIGNAPK_JAR := $(HOST_OUT_JAVA_LIBRARIES)/signapk$(COMMON_JAVA_PACKAGE_SUFFIX)
- SIGNAPK_JNI_LIBRARY_PATH := $(HOST_OUT_SHARED_LIBRARIES)
- ZIPALIGN := $(HOST_OUT_EXECUTABLES)/zipalign
else # TARGET_BUILD_APPS || TARGET_BUILD_PDK
- AIDL := $(prebuilt_build_tools_bin)/aidl
AAPT := $(prebuilt_sdk_tools_bin)/aapt
- AAPT2 := $(prebuilt_sdk_tools_bin)/aapt2
MAINDEXCLASSES := $(prebuilt_sdk_tools)/mainDexClasses
- SIGNAPK_JAR := $(prebuilt_sdk_tools)/lib/signapk$(COMMON_JAVA_PACKAGE_SUFFIX)
- SIGNAPK_JNI_LIBRARY_PATH := $(prebuilt_sdk_tools)/$(HOST_OS)/lib64
- ZIPALIGN := $(prebuilt_build_tools_bin)/zipalign
endif # TARGET_BUILD_APPS || TARGET_BUILD_PDK
ifeq (,$(TARGET_BUILD_APPS))
@@ -524,13 +533,11 @@
FILESLIST := $(SOONG_HOST_OUT_EXECUTABLES)/fileslist
FILESLIST_UTIL :=$= build/make/tools/fileslist_util.py
HOST_INIT_VERIFIER := $(HOST_OUT_EXECUTABLES)/host_init_verifier
-MAKEPARALLEL := $(prebuilt_build_tools_bin)/makeparallel
-SOONG_JAVAC_WRAPPER := $(SOONG_HOST_OUT_EXECUTABLES)/soong_javac_wrapper
-SOONG_ZIP := $(SOONG_HOST_OUT_EXECUTABLES)/soong_zip
-MERGE_ZIPS := $(SOONG_HOST_OUT_EXECUTABLES)/merge_zips
XMLLINT := $(SOONG_HOST_OUT_EXECUTABLES)/xmllint
-ZIP2ZIP := $(SOONG_HOST_OUT_EXECUTABLES)/zip2zip
-ZIPTIME := $(prebuilt_build_tools_bin)/ziptime
+
+# SOONG_ZIP is exported by Soong, but needs to be defined early for
+# $OUT/dexpreopt.global. It will be verified against the Soong version.
+SOONG_ZIP := $(SOONG_HOST_OUT_EXECUTABLES)/soong_zip
# ---------------------------------------------------------------
# Generic tools.
@@ -559,7 +566,6 @@
VTSC := $(HOST_OUT_EXECUTABLES)/vtsc$(HOST_EXECUTABLE_SUFFIX)
MKBOOTFS := $(HOST_OUT_EXECUTABLES)/mkbootfs$(HOST_EXECUTABLE_SUFFIX)
MINIGZIP := $(HOST_OUT_EXECUTABLES)/minigzip$(HOST_EXECUTABLE_SUFFIX)
-BROTLI := $(HOST_OUT_EXECUTABLES)/brotli$(HOST_EXECUTABLE_SUFFIX)
ifeq (,$(strip $(BOARD_CUSTOM_MKBOOTIMG)))
MKBOOTIMG := $(HOST_OUT_EXECUTABLES)/mkbootimg$(HOST_EXECUTABLE_SUFFIX)
else
@@ -579,27 +585,29 @@
FS_GET_STATS := $(HOST_OUT_EXECUTABLES)/fs_get_stats$(HOST_EXECUTABLE_SUFFIX)
MKEXTUSERIMG := $(HOST_OUT_EXECUTABLES)/mkuserimg_mke2fs
MKE2FS_CONF := system/extras/ext4_utils/mke2fs.conf
-BLK_ALLOC_TO_BASE_FS := $(HOST_OUT_EXECUTABLES)/blk_alloc_to_base_fs$(HOST_EXECUTABLE_SUFFIX)
MKSQUASHFSUSERIMG := $(HOST_OUT_EXECUTABLES)/mksquashfsimage.sh
MKF2FSUSERIMG := $(HOST_OUT_EXECUTABLES)/mkf2fsuserimg.sh
SIMG2IMG := $(HOST_OUT_EXECUTABLES)/simg2img$(HOST_EXECUTABLE_SUFFIX)
E2FSCK := $(HOST_OUT_EXECUTABLES)/e2fsck$(HOST_EXECUTABLE_SUFFIX)
-MKTARBALL := build/make/tools/mktarball.sh
TUNE2FS := $(HOST_OUT_EXECUTABLES)/tune2fs$(HOST_EXECUTABLE_SUFFIX)
JARJAR := $(HOST_OUT_JAVA_LIBRARIES)/jarjar.jar
DATA_BINDING_COMPILER := $(HOST_OUT_JAVA_LIBRARIES)/databinding-compiler.jar
FAT16COPY := build/make/tools/fat16copy.py
-CHECK_LINK_TYPE := build/make/tools/check_link_type.py
CHECK_ELF_FILE := build/make/tools/check_elf_file.py
LPMAKE := $(HOST_OUT_EXECUTABLES)/lpmake$(HOST_EXECUTABLE_SUFFIX)
-BUILD_SUPER_IMAGE := build/make/tools/releasetools/build_super_image.py
+ADD_IMG_TO_TARGET_FILES := $(HOST_OUT_EXECUTABLES)/add_img_to_target_files$(HOST_EXECUTABLE_SUFFIX)
+BUILD_IMAGE := $(HOST_OUT_EXECUTABLES)/build_image$(HOST_EXECUTABLE_SUFFIX)
+BUILD_SUPER_IMAGE := $(HOST_OUT_EXECUTABLES)/build_super_image$(HOST_EXECUTABLE_SUFFIX)
+IMG_FROM_TARGET_FILES := $(HOST_OUT_EXECUTABLES)/img_from_target_files$(HOST_EXECUTABLE_SUFFIX)
+MAKE_RECOVERY_PATCH := $(HOST_OUT_EXECUTABLES)/make_recovery_patch$(HOST_EXECUTABLE_SUFFIX)
+OTA_FROM_TARGET_FILES := $(HOST_OUT_EXECUTABLES)/ota_from_target_files$(HOST_EXECUTABLE_SUFFIX)
+SPARSE_IMG := $(HOST_OUT_EXECUTABLES)/sparse_img$(HOST_EXECUTABLE_SUFFIX)
PROGUARD_HOME := external/proguard
PROGUARD := $(PROGUARD_HOME)/bin/proguard.sh
PROGUARD_DEPS := $(PROGUARD) $(PROGUARD_HOME)/lib/proguard.jar
JAVATAGS := build/make/tools/java-event-log-tags.py
MERGETAGS := build/make/tools/merge-event-log-tags.py
-BUILD_IMAGE_SRCS := $(wildcard build/make/tools/releasetools/*.py)
APPEND2SIMG := $(HOST_OUT_EXECUTABLES)/append2simg
VERITY_SIGNER := $(HOST_OUT_EXECUTABLES)/verity_signer
BUILD_VERITY_METADATA := $(HOST_OUT_EXECUTABLES)/build_verity_metadata
@@ -608,7 +616,6 @@
FUTILITY := $(HOST_OUT_EXECUTABLES)/futility-host
VBOOT_SIGNER := $(HOST_OUT_EXECUTABLES)/vboot_signer
FEC := $(HOST_OUT_EXECUTABLES)/fec
-BRILLO_UPDATE_PAYLOAD := $(HOST_OUT_EXECUTABLES)/brillo_update_payload
DEXDUMP := $(HOST_OUT_EXECUTABLES)/dexdump$(BUILD_EXECUTABLE_SUFFIX)
PROFMAN := $(HOST_OUT_EXECUTABLES)/profman
@@ -631,13 +638,6 @@
# Path to tools.jar
HOST_JDK_TOOLS_JAR := $(ANDROID_JAVA8_HOME)/lib/tools.jar
-# It's called md5 on Mac OS and md5sum on Linux
-ifeq ($(HOST_OS),darwin)
-MD5SUM:=md5 -q
-else
-MD5SUM:=md5sum
-endif
-
APICHECK_COMMAND := $(JAVA) -Xmx4g -jar $(APICHECK) --no-banner --compatible-output=yes
# Boolean variable determining if the whitelist for compatible properties is enabled
@@ -769,6 +769,13 @@
endif
.KATI_READONLY := DEFAULT_SYSTEM_DEV_CERTIFICATE
+# Certificate for the NetworkStack sepolicy context
+ifdef PRODUCT_MAINLINE_SEPOLICY_DEV_CERTIFICATES
+ MAINLINE_SEPOLICY_DEV_CERTIFICATES := $(PRODUCT_MAINLINE_SEPOLICY_DEV_CERTIFICATES)
+else
+ MAINLINE_SEPOLICY_DEV_CERTIFICATES := $(dir $(DEFAULT_SYSTEM_DEV_CERTIFICATE))
+endif
+
BUILD_NUMBER_FROM_FILE := $$(cat $(OUT_DIR)/build_number.txt)
BUILD_DATETIME_FROM_FILE := $$(cat $(BUILD_DATETIME_FILE))
@@ -784,7 +791,7 @@
# is made which breaks compatibility with the previous platform sepolicy version,
# not just on every increase in PLATFORM_SDK_VERSION. The minor version should
# be reset to 0 on every bump of the PLATFORM_SDK_VERSION.
-sepolicy_major_vers := 28
+sepolicy_major_vers := 29
sepolicy_minor_vers := 0
ifneq ($(sepolicy_major_vers), $(PLATFORM_SDK_VERSION))
@@ -871,10 +878,10 @@
endif
endif
-ifneq ($(BOARD_PRODUCT_SERVICESIMAGE_PARTITION_SIZE),)
-ifneq ($(BOARD_PRODUCT_SERVICESIMAGE_PARTITION_RESERVED_SIZE),)
-$(error Should not define BOARD_PRODUCT_SERVICESIMAGE_PARTITION_SIZE and \
- BOARD_PRODUCT_SERVICESIMAGE_PARTITION_RESERVED_SIZE together)
+ifneq ($(BOARD_SYSTEM_EXTIMAGE_PARTITION_SIZE),)
+ifneq ($(BOARD_SYSTEM_EXTIMAGE_PARTITION_RESERVED_SIZE),)
+$(error Should not define BOARD_SYSTEM_EXTIMAGE_PARTITION_SIZE and \
+ BOARD_SYSTEM_EXTIMAGE_PARTITION_RESERVED_SIZE together)
endif
endif
@@ -899,7 +906,7 @@
)
# BOARD_*_PARTITION_LIST: a list of the following tokens
-valid_super_partition_list := system vendor product product_services odm
+valid_super_partition_list := system vendor product system_ext odm
$(foreach group,$(call to-upper,$(BOARD_SUPER_PARTITION_GROUPS)), \
$(if $(filter-out $(valid_super_partition_list),$(BOARD_$(group)_PARTITION_LIST)), \
$(error BOARD_$(group)_PARTITION_LIST contains invalid partition name \
@@ -1154,13 +1161,12 @@
# in the source tree.
dont_bother_goals := out \
snod systemimage-nodeps \
- stnod systemtarball-nodeps \
- userdataimage-nodeps userdatatarball-nodeps \
+ userdataimage-nodeps \
cacheimage-nodeps \
bptimage-nodeps \
vnod vendorimage-nodeps \
pnod productimage-nodeps \
- psnod productservicesimage-nodeps \
+ senod systemextimage-nodeps \
onod odmimage-nodeps \
systemotherimage-nodeps \
ramdisk-nodeps \
diff --git a/core/config_sanitizers.mk b/core/config_sanitizers.mk
index 7a9f23e..2439f79 100644
--- a/core/config_sanitizers.mk
+++ b/core/config_sanitizers.mk
@@ -345,9 +345,6 @@
my_shared_libraries := $($(LOCAL_2ND_ARCH_VAR_PREFIX)ADDRESS_SANITIZER_RUNTIME_LIBRARY) \
$(my_shared_libraries)
endif
- ifeq (,$(filter $(LOCAL_MODULE),$(ADDRESS_SANITIZER_CONFIG_EXTRA_STATIC_LIBRARIES)))
- my_static_libraries += $(ADDRESS_SANITIZER_CONFIG_EXTRA_STATIC_LIBRARIES)
- endif
# Do not add unnecessary dependency in shared libraries.
ifeq ($(LOCAL_MODULE_CLASS),SHARED_LIBRARIES)
diff --git a/core/cxx_stl_setup.mk b/core/cxx_stl_setup.mk
index 7d3ca5c..8e4a46c 100644
--- a/core/cxx_stl_setup.mk
+++ b/core/cxx_stl_setup.mk
@@ -55,8 +55,6 @@
my_cxx_ldlibs :=
ifneq ($(filter $(my_cxx_stl),libc++ libc++_static),)
- my_cflags += -D_USING_LIBCXX
-
ifeq ($($(my_prefix)OS),darwin)
# libc++'s headers are annotated with availability macros that indicate
# which version of Mac OS was the first to ship with a libc++ feature
@@ -84,6 +82,7 @@
my_ldflags += -nodefaultlibs
my_cxx_ldlibs += $($($(my_prefix)OS)_$(my_link_type)_gcclibs)
else
+ my_static_libraries += libc++demangle
ifeq (arm,$($(my_prefix)$(LOCAL_2ND_ARCH_VAR_PREFIX)ARCH))
my_static_libraries += libunwind_llvm
my_ldflags += -Wl,--exclude-libs,libunwind_llvm.a
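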
diff --git a/core/definitions.mk b/core/definitions.mk
index 343afff..a442bc0 100644
--- a/core/definitions.mk
+++ b/core/definitions.mk
@@ -364,7 +364,7 @@
###########################################################
define all-renderscript-files-under
-$(call find-subdir-files,$(1) \( -name "*.rs" -or -name "*.fs" \) -and -not -name ".*")
+$(call find-subdir-files,$(1) \( -name "*.rscript" -or -name "*.fs" \) -and -not -name ".*")
endef
###########################################################
@@ -951,7 +951,7 @@
$(hide) mkdir -p $(dir $@)
$(hide) $(BCC_COMPAT) -O3 -o $(dir $@)/$(notdir $(<:.bc=.o)) -fPIC -shared \
-rt-path $(RS_PREBUILT_CLCORE) -mtriple $(RS_COMPAT_TRIPLE) $<
-$(hide) $(PRIVATE_CXX) -shared -Wl,-soname,$(notdir $@) -nostdlib \
+$(hide) $(PRIVATE_CXX_LINK) -shared -Wl,-soname,$(notdir $@) -nostdlib \
-Wl,-rpath,\$$ORIGIN/../lib \
$(dir $@)/$(notdir $(<:.bc=.o)) \
$(RS_PREBUILT_COMPILER_RT) \
@@ -1107,7 +1107,7 @@
###########################################################
define c-includes
$(addprefix -I , $(PRIVATE_C_INCLUDES)) \
-$$(cat $(PRIVATE_IMPORT_INCLUDES))\
+$(foreach i,$(PRIVATE_IMPORTED_INCLUDES),$(EXPORTS.$(i)))\
$(if $(PRIVATE_NO_DEFAULT_COMPILER_FLAGS),,\
$(addprefix -I ,\
$(filter-out $(PRIVATE_C_INCLUDES), \
@@ -1533,7 +1533,7 @@
endef
define transform-o-to-aux-executable-inner
-$(hide) $(PRIVATE_CXX) -pie \
+$(hide) $(PRIVATE_CXX_LINK) -pie \
-Bdynamic \
-Wl,--gc-sections \
$(PRIVATE_ALL_OBJECTS) \
@@ -1552,7 +1552,7 @@
endef
define transform-o-to-aux-static-executable-inner
-$(hide) $(PRIVATE_CXX) \
+$(hide) $(PRIVATE_CXX_LINK) \
-Bstatic \
-Wl,--gc-sections \
$(PRIVATE_ALL_OBJECTS) \
@@ -1653,7 +1653,7 @@
# it to be overridden en masse; see combo/linux-arm.make for an example.
ifneq ($(HOST_CUSTOM_LD_COMMAND),true)
define transform-host-o-to-shared-lib-inner
-$(hide) $(PRIVATE_CXX) \
+$(hide) $(PRIVATE_CXX_LINK) \
-Wl,-rpath,\$$ORIGIN/../$(notdir $($(PRIVATE_2ND_ARCH_VAR_PREFIX)$(PRIVATE_PREFIX)OUT_SHARED_LIBRARIES)) \
-Wl,-rpath,\$$ORIGIN/$(notdir $($(PRIVATE_2ND_ARCH_VAR_PREFIX)$(PRIVATE_PREFIX)OUT_SHARED_LIBRARIES)) \
-shared -Wl,-soname,$(notdir $@) \
@@ -1694,7 +1694,7 @@
###########################################################
define transform-o-to-shared-lib-inner
-$(hide) $(PRIVATE_CXX) \
+$(hide) $(PRIVATE_CXX_LINK) \
-nostdlib -Wl,-soname,$(notdir $@) \
-Wl,--gc-sections \
-shared \
@@ -1729,7 +1729,7 @@
###########################################################
define transform-o-to-executable-inner
-$(hide) $(PRIVATE_CXX) -pie \
+$(hide) $(PRIVATE_CXX_LINK) -pie \
-nostdlib -Bdynamic \
-Wl,-dynamic-linker,$(PRIVATE_LINKER) \
-Wl,--gc-sections \
@@ -1772,7 +1772,7 @@
###########################################################
define transform-o-to-static-executable-inner
-$(hide) $(PRIVATE_CXX) \
+$(hide) $(PRIVATE_CXX_LINK) \
-nostdlib -Bstatic \
$(if $(filter $(PRIVATE_LDFLAGS),-shared),,-static) \
-Wl,--gc-sections \
@@ -1810,7 +1810,7 @@
ifneq ($(HOST_CUSTOM_LD_COMMAND),true)
define transform-host-o-to-executable-inner
-$(hide) $(PRIVATE_CXX) \
+$(hide) $(PRIVATE_CXX_LINK) \
$(PRIVATE_ALL_OBJECTS) \
-Wl,--whole-archive \
$(PRIVATE_ALL_WHOLE_STATIC_LIBRARIES) \
@@ -2270,7 +2270,7 @@
# $(1): the package file we are signing.
define sign-package-arg
$(hide) mv $(1) $(1).unsigned
-$(hide) $(JAVA) -Djava.library.path=$(SIGNAPK_JNI_LIBRARY_PATH) -jar $(SIGNAPK_JAR) \
+$(hide) $(JAVA) -Djava.library.path=$$(dirname $(SIGNAPK_JNI_LIBRARY_PATH)) -jar $(SIGNAPK_JAR) \
$(PRIVATE_CERTIFICATE) $(PRIVATE_PRIVATE_KEY) \
$(PRIVATE_ADDITIONAL_CERTIFICATES) $(1).unsigned $(1).signed
$(hide) mv $(1).signed $(1)
@@ -2302,7 +2302,7 @@
ifeq ($(HOST_OS),linux)
# Runs appcompat and store logs in $(PRODUCT_OUT)/appcompat
define extract-package
-$(AAPT2) dump $@ | awk -F ' |=' '/^Package/{print $$3}' >> $(PRODUCT_OUT)/appcompat/$(PRIVATE_MODULE).log &&
+$(AAPT2) dump resources $@ | awk -F ' |=' '/^Package/{print $$3}' >> $(PRODUCT_OUT)/appcompat/$(PRIVATE_MODULE).log &&
endef
define appcompat-header
$(hide) \
@@ -2315,13 +2315,15 @@
echo "Install path on $(TARGET_PRODUCT)-$(TARGET_BUILD_VARIANT): $(PRIVATE_INSTALLED_MODULE)" >> $(PRODUCT_OUT)/appcompat/$(PRIVATE_MODULE).log && \
echo >> $(PRODUCT_OUT)/appcompat/$(PRIVATE_MODULE).log
endef
+ART_VERIDEX_APPCOMPAT_SCRIPT:=$(HOST_OUT)/bin/appcompat.sh
define run-appcompat
$(hide) \
echo "appcompat.sh output:" >> $(PRODUCT_OUT)/appcompat/$(PRIVATE_MODULE).log && \
- PACKAGING=$(TARGET_OUT_COMMON_INTERMEDIATES)/PACKAGING ANDROID_LOG_TAGS="*:e" art/tools/veridex/appcompat.sh --dex-file=$@ --api-flags=$(INTERNAL_PLATFORM_HIDDENAPI_FLAGS) 2>&1 >> $(PRODUCT_OUT)/appcompat/$(PRIVATE_MODULE).log
+ PACKAGING=$(TARGET_OUT_COMMON_INTERMEDIATES)/PACKAGING ANDROID_LOG_TAGS="*:e" $(ART_VERIDEX_APPCOMPAT_SCRIPT) --dex-file=$@ --api-flags=$(INTERNAL_PLATFORM_HIDDENAPI_FLAGS) 2>&1 >> $(PRODUCT_OUT)/appcompat/$(PRIVATE_MODULE).log
endef
appcompat-files = \
- art/tools/veridex/appcompat.sh \
+ $(AAPT2) \
+ $(ART_VERIDEX_APPCOMPAT_SCRIPT) \
$(INTERNAL_PLATFORM_HIDDENAPI_FLAGS) \
$(HOST_OUT_EXECUTABLES)/veridex \
$(TARGET_OUT_COMMON_INTERMEDIATES)/PACKAGING/core_dex_intermediates/classes.dex \
@@ -2452,9 +2454,17 @@
$(2): \
$(1) \
$(HOST_INIT_VERIFIER) \
- $(KNOWN_HIDL_INTERFACES) \
- $(call intermediates-dir-for,ETC,passwd)/passwd
- $(hide) $(HOST_INIT_VERIFIER) -p $(call intermediates-dir-for,ETC,passwd)/passwd -k $(KNOWN_HIDL_INTERFACES) $$<
+ $(HIDL_INHERITANCE_HIERARCHY) \
+ $(call intermediates-dir-for,ETC,passwd_system)/passwd_system \
+ $(call intermediates-dir-for,ETC,passwd_vendor)/passwd_vendor \
+ $(call intermediates-dir-for,ETC,passwd_odm)/passwd_odm \
+ $(call intermediates-dir-for,ETC,passwd_product)/passwd_product
+ $(hide) $(HOST_INIT_VERIFIER) \
+ -p $(call intermediates-dir-for,ETC,passwd_system)/passwd_system \
+ -p $(call intermediates-dir-for,ETC,passwd_vendor)/passwd_vendor \
+ -p $(call intermediates-dir-for,ETC,passwd_odm)/passwd_odm \
+ -p $(call intermediates-dir-for,ETC,passwd_product)/passwd_product \
+ -i $(HIDL_INHERITANCE_HIERARCHY) $$<
else
$(2): $(1)
endif
@@ -2678,9 +2688,9 @@
###########################################################
# $(1): The file to check
-ifndef get-file-size
-$(error HOST_OS must define get-file-size)
-endif
+define get-file-size
+stat -c "%s" "$(1)" | tr -d '\n'
+endef
# $(1): The file(s) to check (often $@)
# $(2): The partition size.
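
get-file-size is now defined once here, with GNU stat syntax, instead of per host OS in combo/HOST_linux-x86.mk and combo/HOST_darwin-x86_64.mk (whose BSD `stat -f "%z"` variant is deleted earlier in this change). Callers are unchanged; e.g., with an illustrative path:

  # $(call get-file-size,$(PRODUCT_OUT)/system.img)
  # expands to the shell fragment
  #   stat -c "%s" "out/target/product/<device>/system.img" | tr -d '\n'
  # which, when the enclosing recipe runs, prints the image size in bytes
  # without a trailing newline.
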
diff --git a/core/dex_preopt_config.mk b/core/dex_preopt_config.mk
index 570dbd8..69eaea1 100644
--- a/core/dex_preopt_config.mk
+++ b/core/dex_preopt_config.mk
@@ -8,8 +8,8 @@
SYSTEM_OTHER_ODEX_FILTER ?= \
app/% \
priv-app/% \
- product_services/app/% \
- product_services/priv-app/% \
+ system_ext/app/% \
+ system_ext/priv-app/% \
product/app/% \
product/priv-app/% \
@@ -99,7 +99,7 @@
$(call add_json_bool, DisableGenerateProfile, $(filter false,$(WITH_DEX_PREOPT_GENERATE_PROFILE)))
$(call add_json_str, ProfileDir, $(PRODUCT_DEX_PREOPT_PROFILE_DIR))
$(call add_json_list, BootJars, $(PRODUCT_BOOT_JARS))
- $(call add_json_list, RuntimeApexJars, $(RUNTIME_APEX_JARS))
+ $(call add_json_list, ArtApexJars, $(ART_APEX_JARS))
$(call add_json_list, ProductUpdatableBootModules, $(PRODUCT_UPDATABLE_BOOT_MODULES))
$(call add_json_list, ProductUpdatableBootLocations, $(PRODUCT_UPDATABLE_BOOT_LOCATIONS))
$(call add_json_list, SystemServerJars, $(PRODUCT_SYSTEM_SERVER_JARS))
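
The ArtApexJars key follows the envsetup.mk rename further down (RUNTIME_APEX_JARS becomes ART_APEX_JARS and gains core-icu4j), so the dexpreopt config handed to the Soong side now carries the ART apex jar list under the new name. With the default jar list the emitted entry would look roughly like this (exact JSON formatting depends on add_json_list):

  # "ArtApexJars": ["core-oj", "core-libart", "core-icu4j", "okhttp",
  #                 "bouncycastle", "apache-xml"],
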
diff --git a/core/dynamic_binary.mk b/core/dynamic_binary.mk
index 0accdc0..27ff2c9 100644
--- a/core/dynamic_binary.mk
+++ b/core/dynamic_binary.mk
@@ -39,6 +39,17 @@
include $(BUILD_SYSTEM)/binary.mk
###################################
+ifdef LOCAL_INJECT_BSSL_HASH
+inject_module := $(intermediates)/INJECT_BSSL_HASH/$(notdir $(my_installed_module_stem))
+LOCAL_INTERMEDIATE_TARGETS += $(inject_module)
+$(inject_module): $(SOONG_HOST_OUT)/bin/bssl_inject_hash
+$(inject_module): $(linked_module)
+ @echo "target inject BSSL hash: $(PRIVATE_MODULE) ($@)"
+ $(SOONG_HOST_OUT)/bin/bssl_inject_hash -in-object $< -o $@
+else
+inject_module := $(linked_module)
+endif
+
###########################################################
## Store a copy with symbols for symbolic debugging
###########################################################
@@ -47,7 +58,7 @@
else
my_unstripped_path := $(LOCAL_UNSTRIPPED_PATH)
endif
-symbolic_input := $(linked_module)
+symbolic_input := $(inject_module)
symbolic_output := $(my_unstripped_path)/$(my_installed_module_stem)
$(symbolic_output) : $(symbolic_input)
@echo "target Symbolic: $(PRIVATE_MODULE) ($@)"
@@ -59,7 +70,7 @@
ifeq ($(BREAKPAD_GENERATE_SYMBOLS),true)
my_breakpad_path := $(TARGET_OUT_BREAKPAD)/$(patsubst $(PRODUCT_OUT)/%,%,$(my_module_path))
-breakpad_input := $(linked_module)
+breakpad_input := $(inject_module)
breakpad_output := $(my_breakpad_path)/$(my_installed_module_stem).sym
$(breakpad_output) : $(breakpad_input) | $(BREAKPAD_DUMP_SYMS) $(PRIVATE_READELF)
@echo "target breakpad: $(PRIVATE_MODULE) ($@)"
@@ -133,6 +144,7 @@
$(cleantarget): PRIVATE_CLEAN_FILES += \
$(linked_module) \
+ $(inject_module) \
$(breakpad_output) \
$(symbolic_output) \
$(strip_output)
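
The LOCAL_INJECT_BSSL_HASH hook splices one extra step between linking and the later stages: when set, the freshly linked library is post-processed by Soong's bssl_inject_hash (used for BoringSSL's integrity hash, as the name suggests -- the makefile itself does not say), and it is the hashed copy, not linked_module, that feeds the symbolic, breakpad and strip outputs and gets cleaned. Opting in is one extra line in the module definition (module name illustrative):

  # LOCAL_MODULE := libcrypto
  # LOCAL_INJECT_BSSL_HASH := true
  #
  # With the flag:    linked_module -> INJECT_BSSL_HASH/<stem> -> symbolic/breakpad/strip
  # Without the flag: inject_module is simply an alias for linked_module.
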
diff --git a/core/envsetup.mk b/core/envsetup.mk
index 46edc0e..88bf352 100644
--- a/core/envsetup.mk
+++ b/core/envsetup.mk
@@ -82,8 +82,6 @@
# ---------------------------------------------------------------
# The product defaults to generic on hardware
-# NOTE: This will be overridden in product_config.mk if make
-# was invoked with a PRODUCT-xxx-yyy goal.
ifeq ($(TARGET_PRODUCT),)
TARGET_PRODUCT := aosp_arm
endif
@@ -94,6 +92,13 @@
TARGET_BUILD_VARIANT := eng
endif
+TARGET_BUILD_APPS ?=
+
+.KATI_READONLY := \
+ TARGET_PRODUCT \
+ TARGET_BUILD_VARIANT \
+ TARGET_BUILD_APPS
+
# ---------------------------------------------------------------
# Set up configuration for host machine. We don't do cross-
# compiles except for arm/mips, so the HOST is whatever we are
@@ -106,9 +111,6 @@
ifneq (,$(findstring Darwin,$(UNAME)))
HOST_OS := darwin
endif
-ifneq (,$(findstring Macintosh,$(UNAME)))
- HOST_OS := darwin
-endif
HOST_OS_EXTRA := $(shell uname -rsm)
ifeq ($(HOST_OS),linux)
@@ -233,11 +235,14 @@
# BoardConfig, to be either the partition dir, or a subdir within 'system'.
_vendor_path_placeholder := ||VENDOR-PATH-PH||
_product_path_placeholder := ||PRODUCT-PATH-PH||
-_product_services_path_placeholder := ||PRODUCT_SERVICES-PATH-PH||
+_system_ext_path_placeholder := ||SYSTEM_EXT-PATH-PH||
_odm_path_placeholder := ||ODM-PATH-PH||
TARGET_COPY_OUT_VENDOR := $(_vendor_path_placeholder)
TARGET_COPY_OUT_PRODUCT := $(_product_path_placeholder)
-TARGET_COPY_OUT_PRODUCT_SERVICES := $(_product_services_path_placeholder)
+# TODO(b/135957588) TARGET_COPY_OUT_PRODUCT_SERVICES will copy the target to
+# product
+TARGET_COPY_OUT_PRODUCT_SERVICES := $(_product_path_placeholder)
+TARGET_COPY_OUT_SYSTEM_EXT := $(_system_ext_path_placeholder)
TARGET_COPY_OUT_ODM := $(_odm_path_placeholder)
# Returns the non-sanitized version of the path provided in $1.
@@ -248,10 +253,10 @@
#################################################################
# Set up minimal BOOTCLASSPATH list of jars to build/execute
# java code with dalvikvm/art.
-# Jars present in the runtime apex. These should match exactly the list of
-# Java libraries in the runtime apex build rule.
-RUNTIME_APEX_JARS := core-oj core-libart okhttp bouncycastle apache-xml
-TARGET_CORE_JARS := $(RUNTIME_APEX_JARS) conscrypt
+# Jars present in the ART apex. These should match exactly the list of
+# Java libraries in the ART apex build rule.
+ART_APEX_JARS := core-oj core-libart core-icu4j okhttp bouncycastle apache-xml
+TARGET_CORE_JARS := $(ART_APEX_JARS) conscrypt
ifeq ($(EMMA_INSTRUMENT),true)
ifneq ($(EMMA_INSTRUMENT_STATIC),true)
# For instrumented build, if Jacoco is not being included statically
@@ -323,6 +328,7 @@
HOST_OUT_EXECUTABLES := $(HOST_OUT)/bin
HOST_OUT_SHARED_LIBRARIES := $(HOST_OUT)/lib64
+HOST_OUT_DYLIB_LIBRARIES := $(HOST_OUT)/lib64
HOST_OUT_RENDERSCRIPT_BITCODE := $(HOST_OUT_SHARED_LIBRARIES)
HOST_OUT_JAVA_LIBRARIES := $(HOST_OUT)/framework
HOST_OUT_SDK_ADDON := $(HOST_OUT)/sdk_addon
@@ -785,38 +791,52 @@
$(TARGET_2ND_ARCH_VAR_PREFIX)TARGET_OUT_PRODUCT_APPS \
$(TARGET_2ND_ARCH_VAR_PREFIX)TARGET_OUT_PRODUCT_APPS_PRIVILEGED
-TARGET_OUT_PRODUCT_SERVICES := $(PRODUCT_OUT)/$(TARGET_COPY_OUT_PRODUCT_SERVICES)
+TARGET_OUT_SYSTEM_EXT := $(PRODUCT_OUT)/$(TARGET_COPY_OUT_SYSTEM_EXT)
ifneq ($(filter address,$(SANITIZE_TARGET)),)
-target_out_product_services_shared_libraries_base := $(PRODUCT_OUT)/$(TARGET_COPY_OUT_ASAN)/$(TARGET_COPY_OUT_PRODUCT_SERVICES)
+target_out_system_ext_shared_libraries_base := $(PRODUCT_OUT)/$(TARGET_COPY_OUT_ASAN)/$(TARGET_COPY_OUT_SYSTEM_EXT)
ifeq ($(SANITIZE_LITE),true)
# When using SANITIZE_LITE, APKs must not be packaged with sanitized libraries, as they will not
# work with unsanitized app_process. For simplicity, generate APKs into /data/asan/.
-target_out_product_services_app_base := $(PRODUCT_OUT)/$(TARGET_COPY_OUT_ASAN)/$(TARGET_COPY_OUT_PRODUCT_SERVICES)
+target_out_system_ext_app_base := $(PRODUCT_OUT)/$(TARGET_COPY_OUT_ASAN)/$(TARGET_COPY_OUT_SYSTEM_EXT)
else
-target_out_product_services_app_base := $(TARGET_OUT_PRODUCT_SERVICES)
+target_out_system_ext_app_base := $(TARGET_OUT_SYSTEM_EXT)
endif
else
-target_out_product_services_shared_libraries_base := $(TARGET_OUT_PRODUCT_SERVICES)
-target_out_product_services_app_base := $(TARGET_OUT_PRODUCT_SERVICES)
+target_out_system_ext_shared_libraries_base := $(TARGET_OUT_SYSTEM_EXT)
+target_out_system_ext_app_base := $(TARGET_OUT_SYSTEM_EXT)
endif
ifeq ($(TARGET_IS_64_BIT),true)
-TARGET_OUT_PRODUCT_SERVICES_SHARED_LIBRARIES := $(target_out_product_services_shared_libraries_base)/lib64
+TARGET_OUT_SYSTEM_EXT_SHARED_LIBRARIES := $(target_out_system_ext_shared_libraries_base)/lib64
else
-TARGET_OUT_PRODUCT_SERVICES_SHARED_LIBRARIES := $(target_out_product_services_shared_libraries_base)/lib
+TARGET_OUT_SYSTEM_EXT_SHARED_LIBRARIES := $(target_out_system_ext_shared_libraries_base)/lib
endif
-TARGET_OUT_PRODUCT_SERVICES_JAVA_LIBRARIES:= $(TARGET_OUT_PRODUCT_SERVICES)/framework
-TARGET_OUT_PRODUCT_SERVICES_APPS := $(target_out_product_services_app_base)/app
-TARGET_OUT_PRODUCT_SERVICES_APPS_PRIVILEGED := $(target_out_product_services_app_base)/priv-app
-TARGET_OUT_PRODUCT_SERVICES_ETC := $(TARGET_OUT_PRODUCT_SERVICES)/etc
+TARGET_OUT_SYSTEM_EXT_JAVA_LIBRARIES := $(TARGET_OUT_SYSTEM_EXT)/framework
+TARGET_OUT_SYSTEM_EXT_APPS := $(target_out_system_ext_app_base)/app
+TARGET_OUT_SYSTEM_EXT_APPS_PRIVILEGED := $(target_out_system_ext_app_base)/priv-app
+TARGET_OUT_SYSTEM_EXT_ETC := $(TARGET_OUT_SYSTEM_EXT)/etc
+TARGET_OUT_SYSTEM_EXT_EXECUTABLES := $(TARGET_OUT_SYSTEM_EXT)/bin
+.KATI_READONLY := \
+ TARGET_OUT_SYSTEM_EXT_EXECUTABLES \
+ TARGET_OUT_SYSTEM_EXT_SHARED_LIBRARIES \
+ TARGET_OUT_SYSTEM_EXT_JAVA_LIBRARIES \
+ TARGET_OUT_SYSTEM_EXT_APPS \
+ TARGET_OUT_SYSTEM_EXT_APPS_PRIVILEGED \
+ TARGET_OUT_SYSTEM_EXT_ETC
+$(TARGET_2ND_ARCH_VAR_PREFIX)TARGET_OUT_SYSTEM_EXT_EXECUTABLES := $(TARGET_OUT_SYSTEM_EXT_EXECUTABLES)
ifeq ($(TARGET_TRANSLATE_2ND_ARCH),true)
-$(TARGET_2ND_ARCH_VAR_PREFIX)TARGET_OUT_PRODUCT_SERVICES_SHARED_LIBRARIES := $(target_out_product_services_shared_libraries_base)/lib/$(TARGET_2ND_ARCH)
+$(TARGET_2ND_ARCH_VAR_PREFIX)TARGET_OUT_SYSTEM_EXT_SHARED_LIBRARIES := $(target_out_system_ext_shared_libraries_base)/lib/$(TARGET_2ND_ARCH)
else
-$(TARGET_2ND_ARCH_VAR_PREFIX)TARGET_OUT_PRODUCT_SERVICES_SHARED_LIBRARIES := $(target_out_product_services_shared_libraries_base)/lib
+$(TARGET_2ND_ARCH_VAR_PREFIX)TARGET_OUT_SYSTEM_EXT_SHARED_LIBRARIES := $(target_out_system_ext_shared_libraries_base)/lib
endif
-$(TARGET_2ND_ARCH_VAR_PREFIX)TARGET_OUT_PRODUCT_SERVICES_APPS := $(TARGET_OUT_PRODUCT_SERVICES_APPS)
-$(TARGET_2ND_ARCH_VAR_PREFIX)TARGET_OUT_PRODUCT_SERVICES_APPS_PRIVILEGED := $(TARGET_OUT_PRODUCT_SERVICES_APPS_PRIVILEGED)
+$(TARGET_2ND_ARCH_VAR_PREFIX)TARGET_OUT_SYSTEM_EXT_APPS := $(TARGET_OUT_SYSTEM_EXT_APPS)
+$(TARGET_2ND_ARCH_VAR_PREFIX)TARGET_OUT_SYSTEM_EXT_APPS_PRIVILEGED := $(TARGET_OUT_SYSTEM_EXT_APPS_PRIVILEGED)
+.KATI_READONLY := \
+ $(TARGET_2ND_ARCH_VAR_PREFIX)TARGET_OUT_SYSTEM_EXT_EXECUTABLES \
+ $(TARGET_2ND_ARCH_VAR_PREFIX)TARGET_OUT_SYSTEM_EXT_SHARED_LIBRARIES \
+ $(TARGET_2ND_ARCH_VAR_PREFIX)TARGET_OUT_SYSTEM_EXT_APPS \
+ $(TARGET_2ND_ARCH_VAR_PREFIX)TARGET_OUT_SYSTEM_EXT_APPS_PRIVILEGED
TARGET_OUT_BREAKPAD := $(PRODUCT_OUT)/breakpad
.KATI_READONLY := TARGET_OUT_BREAKPAD
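A minimal sketch, with example values that are not part of the change, of how the new system_ext install directories compose:

  # Illustration only; PRODUCT_OUT and the copy-out value below are hypothetical examples.
  PRODUCT_OUT := out/target/product/generic
  TARGET_COPY_OUT_SYSTEM_EXT := system_ext
  TARGET_OUT_SYSTEM_EXT := $(PRODUCT_OUT)/$(TARGET_COPY_OUT_SYSTEM_EXT)
  TARGET_OUT_SYSTEM_EXT_APPS_PRIVILEGED := $(TARGET_OUT_SYSTEM_EXT)/priv-app
  # -> out/target/product/generic/system_ext/priv-app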
@@ -876,7 +896,7 @@
TARGET_INSTALLER_SYSTEM_OUT
COMMON_MODULE_CLASSES := TARGET-NOTICE_FILES HOST-NOTICE_FILES HOST-JAVA_LIBRARIES
-PER_ARCH_MODULE_CLASSES := SHARED_LIBRARIES STATIC_LIBRARIES EXECUTABLES GYP RENDERSCRIPT_BITCODE NATIVE_TESTS HEADER_LIBRARIES
+PER_ARCH_MODULE_CLASSES := SHARED_LIBRARIES STATIC_LIBRARIES EXECUTABLES GYP RENDERSCRIPT_BITCODE NATIVE_TESTS HEADER_LIBRARIES RLIB_LIBRARIES DYLIB_LIBRARIES
.KATI_READONLY := COMMON_MODULE_CLASSES PER_ARCH_MODULE_CLASSES
ifeq ($(CALLED_FROM_SETUP),true)
diff --git a/core/fuzz_test.mk b/core/fuzz_test.mk
index f5bdef0..71801ba 100644
--- a/core/fuzz_test.mk
+++ b/core/fuzz_test.mk
@@ -64,12 +64,12 @@
$(error $(LOCAL_PATH): Do not set LOCAL_MODULE_PATH_64 when building test $(LOCAL_MODULE))
endif
-LOCAL_MODULE_PATH_64 := $(TARGET_OUT_DATA_NATIVE_TESTS)/fuzzers/$(LOCAL_MODULE)
-LOCAL_MODULE_PATH_32 := $($(TARGET_2ND_ARCH_VAR_PREFIX)TARGET_OUT_DATA_NATIVE_TESTS)/fuzzers/$(LOCAL_MODULE)
+LOCAL_MODULE_PATH_64 := $(TARGET_OUT_DATA_NATIVE_TESTS)/fuzzers/$(my_fuzzer)/$(LOCAL_MODULE)
+LOCAL_MODULE_PATH_32 := $($(TARGET_2ND_ARCH_VAR_PREFIX)TARGET_OUT_DATA_NATIVE_TESTS)/fuzzers/$(my_fuzzer)/$(LOCAL_MODULE)
ifndef LOCAL_MULTILIB
ifndef LOCAL_32_BIT_ONLY
-LOCAL_MULTILIB := both
+LOCAL_MULTILIB := 64
endif
endif
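A minimal sketch (module and engine names hypothetical) of where a fuzz target now installs, given the per-engine subdirectory and the new 64-bit-only default:

  my_fuzzer := libFuzzer                 # hypothetical engine value
  LOCAL_MODULE := example_fuzzer         # hypothetical module
  LOCAL_MODULE_PATH_64 := $(TARGET_OUT_DATA_NATIVE_TESTS)/fuzzers/$(my_fuzzer)/$(LOCAL_MODULE)
  # -> roughly $(PRODUCT_OUT)/data/nativetest64/fuzzers/libFuzzer/example_fuzzer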
diff --git a/core/install_jni_libs.mk b/core/install_jni_libs.mk
index 515d34f..0fec9ca 100644
--- a/core/install_jni_libs.mk
+++ b/core/install_jni_libs.mk
@@ -26,7 +26,7 @@
$(TARGET_OUT_VENDOR)/% \
$(TARGET_OUT_OEM)/% \
$(TARGET_OUT_PRODUCT)/% \
- $(TARGET_OUT_PRODUCT_SERVICES)/% \
+ $(TARGET_OUT_SYSTEM_EXT)/% \
ifeq ($(filter $(supported_partition_patterns),$(my_module_path)),)
my_embed_jni := true
diff --git a/core/instrumentation_test_config_template.xml b/core/instrumentation_test_config_template.xml
index afaa561..18ea676 100644
--- a/core/instrumentation_test_config_template.xml
+++ b/core/instrumentation_test_config_template.xml
@@ -22,8 +22,6 @@
<option name="test-file-name" value="{MODULE}.apk" />
</target_preparer>
- {EXTRA_CONFIGS}
-
<test class="com.android.tradefed.testtype.{TEST_TYPE}" >
<option name="package" value="{PACKAGE}" />
<option name="runner" value="{RUNNER}" />
diff --git a/core/java.mk b/core/java.mk
index b463037..d080450 100644
--- a/core/java.mk
+++ b/core/java.mk
@@ -470,6 +470,8 @@
proguard_flag_files := $(addprefix $(LOCAL_PATH)/, $(LOCAL_PROGUARD_FLAG_FILES))
proguard_flag_files += $(addprefix $(LOCAL_PATH)/, $(LOCAL_R8_FLAG_FILES))
LOCAL_PROGUARD_FLAGS += $(addprefix -include , $(proguard_flag_files))
+LOCAL_PROGUARD_FLAGS_DEPS += $(proguard_flag_files)
+proguard_flag_files :=
ifdef LOCAL_TEST_MODULE_TO_PROGUARD_WITH
extra_input_jar := $(call intermediates-dir-for,APPS,$(LOCAL_TEST_MODULE_TO_PROGUARD_WITH),,COMMON)/classes.jar
@@ -481,8 +483,6 @@
$(built_dex_intermediate): .KATI_IMPLICIT_OUTPUTS := $(proguard_dictionary) $(proguard_configuration)
endif
-else # LOCAL_PROGUARD_ENABLED not defined
-proguard_flag_files :=
endif # LOCAL_PROGUARD_ENABLED defined
ifneq ($(LOCAL_IS_STATIC_JAVA_LIBRARY),true)
@@ -492,7 +492,7 @@
$(built_dex_intermediate): PRIVATE_EXTRA_INPUT_JAR := $(extra_input_jar)
$(built_dex_intermediate): PRIVATE_PROGUARD_FLAGS := $(legacy_proguard_flags) $(common_proguard_flags) $(LOCAL_PROGUARD_FLAGS)
$(built_dex_intermediate): PRIVATE_PROGUARD_DICTIONARY := $(proguard_dictionary)
- $(built_dex_intermediate) : $(full_classes_pre_proguard_jar) $(extra_input_jar) $(my_proguard_sdk_raise) $(common_proguard_flag_files) $(proguard_flag_files) $(legacy_proguard_lib_deps) $(R8_COMPAT_PROGUARD)
+ $(built_dex_intermediate) : $(full_classes_pre_proguard_jar) $(extra_input_jar) $(my_proguard_sdk_raise) $(common_proguard_flag_files) $(legacy_proguard_lib_deps) $(R8_COMPAT_PROGUARD) $(LOCAL_PROGUARD_FLAGS_DEPS)
$(transform-jar-to-dex-r8)
else # !LOCAL_PROGUARD_ENABLED
$(built_dex_intermediate): PRIVATE_D8_LIBS := $(full_java_bootclasspath_libs) $(full_shared_java_header_libs)
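A minimal sketch (module and file names hypothetical) of the module-side inputs: the flag files named here are prepended to LOCAL_PROGUARD_FLAGS as -include arguments and, with this change, also collected into LOCAL_PROGUARD_FLAGS_DEPS, which the R8 rule above now lists among its prerequisites.

  LOCAL_MODULE := ExampleLib                 # hypothetical
  LOCAL_PROGUARD_ENABLED := full
  LOCAL_PROGUARD_FLAG_FILES := proguard.flags
  LOCAL_R8_FLAG_FILES := r8.flags
  # Both files end up in LOCAL_PROGUARD_FLAGS_DEPS, so editing them re-runs the dexing step.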
diff --git a/core/java_common.mk b/core/java_common.mk
index ff2886e..dfe75f3 100644
--- a/core/java_common.mk
+++ b/core/java_common.mk
@@ -81,19 +81,19 @@
$(proto_java_srcjar): PRIVATE_PROTO_JAVA_OUTPUT_DIR := $(proto_java_sources_dir)
$(proto_java_srcjar): PRIVATE_PROTOC_FLAGS := $(LOCAL_PROTOC_FLAGS)
ifeq ($(LOCAL_PROTOC_OPTIMIZE_TYPE),micro)
-$(proto_java_srcjar): PRIVATE_PROTO_JAVA_OUTPUT_OPTION := --javamicro_out
+ $(proto_java_srcjar): PRIVATE_PROTO_JAVA_OUTPUT_OPTION := --javamicro_out
+ $(proto_java_srcjar): PRIVATE_PROTOC_FLAGS += --plugin=$(HOST_OUT_EXECUTABLES)/protoc-gen-javamicro
+ $(proto_java_srcjar): $(HOST_OUT_EXECUTABLES)/protoc-gen-javamicro
+else ifeq ($(LOCAL_PROTOC_OPTIMIZE_TYPE),nano)
+ $(proto_java_srcjar): PRIVATE_PROTO_JAVA_OUTPUT_OPTION := --javanano_out
+ $(proto_java_srcjar): PRIVATE_PROTOC_FLAGS += --plugin=$(HOST_OUT_EXECUTABLES)/protoc-gen-javanano
+ $(proto_java_srcjar): $(HOST_OUT_EXECUTABLES)/protoc-gen-javanano
+else ifeq ($(LOCAL_PROTOC_OPTIMIZE_TYPE),stream)
+ $(proto_java_srcjar): PRIVATE_PROTO_JAVA_OUTPUT_OPTION := --javastream_out
+ $(proto_java_srcjar): PRIVATE_PROTOC_FLAGS += --plugin=$(HOST_OUT_EXECUTABLES)/protoc-gen-javastream
+ $(proto_java_srcjar): $(HOST_OUT_EXECUTABLES)/protoc-gen-javastream
else
- ifeq ($(LOCAL_PROTOC_OPTIMIZE_TYPE),nano)
-$(proto_java_srcjar): PRIVATE_PROTO_JAVA_OUTPUT_OPTION := --javanano_out
- else
- ifeq ($(LOCAL_PROTOC_OPTIMIZE_TYPE),stream)
-$(proto_java_srcjar): PRIVATE_PROTO_JAVA_OUTPUT_OPTION := --javastream_out
-$(proto_java_srcjar): PRIVATE_PROTOC_FLAGS += --plugin=$(HOST_OUT_EXECUTABLES)/protoc-gen-javastream
-$(proto_java_srcjar): $(HOST_OUT_EXECUTABLES)/protoc-gen-javastream
- else
-$(proto_java_srcjar): PRIVATE_PROTO_JAVA_OUTPUT_OPTION := --java_out
- endif
- endif
+ $(proto_java_srcjar): PRIVATE_PROTO_JAVA_OUTPUT_OPTION := --java_out
endif
$(proto_java_srcjar): PRIVATE_PROTO_JAVA_OUTPUT_PARAMS := $(if $(filter lite,$(LOCAL_PROTOC_OPTIMIZE_TYPE)),lite$(if $(LOCAL_PROTO_JAVA_OUTPUT_PARAMS),:,),)$(LOCAL_PROTO_JAVA_OUTPUT_PARAMS)
$(proto_java_srcjar) : $(proto_sources_fullpath) $(PROTOC) $(SOONG_ZIP)
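A minimal sketch (file name hypothetical) of the module side of the rewritten chain: each optimize type now both selects the output option and adds the matching protoc plugin as a prerequisite.

  LOCAL_SRC_FILES := proto/metrics.proto     # hypothetical
  LOCAL_PROTOC_OPTIMIZE_TYPE := nano
  # -> PRIVATE_PROTO_JAVA_OUTPUT_OPTION = --javanano_out, and the srcjar rule now
  #    also depends on $(HOST_OUT_EXECUTABLES)/protoc-gen-javanano.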
@@ -494,13 +494,9 @@
##########################################################
# Copy NOTICE files of transitive static dependencies
# Don't do this in mm, since many of the targets won't exist.
-ifeq ($(ONE_SHOT_MAKEFILE),)
installed_static_library_notice_file_targets := \
$(foreach lib,$(LOCAL_STATIC_JAVA_LIBRARIES), \
NOTICE-$(if $(LOCAL_IS_HOST_MODULE),HOST$(if $(my_host_cross),_CROSS,),TARGET)-JAVA_LIBRARIES-$(lib))
-else
-installed_static_library_notice_file_targets :=
-endif
$(notice_target): | $(installed_static_library_notice_file_targets)
$(LOCAL_INSTALLED_MODULE): | $(notice_target)
diff --git a/core/java_library.mk b/core/java_library.mk
index 34e4874..4734eaf 100644
--- a/core/java_library.mk
+++ b/core/java_library.mk
@@ -44,6 +44,7 @@
LOCAL_STATIC_JAVA_LIBRARIES += jacocoagent
# Exclude jacoco classes from proguard
LOCAL_PROGUARD_FLAGS += -include $(BUILD_SYSTEM)/proguard.jacoco.flags
+LOCAL_PROGUARD_FLAGS_DEPS += $(BUILD_SYSTEM)/proguard.jacoco.flags
endif # LOCAL_EMMA_INSTRUMENT
endif # EMMA_INSTRUMENT_STATIC
else
diff --git a/core/java_renderscript.mk b/core/java_renderscript.mk
index 13a6f8e..672863b 100644
--- a/core/java_renderscript.mk
+++ b/core/java_renderscript.mk
@@ -1,10 +1,10 @@
###############################################################
## Renderscript support for java
-## Adds rules to convert .rs files to .java and .bc files
+## Adds rules to convert .rscript files to .java and .bc files
###############################################################
-renderscript_sources := $(filter %.rs,$(LOCAL_SRC_FILES))
-LOCAL_SRC_FILES := $(filter-out %.rs,$(LOCAL_SRC_FILES))
+renderscript_sources := $(filter %.rscript,$(LOCAL_SRC_FILES))
+LOCAL_SRC_FILES := $(filter-out %.rscript,$(LOCAL_SRC_FILES))
rs_generated_res_zip :=
rs_generated_src_jar :=
@@ -67,7 +67,7 @@
LOCAL_RENDERSCRIPT_INCLUDES := $(LOCAL_RENDERSCRIPT_INCLUDES_OVERRIDE)
endif
-bc_files := $(patsubst %.rs,%.bc, $(notdir $(renderscript_sources)))
+bc_files := $(patsubst %.rscript,%.bc, $(notdir $(renderscript_sources)))
bc_dep_files := $(addprefix $(renderscript_intermediate.COMMON)/,$(patsubst %.bc,%.d,$(bc_files)))
$(rs_generated_src_jar): PRIVATE_RS_INCLUDES := $(LOCAL_RENDERSCRIPT_INCLUDES)
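A minimal sketch (source names hypothetical) of how the renamed extension flows through the filtering above:

  LOCAL_SRC_FILES := rs/saturation.rscript src/com/example/Filter.java      # hypothetical
  renderscript_sources := $(filter %.rscript,$(LOCAL_SRC_FILES))            # rs/saturation.rscript
  bc_files := $(patsubst %.rscript,%.bc,$(notdir $(renderscript_sources)))  # saturation.bc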
@@ -139,6 +139,7 @@
$(rs_support_lib) $(rs_support_io_lib) $(rs_jni_lib) $(rs_compiler_rt)
$(rs_compatibility_jni_libs): $(BCC_COMPAT)
$(rs_compatibility_jni_libs): PRIVATE_CXX := $(CXX_WRAPPER) $(CLANG_CXX)
+$(rs_compatibility_jni_libs): PRIVATE_CXX_LINK := $(CLANG_CXX)
$(rs_compatibility_jni_libs): PRIVATE_SDK_VERSION := $(my_min_sdk_version)
$(rs_compatibility_jni_libs): $(renderscript_intermediate)/librs.%.so: \
$(renderscript_intermediate.bc_folder)%.bc \
diff --git a/core/main.mk b/core/main.mk
index 7ee63c6..b91770b 100644
--- a/core/main.mk
+++ b/core/main.mk
@@ -1,31 +1,9 @@
-# Only use ANDROID_BUILD_SHELL to wrap around bash.
-# DO NOT use other shells such as zsh.
-ifdef ANDROID_BUILD_SHELL
-SHELL := $(ANDROID_BUILD_SHELL)
-else
-# Use bash, not whatever shell somebody has installed as /bin/sh
-# This is repeated in config.mk, since envsetup.sh runs that file
-# directly.
-SHELL := /bin/bash
-endif
-
ifndef KATI
-
-host_prebuilts := linux-x86
-ifeq ($(shell uname),Darwin)
-host_prebuilts := darwin-x86
+$(warning Calling make directly is no longer supported.)
+$(warning Either use 'envsetup.sh; m' or 'build/soong/soong_ui.bash --make-mode')
+$(error done)
endif
-.PHONY: run_soong_ui
-run_soong_ui:
- +@prebuilts/build-tools/$(host_prebuilts)/bin/makeparallel --ninja build/soong/soong_ui.bash --make-mode $(MAKECMDGOALS)
-
-.PHONY: $(MAKECMDGOALS)
-$(sort $(MAKECMDGOALS)) : run_soong_ui
- @#empty
-
-else # KATI
-
$(info [1/1] initializing build system ...)
# Absolute path of the present working directory.
@@ -33,9 +11,6 @@
# the top of the source tree, for example when "make -C" is used in m/mm/mmm.
PWD := $(shell pwd)
-TOP := .
-TOPDIR :=
-
# This is the default target. It must be the first declared target.
.PHONY: droid
DEFAULT_GOAL := droid
@@ -69,11 +44,7 @@
.KATI_READONLY := BUILD_NUMBER_FILE
$(KATI_obsolete_var BUILD_NUMBER,See https://android.googlesource.com/platform/build/+/master/Changes.md#BUILD_NUMBER)
-ifeq ($(HOST_OS),darwin)
-DATE_FROM_FILE := date -r $(BUILD_DATETIME_FROM_FILE)
-else
DATE_FROM_FILE := date -d @$(BUILD_DATETIME_FROM_FILE)
-endif
.KATI_READONLY := DATE_FROM_FILE
# Pick a reasonable string to use to identify files.
@@ -102,6 +73,8 @@
-include test/sts/tools/sts-tradefed/build/config.mk
# CTS-Instant-specific config
-include test/suite_harness/tools/cts-instant-tradefed/build/config.mk
+# MTS-specific config.
+-include test/mts/tools/build/config.mk
# Clean rules
.PHONY: clean-dex-files
@@ -194,6 +167,8 @@
$(error stopping)
endif
+# These are the valid values of TARGET_BUILD_VARIANT.
+INTERNAL_VALID_VARIANTS := user userdebug eng
ifneq ($(filter-out $(INTERNAL_VALID_VARIANTS),$(TARGET_BUILD_VARIANT)),)
$(info ***************************************************************)
$(info ***************************************************************)
@@ -438,43 +413,6 @@
subdir_makefiles_inc := .
FULL_BUILD :=
-ifneq ($(ONE_SHOT_MAKEFILE),)
-# We've probably been invoked by the "mm" shell function
-# with a subdirectory's makefile.
-include $(SOONG_ANDROID_MK) $(wildcard $(ONE_SHOT_MAKEFILE))
-# Change CUSTOM_MODULES to include only modules that were
-# defined by this makefile; this will install all of those
-# modules as a side-effect. Do this after including ONE_SHOT_MAKEFILE
-# so that the modules will be installed in the same place they
-# would have been with a normal make.
-CUSTOM_MODULES := $(sort $(call get-tagged-modules,$(ALL_MODULE_TAGS)))
-
-# A helper goal printing out install paths
-define register_module_install_path
-.PHONY: GET-MODULE-INSTALL-PATH-$(1)
-GET-MODULE-INSTALL-PATH-$(1):
- echo 'INSTALL-PATH: $(1) $(ALL_MODULES.$(1).INSTALLED)'
-endef
-
-SORTED_ALL_MODULES := $(sort $(ALL_MODULES))
-UNIQUE_ALL_MODULES :=
-$(foreach m,$(SORTED_ALL_MODULES),\
- $(if $(call streq,$(m),$(lastword $(UNIQUE_ALL_MODULES))),,\
- $(eval UNIQUE_ALL_MODULES += $(m))))
-SORTED_ALL_MODULES :=
-
-$(foreach mod,$(UNIQUE_ALL_MODULES),$(if $(ALL_MODULES.$(mod).INSTALLED),\
- $(eval $(call register_module_install_path,$(mod)))\
- $(foreach path,$(ALL_MODULES.$(mod).PATH),\
- $(eval my_path_prefix := GET-INSTALL-PATH-IN)\
- $(foreach component,$(subst /,$(space),$(path)),\
- $(eval my_path_prefix := $$(my_path_prefix)-$$(component))\
- $(eval .PHONY: $$(my_path_prefix))\
- $(eval $$(my_path_prefix): GET-MODULE-INSTALL-PATH-$(mod))))))
-UNIQUE_ALL_MODULES :=
-
-else # ONE_SHOT_MAKEFILE
-
ifneq ($(dont_bother),true)
FULL_BUILD := true
#
@@ -496,8 +434,6 @@
endif # dont_bother
-endif # ONE_SHOT_MAKEFILE
-
ifndef subdir_makefiles_total
subdir_makefiles_total := $(words init post finish)
endif
@@ -723,7 +659,7 @@
$(eval req_files := )\
$(foreach req_mod,$(req_mods), \
$(eval req_file := $(filter $(TARGET_OUT_ROOT)/%, $(call module-installed-files,$(req_mod)))) \
- $(if $(strip $(req_file))$(ONE_SHOT_MAKEFILE),\
+ $(if $(strip $(req_file)),\
,\
$(error $(m).LOCAL_TARGET_REQUIRED_MODULES : illegal value $(req_mod) : not a device module. If you want to specify host modules to be required to be installed along with your host module, add those module names to LOCAL_REQUIRED_MODULES instead)\
)\
@@ -749,7 +685,7 @@
$(eval req_files := )\
$(foreach req_mod,$(req_mods), \
$(eval req_file := $(filter $(HOST_OUT)/%, $(call module-installed-files,$(req_mod)))) \
- $(if $(strip $(req_file))$(ONE_SHOT_MAKEFILE),\
+ $(if $(strip $(req_file)),\
,\
$(error $(m).LOCAL_HOST_REQUIRED_MODULES : illegal value $(req_mod) : not a host module. If you want to specify target modules to be required to be installed along with your target module, add those module names to LOCAL_REQUIRED_MODULES instead)\
)\
@@ -1018,44 +954,26 @@
$(error exiting from previous errors)
endif
-# The intermediate filename for link type rules
-#
-# APPS are special -- they have up to three different rules:
-# 1. The COMMON rule for Java libraries
-# 2. The jni_link_type rule for embedded native code
-# 3. The 2ND_jni_link_type for the second architecture native code
-define link-type-file
-$(eval _ltf_aux_variant:=$(link-type-aux-variant))\
-$(if $(_ltf_aux_variant),$(call aux-variant-load-env,$(_ltf_aux_variant)))\
-$(call intermediates-dir-for,$(link-type-class),$(link-type-name),$(filter AUX HOST HOST_CROSS,$(link-type-prefix)),$(link-type-common),$(link-type-2ndarchprefix),$(filter HOST_CROSS,$(link-type-prefix)))/$(if $(filter APPS,$(link-type-class)),$(if $(link-type-common),,$(link-type-2ndarchprefix)jni_))link_type\
-$(if $(_ltf_aux_variant),$(call aux-variant-load-env,none))\
-$(eval _ltf_aux_variant:=)
-endef
+# -------------------------------------------------------------------
+# Handle exported/imported includes
-# Write out the file-based link_type rules for the ALLOW_MISSING_DEPENDENCIES
-# case. We always need to write the file for mm to work, but only need to
-# check it if we weren't able to check it when reading the Android.mk files.
-define link-type-file-rule
-my_link_type_deps := $(foreach l,$($(1).DEPS),$(call link-type-file,$(l)))
-my_link_type_file := $(call link-type-file,$(1))
-$($(1).BUILT): | $$(my_link_type_file)
-$$(my_link_type_file): PRIVATE_DEPS := $$(my_link_type_deps)
-ifeq ($($(1).MISSING),true)
-$$(my_link_type_file): $(CHECK_LINK_TYPE)
-endif
-$$(my_link_type_file): $$(my_link_type_deps)
- @echo Check module type: $$@
- $$(hide) mkdir -p $$(dir $$@) && rm -f $$@
-ifeq ($($(1).MISSING),true)
- $$(hide) $(CHECK_LINK_TYPE) --makefile $($(1).MAKEFILE) --module $(link-type-name) \
- --type "$($(1).TYPE)" $(addprefix --allowed ,$($(1).ALLOWED)) \
- $(addprefix --warn ,$($(1).WARN)) $$(PRIVATE_DEPS)
-endif
- $$(hide) echo "$($(1).TYPE)" >$$@
-endef
+# Recursively calculate flags
+$(foreach export,$(EXPORTS_LIST), \
+ $(eval EXPORTS.$$(export) = $$(EXPORTS.$(export).FLAGS) \
+ $(foreach dep,$(EXPORTS.$(export).REEXPORT),$$(EXPORTS.$(dep)))))
-$(foreach lt,$(ALL_LINK_TYPES),\
- $(eval $(call link-type-file-rule,$(lt))))
+# Recursively calculate dependencies
+$(foreach export,$(EXPORTS_LIST), \
+ $(eval EXPORT_DEPS.$$(export) = $$(EXPORTS.$(export).DEPS) \
+ $(foreach dep,$(EXPORTS.$(export).REEXPORT),$$(EXPORT_DEPS.$(dep)))))
+
+# Converts the recursive variables to simple variables so that we don't have to
+# evaluate them for every .o rule
+$(foreach export,$(EXPORTS_LIST),$(eval EXPORTS.$$(export) := $$(strip $$(EXPORTS.$$(export)))))
+$(foreach export,$(EXPORTS_LIST),$(eval EXPORT_DEPS.$$(export) := $$(sort $$(EXPORT_DEPS.$$(export)))))
+
+# Add dependencies
+$(foreach export,$(EXPORTS_LIST),$(eval $(call add-dependency,$$(EXPORTS.$$(export).USERS),$$(EXPORT_DEPS.$$(export)))))
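A minimal standalone sketch, with hypothetical module names and paths, of the per-module data this pass flattens in place of the old per-module export_includes files:

  EXPORTS_LIST := libbar_int libfoo_int
  EXPORTS.libbar_int.FLAGS := -Iexternal/bar/include
  EXPORTS.libbar_int.DEPS := external/bar/include/bar.h
  EXPORTS.libfoo_int.FLAGS := -Iexternal/foo/include
  EXPORTS.libfoo_int.DEPS := external/foo/include/foo.h
  EXPORTS.libfoo_int.REEXPORT := libbar_int
  # After the recursive pass, EXPORTS.libfoo_int = -Iexternal/foo/include -Iexternal/bar/include,
  # and EXPORT_DEPS.libfoo_int is the sorted union of both modules' exported include deps.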
# -------------------------------------------------------------------
# Figure out our module sets.
@@ -1111,7 +1029,7 @@
define resolve-product-relative-paths
$(subst $(_vendor_path_placeholder),$(TARGET_COPY_OUT_VENDOR),\
$(subst $(_product_path_placeholder),$(TARGET_COPY_OUT_PRODUCT),\
- $(subst $(_product_services_path_placeholder),$(TARGET_COPY_OUT_PRODUCT_SERVICES),\
+ $(subst $(_system_ext_path_placeholder),$(TARGET_COPY_OUT_SYSTEM_EXT),\
$(subst $(_odm_path_placeholder),$(TARGET_COPY_OUT_ODM),\
$(foreach p,$(1),$(call append-path,$(PRODUCT_OUT),$(p)$(2)))))))
endef
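A hedged usage sketch (the destination path is hypothetical): a path recorded against the system_ext placeholder resolves back to the real copy-out directory.

  example_dest := $(call resolve-product-relative-paths,$(_system_ext_path_placeholder)/etc/permissions/example.xml)
  # -> $(PRODUCT_OUT)/$(TARGET_COPY_OUT_SYSTEM_EXT)/etc/permissions/example.xml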
@@ -1244,6 +1162,7 @@
libnativebridge.so \
libnativehelper.so \
libnativeloader.so \
+ libneuralnetworks.so \
libnpt.so \
libopenjdk.so \
libopenjdkjvm.so \
@@ -1275,6 +1194,14 @@
# when native bridge is active.
APEX_LIBS_ABSENCE_CHECK_EXCLUDE += lib/arm lib64/arm64
+ifdef TARGET_NATIVE_BRIDGE_RELATIVE_PATH
+ APEX_LIBS_ABSENCE_CHECK_EXCLUDE += lib/$(TARGET_NATIVE_BRIDGE_RELATIVE_PATH) lib64/$(TARGET_NATIVE_BRIDGE_RELATIVE_PATH)
+endif
+
+ifdef TARGET_NATIVE_BRIDGE_2ND_RELATIVE_PATH
+ APEX_LIBS_ABSENCE_CHECK_EXCLUDE += lib/$(TARGET_NATIVE_BRIDGE_2ND_RELATIVE_PATH) lib64/$(TARGET_NATIVE_BRIDGE_2ND_RELATIVE_PATH)
+endif
+
# Exclude vndk-* subdirectories which contain prebuilts from older releases.
APEX_LIBS_ABSENCE_CHECK_EXCLUDE += lib/vndk-% lib64/vndk-%
@@ -1309,7 +1236,7 @@
$(filter-out $(foreach dir,$(APEX_LIBS_ABSENCE_CHECK_EXCLUDE), \
$(TARGET_OUT)/$(if $(findstring %,$(dir)),$(dir),$(dir)/%)), \
$(filter $(TARGET_OUT)/lib/% $(TARGET_OUT)/lib64/%,$(1)))), \
- APEX libraries found in system image (see comment for check-apex-libs-absence in \
+ APEX libraries found in product_target_FILES (see comment for check-apex-libs-absence in \
build/make/core/main.mk for details))
endef
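A small standalone sketch (entries are examples) of how each exclude entry becomes a filter pattern in the check above: entries containing '%' are used as patterns directly, while plain directories get '/%' appended.

  example_excludes := lib64/arm64 lib/vndk-%
  example_patterns := $(foreach dir,$(example_excludes),$(TARGET_OUT)/$(if $(findstring %,$(dir)),$(dir),$(dir)/%))
  # -> $(TARGET_OUT)/lib64/arm64/% $(TARGET_OUT)/lib/vndk-%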
@@ -1317,6 +1244,11 @@
# dependencies visible to make, but as long as they have install rules in
# /system they may still be created there through other make targets. To catch
# that we also do a check on disk just before the system image is built.
+ # NB: This check may fail if you have built intermediate targets in the out
+ # tree earlier, e.g. "m <some lib in APEX_MODULE_LIBS>". In that case, please
+ # try "m installclean && m systemimage" to get a correct system image. For
+ # local work you can also disable the check with the
+ # DISABLE_APEX_LIBS_ABSENCE_CHECK environment variable.
define check-apex-libs-absence-on-disk
$(hide) ( \
cd $(TARGET_OUT) && \
@@ -1325,8 +1257,9 @@
-type f \( -false $(foreach lib,$(APEX_MODULE_LIBS),-o -name $(lib)) \) \
-print) && \
if [ -n "$$findres" ]; then \
- echo "APEX libraries found in system image (see comment for check-apex-libs-absence" 1>&2; \
- echo "in build/make/core/main.mk for details):" 1>&2; \
+ echo "APEX libraries found in system image in TARGET_OUT (see comments for" 1>&2; \
+ echo "check-apex-libs-absence and check-apex-libs-absence-on-disk in" 1>&2; \
+ echo "build/make/core/main.mk for details):" 1>&2; \
echo "$$findres" | sort 1>&2; \
false; \
fi; \
@@ -1573,12 +1506,6 @@
.PHONY: ramdisk_debug
ramdisk_debug: $(INSTALLED_DEBUG_RAMDISK_TARGET)
-.PHONY: systemtarball
-systemtarball: $(INSTALLED_SYSTEMTARBALL_TARGET)
-
-.PHONY: boottarball
-boottarball: $(INSTALLED_BOOTTARBALL_TARGET)
-
.PHONY: userdataimage
userdataimage: $(INSTALLED_USERDATAIMAGE_TARGET)
@@ -1586,9 +1513,6 @@
$(call dist-for-goals, userdataimage, $(BUILT_USERDATAIMAGE_TARGET))
endif
-.PHONY: userdatatarball
-userdatatarball: $(INSTALLED_USERDATATARBALL_TARGET)
-
.PHONY: cacheimage
cacheimage: $(INSTALLED_CACHEIMAGE_TARGET)
@@ -1601,8 +1525,8 @@
.PHONY: productimage
productimage: $(INSTALLED_PRODUCTIMAGE_TARGET)
-.PHONY: productservicesimage
-productservicesimage: $(INSTALLED_PRODUCT_SERVICESIMAGE_TARGET)
+.PHONY: systemextimage
+systemextimage: $(INSTALLED_SYSTEM_EXTIMAGE_TARGET)
.PHONY: odmimage
odmimage: $(INSTALLED_ODMIMAGE_TARGET)
@@ -1652,8 +1576,8 @@
$(INSTALLED_FILES_JSON_ODM) \
$(INSTALLED_FILES_FILE_PRODUCT) \
$(INSTALLED_FILES_JSON_PRODUCT) \
- $(INSTALLED_FILES_FILE_PRODUCT_SERVICES) \
- $(INSTALLED_FILES_JSON_PRODUCT_SERVICES) \
+ $(INSTALLED_FILES_FILE_SYSTEM_EXT) \
+ $(INSTALLED_FILES_JSON_SYSTEM_EXT) \
$(INSTALLED_FILES_FILE_SYSTEMOTHER) \
$(INSTALLED_FILES_JSON_SYSTEMOTHER) \
$(INSTALLED_FILES_FILE_RAMDISK) \
@@ -1735,6 +1659,7 @@
$(INTERNAL_OTA_RETROFIT_DYNAMIC_PARTITIONS_PACKAGE_TARGET) \
$(BUILT_OTATOOLS_PACKAGE) \
$(SYMBOLS_ZIP) \
+ $(PROGUARD_DICT_ZIP) \
$(COVERAGE_ZIP) \
$(APPCOMPAT_ZIP) \
$(INSTALLED_FILES_FILE) \
@@ -1745,8 +1670,8 @@
$(INSTALLED_FILES_JSON_ODM) \
$(INSTALLED_FILES_FILE_PRODUCT) \
$(INSTALLED_FILES_JSON_PRODUCT) \
- $(INSTALLED_FILES_FILE_PRODUCT_SERVICES) \
- $(INSTALLED_FILES_JSON_PRODUCT_SERVICES) \
+ $(INSTALLED_FILES_FILE_SYSTEM_EXT) \
+ $(INSTALLED_FILES_JSON_SYSTEM_EXT) \
$(INSTALLED_FILES_FILE_SYSTEMOTHER) \
$(INSTALLED_FILES_JSON_SYSTEMOTHER) \
$(INSTALLED_FILES_FILE_RECOVERY) \
@@ -1766,7 +1691,6 @@
$(call dist-for-goals, droidcore, \
$(APPS_ZIP) \
$(INTERNAL_EMULATOR_PACKAGE_TARGET) \
- $(PACKAGE_STATS_FILE) \
)
endif
endif
@@ -1850,12 +1774,13 @@
LSDUMP_PATHS_FILE := $(PRODUCT_OUT)/lsdump_paths.txt
.PHONY: findlsdumps
-findlsdumps: $(LSDUMP_PATHS_FILE) $(LSDUMP_PATHS)
+# LSDUMP_PATHS is a list of tag:path.
+findlsdumps: $(LSDUMP_PATHS_FILE) $(foreach p,$(LSDUMP_PATHS),$(call word-colon,2,$(p)))
$(LSDUMP_PATHS_FILE): PRIVATE_LSDUMP_PATHS := $(LSDUMP_PATHS)
$(LSDUMP_PATHS_FILE):
@echo "Generate $@"
- @rm -rf $@ && echo "$(PRIVATE_LSDUMP_PATHS)" | sed -e 's/ /\n/g' > $@
+ @rm -rf $@ && echo -e "$(subst :,:$(space),$(subst $(space),\n,$(PRIVATE_LSDUMP_PATHS)))" > $@
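A minimal sketch (the tag and path are hypothetical) of how the new tag:path entries are split; word-colon extracts the path half that findlsdumps now depends on.

  example_entry := LLNDK:out/target/product/generic/lsdump/libexample.so.lsdump
  example_path := $(call word-colon,2,$(example_entry))
  # -> out/target/product/generic/lsdump/libexample.so.lsdump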
.PHONY: check-elf-files
check-elf-files:
@@ -1887,5 +1812,3 @@
$(call dist-write-file,$(KATI_PACKAGE_MK_DIR)/dist.mk)
$(info [$(call inc_and_print,subdir_makefiles_inc)/$(subdir_makefiles_total)] writing build rules ...)
-
-endif # KATI
diff --git a/core/misc_prebuilt_internal.mk b/core/misc_prebuilt_internal.mk
index cdd5cd5..a52b9e5 100644
--- a/core/misc_prebuilt_internal.mk
+++ b/core/misc_prebuilt_internal.mk
@@ -25,5 +25,14 @@
include $(BUILD_SYSTEM)/base_rules.mk
+ifneq ($(filter init%rc,$(notdir $(LOCAL_INSTALLED_MODULE)))$(filter %/etc/init,$(dir $(LOCAL_INSTALLED_MODULE))),)
+ $(eval $(call copy-init-script-file-checked,$(my_prebuilt_src_file),$(LOCAL_BUILT_MODULE)))
+else ifneq ($(LOCAL_PREBUILT_STRIP_COMMENTS),)
+$(LOCAL_BUILT_MODULE) : $(my_prebuilt_src_file)
+ $(transform-prebuilt-to-target-strip-comments)
+else
$(LOCAL_BUILT_MODULE) : $(my_prebuilt_src_file)
$(transform-prebuilt-to-target)
+endif
+
+built_module := $(LOCAL_BUILT_MODULE)
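A minimal sketch (file name hypothetical) of the new dispatch: a prebuilt whose installed name looks like an init script takes the checked-copy branch instead of a plain copy.

  example_installed_name := init.example.rc
  example_uses_checked_copy := $(if $(filter init%rc,$(example_installed_name)),true,false)
  # -> true, so copy-init-script-file-checked is used for this prebuilt.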
diff --git a/core/ninja_config.mk b/core/ninja_config.mk
index e9e89c3..b1f4b03 100644
--- a/core/ninja_config.mk
+++ b/core/ninja_config.mk
@@ -7,7 +7,7 @@
KATI_OUTPUT_PATTERNS := $(OUT_DIR)/build%.ninja $(OUT_DIR)/ninja%.sh
# Modifier goals we don't need to pass to Ninja.
-NINJA_EXCLUDE_GOALS := all APP-% PRODUCT-%
+NINJA_EXCLUDE_GOALS := all
# A list of goals which affect parsing of makefiles and we need to pass to Kati.
PARSE_TIME_MAKE_GOALS := \
@@ -16,7 +16,6 @@
all \
ECLIPSE-% \
AUX-% \
- boottarball-nodeps \
brillo_tests \
btnod \
build-art% \
@@ -44,7 +43,6 @@
snod \
stnod \
systemimage-nodeps \
- systemtarball-nodeps \
target-files-package \
test-art% \
user \
@@ -57,7 +55,7 @@
include $(wildcard vendor/*/build/ninja_config.mk)
# Any Android goals that need to be built.
-ANDROID_GOALS := $(filter-out $(KATI_OUTPUT_PATTERNS) $(CKATI) $(MAKEPARALLEL),\
+ANDROID_GOALS := $(filter-out $(KATI_OUTPUT_PATTERNS),\
$(sort $(ORIGINAL_MAKECMDGOALS) $(MAKECMDGOALS)))
# Goals we need to pass to Ninja.
NINJA_GOALS := $(filter-out $(NINJA_EXCLUDE_GOALS), $(ANDROID_GOALS))
diff --git a/core/notice_files.mk b/core/notice_files.mk
index 6df570e..680a0b1 100644
--- a/core/notice_files.mk
+++ b/core/notice_files.mk
@@ -55,7 +55,7 @@
module_installed_filename := $(patsubst $(PRODUCT_OUT)/%,%,$(LOCAL_INSTALLED_MODULE))
else
# This module isn't installable
- ifneq ($(filter STATIC_LIBRARIES HEADER_LIBRARIES,$(LOCAL_MODULE_CLASS)),)
+ ifneq ($(filter STATIC_LIBRARIES RLIB_LIBRARIES PROC_MACRO_LIBRARIES HEADER_LIBRARIES,$(LOCAL_MODULE_CLASS)),)
# Stick the static libraries with the dynamic libraries.
# We can't use xxx_OUT_STATIC_LIBRARIES because it points into
# device-obj or host-obj.
diff --git a/core/pack_dyn_relocs_setup.mk b/core/pack_dyn_relocs_setup.mk
index c5564b1..f86e11e 100644
--- a/core/pack_dyn_relocs_setup.mk
+++ b/core/pack_dyn_relocs_setup.mk
@@ -32,3 +32,12 @@
# Do not pack relocations on host modules
my_pack_module_relocations := false
endif
+
+# Lld relocation packing cannot be enabled for binaries before Android Pie.
+ifneq ($(LOCAL_SDK_VERSION),)
+ ifneq ($(LOCAL_SDK_VERSION),current)
+ ifeq ($(call math_lt,$(LOCAL_SDK_VERSION),28),true)
+ my_pack_module_relocations := false
+ endif
+ endif
+endif
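A minimal sketch, where the SDK level is a hypothetical module setting and core/math.mk is assumed to be loaded for math_lt:

  LOCAL_SDK_VERSION := 27
  example_below_pie := $(call math_lt,$(LOCAL_SDK_VERSION),28)
  # -> true, so my_pack_module_relocations is forced to false for this module.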
diff --git a/core/package_internal.mk b/core/package_internal.mk
index c414295..557a2c6 100644
--- a/core/package_internal.mk
+++ b/core/package_internal.mk
@@ -109,7 +109,7 @@
$(LOCAL_ODM_MODULE) \
$(LOCAL_OEM_MODULE) \
$(LOCAL_PRODUCT_MODULE) \
- $(LOCAL_PRODUCT_SERVICES_MODULE) \
+ $(LOCAL_SYSTEM_EXT_MODULE) \
$(LOCAL_PROPRIETARY_MODULE) \
$(LOCAL_VENDOR_MODULE))
enforce_rro_enabled := $(if $(non_system_module),,true)
@@ -253,6 +253,7 @@
endif # need_compile_res
endif # !custom
LOCAL_PROGUARD_FLAGS := $(addprefix -include ,$(proguard_options_file)) $(LOCAL_PROGUARD_FLAGS)
+LOCAL_PROGUARD_FLAGS_DEPS += $(proguard_options_file)
ifeq (true,$(EMMA_INSTRUMENT))
ifndef LOCAL_EMMA_INSTRUMENT
@@ -272,6 +273,7 @@
LOCAL_STATIC_JAVA_LIBRARIES += jacocoagent
# Exclude jacoco classes from proguard
LOCAL_PROGUARD_FLAGS += -include $(BUILD_SYSTEM)/proguard.jacoco.flags
+LOCAL_PROGUARD_FLAGS_DEPS += $(BUILD_SYSTEM)/proguard.jacoco.flags
endif # Contains java code
else
ifdef LOCAL_SDK_VERSION
@@ -282,6 +284,7 @@
LOCAL_STATIC_JAVA_LIBRARIES += jacocoagent
# Exclude jacoco classes from proguard
LOCAL_PROGUARD_FLAGS += -include $(BUILD_SYSTEM)/proguard.jacoco.flags
+LOCAL_PROGUARD_FLAGS_DEPS += $(BUILD_SYSTEM)/proguard.jacoco.flags
endif # Contains java code
endif # TARGET_BUILD_APPS
endif # LOCAL_SDK_VERSION
@@ -452,7 +455,7 @@
certificate := $(LOCAL_CERTIFICATE).x509.pem
additional_certificates := $(foreach c,$(LOCAL_ADDITIONAL_CERTIFICATES), $(c).x509.pem $(c).pk8)
-$(LOCAL_BUILT_MODULE): $(private_key) $(certificate) $(SIGNAPK_JAR)
+$(LOCAL_BUILT_MODULE): $(private_key) $(certificate) $(SIGNAPK_JAR) $(SIGNAPK_JNI_LIBRARY_PATH)
$(LOCAL_BUILT_MODULE): PRIVATE_PRIVATE_KEY := $(private_key)
$(LOCAL_BUILT_MODULE): PRIVATE_CERTIFICATE := $(certificate)
@@ -508,7 +511,7 @@
endif
endif
-# Run veridex on product, product_services and vendor modules.
+# Run veridex on product, system_ext and vendor modules.
# We skip it for unbundled app builds where we cannot build veridex.
module_run_appcompat :=
ifeq (true,$(non_system_module))
diff --git a/core/pdk_config.mk b/core/pdk_config.mk
index ce78ece..4a069d3 100644
--- a/core/pdk_config.mk
+++ b/core/pdk_config.mk
@@ -20,6 +20,7 @@
target/common/obj/JAVA_LIBRARIES/conscrypt_intermediates \
target/common/obj/JAVA_LIBRARIES/core-oj_intermediates \
target/common/obj/JAVA_LIBRARIES/core-libart_intermediates \
+ target/common/obj/JAVA_LIBRARIES/core-icu4j_intermediates \
target/common/obj/JAVA_LIBRARIES/ext_intermediates \
target/common/obj/JAVA_LIBRARIES/framework_intermediates \
target/common/obj/JAVA_LIBRARIES/hwbinder_intermediates \
diff --git a/core/product-graph.mk b/core/product-graph.mk
index 9db277c..b97a69d 100644
--- a/core/product-graph.mk
+++ b/core/product-graph.mk
@@ -117,7 +117,7 @@
$(hide) echo 'PRODUCT_DEFAULT_PROPERTY_OVERRIDES=$$(PRODUCTS.$(strip $(1)).PRODUCT_DEFAULT_PROPERTY_OVERRIDES)' >> $$@
$(hide) echo 'PRODUCT_SYSTEM_DEFAULT_PROPERTIES=$$(PRODUCTS.$(strip $(1)).PRODUCT_SYSTEM_DEFAULT_PROPERTIES)' >> $$@
$(hide) echo 'PRODUCT_PRODUCT_PROPERTIES=$$(PRODUCTS.$(strip $(1)).PRODUCT_PRODUCT_PROPERTIES)' >> $$@
- $(hide) echo 'PRODUCT_PRODUCT_SERVICES_PROPERTIES=$$(PRODUCTS.$(strip $(1)).PRODUCT_PRODUCT_SERVICES_PROPERTIES)' >> $$@
+ $(hide) echo 'PRODUCT_SYSTEM_EXT_PROPERTIES=$$(PRODUCTS.$(strip $(1)).PRODUCT_SYSTEM_EXT_PROPERTIES)' >> $$@
$(hide) echo 'PRODUCT_ODM_PROPERTIES=$$(PRODUCTS.$(strip $(1)).PRODUCT_ODM_PROPERTIES)' >> $$@
$(hide) echo 'PRODUCT_CHARACTERISTICS=$$(PRODUCTS.$(strip $(1)).PRODUCT_CHARACTERISTICS)' >> $$@
$(hide) echo 'PRODUCT_COPY_FILES=$$(PRODUCTS.$(strip $(1)).PRODUCT_COPY_FILES)' >> $$@
@@ -131,6 +131,7 @@
$(hide) echo 'PRODUCT_SDK_ADDON_DOC_MODULES=$$(PRODUCTS.$(strip $(1)).PRODUCT_SDK_ADDON_DOC_MODULES)' >> $$@
$(hide) echo 'PRODUCT_DEFAULT_WIFI_CHANNELS=$$(PRODUCTS.$(strip $(1)).PRODUCT_DEFAULT_WIFI_CHANNELS)' >> $$@
$(hide) echo 'PRODUCT_DEFAULT_DEV_CERTIFICATE=$$(PRODUCTS.$(strip $(1)).PRODUCT_DEFAULT_DEV_CERTIFICATE)' >> $$@
+ $(hide) echo 'PRODUCT_MAINLINE_SEPOLICY_DEV_CERTIFICATES=$$(PRODUCTS.$(strip $(1)).PRODUCT_MAINLINE_SEPOLICY_DEV_CERTIFICATES)' >> $$@
$(hide) echo 'PRODUCT_RESTRICT_VENDOR_FILES=$$(PRODUCTS.$(strip $(1)).PRODUCT_RESTRICT_VENDOR_FILES)' >> $$@
$(hide) echo 'PRODUCT_VENDOR_KERNEL_HEADERS=$$(PRODUCTS.$(strip $(1)).PRODUCT_VENDOR_KERNEL_HEADERS)' >> $$@
diff --git a/core/product.mk b/core/product.mk
index 77b129d..2c89fab 100644
--- a/core/product.mk
+++ b/core/product.mk
@@ -161,8 +161,8 @@
# A list of property assignments, like "key = value", with zero or more
# whitespace characters on either side of the '='.
-# used for adding properties to build.prop of product partition
-_product_list_vars += PRODUCT_PRODUCT_SERVICES_PROPERTIES
+# used for adding properties to build.prop of system_ext and odm partitions
+_product_list_vars += PRODUCT_SYSTEM_EXT_PROPERTIES
_product_list_vars += PRODUCT_ODM_PROPERTIES
# The characteristics of the product, which among other things is passed to aapt
@@ -205,6 +205,7 @@
_product_list_vars += PRODUCT_DEFAULT_WIFI_CHANNELS
_product_list_vars += PRODUCT_DEFAULT_DEV_CERTIFICATE
+_product_list_vars += PRODUCT_MAINLINE_SEPOLICY_DEV_CERTIFICATES
_product_list_vars += PRODUCT_RESTRICT_VENDOR_FILES
# The list of product-specific kernel header dirs
@@ -238,7 +239,7 @@
_product_single_value_vars += PRODUCT_SYSTEM_VERITY_PARTITION
_product_single_value_vars += PRODUCT_VENDOR_VERITY_PARTITION
_product_single_value_vars += PRODUCT_PRODUCT_VERITY_PARTITION
-_product_single_value_vars += PRODUCT_PRODUCT_SERVICES_VERITY_PARTITION
+_product_single_value_vars += PRODUCT_SYSTEM_EXT_VERITY_PARTITION
_product_single_value_vars += PRODUCT_ODM_VERITY_PARTITION
_product_single_value_vars += PRODUCT_SYSTEM_SERVER_DEBUG_INFO
_product_single_value_vars += PRODUCT_OTHER_JAVA_DEBUG_INFO
@@ -265,7 +266,7 @@
_product_single_value_vars += PRODUCT_SYSTEM_BASE_FS_PATH
_product_single_value_vars += PRODUCT_VENDOR_BASE_FS_PATH
_product_single_value_vars += PRODUCT_PRODUCT_BASE_FS_PATH
-_product_single_value_vars += PRODUCT_PRODUCT_SERVICES_BASE_FS_PATH
+_product_single_value_vars += PRODUCT_SYSTEM_EXT_BASE_FS_PATH
_product_single_value_vars += PRODUCT_ODM_BASE_FS_PATH
# The first API level this product shipped with
@@ -280,9 +281,6 @@
# Make this art variable visible to soong_config.mk.
_product_single_value_vars += PRODUCT_ART_USE_READ_BARRIER
-# Whether the product is an Android Things variant.
-_product_single_value_vars += PRODUCT_IOT
-
# Add reserved headroom to a system image.
_product_single_value_vars += PRODUCT_SYSTEM_HEADROOM
@@ -357,13 +355,14 @@
_product_single_value_vars += PRODUCT_BUILD_SYSTEM_OTHER_IMAGE
_product_single_value_vars += PRODUCT_BUILD_VENDOR_IMAGE
_product_single_value_vars += PRODUCT_BUILD_PRODUCT_IMAGE
-_product_single_value_vars += PRODUCT_BUILD_PRODUCT_SERVICES_IMAGE
+_product_single_value_vars += PRODUCT_BUILD_SYSTEM_EXT_IMAGE
_product_single_value_vars += PRODUCT_BUILD_ODM_IMAGE
_product_single_value_vars += PRODUCT_BUILD_CACHE_IMAGE
_product_single_value_vars += PRODUCT_BUILD_RAMDISK_IMAGE
_product_single_value_vars += PRODUCT_BUILD_USERDATA_IMAGE
_product_single_value_vars += PRODUCT_BUILD_RECOVERY_IMAGE
_product_single_value_vars += PRODUCT_BUILD_BOOT_IMAGE
+_product_single_value_vars += PRODUCT_BUILD_VBMETA_IMAGE
_product_list_vars += PRODUCT_UPDATABLE_BOOT_MODULES
_product_list_vars += PRODUCT_UPDATABLE_BOOT_LOCATIONS
@@ -371,6 +370,12 @@
# Whether the product would like to check prebuilt ELF files.
_product_single_value_vars += PRODUCT_CHECK_ELF_FILES
+# If set, device uses virtual A/B.
+_product_single_value_vars += PRODUCT_VIRTUAL_AB_OTA
+
+# If set, device retrofits virtual A/B.
+_product_single_value_vars += PRODUCT_VIRTUAL_AB_OTA_RETROFIT
+
.KATI_READONLY := _product_single_value_vars _product_list_vars
_product_var_list :=$= $(_product_single_value_vars) $(_product_list_vars)
diff --git a/core/product_config.mk b/core/product_config.mk
index cb58cf4..1293c94 100644
--- a/core/product_config.mk
+++ b/core/product_config.mk
@@ -78,86 +78,18 @@
endef
# ---------------------------------------------------------------
-
-# These are the valid values of TARGET_BUILD_VARIANT. Also, if anything else is passed
-# as the variant in the PRODUCT-$TARGET_BUILD_PRODUCT-$TARGET_BUILD_VARIANT form,
-# it will be treated as a goal, and the eng variant will be used.
-INTERNAL_VALID_VARIANTS := user userdebug eng
-
-# ---------------------------------------------------------------
-# Provide "PRODUCT-<prodname>-<goal>" targets, which lets you build
-# a particular configuration without needing to set up the environment.
-#
+# Check for obsolete PRODUCT- and APP- goals
ifeq ($(CALLED_FROM_SETUP),true)
product_goals := $(strip $(filter PRODUCT-%,$(MAKECMDGOALS)))
ifdef product_goals
- # Scrape the product and build names out of the goal,
- # which should be of the form PRODUCT-<productname>-<buildname>.
- #
- ifneq ($(words $(product_goals)),1)
- $(error Only one PRODUCT-* goal may be specified; saw "$(product_goals)")
- endif
- goal_name := $(product_goals)
- product_goals := $(patsubst PRODUCT-%,%,$(product_goals))
- product_goals := $(subst -, ,$(product_goals))
- ifneq ($(words $(product_goals)),2)
- $(error Bad PRODUCT-* goal "$(goal_name)")
- endif
-
- # The product they want
- TARGET_PRODUCT := $(word 1,$(product_goals))
-
- # The variant they want
- TARGET_BUILD_VARIANT := $(word 2,$(product_goals))
-
- ifeq ($(TARGET_BUILD_VARIANT),tests)
- $(error "tests" has been deprecated as a build variant. Use it as a build goal instead.)
- endif
-
- # The build server wants to do make PRODUCT-dream-sdk
- # which really means TARGET_PRODUCT=dream make sdk.
- ifneq ($(filter-out $(INTERNAL_VALID_VARIANTS),$(TARGET_BUILD_VARIANT)),)
- override MAKECMDGOALS := $(MAKECMDGOALS) $(TARGET_BUILD_VARIANT)
- TARGET_BUILD_VARIANT := userdebug
- default_goal_substitution :=
- else
- default_goal_substitution := droid
- endif
-
- # Replace the PRODUCT-* goal with the build goal that it refers to.
- # Note that this will ensure that it appears in the same relative
- # position, in case it matters.
- override MAKECMDGOALS := $(patsubst $(goal_name),$(default_goal_substitution),$(MAKECMDGOALS))
+ $(error The PRODUCT-* goal is no longer supported. Use `TARGET_PRODUCT=<product> m droid` instead)
endif
-endif # CALLED_FROM_SETUP
-# else: Use the value set in the environment or buildspec.mk.
-
-# ---------------------------------------------------------------
-# Provide "APP-<appname>" targets, which lets you build
-# an unbundled app.
-#
-ifeq ($(CALLED_FROM_SETUP),true)
unbundled_goals := $(strip $(filter APP-%,$(MAKECMDGOALS)))
ifdef unbundled_goals
- ifneq ($(words $(unbundled_goals)),1)
- $(error Only one APP-* goal may be specified; saw "$(unbundled_goals)")
- endif
- TARGET_BUILD_APPS := $(strip $(subst -, ,$(patsubst APP-%,%,$(unbundled_goals))))
- ifneq ($(filter droid,$(MAKECMDGOALS)),)
- override MAKECMDGOALS := $(patsubst $(unbundled_goals),,$(MAKECMDGOALS))
- else
- override MAKECMDGOALS := $(patsubst $(unbundled_goals),droid,$(MAKECMDGOALS))
- endif
+ $(error The APP-* goal is no longer supported. Use `TARGET_BUILD_APPS="<app>" m droid` instead)
endif # unbundled_goals
endif
-# Now that we've parsed APP-* and PRODUCT-*, mark these as readonly
-TARGET_BUILD_APPS ?=
-.KATI_READONLY := \
- TARGET_PRODUCT \
- TARGET_BUILD_VARIANT \
- TARGET_BUILD_APPS
-
# Default to building dalvikvm on hosts that support it...
ifeq ($(HOST_OS),linux)
# ... or if the if the option is already set
@@ -418,7 +350,7 @@
SYSTEM_OTHER \
VENDOR \
PRODUCT \
- PRODUCT_SERVICES \
+ SYSTEM_EXT \
ODM \
CACHE \
RAMDISK \
diff --git a/core/proguard_basic_keeps.flags b/core/proguard_basic_keeps.flags
index a0f577d..28ec2d0 100644
--- a/core/proguard_basic_keeps.flags
+++ b/core/proguard_basic_keeps.flags
@@ -72,3 +72,7 @@
# Less spammy.
-dontnote
+
+# The lite proto runtime uses reflection to access fields based on the names in
+# the schema, so keep all of the fields.
+-keepclassmembers class * extends com.google.protobuf.MessageLite { <fields>; }
diff --git a/core/rbe.mk b/core/rbe.mk
new file mode 100644
index 0000000..231859b
--- /dev/null
+++ b/core/rbe.mk
@@ -0,0 +1,32 @@
+#
+# Copyright (C) 2019 The Android Open Source Project
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+# Notice: this works only with Google's RBE service.
+ifneq ($(filter-out false,$(USE_RBE)),)
+ ifdef RBE_DIR
+ rbe_dir := $(RBE_DIR)
+ else
+ rbe_dir := $(HOME)/rbe
+ endif
+ RBE_WRAPPER := $(rbe_dir)/rewrapper --labels=type=compile,lang=cpp,compiler=clang --env_var_whitelist=PWD
+
+ # Append rewrapper to existing *_WRAPPER variables so it's possible to
+ # use both ccache and rewrapper.
+ CC_WRAPPER := $(strip $(CC_WRAPPER) $(RBE_WRAPPER))
+ CXX_WRAPPER := $(strip $(CXX_WRAPPER) $(RBE_WRAPPER))
+
+ rbe_dir :=
+endif
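A minimal sketch (paths hypothetical, assuming ccache was already configured) of the append behaviour; builds opt in by setting USE_RBE=true in the environment.

  CC_WRAPPER := ccache
  RBE_WRAPPER := $(HOME)/rbe/rewrapper --labels=type=compile,lang=cpp,compiler=clang --env_var_whitelist=PWD
  CC_WRAPPER := $(strip $(CC_WRAPPER) $(RBE_WRAPPER))
  # -> "ccache <home>/rbe/rewrapper --labels=type=compile,lang=cpp,compiler=clang --env_var_whitelist=PWD",
  #    which is prefixed onto every clang invocation.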
diff --git a/core/sdk_check.mk b/core/sdk_check.mk
index 49ea2a8..c09fc7c 100644
--- a/core/sdk_check.mk
+++ b/core/sdk_check.mk
@@ -8,6 +8,11 @@
whitelisted_modules := framework-res__auto_generated_rro
+
+ifeq (,$(JAVA_SDK_ENFORCEMENT_ERROR))
+ JAVA_SDK_ENFORCEMENT_ERROR := APPS
+endif
+
ifeq ($(LOCAL_SDK_VERSION)$(LOCAL_PRIVATE_PLATFORM_APIS),)
ifeq (,$(filter $(LOCAL_MODULE),$(whitelisted_modules)))
ifneq ($(JAVA_SDK_ENFORCEMENT_WARNING)$(JAVA_SDK_ENFORCEMENT_ERROR),)
diff --git a/core/soong_app_prebuilt.mk b/core/soong_app_prebuilt.mk
index d873cc4..8fc2e4c 100644
--- a/core/soong_app_prebuilt.mk
+++ b/core/soong_app_prebuilt.mk
@@ -43,7 +43,7 @@
endif # TURBINE_ENABLED != false
endif
-# Run veridex on product, product_services and vendor modules.
+# Run veridex on product, system_ext and vendor modules.
# We skip it for unbundled app builds where we cannot build veridex.
module_run_appcompat :=
ifeq (true,$(non_system_module))
diff --git a/core/soong_cc_prebuilt.mk b/core/soong_cc_prebuilt.mk
index 301f985..34dd3e8 100644
--- a/core/soong_cc_prebuilt.mk
+++ b/core/soong_cc_prebuilt.mk
@@ -65,16 +65,9 @@
ifneq ($(filter STATIC_LIBRARIES SHARED_LIBRARIES HEADER_LIBRARIES,$(LOCAL_MODULE_CLASS)),)
# Soong module is a static or shared library
- export_includes := $(intermediates)/export_includes
- $(export_includes): PRIVATE_EXPORT_CFLAGS := $(LOCAL_EXPORT_CFLAGS)
- $(export_includes): $(LOCAL_EXPORT_C_INCLUDE_DEPS)
- @echo Export includes file: $< -- $@
- $(hide) mkdir -p $(dir $@) && rm -f $@
- ifdef LOCAL_EXPORT_CFLAGS
- $(hide) echo "$(PRIVATE_EXPORT_CFLAGS)" >$@
- else
- $(hide) touch $@
- endif
+ EXPORTS_LIST := $(EXPORTS_LIST) $(intermediates)
+ EXPORTS.$(intermediates).FLAGS := $(LOCAL_EXPORT_CFLAGS)
+ EXPORTS.$(intermediates).DEPS := $(LOCAL_EXPORT_C_INCLUDE_DEPS)
ifdef LOCAL_SOONG_TOC
$(eval $(call copy-one-file,$(LOCAL_SOONG_TOC),$(LOCAL_BUILT_MODULE).toc))
diff --git a/core/soong_config.mk b/core/soong_config.mk
index ac3cfb3..bcd025b 100644
--- a/core/soong_config.mk
+++ b/core/soong_config.mk
@@ -25,7 +25,7 @@
$(call add_json_str, Make_suffix, -$(TARGET_PRODUCT))
$(call add_json_str, BuildId, $(BUILD_ID))
-$(call add_json_str, BuildNumberFromFile, $$$(BUILD_NUMBER_FROM_FILE))
+$(call add_json_str, BuildNumberFromFile, $(BUILD_NUMBER_FROM_FILE))
$(call add_json_str, Platform_version_name, $(PLATFORM_VERSION))
$(call add_json_val, Platform_sdk_version, $(PLATFORM_SDK_VERSION))
@@ -62,11 +62,13 @@
$(call add_json_str, NativeBridgeArchVariant, $(TARGET_NATIVE_BRIDGE_ARCH_VARIANT))
$(call add_json_str, NativeBridgeCpuVariant, $(TARGET_NATIVE_BRIDGE_CPU_VARIANT))
$(call add_json_list, NativeBridgeAbi, $(TARGET_NATIVE_BRIDGE_ABI))
+$(call add_json_str, NativeBridgeRelativePath, $(TARGET_NATIVE_BRIDGE_RELATIVE_PATH))
$(call add_json_str, NativeBridgeSecondaryArch, $(TARGET_NATIVE_BRIDGE_2ND_ARCH))
$(call add_json_str, NativeBridgeSecondaryArchVariant, $(TARGET_NATIVE_BRIDGE_2ND_ARCH_VARIANT))
$(call add_json_str, NativeBridgeSecondaryCpuVariant, $(TARGET_NATIVE_BRIDGE_2ND_CPU_VARIANT))
$(call add_json_list, NativeBridgeSecondaryAbi, $(TARGET_NATIVE_BRIDGE_2ND_ABI))
+$(call add_json_str, NativeBridgeSecondaryRelativePath, $(TARGET_NATIVE_BRIDGE_2ND_RELATIVE_PATH))
$(call add_json_str, HostArch, $(HOST_ARCH))
$(call add_json_str, HostSecondaryArch, $(HOST_2ND_ARCH))
@@ -133,8 +135,7 @@
$(call add_json_list, BootJars, $(PRODUCT_BOOT_JARS))
$(call add_json_bool, VndkUseCoreVariant, $(TARGET_VNDK_USE_CORE_VARIANT))
-
-$(call add_json_bool, Product_is_iot, $(filter true,$(PRODUCT_IOT)))
+$(call add_json_bool, VndkSnapshotBuildArtifacts, $(VNDK_SNAPSHOT_BUILD_ARTIFACTS))
$(call add_json_bool, Treble_linker_namespaces, $(filter true,$(PRODUCT_TREBLE_LINKER_NAMESPACES)))
$(call add_json_bool, Enforce_vintf_manifest, $(filter true,$(PRODUCT_ENFORCE_VINTF_MANIFEST)))
@@ -146,10 +147,11 @@
$(call add_json_str, VendorPath, $(TARGET_COPY_OUT_VENDOR))
$(call add_json_str, OdmPath, $(TARGET_COPY_OUT_ODM))
$(call add_json_str, ProductPath, $(TARGET_COPY_OUT_PRODUCT))
-$(call add_json_str, ProductServicesPath, $(TARGET_COPY_OUT_PRODUCT_SERVICES))
+$(call add_json_str, SystemExtPath, $(TARGET_COPY_OUT_SYSTEM_EXT))
$(call add_json_bool, MinimizeJavaDebugInfo, $(filter true,$(PRODUCT_MINIMIZE_JAVA_DEBUG_INFO)))
$(call add_json_bool, UseGoma, $(filter-out false,$(USE_GOMA)))
+$(call add_json_bool, UseRBE, $(filter-out false,$(USE_RBE)))
$(call add_json_bool, Arc, $(filter true,$(TARGET_ARC)))
$(call add_json_list, NamespacesToExport, $(PRODUCT_SOONG_NAMESPACES))
@@ -162,7 +164,7 @@
$(call add_json_list, BoardPlatPrivateSepolicyDirs, $(BOARD_PLAT_PRIVATE_SEPOLICY_DIR))
$(call add_json_list, BoardSepolicyM4Defs, $(BOARD_SEPOLICY_M4DEFS))
-$(call add_json_bool, FlattenApex, $(filter true,$(TARGET_FLATTEN_APEX)))
+$(call add_json_bool, Flatten_apex, $(filter true,$(TARGET_FLATTEN_APEX)))
$(call add_json_str, DexpreoptGlobalConfig, $(DEX_PREOPT_CONFIG))
diff --git a/core/soong_rust_prebuilt.mk b/core/soong_rust_prebuilt.mk
new file mode 100644
index 0000000..ea43078
--- /dev/null
+++ b/core/soong_rust_prebuilt.mk
@@ -0,0 +1,127 @@
+# Native prebuilt coming from Soong.
+# Extra inputs:
+# LOCAL_SOONG_UNSTRIPPED_BINARY
+
+ifneq ($(LOCAL_MODULE_MAKEFILE),$(SOONG_ANDROID_MK))
+ $(call pretty-error,soong_rust_prebuilt.mk may only be used from Soong)
+endif
+
+ifdef LOCAL_IS_HOST_MODULE
+ ifneq ($(HOST_OS),$(LOCAL_MODULE_HOST_OS))
+ my_prefix := HOST_CROSS_
+ LOCAL_HOST_PREFIX := $(my_prefix)
+ else
+ my_prefix := HOST_
+ LOCAL_HOST_PREFIX :=
+ endif
+else
+ my_prefix := TARGET_
+endif
+
+ifeq ($($(my_prefix)ARCH),$(LOCAL_MODULE_$(my_prefix)ARCH))
+ # primary arch
+ LOCAL_2ND_ARCH_VAR_PREFIX :=
+else ifeq ($($(my_prefix)2ND_ARCH),$(LOCAL_MODULE_$(my_prefix)ARCH))
+ # secondary arch
+ LOCAL_2ND_ARCH_VAR_PREFIX := $($(my_prefix)2ND_ARCH_VAR_PREFIX)
+else
+ $(call pretty-error,Unsupported LOCAL_MODULE_$(my_prefix)ARCH=$(LOCAL_MODULE_$(my_prefix)ARCH))
+endif
+
+skip_module :=
+ifeq ($(TARGET_TRANSLATE_2ND_ARCH),true)
+ ifndef LOCAL_IS_HOST_MODULE
+ ifdef LOCAL_2ND_ARCH_VAR_PREFIX
+ # Only support rlib and dylib libraries for translated arch
+ ifeq ($(filter RLIB_LIBRARIES DYLIB_LIBRARIES PROC_MACRO_LIBRARIES,$(LOCAL_MODULE_CLASS)),)
+ skip_module := true
+ endif
+ endif
+ endif
+endif
+
+
+ifndef skip_module
+
+# Don't install rlib/proc_macro libraries.
+ifndef LOCAL_UNINSTALLABLE_MODULE
+ ifneq ($(filter RLIB_LIBRARIES PROC_MACRO_LIBRARIES,$(LOCAL_MODULE_CLASS)),)
+ LOCAL_UNINSTALLABLE_MODULE := true
+ endif
+endif
+
+
+#######################################
+include $(BUILD_SYSTEM)/base_rules.mk
+#######################################
+
+# The real dependency will be added after all Android.mks are loaded and the install paths
+# of the shared libraries are determined.
+ifdef LOCAL_INSTALLED_MODULE
+ ifdef LOCAL_SHARED_LIBRARIES
+ my_shared_libraries := $(LOCAL_SHARED_LIBRARIES)
+ $(LOCAL_2ND_ARCH_VAR_PREFIX)$(my_prefix)DEPENDENCIES_ON_SHARED_LIBRARIES += \
+ $(my_register_name):$(LOCAL_INSTALLED_MODULE):$(subst $(space),$(comma),$(my_shared_libraries))
+ endif
+ ifdef LOCAL_DYLIB_LIBRARIES
+ my_dylibs := $(LOCAL_DYLIB_LIBRARIES)
+ # Treat these as shared library dependencies for installation purposes.
+ $(LOCAL_2ND_ARCH_VAR_PREFIX)$(my_prefix)DEPENDENCIES_ON_SHARED_LIBRARIES += \
+ $(my_register_name):$(LOCAL_INSTALLED_MODULE):$(subst $(space),$(comma),$(my_dylibs))
+ endif
+endif
+
+$(LOCAL_BUILT_MODULE): $(LOCAL_PREBUILT_MODULE_FILE)
+ $(transform-prebuilt-to-target)
+ifneq ($(filter EXECUTABLES,$(LOCAL_MODULE_CLASS)),)
+ $(hide) chmod +x $@
+endif
+
+ifndef LOCAL_IS_HOST_MODULE
+ ifdef LOCAL_SOONG_UNSTRIPPED_BINARY
+ my_symbol_path := $(if $(LOCAL_SOONG_SYMBOL_PATH),$(LOCAL_SOONG_SYMBOL_PATH),$(my_module_path))
+ # Store a copy with symbols for symbolic debugging
+ my_unstripped_path := $(TARGET_OUT_UNSTRIPPED)/$(patsubst $(PRODUCT_OUT)/%,%,$(my_symbol_path))
+ # drop /root as /root is mounted as /
+ my_unstripped_path := $(patsubst $(TARGET_OUT_UNSTRIPPED)/root/%,$(TARGET_OUT_UNSTRIPPED)/%, $(my_unstripped_path))
+ symbolic_output := $(my_unstripped_path)/$(my_installed_module_stem)
+ $(eval $(call copy-one-file,$(LOCAL_SOONG_UNSTRIPPED_BINARY),$(symbolic_output)))
+ $(call add-dependency,$(LOCAL_BUILT_MODULE),$(symbolic_output))
+ endif
+endif
+
+# A product may be configured to strip everything in some build variants.
+# We do the stripping as a post-install command so that LOCAL_BUILT_MODULE
+# still keeps its symbols and does not need to be cleaned (and relinked) when
+# you switch build variants.
+ifneq ($(filter $(STRIP_EVERYTHING_BUILD_VARIANTS),$(TARGET_BUILD_VARIANT)),)
+$(LOCAL_INSTALLED_MODULE): PRIVATE_POST_INSTALL_CMD := \
+ $($(LOCAL_2ND_ARCH_VAR_PREFIX)TARGET_STRIP) --strip-all $(LOCAL_INSTALLED_MODULE)
+endif
+
+$(LOCAL_BUILT_MODULE): $(LOCAL_ADDITIONAL_DEPENDENCIES)
+
+# We don't care about installed rlib/static libraries, since the libraries have
+# already been linked into the module at that point. We do, however, care
+# about the NOTICE files for any rlib/static libraries that we use.
+# (see notice_files.mk)
+#
+# Filter out some NDK libraries that are not being exported.
+my_static_libraries := \
+ $(filter-out ndk_libc++_static ndk_libc++abi ndk_libandroid_support ndk_libunwind \
+ ndk_libc++_static.native_bridge ndk_libc++abi.native_bridge \
+ ndk_libandroid_support.native_bridge ndk_libunwind.native_bridge, \
+ $(LOCAL_STATIC_LIBRARIES))
+installed_static_library_notice_file_targets := \
+ $(foreach lib,$(my_static_libraries), \
+ NOTICE-$(if $(LOCAL_IS_HOST_MODULE),HOST$(if $(my_host_cross),_CROSS,),TARGET)-STATIC_LIBRARIES-$(lib))
+installed_static_library_notice_file_targets += \
+ $(foreach lib,$(LOCAL_RLIB_LIBRARIES), \
+ NOTICE-$(if $(LOCAL_IS_HOST_MODULE),HOST$(if $(my_host_cross),_CROSS,),TARGET)-RLIB_LIBRARIES-$(lib))
+
+$(notice_target): | $(installed_static_library_notice_file_targets)
+$(LOCAL_INSTALLED_MODULE): | $(notice_target)
+endif # !skip_module
+
+skip_module :=
+
diff --git a/core/static_java_library.mk b/core/static_java_library.mk
index cb3281a..7eef167 100644
--- a/core/static_java_library.mk
+++ b/core/static_java_library.mk
@@ -74,6 +74,7 @@
endif
LOCAL_PROGUARD_FLAGS := $(addprefix -include ,$(proguard_options_file)) $(LOCAL_PROGUARD_FLAGS)
+LOCAL_PROGUARD_FLAGS_DEPS += $(proguard_options_file)
R_file_stamp := $(intermediates.COMMON)/src/R.stamp
LOCAL_INTERMEDIATE_TARGETS += $(R_file_stamp)
diff --git a/core/tasks/check_boot_jars/package_whitelist.txt b/core/tasks/check_boot_jars/package_whitelist.txt
index 3b63843..8d9878f 100644
--- a/core/tasks/check_boot_jars/package_whitelist.txt
+++ b/core/tasks/check_boot_jars/package_whitelist.txt
@@ -243,3 +243,4 @@
###################################################
# Packages used for Android in Chrome OS
org\.chromium\.arc
+org\.chromium\.arc\..*
diff --git a/core/tasks/cts.mk b/core/tasks/cts.mk
index 33c3a83..f3b4368 100644
--- a/core/tasks/cts.mk
+++ b/core/tasks/cts.mk
@@ -14,7 +14,6 @@
test_suite_name := cts
test_suite_tradefed := cts-tradefed
-# TODO: Fix the following two lines after harness is moved to its own repo
test_suite_dynamic_config := test/suite_harness/tools/cts-tradefed/DynamicConfig.xml
test_suite_readme := test/suite_harness/tools/cts-tradefed/README
diff --git a/core/tasks/cts_instant.mk b/core/tasks/cts_instant.mk
deleted file mode 100644
index 18f1db3..0000000
--- a/core/tasks/cts_instant.mk
+++ /dev/null
@@ -1,25 +0,0 @@
-# Copyright (C) 2018 The Android Open Source Project
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-test_suite_name := cts_instant
-test_suite_tradefed := cts-instant-tradefed
-test_suite_dynamic_config := test/suite_harness/tools/cts-instant-tradefed/DynamicConfig.xml
-test_suite_readme := test/suite_harness/tools/cts-instant-tradefed/README
-
-include $(BUILD_SYSTEM)/tasks/tools/compatibility.mk
-
-.PHONY: cts_instant
-cts_instant: $(compatibility_zip)
-$(call dist-for-goals, cts_instant, $(compatibility_zip))
-
diff --git a/core/tasks/find-shareduid-violation.mk b/core/tasks/find-shareduid-violation.mk
index 45fd937..86052f2 100644
--- a/core/tasks/find-shareduid-violation.mk
+++ b/core/tasks/find-shareduid-violation.mk
@@ -24,7 +24,7 @@
$(INSTALLED_USERDATAIMAGE_TARGET) \
$(INSTALLED_VENDORIMAGE_TARGET) \
$(INSTALLED_PRODUCTIMAGE_TARGET) \
- $(INSTALLED_PRODUCT_SERVICESIMAGE_TARGET)
+ $(INSTALLED_SYSTEM_EXTIMAGE_TARGET)
$(shareduid_violation_modules_filename): $(find_shareduid_script)
$(shareduid_violation_modules_filename): $(AAPT2)
diff --git a/core/tasks/module-info.mk b/core/tasks/module-info.mk
index eb31380..f6cec15 100644
--- a/core/tasks/module-info.mk
+++ b/core/tasks/module-info.mk
@@ -24,10 +24,6 @@
$(hide) echo '}' >> $@
-# If ONE_SHOT_MAKEFILE is set, our view of the world is smaller, so don't
-# rewrite the file in that came.
-ifndef ONE_SHOT_MAKEFILE
droidcore: $(MODULE_INFO_JSON)
-endif
$(call dist-for-goals, general-tests, $(MODULE_INFO_JSON))
diff --git a/core/tasks/mts.mk b/core/tasks/mts.mk
new file mode 100644
index 0000000..56b2390
--- /dev/null
+++ b/core/tasks/mts.mk
@@ -0,0 +1,23 @@
+# Copyright (C) 2019 The Android Open Source Project
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+test_suite_name := mts
+test_suite_tradefed := mts-tradefed
+test_suite_readme := test/mts/README.md
+
+include $(BUILD_SYSTEM)/tasks/tools/compatibility.mk
+
+.PHONY: mts
+mts: $(compatibility_zip)
+$(call dist-for-goals, mts, $(compatibility_zip))
diff --git a/core/tasks/oem_image.mk b/core/tasks/oem_image.mk
index 489feeb..a847b9d 100644
--- a/core/tasks/oem_image.mk
+++ b/core/tasks/oem_image.mk
@@ -34,10 +34,10 @@
@mkdir -p $(TARGET_OUT_OEM)
@mkdir -p $(oemimage_intermediates) && rm -rf $(oemimage_intermediates)/oem_image_info.txt
$(call generate-image-prop-dictionary, $(oemimage_intermediates)/oem_image_info.txt,oem,skip_fsck=true)
- $(hide) PATH=$(INTERNAL_USERIMAGES_BINARY_PATHS):$$PATH \
- build/make/tools/releasetools/build_image.py \
- $(TARGET_OUT_OEM) $(oemimage_intermediates)/oem_image_info.txt $@ $(TARGET_OUT)
- $(hide) $(call assert-max-image-size,$@,$(BOARD_OEMIMAGE_PARTITION_SIZE))
+ PATH=$(INTERNAL_USERIMAGES_BINARY_PATHS):$$PATH \
+ $(BUILD_IMAGE) \
+ $(TARGET_OUT_OEM) $(oemimage_intermediates)/oem_image_info.txt $@ $(TARGET_OUT)
+ $(call assert-max-image-size,$@,$(BOARD_OEMIMAGE_PARTITION_SIZE))
.PHONY: oem_image
oem_image : $(INSTALLED_OEMIMAGE_TARGET)
diff --git a/core/tasks/sdk-addon.mk b/core/tasks/sdk-addon.mk
index 62d9aa6..7f777a5 100644
--- a/core/tasks/sdk-addon.mk
+++ b/core/tasks/sdk-addon.mk
@@ -14,8 +14,6 @@
.PHONY: sdk_addon
-ifndef ONE_SHOT_MAKEFILE
-
# If they didn't define PRODUCT_SDK_ADDON_NAME, then we won't define
# any of these rules.
addon_name := $(PRODUCT_SDK_ADDON_NAME)
@@ -150,5 +148,3 @@
$(error Trying to build sdk_addon, but product '$(INTERNAL_PRODUCT)' does not define one)
endif
endif # addon_name
-
-endif # !ONE_SHOT_MAKEFILE
diff --git a/core/tasks/tools/build_custom_image.mk b/core/tasks/tools/build_custom_image.mk
index b0d1a0c..4721591 100644
--- a/core/tasks/tools/build_custom_image.mk
+++ b/core/tasks/tools/build_custom_image.mk
@@ -152,8 +152,8 @@
$(if $(filter oem,$(PRIVATE_MOUNT_POINT)), \
$(hide) echo "oem.buildnumber=$(BUILD_NUMBER_FROM_FILE)" >> $(PRIVATE_STAGING_DIR)/oem.prop)
$(hide) PATH=$(INTERNAL_USERIMAGES_BINARY_PATHS):$$PATH \
- build/make/tools/releasetools/build_image.py \
- $(PRIVATE_STAGING_DIR) $(PRIVATE_INTERMEDIATES)/image_info.txt $@ $(TARGET_OUT)
+ $(BUILD_IMAGE) \
+ $(PRIVATE_STAGING_DIR) $(PRIVATE_INTERMEDIATES)/image_info.txt $@ $(TARGET_OUT)
my_installed_custom_image := $(PRODUCT_OUT)/$(notdir $(my_built_custom_image))
$(my_installed_custom_image) : $(my_built_custom_image)
diff --git a/core/tasks/tools/compatibility.mk b/core/tasks/tools/compatibility.mk
index f480300..b6dd39e 100644
--- a/core/tasks/tools/compatibility.mk
+++ b/core/tasks/tools/compatibility.mk
@@ -55,7 +55,7 @@
# Copy tools
$(hide) cp $(PRIVATE_TOOLS) $(PRIVATE_OUT_DIR)/tools
$(if $(PRIVATE_DYNAMIC_CONFIG),$(hide) cp $(PRIVATE_DYNAMIC_CONFIG) $(PRIVATE_OUT_DIR)/testcases/$(PRIVATE_SUITE_NAME).dynamic)
- $(hide) find $(dir $@)/$(PRIVATE_NAME) | sort >$@.list
+ $(hide) find $(PRIVATE_OUT_DIR)/tools $(PRIVATE_OUT_DIR)/testcases | sort >$@.list
$(hide) $(SOONG_ZIP) -d -o $@ -C $(dir $@) -l $@.list
# Reset all input variables
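The file list fed to soong_zip is now built from just the tools/ and testcases/ directories of the suite's staging directory instead of everything found under the output path, so only those two trees land in the suite zip. Roughly, the recipe now runs (paths illustrative):

    find <out_dir>/tools <out_dir>/testcases | sort > <suite>.zip.list
    soong_zip -d -o <suite>.zip -C <dir of zip> -l <suite>.zip.list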
diff --git a/core/tasks/vndk.mk b/core/tasks/vndk.mk
index 90ebd92..b487f53 100644
--- a/core/tasks/vndk.mk
+++ b/core/tasks/vndk.mk
@@ -47,64 +47,28 @@
vndk_snapshot_top := $(call intermediates-dir-for,PACKAGING,vndk-snapshot)
vndk_snapshot_out := $(vndk_snapshot_top)/vndk-snapshot
+vndk_snapshot_soong_dir := $(call intermediates-dir-for,PACKAGING,vndk-snapshot-soong)
vndk_snapshot_configs_out := $(vndk_snapshot_top)/configs
#######################################
# vndk_snapshot_zip
vndk_snapshot_variant := $(vndk_snapshot_out)/$(TARGET_ARCH)
-binder :=
-ifneq ($(TARGET_IS_64_BIT), true)
- ifneq ($(TARGET_USES_64_BIT_BINDER), true)
- binder := binder32
- endif
-endif
-vndk_lib_dir := $(subst $(space),/,$(strip $(vndk_snapshot_variant) $(binder) arch-$(TARGET_ARCH)-$(TARGET_ARCH_VARIANT)))
-vndk_lib_dir_2nd := $(subst $(space),/,$(strip $(vndk_snapshot_variant) $(binder) arch-$(TARGET_2ND_ARCH)-$(TARGET_2ND_ARCH_VARIANT)))
vndk_snapshot_zip := $(PRODUCT_OUT)/android-vndk-$(TARGET_PRODUCT).zip
$(vndk_snapshot_zip): PRIVATE_VNDK_SNAPSHOT_OUT := $(vndk_snapshot_out)
-prebuilts := $(SOONG_VNDK_SNAPSHOT_CORE_LIBS)
-$(vndk_snapshot_zip): PRIVATE_VNDK_CORE_OUT := $(vndk_lib_dir)/shared/vndk-core
-$(vndk_snapshot_zip): PRIVATE_VNDK_CORE_SOONG_PREBUILTS := $(prebuilts)
-$(vndk_snapshot_zip): $(prebuilts)
-prebuilts :=
-
-prebuilts := $(SOONG_VNDK_SNAPSHOT_SP_LIBS)
-$(vndk_snapshot_zip): PRIVATE_VNDK_SP_OUT := $(vndk_lib_dir)/shared/vndk-sp
-$(vndk_snapshot_zip): PRIVATE_VNDK_SP_SOONG_PREBUILTS := $(prebuilts)
-$(vndk_snapshot_zip): $(prebuilts)
-prebuilts :=
-
deps := $(call paths-of-intermediates,$(foreach txt,$(vndk_prebuilt_txts), \
$(txt):$(patsubst %.txt,%.$(PLATFORM_VNDK_VERSION).txt,$(txt))))
-prebuilts := $(SOONG_VNDK_SNAPSHOT_CONFIGS)
$(vndk_snapshot_zip): PRIVATE_CONFIGS_OUT := $(vndk_snapshot_variant)/configs
$(vndk_snapshot_zip): PRIVATE_CONFIGS_INTERMEDIATES := $(deps)
-$(vndk_snapshot_zip): PRIVATE_CONFIGS_SOONG_PREBUILTS := $(prebuilts)
-$(vndk_snapshot_zip): $(foreach d,$(deps),$(call word-colon,1,$(d))) $(prebuilts)
+$(vndk_snapshot_zip): $(foreach d,$(deps),$(call word-colon,1,$(d)))
deps :=
-prebuilts :=
-prebuilts := $(SOONG_VNDK_SNAPSHOT_NOTICES)
-$(vndk_snapshot_zip): PRIVATE_NOTICE_FILES_OUT := $(vndk_snapshot_variant)/NOTICE_FILES
-$(vndk_snapshot_zip): PRIVATE_NOTICE_FILES_SOONG_PREBUILTS := $(prebuilts)
-$(vndk_snapshot_zip): $(prebuilts)
-prebuilts :=
+vndk_snapshot_soong_files := $(call copy-many-files, $(SOONG_VNDK_SNAPSHOT_FILES), $(vndk_snapshot_soong_dir))
-ifdef TARGET_2ND_ARCH
-prebuilts := $(SOONG_VNDK_SNAPSHOT_CORE_LIBS_2ND)
-$(vndk_snapshot_zip): PRIVATE_VNDK_CORE_OUT_2ND := $(vndk_lib_dir_2nd)/shared/vndk-core
-$(vndk_snapshot_zip): PRIVATE_VNDK_CORE_SOONG_PREBUILTS_2ND := $(prebuilts)
-$(vndk_snapshot_zip): $(prebuilts)
-prebuilts :=
-
-prebuilts := $(SOONG_VNDK_SNAPSHOT_SP_LIBS_2ND)
-$(vndk_snapshot_zip): PRIVATE_VNDK_SP_OUT_2ND := $(vndk_lib_dir_2nd)/shared/vndk-sp
-$(vndk_snapshot_zip): PRIVATE_VNDK_SP_SOONG_PREBUILTS_2ND := $(prebuilts)
-$(vndk_snapshot_zip): $(prebuilts)
-prebuilts :=
-endif
+$(vndk_snapshot_zip): PRIVATE_VNDK_SNAPSHOT_SOONG_DIR := $(vndk_snapshot_soong_dir)
+$(vndk_snapshot_zip): PRIVATE_VNDK_SNAPSHOT_SOONG_FILES := $(sort $(vndk_snapshot_soong_files))
+$(vndk_snapshot_zip): $(vndk_snapshot_soong_files)
# Args
# $(1): destination directory
@@ -118,16 +82,6 @@
true \
))
-# Args
-# $(1): destination directory
-# $(2): list of prebuilts to copy
-$(vndk_snapshot_zip): private-copy-prebuilts = \
- $(if $(2),$(strip \
- @mkdir -p $(1) && \
- $(foreach file, $(2), cp $(file) $(1) && ) \
- true \
- ))
-
$(vndk_snapshot_zip): $(SOONG_ZIP)
@echo 'Generating VNDK snapshot: $@'
@rm -f $@
@@ -135,21 +89,8 @@
@mkdir -p $(PRIVATE_VNDK_SNAPSHOT_OUT)
$(call private-copy-intermediates, \
$(PRIVATE_CONFIGS_OUT),$(PRIVATE_CONFIGS_INTERMEDIATES))
- $(call private-copy-prebuilts, \
- $(PRIVATE_VNDK_CORE_OUT),$(PRIVATE_VNDK_CORE_SOONG_PREBUILTS))
- $(call private-copy-prebuilts, \
- $(PRIVATE_VNDK_SP_OUT),$(PRIVATE_VNDK_SP_SOONG_PREBUILTS))
- $(call private-copy-prebuilts, \
- $(PRIVATE_CONFIGS_OUT),$(PRIVATE_CONFIGS_SOONG_PREBUILTS))
- $(call private-copy-prebuilts, \
- $(PRIVATE_NOTICE_FILES_OUT),$(PRIVATE_NOTICE_FILES_SOONG_PREBUILTS))
-ifdef TARGET_2ND_ARCH
- $(call private-copy-prebuilts, \
- $(PRIVATE_VNDK_CORE_OUT_2ND),$(PRIVATE_VNDK_CORE_SOONG_PREBUILTS_2ND))
- $(call private-copy-prebuilts, \
- $(PRIVATE_VNDK_SP_OUT_2ND),$(PRIVATE_VNDK_SP_SOONG_PREBUILTS_2ND))
-endif
- $(hide) $(SOONG_ZIP) -o $@ -C $(PRIVATE_VNDK_SNAPSHOT_OUT) -D $(PRIVATE_VNDK_SNAPSHOT_OUT)
+ $(hide) $(SOONG_ZIP) -o $@ -C $(PRIVATE_VNDK_SNAPSHOT_OUT) -D $(PRIVATE_VNDK_SNAPSHOT_OUT) \
+ -C $(PRIVATE_VNDK_SNAPSHOT_SOONG_DIR) $(foreach f,$(PRIVATE_VNDK_SNAPSHOT_SOONG_FILES),-f $(f))
.PHONY: vndk
vndk: $(vndk_snapshot_zip)
@@ -162,11 +103,10 @@
vndk_prebuilt_txts :=
vndk_snapshot_top :=
vndk_snapshot_out :=
+vndk_snapshot_soong_dir :=
+vndk_snapshot_soong_files :=
vndk_snapshot_configs_out :=
vndk_snapshot_variant :=
-binder :=
-vndk_lib_dir :=
-vndk_lib_dir_2nd :=
else # BOARD_VNDK_RUNTIME_DISABLE is set to 'true'
error_msg := "CANNOT generate VNDK snapshot. BOARD_VNDK_RUNTIME_DISABLE must not be set to 'true'."
diff --git a/core/tasks/vts-core-tests.mk b/core/tasks/vts-core-tests.mk
new file mode 100644
index 0000000..fb1e1c6
--- /dev/null
+++ b/core/tasks/vts-core-tests.mk
@@ -0,0 +1,49 @@
+# Copyright (C) 2019 The Android Open Source Project
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+.PHONY: vts-core
+
+vts-core-zip := $(PRODUCT_OUT)/vts-core-tests.zip
+# Create an artifact to include a list of test config files in vts-core.
+vts-core-list-zip := $(PRODUCT_OUT)/vts-core_list.zip
+# Create an artifact to include all test config files in vts-core.
+vts-core-configs-zip := $(PRODUCT_OUT)/vts-core_configs.zip
+my_host_shared_lib_for_vts_core := $(call copy-many-files,$(COMPATIBILITY.vts-core.HOST_SHARED_LIBRARY.FILES))
+$(vts-core-zip) : .KATI_IMPLICIT_OUTPUTS := $(vts-core-list-zip) $(vts-core-configs-zip)
+$(vts-core-zip) : PRIVATE_vts_core_list := $(PRODUCT_OUT)/vts-core_list
+$(vts-core-zip) : PRIVATE_HOST_SHARED_LIBS := $(my_host_shared_lib_for_vts_core)
+$(vts-core-zip) : $(COMPATIBILITY.vts-core.FILES) $(my_host_shared_lib_for_vts_core) $(SOONG_ZIP)
+ echo $(sort $(COMPATIBILITY.vts-core.FILES)) | tr " " "\n" > $@.list
+ grep $(HOST_OUT_TESTCASES) $@.list > $@-host.list || true
+ grep -e .*\\.config$$ $@-host.list > $@-host-test-configs.list || true
+ $(hide) for shared_lib in $(PRIVATE_HOST_SHARED_LIBS); do \
+ echo $$shared_lib >> $@-host.list; \
+ done
+ grep $(TARGET_OUT_TESTCASES) $@.list > $@-target.list || true
+ grep -e .*\\.config$$ $@-target.list > $@-target-test-configs.list || true
+ $(hide) $(SOONG_ZIP) -d -o $@ -P host -C $(HOST_OUT) -l $@-host.list -P target -C $(PRODUCT_OUT) -l $@-target.list
+ $(hide) $(SOONG_ZIP) -d -o $(vts-core-configs-zip) \
+ -P host -C $(HOST_OUT) -l $@-host-test-configs.list \
+ -P target -C $(PRODUCT_OUT) -l $@-target-test-configs.list
+ rm -f $(PRIVATE_vts_core_list)
+ $(hide) grep -e .*\\.config$$ $@-host.list | sed s%$(HOST_OUT)%host%g > $(PRIVATE_vts_core_list)
+ $(hide) grep -e .*\\.config$$ $@-target.list | sed s%$(PRODUCT_OUT)%target%g >> $(PRIVATE_vts_core_list)
+ $(hide) $(SOONG_ZIP) -d -o $(vts-core-list-zip) -C $(dir $@) -f $(PRIVATE_vts_core_list)
+ rm -f $@.list $@-host.list $@-target.list $@-host-test-configs.list $@-target-test-configs.list \
+ $(PRIVATE_vts_core_list)
+
+vts-core: $(vts-core-zip)
+$(call dist-for-goals, vts-core, $(vts-core-zip) $(vts-core-list-zip) $(vts-core-configs-zip))
+
+tests: vts-core
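vts-core produces three artifacts: the test zip itself (host/ and target/ testcases), a configs zip holding only the *.config files, and a list zip naming those configs. A minimal usage sketch, assuming a configured lunch target:

    m vts-core         # builds the three zips into $ANDROID_PRODUCT_OUT
    m dist vts-core    # and copies them into $DIST_DIR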
diff --git a/core/version_defaults.mk b/core/version_defaults.mk
index 0a798d5..30890c0 100644
--- a/core/version_defaults.mk
+++ b/core/version_defaults.mk
@@ -84,12 +84,10 @@
# generate the range of allowed SDK versions, so it must have an entry for every
# unreleased API level targetable by this branch, not just those that are valid
# lunch targets for this branch.
-PLATFORM_VERSION.QP1A := Q
PLATFORM_VERSION.RP1A := R
# These are the current development codenames, if the build is not a final
# release build. If this is a final release build, it is simply "REL".
-PLATFORM_VERSION_CODENAME.QP1A := Q
PLATFORM_VERSION_CODENAME.RP1A := R
ifndef PLATFORM_VERSION
@@ -114,7 +112,7 @@
# When you increment the PLATFORM_SDK_VERSION please ensure you also
# clear out the following text file of all older PLATFORM_VERSION's:
# cts/tests/tests/os/assets/platform_versions.txt
- PLATFORM_SDK_VERSION := 28
+ PLATFORM_SDK_VERSION := 29
endif
.KATI_READONLY := PLATFORM_SDK_VERSION
@@ -252,17 +250,13 @@
# It must be of the form "YYYY-MM-DD" on production devices.
# It must match one of the Android Security Patch Level strings of the Public Security Bulletins.
# If there is no $PLATFORM_SECURITY_PATCH set, keep it empty.
- PLATFORM_SECURITY_PATCH := 2019-06-05
+ PLATFORM_SECURITY_PATCH := 2019-09-05
endif
.KATI_READONLY := PLATFORM_SECURITY_PATCH
ifndef PLATFORM_SECURITY_PATCH_TIMESTAMP
# Used to indicate the matching timestamp for the security patch string in PLATFORM_SECURITY_PATCH.
- ifneq (,$(findstring Darwin,$(UNAME)))
- PLATFORM_SECURITY_PATCH_TIMESTAMP := $(shell date -jf '%Y-%m-%d %T %Z' '$(PLATFORM_SECURITY_PATCH) 00:00:00 GMT' +%s)
- else
- PLATFORM_SECURITY_PATCH_TIMESTAMP := $(shell date -d 'TZ="GMT" $(PLATFORM_SECURITY_PATCH)' +%s)
- endif
+ PLATFORM_SECURITY_PATCH_TIMESTAMP := $(shell date -d 'TZ="GMT" $(PLATFORM_SECURITY_PATCH)' +%s)
endif
.KATI_READONLY := PLATFORM_SECURITY_PATCH_TIMESTAMP
@@ -291,11 +285,7 @@
BUILD_DATETIME := $(shell date +%s)
endif
-ifneq (,$(findstring Darwin,$(UNAME)))
-DATE := date -r $(BUILD_DATETIME)
-else
DATE := date -d @$(BUILD_DATETIME)
-endif
.KATI_READONLY := DATE
# Everything should be using BUILD_DATETIME_FROM_FILE instead.
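Dropping the Darwin branches means the build now assumes GNU coreutils date on the host. The security-patch timestamp is then derived with, for example:

    date -d 'TZ="GMT" 2019-09-05' +%s    # GNU date; epoch seconds for PLATFORM_SECURITY_PATCH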
diff --git a/envsetup.sh b/envsetup.sh
index 941c5f7..40f7705 100644
--- a/envsetup.sh
+++ b/envsetup.sh
@@ -23,6 +23,7 @@
- resgrep: Greps on all local res/*.xml files.
- mangrep: Greps on all local AndroidManifest.xml files.
- mgrep: Greps on all local Makefiles and *.bp files.
+- owngrep: Greps on all local OWNERS files.
- sepgrep: Greps on all local sepolicy files.
- sgrep: Greps on all local source files.
- godir: Go to the directory containing a file.
@@ -272,12 +273,12 @@
# Append asuite prebuilts path to ANDROID_BUILD_PATHS.
local os_arch=$(get_build_var HOST_PREBUILT_TAG)
- local ACLOUD_PATH="$T/prebuilts/asuite/acloud/$os_arch:"
- local AIDEGEN_PATH="$T/prebuilts/asuite/aidegen/$os_arch:"
- local ATEST_PATH="$T/prebuilts/asuite/atest/$os_arch:"
- export ANDROID_BUILD_PATHS=$ANDROID_BUILD_PATHS:$ACLOUD_PATH$AIDEGEN_PATH$ATEST_PATH
+ local ACLOUD_PATH="$T/prebuilts/asuite/acloud/$os_arch"
+ local AIDEGEN_PATH="$T/prebuilts/asuite/aidegen/$os_arch"
+ local ATEST_PATH="$T/prebuilts/asuite/atest/$os_arch"
+ export ANDROID_BUILD_PATHS=$ANDROID_BUILD_PATHS:$ACLOUD_PATH:$AIDEGEN_PATH:$ATEST_PATH:
- export PATH=$ANDROID_BUILD_PATHS:$PATH
+ export PATH=$ANDROID_BUILD_PATHS$PATH
# out with the duplicate old
if [ -n $ANDROID_PYTHONPATH ]; then
@@ -285,6 +286,9 @@
fi
# and in with the new
export ANDROID_PYTHONPATH=$T/development/python-packages:
+ if [ -n $VENDOR_PYTHONPATH ]; then
+ ANDROID_PYTHONPATH=$ANDROID_PYTHONPATH$VENDOR_PYTHONPATH
+ fi
export PYTHONPATH=$ANDROID_PYTHONPATH$PYTHONPATH
export ANDROID_JAVA_HOME=$(get_abs_build_var ANDROID_JAVA_HOME)
@@ -992,6 +996,12 @@
-exec grep --color -n "$@" {} +
}
+function owngrep()
+{
+ find . -name .repo -prune -o -name .git -prune -o -path ./out -prune -o -type f -name 'OWNERS' \
+ -exec grep --color -n "$@" {} +
+}
+
function sepgrep()
{
find . -name .repo -prune -o -name .git -prune -o -path ./out -prune -o -name sepolicy -type d \
@@ -1552,6 +1562,7 @@
#
# This allows loading only approved vendorsetup.sh files
function source_vendorsetup() {
+ unset VENDOR_PYTHONPATH
allowed=
for f in $(find -L device vendor product -maxdepth 4 -name 'allowed-vendorsetup_sh-files' 2>/dev/null | sort); do
if [ -n "$allowed" ]; then
diff --git a/help.sh b/help.sh
index be07344..b02b14c 100755
--- a/help.sh
+++ b/help.sh
@@ -40,8 +40,8 @@
Stands for "Vendor, NO Dependencies"
pnod Quickly rebuild the product image from built packages
Stands for "Product, NO Dependencies"
- psnod Quickly rebuild the product_services image from built packages
- Stands for "ProductServices, NO Dependencies"
+ senod Quickly rebuild the system_ext image from built packages
+ Stands for "SystemExt, NO Dependencies"
onod Quickly rebuild the odm image from built packages
Stands for "ODM, NO Dependencies"
diff --git a/target/board/BoardConfigEmuCommon.mk b/target/board/BoardConfigEmuCommon.mk
index ac21918..f941918 100644
--- a/target/board/BoardConfigEmuCommon.mk
+++ b/target/board/BoardConfigEmuCommon.mk
@@ -35,9 +35,23 @@
# 3G + header
BOARD_SUPER_PARTITION_SIZE := 3229614080
BOARD_SUPER_PARTITION_GROUPS := emulator_dynamic_partitions
- BOARD_EMULATOR_DYNAMIC_PARTITIONS_PARTITION_LIST := \
- system \
- vendor
+
+ ifeq ($(QEMU_USE_SYSTEM_EXT_PARTITIONS),true)
+ BOARD_EMULATOR_DYNAMIC_PARTITIONS_PARTITION_LIST := \
+ system \
+ system_ext \
+ product \
+ vendor
+
+ TARGET_COPY_OUT_PRODUCT := product
+ BOARD_PRODUCTIMAGE_FILE_SYSTEM_TYPE := ext4
+ TARGET_COPY_OUT_SYSTEM_EXT := system_ext
+ BOARD_SYSTEM_EXTIMAGE_FILE_SYSTEM_TYPE := ext4
+ else
+ BOARD_EMULATOR_DYNAMIC_PARTITIONS_PARTITION_LIST := \
+ system \
+ vendor
+ endif
# 3G
BOARD_EMULATOR_DYNAMIC_PARTITIONS_SIZE := 3221225472
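Splitting product and system_ext out of the emulator's system image is opt-in via QEMU_USE_SYSTEM_EXT_PARTITIONS. One way to enable it, assuming the flag is picked up from the environment as board-config ifeq checks normally are:

    QEMU_USE_SYSTEM_EXT_PARTITIONS=true m    # adds product and system_ext to the dynamic partition list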
diff --git a/target/board/BoardConfigGsiCommon.mk b/target/board/BoardConfigGsiCommon.mk
index 702ef3c..4c783c0 100644
--- a/target/board/BoardConfigGsiCommon.mk
+++ b/target/board/BoardConfigGsiCommon.mk
@@ -19,8 +19,9 @@
# Enable dynamic system image size and reserved 64MB in it.
BOARD_SYSTEMIMAGE_PARTITION_RESERVED_SIZE := 67108864
-# GSI forces product packages to /system for now.
+# GSI forces product and system_ext packages to /system for now.
TARGET_COPY_OUT_PRODUCT := system/product
+TARGET_COPY_OUT_SYSTEM_EXT := system/system_ext
BOARD_PRODUCTIMAGE_FILE_SYSTEM_TYPE :=
# Creates metadata partition mount point under root for
@@ -40,9 +41,9 @@
# GSI specific System Properties
ifneq (,$(filter userdebug eng,$(TARGET_BUILD_VARIANT)))
-TARGET_SYSTEM_PROP := build/make/target/board/gsi_system.prop
+TARGET_SYSTEM_EXT_PROP := build/make/target/board/gsi_system_ext.prop
else
-TARGET_SYSTEM_PROP := build/make/target/board/gsi_system_user.prop
+TARGET_SYSTEM_EXT_PROP := build/make/target/board/gsi_system_ext_user.prop
endif
# Set this to create /cache mount point for non-A/B devices that mounts /cache.
@@ -55,7 +56,3 @@
# Disable 64 bit mediadrmserver
TARGET_ENABLE_MEDIADRM_64 :=
-
-# Ordinary (non-flattened) APEX may require kernel changes. For maximum compatibility,
-# use flattened APEX for GSI
-TARGET_FLATTEN_APEX := true
diff --git a/target/board/BoardConfigMainlineCommon.mk b/target/board/BoardConfigMainlineCommon.mk
index 9bc7e0c..52ba814 100644
--- a/target/board/BoardConfigMainlineCommon.mk
+++ b/target/board/BoardConfigMainlineCommon.mk
@@ -8,10 +8,15 @@
TARGET_USERIMAGES_USE_EXT4 := true
-# Mainline devices must have /vendor and /product partitions.
+# Mainline devices must have /system_ext, /vendor and /product partitions.
+TARGET_COPY_OUT_SYSTEM_EXT := system_ext
TARGET_COPY_OUT_VENDOR := vendor
TARGET_COPY_OUT_PRODUCT := product
+# Creates a metadata partition mount point under root for
+# devices with a metadata partition
+BOARD_USES_METADATA_PARTITION := true
+
BOARD_VNDK_VERSION := current
# Required flag for non-64 bit devices from P.
@@ -44,6 +49,3 @@
# Generate an APEX image for experiment b/119800099.
DEXPREOPT_GENERATE_APEX_IMAGE := true
-
-# Mainline devices support apex
-TARGET_FLATTEN_APEX := false
diff --git a/target/board/generic/system.prop b/target/board/generic/system_ext.prop
similarity index 100%
rename from target/board/generic/system.prop
rename to target/board/generic/system_ext.prop
diff --git a/target/board/generic_arm64/system.prop b/target/board/generic_arm64/system_ext.prop
similarity index 100%
rename from target/board/generic_arm64/system.prop
rename to target/board/generic_arm64/system_ext.prop
diff --git a/target/board/generic_x86/system.prop b/target/board/generic_x86/system_ext.prop
similarity index 100%
rename from target/board/generic_x86/system.prop
rename to target/board/generic_x86/system_ext.prop
diff --git a/target/board/generic_x86_64/system.prop b/target/board/generic_x86_64/system_ext.prop
similarity index 100%
rename from target/board/generic_x86_64/system.prop
rename to target/board/generic_x86_64/system_ext.prop
diff --git a/target/board/generic_x86_arm/BoardConfig.mk b/target/board/generic_x86_arm/BoardConfig.mk
index 6fae411..e879001 100644
--- a/target/board/generic_x86_arm/BoardConfig.mk
+++ b/target/board/generic_x86_arm/BoardConfig.mk
@@ -18,14 +18,10 @@
TARGET_ARCH := x86
TARGET_ARCH_VARIANT := x86
-TARGET_2ND_ARCH := arm
-TARGET_2ND_CPU_ABI := armeabi-v7a
-TARGET_2ND_CPU_ABI2 := armeabi
-TARGET_2ND_ARCH_VARIANT := armv7-a-neon
-TARGET_2ND_CPU_VARIANT := generic
-
-TARGET_CPU_ABI_LIST := x86 armeabi-v7a armeabi
-TARGET_TRANSLATE_2ND_ARCH := true
+TARGET_NATIVE_BRIDGE_ARCH := arm
+TARGET_NATIVE_BRIDGE_ARCH_VARIANT := armv7-a-neon
+TARGET_NATIVE_BRIDGE_CPU_VARIANT := generic
+TARGET_NATIVE_BRIDGE_ABI := armeabi-v7a armeabi
BUILD_BROKEN_DUP_RULES := true
diff --git a/target/board/generic_x86_arm/system.prop b/target/board/generic_x86_arm/system_ext.prop
similarity index 100%
rename from target/board/generic_x86_arm/system.prop
rename to target/board/generic_x86_arm/system_ext.prop
diff --git a/target/board/go_defaults.prop b/target/board/go_defaults.prop
new file mode 100644
index 0000000..93071cd
--- /dev/null
+++ b/target/board/go_defaults.prop
@@ -0,0 +1,15 @@
+#
+# Copyright (C) 2017 The Android Open Source Project
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
diff --git a/target/board/go_defaults_512.prop b/target/board/go_defaults_512.prop
new file mode 100644
index 0000000..a8eea9c
--- /dev/null
+++ b/target/board/go_defaults_512.prop
@@ -0,0 +1,23 @@
+#
+# Copyright (C) 2017 The Android Open Source Project
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+# 512MB specific properties.
+
+# lmkd can kill more now.
+ro.lmk.medium=700
+
+# madvise random in ART to reduce page cache thrashing.
+dalvik.vm.madvise-random=true
diff --git a/target/board/go_defaults_common.prop b/target/board/go_defaults_common.prop
new file mode 100644
index 0000000..d4989e0
--- /dev/null
+++ b/target/board/go_defaults_common.prop
@@ -0,0 +1,42 @@
+#
+# Copyright (C) 2017 The Android Open Source Project
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+# Sets Android Go recommended default values for properties.
+
+# Set lowram options
+ro.lmk.critical_upgrade=true
+ro.lmk.upgrade_pressure=40
+ro.lmk.downgrade_pressure=60
+ro.lmk.kill_heaviest_task=false
+ro.statsd.enable=true
+
+# set threshold to filter unused apps
+pm.dexopt.downgrade_after_inactive_days=10
+
+# Set the compiler filter for shared APKs to quicken.
+# Rationale: speed causes a lot of dex code expansion, so it uses more RAM and
+# space than quicken. Using quicken for shared APKs on Go devices may save RAM.
+# Note that this is a trade-off: we trade clean pages for dirty pages, plus
+# extra CPU and battery, because the quicken files will be JIT-ed in every
+# process that loads the shared APK, and the code cache is not shared.
+# Some notable apps that will be affected by this are gms and chrome.
+# b/65591595.
+pm.dexopt.shared=quicken
+
+# Default heap sizes. Allow up to 256m for large heaps to make sure a single app
+# doesn't take all of the RAM.
+dalvik.vm.heapgrowthlimit=128m
+dalvik.vm.heapsize=256m
diff --git a/target/board/gsi_system.prop b/target/board/gsi_system_ext.prop
similarity index 77%
rename from target/board/gsi_system.prop
rename to target/board/gsi_system_ext.prop
index 780aadc..dd3227e 100644
--- a/target/board/gsi_system.prop
+++ b/target/board/gsi_system_ext.prop
@@ -12,3 +12,8 @@
# TODO(b/78105955): disable privapp_permissions checking before the bug solved
ro.control_privapp_permissions=disable
+
+# TODO(b/136212765): the default for LMK
+ro.lmk.kill_heaviest_task=true
+ro.lmk.kill_timeout_ms=100
+ro.lmk.use_minfree_levels=true
diff --git a/target/board/gsi_system.prop b/target/board/gsi_system_ext_user.prop
similarity index 75%
copy from target/board/gsi_system.prop
copy to target/board/gsi_system_ext_user.prop
index 780aadc..db6d880 100644
--- a/target/board/gsi_system.prop
+++ b/target/board/gsi_system_ext_user.prop
@@ -1,9 +1,6 @@
# GSI always generate dex pre-opt in system image
ro.cp_system_other_odex=0
-# GSI always disables adb authentication
-ro.adb.secure=0
-
# GSI disables non-AOSP nnapi extensions on product partition
ro.nnapi.extensions.deny_on_product=true
@@ -12,3 +9,8 @@
# TODO(b/78105955): disable privapp_permissions checking before the bug solved
ro.control_privapp_permissions=disable
+
+# TODO(b/136212765): the default for LMK
+ro.lmk.kill_heaviest_task=true
+ro.lmk.kill_timeout_ms=100
+ro.lmk.use_minfree_levels=true
diff --git a/target/board/gsi_system_user.prop b/target/board/gsi_system_user.prop
deleted file mode 100644
index 217bd01..0000000
--- a/target/board/gsi_system_user.prop
+++ /dev/null
@@ -1,11 +0,0 @@
-# GSI always generate dex pre-opt in system image
-ro.cp_system_other_odex=0
-
-# GSI disables non-AOSP nnapi extensions on product partition
-ro.nnapi.extensions.deny_on_product=true
-
-# TODO(b/120679683): disable RescueParty before all problem apps solved
-persist.sys.disable_rescue=true
-
-# TODO(b/78105955): disable privapp_permissions checking before the bug solved
-ro.control_privapp_permissions=disable
diff --git a/target/product/aosp_arm.mk b/target/product/aosp_arm.mk
index 0fdd313..2ff2b20 100644
--- a/target/product/aosp_arm.mk
+++ b/target/product/aosp_arm.mk
@@ -23,19 +23,38 @@
# - VNDK enforcement
# - compatible property override enabled
-# GSI for system/product
-$(call inherit-product, $(SRC_TARGET_DIR)/product/gsi_common.mk)
+#
+# All components inherited here go to system image
+#
+$(call inherit-product, $(SRC_TARGET_DIR)/product/mainline_system.mk)
# Enable mainline checking for exactly this product name
ifeq (aosp_arm,$(TARGET_PRODUCT))
PRODUCT_ENFORCE_ARTIFACT_PATH_REQUIREMENTS := relaxed
endif
-# Emulator for vendor
+PRODUCT_ARTIFACT_PATH_REQUIREMENT_WHITELIST += \
+
+#
+# All components inherited here go to product image
+#
+$(call inherit-product, $(SRC_TARGET_DIR)/product/aosp_product.mk)
+
+#
+# All components inherited here go to vendor image
+#
$(call inherit-product-if-exists, device/generic/goldfish/arm32-vendor.mk)
$(call inherit-product, $(SRC_TARGET_DIR)/product/emulator_vendor.mk)
$(call inherit-product, $(SRC_TARGET_DIR)/board/generic_x86/device.mk)
+#
+# Special settings for GSI releasing
+#
+ifeq (aosp_arm,$(TARGET_PRODUCT))
+$(call inherit-product, $(SRC_TARGET_DIR)/product/gsi_release.mk)
+endif
+
+
PRODUCT_NAME := aosp_arm
PRODUCT_DEVICE := generic
PRODUCT_BRAND := Android
diff --git a/target/product/aosp_arm64.mk b/target/product/aosp_arm64.mk
index 8ef2023..cc4785a 100644
--- a/target/product/aosp_arm64.mk
+++ b/target/product/aosp_arm64.mk
@@ -28,14 +28,11 @@
# build quite specifically for the emulator, and might not be
# entirely appropriate to inherit from for on-device configurations.
-# GSI for system/product
+#
+# All components inherited here go to system image
+#
$(call inherit-product, $(SRC_TARGET_DIR)/product/core_64_bit.mk)
-$(call inherit-product, $(SRC_TARGET_DIR)/product/gsi_common.mk)
-
-# Emulator for vendor
-$(call inherit-product-if-exists, device/generic/goldfish/arm64-vendor.mk)
-$(call inherit-product, $(SRC_TARGET_DIR)/product/emulator_vendor.mk)
-$(call inherit-product, $(SRC_TARGET_DIR)/board/generic_arm64/device.mk)
+$(call inherit-product, $(SRC_TARGET_DIR)/product/mainline_system.mk)
# Enable mainline checking for exactly this product name
ifeq (aosp_arm64,$(TARGET_PRODUCT))
@@ -43,16 +40,27 @@
endif
PRODUCT_ARTIFACT_PATH_REQUIREMENT_WHITELIST += \
- root/init.zygote32_64.rc \
root/init.zygote64_32.rc \
-# Copy different zygote settings for vendor.img to select by setting property
-# ro.zygote=zygote64_32 or ro.zygote=zygote32_64:
-# 1. 64-bit primary, 32-bit secondary OR
-# 2. 32-bit primary, 64-bit secondary
-# init.zygote64_32.rc is in the core_64_bit.mk below
-PRODUCT_COPY_FILES += \
- system/core/rootdir/init.zygote32_64.rc:root/init.zygote32_64.rc
+#
+# All components inherited here go to product image
+#
+$(call inherit-product, $(SRC_TARGET_DIR)/product/aosp_product.mk)
+
+#
+# All components inherited here go to vendor image
+#
+$(call inherit-product-if-exists, device/generic/goldfish/arm64-vendor.mk)
+$(call inherit-product, $(SRC_TARGET_DIR)/product/emulator_vendor.mk)
+$(call inherit-product, $(SRC_TARGET_DIR)/board/generic_arm64/device.mk)
+
+#
+# Special settings for GSI releasing
+#
+ifeq (aosp_arm64,$(TARGET_PRODUCT))
+$(call inherit-product, $(SRC_TARGET_DIR)/product/gsi_release.mk)
+endif
+
PRODUCT_NAME := aosp_arm64
PRODUCT_DEVICE := generic_arm64
diff --git a/target/product/aosp_product.mk b/target/product/aosp_product.mk
new file mode 100644
index 0000000..cda977b
--- /dev/null
+++ b/target/product/aosp_product.mk
@@ -0,0 +1,59 @@
+#
+# Copyright (C) 2019 The Android Open Source Project
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+# Includes all AOSP product packages
+$(call inherit-product, $(SRC_TARGET_DIR)/product/handheld_product.mk)
+$(call inherit-product, $(SRC_TARGET_DIR)/product/telephony_product.mk)
+
+# Default AOSP sounds
+$(call inherit-product-if-exists, frameworks/base/data/sounds/AllAudio.mk)
+
+# TODO(b/133643923): Clean up the mainline whitelist
+PRODUCT_ARTIFACT_PATH_REQUIREMENT_WHITELIST += \
+ system/app/messaging/messaging.apk \
+ system/app/messaging/oat/% \
+ system/app/WAPPushManager/WAPPushManager.apk \
+ system/app/WAPPushManager/oat/% \
+ system/bin/healthd \
+ system/etc/init/healthd.rc \
+ system/etc/vintf/manifest/manifest_healthd.xml \
+ system/lib/libframesequence.so \
+ system/lib/libgiftranscode.so \
+ system/lib64/libframesequence.so \
+ system/lib64/libgiftranscode.so \
+
+
+# Additional settings used in all AOSP builds
+PRODUCT_PRODUCT_PROPERTIES += \
+ ro.config.ringtone=Ring_Synth_04.ogg \
+ ro.config.notification_sound=pixiedust.ogg \
+
+# More AOSP packages
+PRODUCT_PACKAGES += \
+ messaging \
+ PhotoTable \
+ WAPPushManager \
+ WallpaperPicker \
+
+# Telephony:
+# Provide a APN configuration to GSI product
+PRODUCT_COPY_FILES += \
+ device/sample/etc/apns-full-conf.xml:$(TARGET_COPY_OUT_PRODUCT)/etc/apns-conf.xml
+
+# NFC:
+# Provide a libnfc-nci.conf to GSI product
+PRODUCT_COPY_FILES += \
+ device/generic/common/nfc/libnfc-nci.conf:$(TARGET_COPY_OUT_PRODUCT)/etc/libnfc-nci.conf
diff --git a/target/product/aosp_x86.mk b/target/product/aosp_x86.mk
index 1c71948..e557aa8 100644
--- a/target/product/aosp_x86.mk
+++ b/target/product/aosp_x86.mk
@@ -23,19 +23,37 @@
# - VNDK enforcement
# - compatible property override enabled
-# GSI for system/product
-$(call inherit-product, $(SRC_TARGET_DIR)/product/gsi_common.mk)
-
-# Emulator for vendor
-$(call inherit-product-if-exists, device/generic/goldfish/x86-vendor.mk)
-$(call inherit-product, $(SRC_TARGET_DIR)/product/emulator_vendor.mk)
-$(call inherit-product, $(SRC_TARGET_DIR)/board/generic_x86/device.mk)
+#
+# All components inherited here go to system image
+#
+$(call inherit-product, $(SRC_TARGET_DIR)/product/mainline_system.mk)
# Enable mainline checking for exactly this product name
ifeq (aosp_x86,$(TARGET_PRODUCT))
PRODUCT_ENFORCE_ARTIFACT_PATH_REQUIREMENTS := relaxed
endif
+#
+# All components inherited here go to product image
+#
+$(call inherit-product, $(SRC_TARGET_DIR)/product/aosp_product.mk)
+
+#
+# All components inherited here go to vendor image
+#
+$(call inherit-product-if-exists, device/generic/goldfish/x86-vendor.mk)
+$(call inherit-product, $(SRC_TARGET_DIR)/product/emulator_vendor.mk)
+$(call inherit-product, $(SRC_TARGET_DIR)/board/generic_x86/device.mk)
+
+
+#
+# Special settings for GSI releasing
+#
+ifeq (aosp_x86,$(TARGET_PRODUCT))
+$(call inherit-product, $(SRC_TARGET_DIR)/product/gsi_release.mk)
+endif
+
+
PRODUCT_NAME := aosp_x86
PRODUCT_DEVICE := generic_x86
PRODUCT_BRAND := Android
diff --git a/target/product/aosp_x86_64.mk b/target/product/aosp_x86_64.mk
index 9dfa2f4..a471702 100644
--- a/target/product/aosp_x86_64.mk
+++ b/target/product/aosp_x86_64.mk
@@ -28,14 +28,11 @@
# build quite specifically for the emulator, and might not be
# entirely appropriate to inherit from for on-device configurations.
-# GSI for system/product
+#
+# All components inherited here go to system image
+#
$(call inherit-product, $(SRC_TARGET_DIR)/product/core_64_bit.mk)
-$(call inherit-product, $(SRC_TARGET_DIR)/product/gsi_common.mk)
-
-# Emulator for vendor
-$(call inherit-product-if-exists, device/generic/goldfish/x86_64-vendor.mk)
-$(call inherit-product, $(SRC_TARGET_DIR)/product/emulator_vendor.mk)
-$(call inherit-product, $(SRC_TARGET_DIR)/board/generic_x86_64/device.mk)
+$(call inherit-product, $(SRC_TARGET_DIR)/product/mainline_system.mk)
# Enable mainline checking for exactly this product name
ifeq (aosp_x86_64,$(TARGET_PRODUCT))
@@ -43,16 +40,27 @@
endif
PRODUCT_ARTIFACT_PATH_REQUIREMENT_WHITELIST += \
- root/init.zygote32_64.rc \
root/init.zygote64_32.rc \
-# Copy different zygote settings for vendor.img to select by setting property
-# ro.zygote=zygote64_32 or ro.zygote=zygote32_64:
-# 1. 64-bit primary, 32-bit secondary OR
-# 2. 32-bit primary, 64-bit secondary
-# init.zygote64_32.rc is in the core_64_bit.mk below
-PRODUCT_COPY_FILES += \
- system/core/rootdir/init.zygote32_64.rc:root/init.zygote32_64.rc
+#
+# All components inherited here go to product image
+#
+$(call inherit-product, $(SRC_TARGET_DIR)/product/aosp_product.mk)
+
+#
+# All components inherited here go to vendor image
+#
+$(call inherit-product-if-exists, device/generic/goldfish/x86_64-vendor.mk)
+$(call inherit-product, $(SRC_TARGET_DIR)/product/emulator_vendor.mk)
+$(call inherit-product, $(SRC_TARGET_DIR)/board/generic_x86_64/device.mk)
+
+#
+# Special settings for GSI releasing
+#
+ifeq (aosp_x86_64,$(TARGET_PRODUCT))
+$(call inherit-product, $(SRC_TARGET_DIR)/product/gsi_release.mk)
+endif
+
PRODUCT_NAME := aosp_x86_64
PRODUCT_DEVICE := generic_x86_64
diff --git a/target/product/base_product.mk b/target/product/base_product.mk
index 82557bf..749d2c2 100644
--- a/target/product/base_product.mk
+++ b/target/product/base_product.mk
@@ -16,7 +16,9 @@
# Base modules and settings for the product partition.
PRODUCT_PACKAGES += \
+ group_product \
healthd \
ModuleMetadata \
+ passwd_product \
product_compatibility_matrix.xml \
product_manifest.xml \
diff --git a/target/product/base_system.mk b/target/product/base_system.mk
index 162fbed..44fc7e4 100644
--- a/target/product/base_system.mk
+++ b/target/product/base_system.mk
@@ -19,6 +19,9 @@
abb \
adbd \
am \
+ android.hardware.neuralnetworks@1.0 \
+ android.hardware.neuralnetworks@1.1 \
+ android.hardware.neuralnetworks@1.2 \
android.hidl.allocator@1.0-service \
android.hidl.base-V1.0-java \
android.hidl.manager-V1.0-java \
@@ -51,8 +54,12 @@
charger \
cmd \
com.android.conscrypt \
+ com.android.i18n \
com.android.location.provider \
+ com.android.media \
+ com.android.media.swcodec \
com.android.resolv \
+ com.android.neuralnetworks \
com.android.tzdata \
ContactsProvider \
content \
@@ -60,13 +67,14 @@
CtsShimPrebuilt \
CtsShimPrivPrebuilt \
debuggerd\
- DefaultContainerService \
+ device_config \
dmctl \
dnsmasq \
DownloadProvider \
dpm \
dumpstate \
dumpsys \
+ DynamicSystemInstallationService \
e2fsck \
ExtServices \
ExtShared \
@@ -77,7 +85,9 @@
fsck_msdos \
fs_config_files_system \
fs_config_dirs_system \
+ group_system \
gsid \
+ gsi_tool \
heapprofd \
heapprofd_client \
gatekeeperd \
@@ -85,6 +95,8 @@
hid \
hwservicemanager \
idmap \
+ idmap2 \
+ idmap2d \
ime \
ims-common \
incident \
@@ -105,6 +117,7 @@
ld.config.txt \
ld.mc \
libaaudio \
+ libamidi \
libandroid \
libandroidfw \
libandroid_runtime \
@@ -112,10 +125,6 @@
libartpalette-system \
libashmemd_client \
libaudioeffect_jni \
- libaudioflinger \
- libaudiopolicymanager \
- libaudiopolicyservice \
- libaudioutils \
libbinder \
libbinder_ndk \
libc.bootstrap \
@@ -148,12 +157,10 @@
libmedia \
libmedia_jni \
libmediandk \
- libmediaplayerservice \
libmtp \
libnetd_client \
libnetlink \
libnetutils \
- libneuralnetworks \
libOpenMAXAL \
libOpenSLES \
libpdfium \
@@ -163,12 +170,11 @@
libradio_metadata \
librtp_jni \
libsensorservice \
+ libsfplugin_ccodec \
libskia \
libsonic \
libsonivox \
libsoundpool \
- libsoundtrigger \
- libsoundtriggerservice \
libspeexresampler \
libsqlite \
libstagefright \
@@ -189,6 +195,7 @@
linker \
linkerconfig \
lmkd \
+ LocalTransport \
locksettings \
logcat \
logd \
@@ -196,6 +203,7 @@
lshal \
mdnsd \
media \
+ mediacodec.policy \
mediadrmserver \
mediaextractor \
mediametrics \
@@ -210,7 +218,10 @@
NetworkStack \
org.apache.http.legacy \
otacerts \
+ PackageInstaller \
+ passwd_system \
perfetto \
+ PermissionController \
ping \
ping6 \
platform.xml \
@@ -222,6 +233,7 @@
resize2fs \
rss_hwm_reset \
run-as \
+ sanitizer.libraries.txt \
schedtest \
screencap \
sdcard \
@@ -246,7 +258,6 @@
tc \
telecom \
telephony-common \
- thermalserviced \
tombstoned \
traced \
traced_probes \
@@ -281,8 +292,9 @@
e2fsck \
fastboot \
flags_health_check \
- icu-data_host_runtime_apex \
+ icu-data_host_i18n_apex \
icu_tzdata.dat_host_tzdata_apex \
+ idmap2 \
incident_report \
ld.mc \
lpdump \
@@ -316,10 +328,12 @@
ext \
telephony-common \
voip-common \
- ims-common
-PRODUCT_UPDATABLE_BOOT_MODULES := conscrypt
+ ims-common \
+ updatable-media
+PRODUCT_UPDATABLE_BOOT_MODULES := conscrypt updatable-media
PRODUCT_UPDATABLE_BOOT_LOCATIONS := \
- /apex/com.android.conscrypt/javalib/conscrypt.jar
+ /apex/com.android.conscrypt/javalib/conscrypt.jar \
+ /apex/com.android.media/javalib/updatable-media.jar
PRODUCT_COPY_FILES += \
@@ -350,6 +364,7 @@
gdbserver \
init-debug.rc \
iotop \
+ iperf3 \
iw \
logpersist.start \
logtagd.rc \
@@ -374,7 +389,9 @@
WallpaperBackup
# Packages included only for eng/userdebug builds, when building with SANITIZE_TARGET=address
-PRODUCT_PACKAGES_DEBUG_ASAN :=
+PRODUCT_PACKAGES_DEBUG_ASAN := \
+ fuzz \
+ honggfuzz
PRODUCT_PACKAGES_DEBUG_JAVA_COVERAGE := \
libdumpcoverage
diff --git a/target/product/base_vendor.mk b/target/product/base_vendor.mk
index bb39cd9..f3705ea 100644
--- a/target/product/base_vendor.mk
+++ b/target/product/base_vendor.mk
@@ -36,18 +36,20 @@
make_f2fs \
PRODUCT_HOST_PACKAGES += \
- icu-data_host_runtime_apex
+ icu-data_host_i18n_apex
# Base modules and settings for the vendor partition.
PRODUCT_PACKAGES += \
- android.hardware.cas@1.0-service \
+ android.hardware.cas@1.1-service \
android.hardware.configstore@1.1-service \
android.hardware.media.omx@1.0-service \
dumpsys_vendor \
fs_config_files_nonsystem \
fs_config_dirs_nonsystem \
gralloc.default \
- group \
+ group_odm \
+ group_system_ext \
+ group_vendor \
init_vendor \
libashmemd_hidl_client \
libbundlewrapper \
@@ -62,7 +64,9 @@
libreverbwrapper \
libril \
libvisualizer \
- passwd \
+ passwd_odm \
+ passwd_system_ext \
+ passwd_vendor \
selinux_policy_nonsystem \
shell_and_utilities_vendor \
vndservice \
@@ -71,3 +75,7 @@
# VINTF data for vendor image
PRODUCT_PACKAGES += \
device_compatibility_matrix.xml \
+
+PRODUCT_PACKAGES += \
+ libprotobuf-cpp-lite-vendorcompat \
+ libprotobuf-cpp-full-vendorcompat \
diff --git a/target/product/emulator.mk b/target/product/emulator.mk
index f6e1011..322eab4 100644
--- a/target/product/emulator.mk
+++ b/target/product/emulator.mk
@@ -53,14 +53,14 @@
#config.disable_location=true
# Enable Perfetto traced
-PRODUCT_SYSTEM_DEFAULT_PROPERTIES += \
+PRODUCT_SYSTEM_EXT_PROPERTIES += \
persist.traced.enable=1
# enable Google-specific location features,
# like NetworkLocationProvider and LocationCollector
-PRODUCT_SYSTEM_DEFAULT_PROPERTIES += \
+PRODUCT_SYSTEM_EXT_PROPERTIES += \
ro.com.google.locationfeatures=1
# disable setupwizard
-PRODUCT_SYSTEM_DEFAULT_PROPERTIES += \
+PRODUCT_SYSTEM_EXT_PROPERTIES += \
ro.setupwizard.mode=DISABLED
diff --git a/target/product/emulator_vendor.mk b/target/product/emulator_vendor.mk
index f0a5354..9c3be8e 100644
--- a/target/product/emulator_vendor.mk
+++ b/target/product/emulator_vendor.mk
@@ -29,6 +29,7 @@
system/lib/egl/libGLES_android.so \
system/lib64/egl/libGLES_android.so \
system/priv-app/SdkSetup/SdkSetup.apk \
+ system/priv-app/SdkSetup/oat/% \
# Device modules
PRODUCT_PACKAGES += \
@@ -57,14 +58,14 @@
#config.disable_location=true
# Enable Perfetto traced
-PRODUCT_SYSTEM_DEFAULT_PROPERTIES += \
+PRODUCT_SYSTEM_EXT_PROPERTIES += \
persist.traced.enable=1
# enable Google-specific location features,
# like NetworkLocationProvider and LocationCollector
-PRODUCT_SYSTEM_DEFAULT_PROPERTIES += \
+PRODUCT_SYSTEM_EXT_PROPERTIES += \
ro.com.google.locationfeatures=1
# disable setupwizard
-PRODUCT_SYSTEM_DEFAULT_PROPERTIES += \
+PRODUCT_SYSTEM_EXT_PROPERTIES += \
ro.setupwizard.mode=DISABLED
diff --git a/target/product/go_defaults.mk b/target/product/go_defaults.mk
index cb9383f..b717486 100644
--- a/target/product/go_defaults.mk
+++ b/target/product/go_defaults.mk
@@ -17,3 +17,6 @@
# Inherit common Android Go defaults.
$(call inherit-product, build/make/target/product/go_defaults_common.mk)
+# Add the system properties.
+TARGET_SYSTEM_PROP += \
+ build/make/target/board/go_defaults.prop
diff --git a/target/product/go_defaults_512.mk b/target/product/go_defaults_512.mk
index 985912f..70d067e 100644
--- a/target/product/go_defaults_512.mk
+++ b/target/product/go_defaults_512.mk
@@ -17,12 +17,6 @@
# Inherit common Android Go defaults.
$(call inherit-product, build/make/target/product/go_defaults_common.mk)
-# 512MB specific properties.
-
-# lmkd can kill more now.
-PRODUCT_PROPERTY_OVERRIDES += \
- ro.lmk.medium=700 \
-
-# madvise random in ART to reduce page cache thrashing.
-PRODUCT_PROPERTY_OVERRIDES += \
- dalvik.vm.madvise-random=true
+# Add the system properties.
+TARGET_SYSTEM_PROP += \
+ build/make/target/board/go_defaults_512.prop
diff --git a/target/product/go_defaults_common.mk b/target/product/go_defaults_common.mk
index 7042f6d..d4655f1 100644
--- a/target/product/go_defaults_common.mk
+++ b/target/product/go_defaults_common.mk
@@ -14,21 +14,13 @@
# limitations under the License.
#
-# Sets Android Go recommended default values for propreties.
+# Sets Android Go recommended default product options.
-# Set lowram options
+
+# Set lowram options and enable traced by default
PRODUCT_PROPERTY_OVERRIDES += \
ro.config.low_ram=true \
- ro.lmk.critical_upgrade=true \
- ro.lmk.upgrade_pressure=40 \
- ro.lmk.downgrade_pressure=60 \
- ro.lmk.kill_heaviest_task=false \
- ro.statsd.enable=false
-
-# set threshold to filter unused apps
-PRODUCT_PROPERTY_OVERRIDES += \
- pm.dexopt.downgrade_after_inactive_days=10
-
+ persist.traced.enable=1 \
# Speed profile services and wifi-service to reduce RAM and storage.
PRODUCT_SYSTEM_SERVER_COMPILER_FILTER := speed-profile
@@ -42,27 +34,12 @@
PRODUCT_USE_PROFILE_FOR_BOOT_IMAGE := true
PRODUCT_DEX_PREOPT_BOOT_IMAGE_PROFILE_LOCATION := frameworks/base/config/boot-image-profile.txt
-# set the compiler filter for shared apks to quicken.
-# Rationale: speed has a lot of dex code expansion, it uses more ram and space
-# compared to quicken. Using quicken for shared APKs on Go devices may save RAM.
-# Note that this is a trade-off: here we trade clean pages for dirty pages,
-# extra cpu and battery. That's because the quicken files will be jit-ed in all
-# the processes that load of shared apk and the code cache is not shared.
-# Some notable apps that will be affected by this are gms and chrome.
-# b/65591595.
-PRODUCT_PROPERTY_OVERRIDES += \
- pm.dexopt.shared=quicken
-
-# Default heap sizes. Allow up to 256m for large heaps to make sure a single app
-# doesn't take all of the RAM.
-PRODUCT_PROPERTY_OVERRIDES += dalvik.vm.heapgrowthlimit=128m
-PRODUCT_PROPERTY_OVERRIDES += dalvik.vm.heapsize=256m
-
# Do not generate libartd.
PRODUCT_ART_TARGET_INCLUDE_DEBUG_BUILD := false
# Do not spin up a separate process for the network stack on go devices, use an in-process APK.
PRODUCT_PACKAGES += InProcessNetworkStack
+PRODUCT_PACKAGES += CellBroadcastAppPlatform
# Strip the local variable table and the local variable type table to reduce
# the size of the system image. This has no bearing on stack traces, but will
@@ -73,3 +50,14 @@
ifneq (,$(filter eng, $(TARGET_BUILD_VARIANT)))
PRODUCT_DISABLE_SCUDO := true
endif
+
+# Add the system properties.
+TARGET_SYSTEM_PROP += \
+ build/make/target/board/go_defaults_common.prop
+
+# Use the Go-specific handheld_core_hardware.xml from frameworks
+PRODUCT_COPY_FILES += \
+ frameworks/native/data/etc/go_handheld_core_hardware.xml:$(TARGET_COPY_OUT_VENDOR)/etc/permissions/handheld_core_hardware.xml
+
+# Dedupe VNDK libraries with identical core variants.
+TARGET_VNDK_USE_CORE_VARIANT := true
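The Go property defaults now live in checked-in *.prop files appended through TARGET_SYSTEM_PROP rather than set via PRODUCT_PROPERTY_OVERRIDES. Device configs can layer their own file the same way; the path here is only illustrative:

    TARGET_SYSTEM_PROP += device/vendorX/deviceY/go_overrides.prop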
diff --git a/target/product/gsi/29.txt b/target/product/gsi/29.txt
new file mode 100644
index 0000000..14faba5
--- /dev/null
+++ b/target/product/gsi/29.txt
@@ -0,0 +1,273 @@
+LLNDK: libEGL.so
+LLNDK: libGLESv1_CM.so
+LLNDK: libGLESv2.so
+LLNDK: libGLESv3.so
+LLNDK: libRS.so
+LLNDK: libandroid_net.so
+LLNDK: libc.so
+LLNDK: libcgrouprc.so
+LLNDK: libdl.so
+LLNDK: libft2.so
+LLNDK: liblog.so
+LLNDK: libm.so
+LLNDK: libmediandk.so
+LLNDK: libnativewindow.so
+LLNDK: libneuralnetworks.so
+LLNDK: libsync.so
+LLNDK: libvndksupport.so
+LLNDK: libvulkan.so
+VNDK-SP: android.hardware.graphics.common@1.0.so
+VNDK-SP: android.hardware.graphics.common@1.1.so
+VNDK-SP: android.hardware.graphics.common@1.2.so
+VNDK-SP: android.hardware.graphics.mapper@2.0.so
+VNDK-SP: android.hardware.graphics.mapper@2.1.so
+VNDK-SP: android.hardware.graphics.mapper@3.0.so
+VNDK-SP: android.hardware.renderscript@1.0.so
+VNDK-SP: android.hidl.memory.token@1.0.so
+VNDK-SP: android.hidl.memory@1.0.so
+VNDK-SP: android.hidl.memory@1.0-impl.so
+VNDK-SP: android.hidl.safe_union@1.0.so
+VNDK-SP: libRSCpuRef.so
+VNDK-SP: libRSDriver.so
+VNDK-SP: libRS_internal.so
+VNDK-SP: libbacktrace.so
+VNDK-SP: libbase.so
+VNDK-SP: libbcinfo.so
+VNDK-SP: libbinderthreadstate.so
+VNDK-SP: libblas.so
+VNDK-SP: libc++.so
+VNDK-SP: libcompiler_rt.so
+VNDK-SP: libcutils.so
+VNDK-SP: libhardware.so
+VNDK-SP: libhidlbase.so
+VNDK-SP: libhidlmemory.so
+VNDK-SP: libhidltransport.so
+VNDK-SP: libhwbinder.so
+VNDK-SP: libhwbinder_noltopgo.so
+VNDK-SP: libion.so
+VNDK-SP: libjsoncpp.so
+VNDK-SP: liblzma.so
+VNDK-SP: libprocessgroup.so
+VNDK-SP: libunwindstack.so
+VNDK-SP: libutils.so
+VNDK-SP: libutilscallstack.so
+VNDK-SP: libz.so
+VNDK-core: android.frameworks.cameraservice.common@2.0.so
+VNDK-core: android.frameworks.cameraservice.device@2.0.so
+VNDK-core: android.frameworks.cameraservice.service@2.0.so
+VNDK-core: android.frameworks.displayservice@1.0.so
+VNDK-core: android.frameworks.schedulerservice@1.0.so
+VNDK-core: android.frameworks.sensorservice@1.0.so
+VNDK-core: android.frameworks.stats@1.0.so
+VNDK-core: android.frameworks.vr.composer@1.0.so
+VNDK-core: android.hardware.atrace@1.0.so
+VNDK-core: android.hardware.audio.common@2.0.so
+VNDK-core: android.hardware.audio.common@4.0.so
+VNDK-core: android.hardware.audio.common@5.0.so
+VNDK-core: android.hardware.audio.effect@2.0.so
+VNDK-core: android.hardware.audio.effect@4.0.so
+VNDK-core: android.hardware.audio.effect@5.0.so
+VNDK-core: android.hardware.audio@2.0.so
+VNDK-core: android.hardware.audio@4.0.so
+VNDK-core: android.hardware.audio@5.0.so
+VNDK-core: android.hardware.authsecret@1.0.so
+VNDK-core: android.hardware.automotive.audiocontrol@1.0.so
+VNDK-core: android.hardware.automotive.evs@1.0.so
+VNDK-core: android.hardware.automotive.vehicle@2.0.so
+VNDK-core: android.hardware.biometrics.face@1.0.so
+VNDK-core: android.hardware.biometrics.fingerprint@2.1.so
+VNDK-core: android.hardware.bluetooth.a2dp@1.0.so
+VNDK-core: android.hardware.bluetooth.audio@2.0.so
+VNDK-core: android.hardware.bluetooth@1.0.so
+VNDK-core: android.hardware.boot@1.0.so
+VNDK-core: android.hardware.broadcastradio@1.0.so
+VNDK-core: android.hardware.broadcastradio@1.1.so
+VNDK-core: android.hardware.broadcastradio@2.0.so
+VNDK-core: android.hardware.camera.common@1.0.so
+VNDK-core: android.hardware.camera.device@1.0.so
+VNDK-core: android.hardware.camera.device@3.2.so
+VNDK-core: android.hardware.camera.device@3.3.so
+VNDK-core: android.hardware.camera.device@3.4.so
+VNDK-core: android.hardware.camera.device@3.5.so
+VNDK-core: android.hardware.camera.metadata@3.2.so
+VNDK-core: android.hardware.camera.metadata@3.3.so
+VNDK-core: android.hardware.camera.metadata@3.4.so
+VNDK-core: android.hardware.camera.provider@2.4.so
+VNDK-core: android.hardware.camera.provider@2.5.so
+VNDK-core: android.hardware.cas.native@1.0.so
+VNDK-core: android.hardware.cas@1.0.so
+VNDK-core: android.hardware.cas@1.1.so
+VNDK-core: android.hardware.configstore-utils.so
+VNDK-core: android.hardware.configstore@1.0.so
+VNDK-core: android.hardware.configstore@1.1.so
+VNDK-core: android.hardware.confirmationui-support-lib.so
+VNDK-core: android.hardware.confirmationui@1.0.so
+VNDK-core: android.hardware.contexthub@1.0.so
+VNDK-core: android.hardware.drm@1.0.so
+VNDK-core: android.hardware.drm@1.1.so
+VNDK-core: android.hardware.drm@1.2.so
+VNDK-core: android.hardware.dumpstate@1.0.so
+VNDK-core: android.hardware.fastboot@1.0.so
+VNDK-core: android.hardware.gatekeeper@1.0.so
+VNDK-core: android.hardware.gnss.measurement_corrections@1.0.so
+VNDK-core: android.hardware.gnss.visibility_control@1.0.so
+VNDK-core: android.hardware.gnss@1.0.so
+VNDK-core: android.hardware.gnss@1.1.so
+VNDK-core: android.hardware.gnss@2.0.so
+VNDK-core: android.hardware.graphics.allocator@2.0.so
+VNDK-core: android.hardware.graphics.allocator@3.0.so
+VNDK-core: android.hardware.graphics.bufferqueue@1.0.so
+VNDK-core: android.hardware.graphics.bufferqueue@2.0.so
+VNDK-core: android.hardware.graphics.composer@2.1.so
+VNDK-core: android.hardware.graphics.composer@2.2.so
+VNDK-core: android.hardware.graphics.composer@2.3.so
+VNDK-core: android.hardware.health.storage@1.0.so
+VNDK-core: android.hardware.health@1.0.so
+VNDK-core: android.hardware.health@2.0.so
+VNDK-core: android.hardware.input.classifier@1.0.so
+VNDK-core: android.hardware.input.common@1.0.so
+VNDK-core: android.hardware.ir@1.0.so
+VNDK-core: android.hardware.keymaster@3.0.so
+VNDK-core: android.hardware.keymaster@4.0.so
+VNDK-core: android.hardware.light@2.0.so
+VNDK-core: android.hardware.media.bufferpool@1.0.so
+VNDK-core: android.hardware.media.bufferpool@2.0.so
+VNDK-core: android.hardware.media.c2@1.0.so
+VNDK-core: android.hardware.media.omx@1.0.so
+VNDK-core: android.hardware.media@1.0.so
+VNDK-core: android.hardware.memtrack@1.0.so
+VNDK-core: android.hardware.neuralnetworks@1.0.so
+VNDK-core: android.hardware.neuralnetworks@1.1.so
+VNDK-core: android.hardware.neuralnetworks@1.2.so
+VNDK-core: android.hardware.nfc@1.0.so
+VNDK-core: android.hardware.nfc@1.1.so
+VNDK-core: android.hardware.nfc@1.2.so
+VNDK-core: android.hardware.oemlock@1.0.so
+VNDK-core: android.hardware.power.stats@1.0.so
+VNDK-core: android.hardware.power@1.0.so
+VNDK-core: android.hardware.power@1.1.so
+VNDK-core: android.hardware.power@1.2.so
+VNDK-core: android.hardware.power@1.3.so
+VNDK-core: android.hardware.radio.config@1.0.so
+VNDK-core: android.hardware.radio.config@1.1.so
+VNDK-core: android.hardware.radio.config@1.2.so
+VNDK-core: android.hardware.radio.deprecated@1.0.so
+VNDK-core: android.hardware.radio@1.0.so
+VNDK-core: android.hardware.radio@1.1.so
+VNDK-core: android.hardware.radio@1.2.so
+VNDK-core: android.hardware.radio@1.3.so
+VNDK-core: android.hardware.radio@1.4.so
+VNDK-core: android.hardware.secure_element@1.0.so
+VNDK-core: android.hardware.secure_element@1.1.so
+VNDK-core: android.hardware.sensors@1.0.so
+VNDK-core: android.hardware.sensors@2.0.so
+VNDK-core: android.hardware.soundtrigger@2.0.so
+VNDK-core: android.hardware.soundtrigger@2.0-core.so
+VNDK-core: android.hardware.soundtrigger@2.1.so
+VNDK-core: android.hardware.soundtrigger@2.2.so
+VNDK-core: android.hardware.tetheroffload.config@1.0.so
+VNDK-core: android.hardware.tetheroffload.control@1.0.so
+VNDK-core: android.hardware.thermal@1.0.so
+VNDK-core: android.hardware.thermal@1.1.so
+VNDK-core: android.hardware.thermal@2.0.so
+VNDK-core: android.hardware.tv.cec@1.0.so
+VNDK-core: android.hardware.tv.cec@2.0.so
+VNDK-core: android.hardware.tv.input@1.0.so
+VNDK-core: android.hardware.usb.gadget@1.0.so
+VNDK-core: android.hardware.usb@1.0.so
+VNDK-core: android.hardware.usb@1.1.so
+VNDK-core: android.hardware.usb@1.2.so
+VNDK-core: android.hardware.vibrator@1.0.so
+VNDK-core: android.hardware.vibrator@1.1.so
+VNDK-core: android.hardware.vibrator@1.2.so
+VNDK-core: android.hardware.vibrator@1.3.so
+VNDK-core: android.hardware.vr@1.0.so
+VNDK-core: android.hardware.weaver@1.0.so
+VNDK-core: android.hardware.wifi.hostapd@1.0.so
+VNDK-core: android.hardware.wifi.hostapd@1.1.so
+VNDK-core: android.hardware.wifi.offload@1.0.so
+VNDK-core: android.hardware.wifi.supplicant@1.0.so
+VNDK-core: android.hardware.wifi.supplicant@1.1.so
+VNDK-core: android.hardware.wifi.supplicant@1.2.so
+VNDK-core: android.hardware.wifi@1.0.so
+VNDK-core: android.hardware.wifi@1.1.so
+VNDK-core: android.hardware.wifi@1.2.so
+VNDK-core: android.hardware.wifi@1.3.so
+VNDK-core: android.hidl.allocator@1.0.so
+VNDK-core: android.hidl.memory.block@1.0.so
+VNDK-core: android.hidl.token@1.0.so
+VNDK-core: android.hidl.token@1.0-utils.so
+VNDK-core: android.system.net.netd@1.0.so
+VNDK-core: android.system.net.netd@1.1.so
+VNDK-core: android.system.suspend@1.0.so
+VNDK-core: android.system.wifi.keystore@1.0.so
+VNDK-core: libadf.so
+VNDK-core: libaudioroute.so
+VNDK-core: libaudioutils.so
+VNDK-core: libbinder.so
+VNDK-core: libcamera_metadata.so
+VNDK-core: libcap.so
+VNDK-core: libcn-cbor.so
+VNDK-core: libcodec2.so
+VNDK-core: libcrypto.so
+VNDK-core: libcrypto_utils.so
+VNDK-core: libcurl.so
+VNDK-core: libdiskconfig.so
+VNDK-core: libdumpstateutil.so
+VNDK-core: libevent.so
+VNDK-core: libexif.so
+VNDK-core: libexpat.so
+VNDK-core: libfmq.so
+VNDK-core: libgatekeeper.so
+VNDK-core: libgui.so
+VNDK-core: libhardware_legacy.so
+VNDK-core: libhidlallocatorutils.so
+VNDK-core: libhidlcache.so
+VNDK-core: libjpeg.so
+VNDK-core: libkeymaster_messages.so
+VNDK-core: libkeymaster_portable.so
+VNDK-core: libldacBT_abr.so
+VNDK-core: libldacBT_enc.so
+VNDK-core: liblz4.so
+VNDK-core: libmedia_helper.so
+VNDK-core: libmedia_omx.so
+VNDK-core: libmemtrack.so
+VNDK-core: libminijail.so
+VNDK-core: libmkbootimg_abi_check.so
+VNDK-core: libnetutils.so
+VNDK-core: libnl.so
+VNDK-core: libpcre2.so
+VNDK-core: libpiex.so
+VNDK-core: libpng.so
+VNDK-core: libpower.so
+VNDK-core: libprocinfo.so
+VNDK-core: libprotobuf-cpp-full.so
+VNDK-core: libprotobuf-cpp-lite.so
+VNDK-core: libpuresoftkeymasterdevice.so
+VNDK-core: libradio_metadata.so
+VNDK-core: libselinux.so
+VNDK-core: libsoftkeymasterdevice.so
+VNDK-core: libspeexresampler.so
+VNDK-core: libsqlite.so
+VNDK-core: libssl.so
+VNDK-core: libstagefright_bufferpool@2.0.so
+VNDK-core: libstagefright_bufferqueue_helper.so
+VNDK-core: libstagefright_foundation.so
+VNDK-core: libstagefright_omx.so
+VNDK-core: libstagefright_omx_utils.so
+VNDK-core: libstagefright_xmlparser.so
+VNDK-core: libsysutils.so
+VNDK-core: libtinyalsa.so
+VNDK-core: libtinyxml2.so
+VNDK-core: libui.so
+VNDK-core: libusbhost.so
+VNDK-core: libwifi-system-iface.so
+VNDK-core: libxml2.so
+VNDK-core: libyuv.so
+VNDK-core: libziparchive.so
+VNDK-private: libbacktrace.so
+VNDK-private: libbinderthreadstate.so
+VNDK-private: libblas.so
+VNDK-private: libcompiler_rt.so
+VNDK-private: libft2.so
+VNDK-private: libgui.so
diff --git a/target/product/gsi/Android.mk b/target/product/gsi/Android.mk
index 5693234..1987c9c 100644
--- a/target/product/gsi/Android.mk
+++ b/target/product/gsi/Android.mk
@@ -38,6 +38,7 @@
droidcore: check-vndk-list
check-vndk-list-timestamp := $(call intermediates-dir-for,PACKAGING,vndk)/check-list-timestamp
+check-vndk-abi-dump-list-timestamp := $(call intermediates-dir-for,PACKAGING,vndk)/check-abi-dump-list-timestamp
ifeq ($(TARGET_IS_64_BIT)|$(TARGET_2ND_ARCH),true|)
# TODO(b/110429754) remove this condition when we support 64-bit-only device
@@ -50,6 +51,9 @@
check-vndk-list: ;
else
check-vndk-list: $(check-vndk-list-timestamp)
+ifneq ($(SKIP_ABI_CHECKS),true)
+check-vndk-list: $(check-vndk-abi-dump-list-timestamp)
+endif
endif
_vndk_check_failure_message := " error: VNDK library list has been changed.\n"
@@ -97,16 +101,57 @@
endif
@chmod a+x $@
+#####################################################################
+# Check that all ABI reference dumps have corresponding NDK/VNDK
+# libraries.
+
+# $(1): The directory containing ABI dumps.
+# Return a list of ABI dump paths ending with .so.lsdump.
+define find-abi-dump-paths
+$(if $(wildcard $(1)), \
+ $(addprefix $(1)/, \
+ $(call find-files-in-subdirs,$(1),"*.so.lsdump" -and -type f,.)))
+endef
+
+VNDK_ABI_DUMP_DIR := prebuilts/abi-dumps/vndk/$(PLATFORM_VNDK_VERSION)
+NDK_ABI_DUMP_DIR := prebuilts/abi-dumps/ndk/$(PLATFORM_VNDK_VERSION)
+VNDK_ABI_DUMPS := $(call find-abi-dump-paths,$(VNDK_ABI_DUMP_DIR))
+NDK_ABI_DUMPS := $(call find-abi-dump-paths,$(NDK_ABI_DUMP_DIR))
+
+$(check-vndk-abi-dump-list-timestamp): $(VNDK_ABI_DUMPS) $(NDK_ABI_DUMPS)
+ $(eval added_vndk_abi_dumps := $(strip $(sort $(filter-out \
+ $(addsuffix .so.lsdump,$(filter-out $(NDK_MIGRATED_LIBS) $(VNDK_PRIVATE_LIBRARIES),$(LLNDK_LIBRARIES) $(VNDK_SAMEPROCESS_LIBRARIES) $(VNDK_CORE_LIBRARIES))), \
+ $(notdir $(VNDK_ABI_DUMPS))))))
+ $(if $(added_vndk_abi_dumps), \
+ echo -e "Found ABI reference dumps for non-VNDK libraries. Run \`find \$${ANDROID_BUILD_TOP}/$(VNDK_ABI_DUMP_DIR) '(' -name $(subst $(space), -or -name ,$(added_vndk_abi_dumps)) ')' -delete\` to delete the dumps.")
+
+ $(eval added_ndk_abi_dumps := $(strip $(sort $(filter-out \
+ $(addsuffix .so.lsdump,$(NDK_MIGRATED_LIBS)), \
+ $(notdir $(NDK_ABI_DUMPS))))))
+ $(if $(added_ndk_abi_dumps), \
+ echo -e "Found ABI reference dumps for non-NDK libraries. Run \`find \$${ANDROID_BUILD_TOP}/$(NDK_ABI_DUMP_DIR) '(' -name $(subst $(space), -or -name ,$(added_ndk_abi_dumps)) ')' -delete\` to delete the dumps.")
+
+ $(if $(added_vndk_abi_dumps)$(added_ndk_abi_dumps),exit 1)
+ $(hide) mkdir -p $(dir $@)
+ $(hide) touch $@
+
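The check above reduces to set arithmetic: collect the basenames of every *.so.lsdump file under the prebuilts directory and subtract the names expected from the LLNDK/VNDK/NDK library lists; whatever remains is a dump with no matching library. Below is a minimal Python sketch of that comparison, with placeholder library names and a placeholder dump path (the real rule derives both from the make variables shown above):

```python
#!/usr/bin/env python3
"""Minimal sketch of the dump-vs-library comparison performed by the rule
above.  The library names and dump path are illustrative placeholders."""
import glob
import os


def find_abi_dumps(dump_dir):
    # Roughly mirrors find-abi-dump-paths: every *.so.lsdump file under dump_dir.
    return glob.glob(os.path.join(dump_dir, '**', '*.so.lsdump'), recursive=True)


def unexpected_dumps(dump_dir, expected_libs):
    """Return dump basenames that have no corresponding library."""
    expected = {lib + '.so.lsdump' for lib in expected_libs}
    found = {os.path.basename(path) for path in find_abi_dumps(dump_dir)}
    return sorted(found - expected)


if __name__ == '__main__':
    # Hypothetical inputs; the real rule derives these from LLNDK_LIBRARIES,
    # VNDK_*_LIBRARIES, NDK_MIGRATED_LIBS and PLATFORM_VNDK_VERSION.
    vndk_libs = ['libui', 'libbinder']
    stale = unexpected_dumps('prebuilts/abi-dumps/vndk/29', vndk_libs)
    if stale:
        print('Found ABI reference dumps for non-VNDK libraries: ' + ', '.join(stale))
        raise SystemExit(1)
```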
+#####################################################################
+# VNDK package and snapshot.
+
ifneq ($(BOARD_VNDK_VERSION),)
include $(CLEAR_VARS)
LOCAL_MODULE := vndk_package
+# Filter out LLNDK libs that moved to APEX to avoid pulling them into /system/lib*
LOCAL_REQUIRED_MODULES := \
- $(LLNDK_LIBRARIES)
+ $(filter-out $(LLNDK_MOVED_TO_APEX_LIBRARIES),$(LLNDK_LIBRARIES))
+
ifneq ($(TARGET_SKIP_CURRENT_VNDK),true)
LOCAL_REQUIRED_MODULES += \
llndk.libraries.txt \
vndksp.libraries.txt \
+ vndkcore.libraries.txt \
+ vndkprivate.libraries.txt \
$(addsuffix .vendor,$(VNDK_CORE_LIBRARIES)) \
$(addsuffix .vendor,$(VNDK_SAMEPROCESS_LIBRARIES))
endif
@@ -126,3 +171,38 @@
include $(BUILD_PHONY_PACKAGE)
endif # BOARD_VNDK_VERSION is set
+
+#####################################################################
+# skip_mount.cfg, read by init to skip mounting some partitions when GSI is used.
+
+include $(CLEAR_VARS)
+LOCAL_MODULE := gsi_skip_mount.cfg
+LOCAL_MODULE_STEM := skip_mount.cfg
+LOCAL_SRC_FILES := $(LOCAL_MODULE)
+LOCAL_MODULE_CLASS := ETC
+LOCAL_SYSTEM_EXT_MODULE := true
+LOCAL_MODULE_RELATIVE_PATH := init/config
+
+# Add a symlink under /system/etc/init/config pointing to /system/system_ext/etc/init/config,
+# because first-stage init in Android 10.0 reads skip_mount.cfg from /system/etc/* after
+# chrooting into /system.
+# TODO: remove this symlink once new GSIs no longer need to support Android 10.
+# The actual file needs to be under /system/system_ext because it is GSI-specific and does
+# not belong to core CSI.
+LOCAL_POST_INSTALL_CMD := \
+ mkdir -p $(TARGET_OUT)/etc/init; \
+ ln -sf /system/system_ext/etc/init/config $(TARGET_OUT)/etc/init/config
+
+include $(BUILD_PREBUILT)
+
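For reference, the post-install command above leaves the real skip_mount.cfg under /system/system_ext and only a compatibility symlink under /system/etc. A small Python sketch of the equivalent filesystem operations against a staging directory (the staging path is illustrative, standing in for $(TARGET_OUT)):

```python
#!/usr/bin/env python3
"""Sketch of what LOCAL_POST_INSTALL_CMD above does to the staged system
image; 'staging' is an illustrative stand-in for $(TARGET_OUT)."""
import os

staging = 'out/target/product/generic_arm64/system'  # hypothetical $(TARGET_OUT)

# The prebuilt itself ends up on the device at
#   /system/system_ext/etc/init/config/skip_mount.cfg
os.makedirs(os.path.join(staging, 'etc/init'), exist_ok=True)  # mkdir -p

link = os.path.join(staging, 'etc/init/config')
if os.path.lexists(link):
    os.remove(link)  # ln -sf overwrites an existing link
os.symlink('/system/system_ext/etc/init/config', link)

# First-stage init on Android 10 then resolves
# /system/etc/init/config/skip_mount.cfg after chrooting into /system.
```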
+#####################################################################
+# init.gsi.rc, GSI-specific init script.
+
+include $(CLEAR_VARS)
+LOCAL_MODULE := init.gsi.rc
+LOCAL_SRC_FILES := $(LOCAL_MODULE)
+LOCAL_MODULE_CLASS := ETC
+LOCAL_SYSTEM_EXT_MODULE := true
+LOCAL_MODULE_RELATIVE_PATH := init
+
+include $(BUILD_PREBUILT)
diff --git a/target/product/gsi/current.txt b/target/product/gsi/current.txt
index e936d45..a11531e 100644
--- a/target/product/gsi/current.txt
+++ b/target/product/gsi/current.txt
@@ -4,6 +4,7 @@
LLNDK: libGLESv3.so
LLNDK: libRS.so
LLNDK: libandroid_net.so
+LLNDK: libbinder_ndk.so
LLNDK: libc.so
LLNDK: libcgrouprc.so
LLNDK: libdl.so
@@ -18,8 +19,10 @@
LLNDK: libvulkan.so
VNDK-SP: android.hardware.graphics.common@1.0.so
VNDK-SP: android.hardware.graphics.common@1.1.so
+VNDK-SP: android.hardware.graphics.common@1.2.so
VNDK-SP: android.hardware.graphics.mapper@2.0.so
VNDK-SP: android.hardware.graphics.mapper@2.1.so
+VNDK-SP: android.hardware.graphics.mapper@3.0.so
VNDK-SP: android.hardware.renderscript@1.0.so
VNDK-SP: android.hidl.memory.token@1.0.so
VNDK-SP: android.hidl.memory@1.0.so
@@ -41,7 +44,6 @@
VNDK-SP: libhidlmemory.so
VNDK-SP: libhidltransport.so
VNDK-SP: libhwbinder.so
-VNDK-SP: libhwbinder_noltopgo.so
VNDK-SP: libion.so
VNDK-SP: libjsoncpp.so
VNDK-SP: liblzma.so
@@ -50,16 +52,17 @@
VNDK-SP: libutils.so
VNDK-SP: libutilscallstack.so
VNDK-SP: libz.so
+VNDK-core: android.frameworks.cameraservice.common@2.0.so
+VNDK-core: android.frameworks.cameraservice.device@2.0.so
+VNDK-core: android.frameworks.cameraservice.service@2.0.so
VNDK-core: android.frameworks.displayservice@1.0.so
VNDK-core: android.frameworks.schedulerservice@1.0.so
VNDK-core: android.frameworks.sensorservice@1.0.so
+VNDK-core: android.frameworks.stats@1.0.so
VNDK-core: android.frameworks.vr.composer@1.0.so
VNDK-core: android.hardware.atrace@1.0.so
-VNDK-core: android.hardware.audio.common-util.so
VNDK-core: android.hardware.audio.common@2.0.so
-VNDK-core: android.hardware.audio.common@2.0-util.so
VNDK-core: android.hardware.audio.common@4.0.so
-VNDK-core: android.hardware.audio.common@4.0-util.so
VNDK-core: android.hardware.audio.common@5.0.so
VNDK-core: android.hardware.audio.effect@2.0.so
VNDK-core: android.hardware.audio.effect@4.0.so
@@ -71,6 +74,7 @@
VNDK-core: android.hardware.automotive.audiocontrol@1.0.so
VNDK-core: android.hardware.automotive.evs@1.0.so
VNDK-core: android.hardware.automotive.vehicle@2.0.so
+VNDK-core: android.hardware.biometrics.face@1.0.so
VNDK-core: android.hardware.biometrics.fingerprint@2.1.so
VNDK-core: android.hardware.bluetooth.a2dp@1.0.so
VNDK-core: android.hardware.bluetooth.audio@2.0.so
@@ -84,11 +88,15 @@
VNDK-core: android.hardware.camera.device@3.2.so
VNDK-core: android.hardware.camera.device@3.3.so
VNDK-core: android.hardware.camera.device@3.4.so
+VNDK-core: android.hardware.camera.device@3.5.so
VNDK-core: android.hardware.camera.metadata@3.2.so
VNDK-core: android.hardware.camera.metadata@3.3.so
+VNDK-core: android.hardware.camera.metadata@3.4.so
VNDK-core: android.hardware.camera.provider@2.4.so
+VNDK-core: android.hardware.camera.provider@2.5.so
VNDK-core: android.hardware.cas.native@1.0.so
VNDK-core: android.hardware.cas@1.0.so
+VNDK-core: android.hardware.cas@1.1.so
VNDK-core: android.hardware.configstore-utils.so
VNDK-core: android.hardware.configstore@1.0.so
VNDK-core: android.hardware.configstore@1.1.so
@@ -97,23 +105,34 @@
VNDK-core: android.hardware.contexthub@1.0.so
VNDK-core: android.hardware.drm@1.0.so
VNDK-core: android.hardware.drm@1.1.so
+VNDK-core: android.hardware.drm@1.2.so
VNDK-core: android.hardware.dumpstate@1.0.so
VNDK-core: android.hardware.fastboot@1.0.so
VNDK-core: android.hardware.gatekeeper@1.0.so
+VNDK-core: android.hardware.gnss.measurement_corrections@1.0.so
+VNDK-core: android.hardware.gnss.visibility_control@1.0.so
VNDK-core: android.hardware.gnss@1.0.so
VNDK-core: android.hardware.gnss@1.1.so
+VNDK-core: android.hardware.gnss@2.0.so
VNDK-core: android.hardware.graphics.allocator@2.0.so
+VNDK-core: android.hardware.graphics.allocator@3.0.so
VNDK-core: android.hardware.graphics.bufferqueue@1.0.so
+VNDK-core: android.hardware.graphics.bufferqueue@2.0.so
VNDK-core: android.hardware.graphics.composer@2.1.so
VNDK-core: android.hardware.graphics.composer@2.2.so
+VNDK-core: android.hardware.graphics.composer@2.3.so
VNDK-core: android.hardware.health.storage@1.0.so
VNDK-core: android.hardware.health@1.0.so
VNDK-core: android.hardware.health@2.0.so
+VNDK-core: android.hardware.input.classifier@1.0.so
+VNDK-core: android.hardware.input.common@1.0.so
VNDK-core: android.hardware.ir@1.0.so
VNDK-core: android.hardware.keymaster@3.0.so
VNDK-core: android.hardware.keymaster@4.0.so
VNDK-core: android.hardware.light@2.0.so
VNDK-core: android.hardware.media.bufferpool@1.0.so
+VNDK-core: android.hardware.media.bufferpool@2.0.so
+VNDK-core: android.hardware.media.c2@1.0.so
VNDK-core: android.hardware.media.omx@1.0.so
VNDK-core: android.hardware.media@1.0.so
VNDK-core: android.hardware.memtrack@1.0.so
@@ -141,30 +160,40 @@
VNDK-core: android.hardware.secure_element@1.0.so
VNDK-core: android.hardware.secure_element@1.1.so
VNDK-core: android.hardware.sensors@1.0.so
+VNDK-core: android.hardware.sensors@2.0.so
VNDK-core: android.hardware.soundtrigger@2.0.so
VNDK-core: android.hardware.soundtrigger@2.0-core.so
VNDK-core: android.hardware.soundtrigger@2.1.so
+VNDK-core: android.hardware.soundtrigger@2.2.so
VNDK-core: android.hardware.tetheroffload.config@1.0.so
VNDK-core: android.hardware.tetheroffload.control@1.0.so
VNDK-core: android.hardware.thermal@1.0.so
VNDK-core: android.hardware.thermal@1.1.so
+VNDK-core: android.hardware.thermal@2.0.so
VNDK-core: android.hardware.tv.cec@1.0.so
+VNDK-core: android.hardware.tv.cec@2.0.so
VNDK-core: android.hardware.tv.input@1.0.so
+VNDK-core: android.hardware.tv.tuner@1.0.so
VNDK-core: android.hardware.usb.gadget@1.0.so
VNDK-core: android.hardware.usb@1.0.so
VNDK-core: android.hardware.usb@1.1.so
+VNDK-core: android.hardware.usb@1.2.so
VNDK-core: android.hardware.vibrator@1.0.so
VNDK-core: android.hardware.vibrator@1.1.so
VNDK-core: android.hardware.vibrator@1.2.so
+VNDK-core: android.hardware.vibrator@1.3.so
VNDK-core: android.hardware.vr@1.0.so
VNDK-core: android.hardware.weaver@1.0.so
VNDK-core: android.hardware.wifi.hostapd@1.0.so
+VNDK-core: android.hardware.wifi.hostapd@1.1.so
VNDK-core: android.hardware.wifi.offload@1.0.so
VNDK-core: android.hardware.wifi.supplicant@1.0.so
VNDK-core: android.hardware.wifi.supplicant@1.1.so
+VNDK-core: android.hardware.wifi.supplicant@1.2.so
VNDK-core: android.hardware.wifi@1.0.so
VNDK-core: android.hardware.wifi@1.1.so
VNDK-core: android.hardware.wifi@1.2.so
+VNDK-core: android.hardware.wifi@1.3.so
VNDK-core: android.hidl.allocator@1.0.so
VNDK-core: android.hidl.memory.block@1.0.so
VNDK-core: android.hidl.token@1.0.so
@@ -181,6 +210,7 @@
VNDK-core: libcamera_metadata.so
VNDK-core: libcap.so
VNDK-core: libcn-cbor.so
+VNDK-core: libcodec2.so
VNDK-core: libcrypto.so
VNDK-core: libcrypto_utils.so
VNDK-core: libcurl.so
@@ -194,7 +224,6 @@
VNDK-core: libgui.so
VNDK-core: libhardware_legacy.so
VNDK-core: libhidlallocatorutils.so
-VNDK-core: libhidlcache.so
VNDK-core: libjpeg.so
VNDK-core: libkeymaster_messages.so
VNDK-core: libkeymaster_portable.so
@@ -208,7 +237,6 @@
VNDK-core: libmkbootimg_abi_check.so
VNDK-core: libnetutils.so
VNDK-core: libnl.so
-VNDK-core: libopus.so
VNDK-core: libpcre2.so
VNDK-core: libpiex.so
VNDK-core: libpng.so
@@ -223,41 +251,17 @@
VNDK-core: libspeexresampler.so
VNDK-core: libsqlite.so
VNDK-core: libssl.so
-VNDK-core: libstagefright_amrnb_common.so
+VNDK-core: libstagefright_bufferpool@2.0.so
VNDK-core: libstagefright_bufferqueue_helper.so
-VNDK-core: libstagefright_enc_common.so
-VNDK-core: libstagefright_flacdec.so
VNDK-core: libstagefright_foundation.so
VNDK-core: libstagefright_omx.so
VNDK-core: libstagefright_omx_utils.so
-VNDK-core: libstagefright_soft_aacdec.so
-VNDK-core: libstagefright_soft_aacenc.so
-VNDK-core: libstagefright_soft_amrdec.so
-VNDK-core: libstagefright_soft_amrnbenc.so
-VNDK-core: libstagefright_soft_amrwbenc.so
-VNDK-core: libstagefright_soft_avcdec.so
-VNDK-core: libstagefright_soft_avcenc.so
-VNDK-core: libstagefright_soft_flacdec.so
-VNDK-core: libstagefright_soft_flacenc.so
-VNDK-core: libstagefright_soft_g711dec.so
-VNDK-core: libstagefright_soft_gsmdec.so
-VNDK-core: libstagefright_soft_hevcdec.so
-VNDK-core: libstagefright_soft_mp3dec.so
-VNDK-core: libstagefright_soft_mpeg2dec.so
-VNDK-core: libstagefright_soft_mpeg4dec.so
-VNDK-core: libstagefright_soft_mpeg4enc.so
-VNDK-core: libstagefright_soft_opusdec.so
-VNDK-core: libstagefright_soft_rawdec.so
-VNDK-core: libstagefright_soft_vorbisdec.so
-VNDK-core: libstagefright_soft_vpxdec.so
-VNDK-core: libstagefright_soft_vpxenc.so
VNDK-core: libstagefright_xmlparser.so
VNDK-core: libsysutils.so
VNDK-core: libtinyalsa.so
VNDK-core: libtinyxml2.so
VNDK-core: libui.so
VNDK-core: libusbhost.so
-VNDK-core: libvorbisidec.so
VNDK-core: libwifi-system-iface.so
VNDK-core: libxml2.so
VNDK-core: libyuv.so
diff --git a/target/product/gsi/gsi_skip_mount.cfg b/target/product/gsi/gsi_skip_mount.cfg
new file mode 100644
index 0000000..3f812cb
--- /dev/null
+++ b/target/product/gsi/gsi_skip_mount.cfg
@@ -0,0 +1,2 @@
+/product
+/system_ext
diff --git a/target/product/gsi/skip_mount.cfg b/target/product/gsi/skip_mount.cfg
deleted file mode 100644
index 549767e..0000000
--- a/target/product/gsi/skip_mount.cfg
+++ /dev/null
@@ -1,2 +0,0 @@
-/product
-/product_services
diff --git a/target/product/gsi_arm64.mk b/target/product/gsi_arm64.mk
index b711d88..09fb633 100644
--- a/target/product/gsi_arm64.mk
+++ b/target/product/gsi_arm64.mk
@@ -14,22 +14,28 @@
# limitations under the License.
#
+#
+# All components inherited here go to system image
+#
$(call inherit-product, $(SRC_TARGET_DIR)/product/core_64_bit.mk)
-$(call inherit-product, $(SRC_TARGET_DIR)/product/gsi_common.mk)
+$(call inherit-product, $(SRC_TARGET_DIR)/product/mainline_system.mk)
# Enable mainline checking
PRODUCT_ENFORCE_ARTIFACT_PATH_REQUIREMENTS := relaxed
+
PRODUCT_ARTIFACT_PATH_REQUIREMENT_WHITELIST += \
- root/init.zygote32_64.rc \
root/init.zygote64_32.rc \
-# Copy different zygote settings for vendor.img to select by setting property
-# ro.zygote=zygote64_32 or ro.zygote=zygote32_64:
-# 1. 64-bit primary, 32-bit secondary OR
-# 2. 32-bit primary, 64-bit secondary
-# init.zygote64_32.rc is in the core_64_bit.mk below
-PRODUCT_COPY_FILES += \
- system/core/rootdir/init.zygote32_64.rc:root/init.zygote32_64.rc
+#
+# All components inherited here go to product image
+#
+$(call inherit-product, $(SRC_TARGET_DIR)/product/aosp_product.mk)
+
+#
+# Special settings for GSI releasing
+#
+$(call inherit-product, $(SRC_TARGET_DIR)/product/gsi_release.mk)
+
PRODUCT_NAME := gsi_arm64
PRODUCT_DEVICE := gsi_arm64
diff --git a/target/product/gsi_common.mk b/target/product/gsi_common.mk
index 7578f92..bfecc95 100644
--- a/target/product/gsi_common.mk
+++ b/target/product/gsi_common.mk
@@ -50,15 +50,10 @@
# Some GSI builds enable dexpreopt, whitelist these preopt files
PRODUCT_ARTIFACT_PATH_REQUIREMENT_WHITELIST += %.odex %.vdex %.art
-# Exclude GSI specific files
-PRODUCT_ARTIFACT_PATH_REQUIREMENT_WHITELIST += \
- system/etc/init/config/skip_mount.cfg \
- system/etc/init/init.gsi.rc \
-
-# Exclude all files under system/product and system/product_services
+# Exclude all files under system/product and system/system_ext
PRODUCT_ARTIFACT_PATH_REQUIREMENT_WHITELIST += \
system/product/% \
- system/product_services/%
+ system/system_ext/%
# Split selinux policy
@@ -71,9 +66,9 @@
PRODUCT_COMPATIBLE_PROPERTY_OVERRIDE := true
# GSI specific tasks on boot
-PRODUCT_COPY_FILES += \
- build/make/target/product/gsi/skip_mount.cfg:system/etc/init/config/skip_mount.cfg \
- build/make/target/product/gsi/init.gsi.rc:system/etc/init/init.gsi.rc \
+PRODUCT_PACKAGES += \
+ gsi_skip_mount.cfg \
+ init.gsi.rc
# Support addtional P vendor interface
PRODUCT_EXTRA_VNDK_VERSIONS := 28
diff --git a/target/product/gsi_release.mk b/target/product/gsi_release.mk
new file mode 100644
index 0000000..cab3916
--- /dev/null
+++ b/target/product/gsi_release.mk
@@ -0,0 +1,69 @@
+#
+# Copyright (C) 2019 The Android Open-Source Project
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+#
+# This makefile contains the special settings for releasing GSI.
+# It is included by the build targets that are used to release GSI.
+#
+# For example:
+# - A released GSI contains skip_mount.cfg to skip mounting the product partition
+# - A released GSI contains more VNDK packages to support older vendor images
+# - etc.
+#
+
+# Exclude all files under system/product and system/system_ext
+PRODUCT_ARTIFACT_PATH_REQUIREMENT_WHITELIST += \
+ system/product/% \
+ system/system_ext/%
+
+
+# GSI doesn't support APEX for now.
+# Properties set in product take precedence over those in vendor.
+PRODUCT_PRODUCT_PROPERTIES += \
+ ro.apex.updatable=false
+
+# Split selinux policy
+PRODUCT_FULL_TREBLE_OVERRIDE := true
+
+# Enable dynamic partition size
+PRODUCT_USE_DYNAMIC_PARTITION_SIZE := true
+
+# Needed by devices newly launched with P to pass VtsTrebleSysProp on GSI
+PRODUCT_COMPATIBLE_PROPERTY_OVERRIDE := true
+
+# GSI specific tasks on boot
+PRODUCT_PACKAGES += \
+ gsi_skip_mount.cfg \
+ init.gsi.rc
+
+# Support additional P and Q VNDK packages
+PRODUCT_EXTRA_VNDK_VERSIONS := 28 29
+
+# The 64-bit GSI build targets inherit core_64_bit.mk to enable 64-bit support
+# and include init.zygote64_32.rc.
+# 64-bit GSI builds for releasing also need to include the alternative zygote
+# settings for vendor.img, selected by setting the property
+# ro.zygote=zygote64_32 or ro.zygote=zygote32_64:
+# 1. 64-bit primary, 32-bit secondary, or
+# 2. 32-bit primary, 64-bit secondary
+# Include init.zygote32_64.rc here if the target has inherited core_64_bit.mk.
+ifeq (true|true,$(TARGET_SUPPORTS_32_BIT_APPS)|$(TARGET_SUPPORTS_64_BIT_APPS))
+PRODUCT_COPY_FILES += \
+ system/core/rootdir/init.zygote32_64.rc:root/init.zygote32_64.rc
+
+PRODUCT_ARTIFACT_PATH_REQUIREMENT_WHITELIST += \
+ root/init.zygote32_64.rc
+endif
diff --git a/target/product/handheld_product.mk b/target/product/handheld_product.mk
index 758fa9b..54dcaf2 100644
--- a/target/product/handheld_product.mk
+++ b/target/product/handheld_product.mk
@@ -27,7 +27,6 @@
Camera2 \
Contacts \
DeskClock \
- Email \
Gallery2 \
LatinIME \
Launcher3QuickStep \
@@ -40,3 +39,7 @@
StorageManager \
SystemUI \
WallpaperCropper \
+ frameworks-base-overlays
+
+PRODUCT_PACKAGES_DEBUG += \
+ frameworks-base-overlays-debug
diff --git a/target/product/handheld_system.mk b/target/product/handheld_system.mk
index a4dd6d7..6463a54 100644
--- a/target/product/handheld_system.mk
+++ b/target/product/handheld_system.mk
@@ -24,6 +24,7 @@
$(call inherit-product-if-exists, external/google-fonts/carrois-gothic-sc/fonts.mk)
$(call inherit-product-if-exists, external/google-fonts/coming-soon/fonts.mk)
$(call inherit-product-if-exists, external/google-fonts/cutive-mono/fonts.mk)
+$(call inherit-product-if-exists, external/google-fonts/source-sans-pro/fonts.mk)
$(call inherit-product-if-exists, external/noto-fonts/fonts.mk)
$(call inherit-product-if-exists, external/roboto-fonts/fonts.mk)
$(call inherit-product-if-exists, external/hyphenation-patterns/patterns.mk)
@@ -56,6 +57,7 @@
MtpDocumentsProvider \
MusicFX \
NfcNci \
+ OsuLogin \
PacProcessor \
PrintRecommendationService \
PrintSpooler \
diff --git a/target/product/handheld_vendor.mk b/target/product/handheld_vendor.mk
index ca7760a..cb7cf74 100644
--- a/target/product/handheld_vendor.mk
+++ b/target/product/handheld_vendor.mk
@@ -23,10 +23,7 @@
# /vendor packages
PRODUCT_PACKAGES += \
audio.primary.default \
- DisplayCutoutEmulationCornerOverlay \
- DisplayCutoutEmulationDoubleOverlay \
- DisplayCutoutEmulationTallOverlay \
local_time.default \
power.default \
- SysuiDarkThemeOverlay \
vibrator.default \
+
diff --git a/target/product/mainline_arm64.mk b/target/product/mainline_arm64.mk
index c098c9f..6d998d6 100644
--- a/target/product/mainline_arm64.mk
+++ b/target/product/mainline_arm64.mk
@@ -28,46 +28,9 @@
PRODUCT_ENFORCE_ARTIFACT_PATH_REQUIREMENTS := relaxed
PRODUCT_ARTIFACT_PATH_REQUIREMENT_WHITELIST += \
root/init.zygote64_32.rc \
- system/etc/seccomp_policy/crash_dump.arm.policy \
- system/etc/seccomp_policy/mediacodec.policy \
-# Modules that are to be moved to /product
+# Modules that should probably be moved to /product
PRODUCT_ARTIFACT_PATH_REQUIREMENT_WHITELIST += \
- system/app/Browser2/Browser2.apk \
- system/app/Calendar/Calendar.apk \
- system/app/Camera2/Camera2.apk \
- system/app/DeskClock/DeskClock.apk \
- system/app/DeskClock/oat/arm64/DeskClock.odex \
- system/app/DeskClock/oat/arm64/DeskClock.vdex \
- system/app/Email/Email.apk \
- system/app/Gallery2/Gallery2.apk \
- system/app/LatinIME/LatinIME.apk \
- system/app/LatinIME/oat/arm64/LatinIME.odex \
- system/app/LatinIME/oat/arm64/LatinIME.vdex \
- system/app/Music/Music.apk \
- system/app/QuickSearchBox/QuickSearchBox.apk \
- system/app/webview/webview.apk \
system/bin/healthd \
system/etc/init/healthd.rc \
system/etc/vintf/manifest/manifest_healthd.xml \
- system/lib64/libjni_eglfence.so \
- system/lib64/libjni_filtershow_filters.so \
- system/lib64/libjni_jpegstream.so \
- system/lib64/libjni_jpegutil.so \
- system/lib64/libjni_latinime.so \
- system/lib64/libjni_tinyplanet.so \
- system/priv-app/CarrierConfig/CarrierConfig.apk \
- system/priv-app/CarrierConfig/oat/arm64/CarrierConfig.odex \
- system/priv-app/CarrierConfig/oat/arm64/CarrierConfig.vdex \
- system/priv-app/Contacts/Contacts.apk \
- system/priv-app/Dialer/Dialer.apk \
- system/priv-app/Launcher3QuickStep/Launcher3QuickStep.apk \
- system/priv-app/OneTimeInitializer/OneTimeInitializer.apk \
- system/priv-app/Provision/Provision.apk \
- system/priv-app/Settings/Settings.apk \
- system/priv-app/SettingsIntelligence/SettingsIntelligence.apk \
- system/priv-app/StorageManager/StorageManager.apk \
- system/priv-app/SystemUI/SystemUI.apk \
- system/priv-app/SystemUI/oat/arm64/SystemUI.odex \
- system/priv-app/SystemUI/oat/arm64/SystemUI.vdex \
- system/priv-app/WallpaperCropper/WallpaperCropper.apk \
diff --git a/target/product/mainline_system.mk b/target/product/mainline_system.mk
index 8bcc212..43bc45f 100644
--- a/target/product/mainline_system.mk
+++ b/target/product/mainline_system.mk
@@ -18,8 +18,6 @@
$(call inherit-product, $(SRC_TARGET_DIR)/product/handheld_system.mk)
$(call inherit-product, $(SRC_TARGET_DIR)/product/telephony_system.mk)
$(call inherit-product, $(SRC_TARGET_DIR)/product/languages_default.mk)
-# Enable updating of APEXes
-#$(call inherit-product, $(SRC_TARGET_DIR)/product/updatable_apex.mk)
# Add adb keys to debuggable AOSP builds (if they exist)
$(call inherit-product-if-exists, vendor/google/security/adb/vendor_key.mk)
@@ -73,6 +71,8 @@
android.hardware.radio@1.0 \
android.hardware.radio@1.1 \
android.hardware.radio@1.2 \
+ android.hardware.radio@1.3 \
+ android.hardware.radio@1.4 \
android.hardware.radio.config@1.0 \
android.hardware.radio.deprecated@1.0 \
android.hardware.secure_element@1.0 \
@@ -85,6 +85,11 @@
libnl \
libprotobuf-cpp-full \
+# Camera service uses 'libdepthphoto' for adding dynamic depth
+# metadata inside depth jpegs.
+PRODUCT_PACKAGES += \
+ libdepthphoto \
+
PRODUCT_PACKAGES_DEBUG += \
avbctl \
bootctl \
@@ -100,6 +105,9 @@
# Enable dynamic partition size
PRODUCT_USE_DYNAMIC_PARTITION_SIZE := true
+PRODUCT_PACKAGES += \
+ com.android.apex.cts.shim.v1_prebuilt
+
PRODUCT_NAME := mainline_system
PRODUCT_BRAND := generic
diff --git a/target/product/mainline_system_arm64.mk b/target/product/mainline_system_arm64.mk
index f01cc54..b9ac1e3 100644
--- a/target/product/mainline_system_arm64.mk
+++ b/target/product/mainline_system_arm64.mk
@@ -21,7 +21,7 @@
PRODUCT_BUILD_CACHE_IMAGE := false
PRODUCT_BUILD_ODM_IMAGE := false
PRODUCT_BUILD_PRODUCT_IMAGE := false
-PRODUCT_BUILD_PRODUCT_SERVICES_IMAGE := false
+PRODUCT_BUILD_SYSTEM_EXT_IMAGE := false
PRODUCT_BUILD_RAMDISK_IMAGE := false
PRODUCT_BUILD_SYSTEM_IMAGE := true
PRODUCT_BUILD_SYSTEM_OTHER_IMAGE := false
diff --git a/target/product/media_system.mk b/target/product/media_system.mk
index 2ba7005..5c0902d 100644
--- a/target/product/media_system.mk
+++ b/target/product/media_system.mk
@@ -31,31 +31,9 @@
fsck.f2fs \
HTMLViewer \
libfilterpack_imageproc \
- libstagefright_soft_aacdec \
- libstagefright_soft_aacenc \
- libstagefright_soft_amrdec \
- libstagefright_soft_amrnbenc \
- libstagefright_soft_amrwbenc \
- libstagefright_soft_avcdec \
- libstagefright_soft_avcenc \
- libstagefright_soft_flacdec \
- libstagefright_soft_flacenc \
- libstagefright_soft_g711dec \
- libstagefright_soft_gsmdec \
- libstagefright_soft_hevcdec \
- libstagefright_soft_mp3dec \
- libstagefright_soft_mpeg2dec \
- libstagefright_soft_mpeg4dec \
- libstagefright_soft_mpeg4enc \
- libstagefright_soft_opusdec \
- libstagefright_soft_rawdec \
- libstagefright_soft_vorbisdec \
- libstagefright_soft_vpxdec \
- libstagefright_soft_vpxenc \
libwebviewchromium_loader \
libwebviewchromium_plat_support \
make_f2fs \
- PackageInstaller \
requestsync \
StatementService \
vndk_snapshot_package \
diff --git a/target/product/profile_boot_common.mk b/target/product/profile_boot_common.mk
index fc19954..a40b3e9 100644
--- a/target/product/profile_boot_common.mk
+++ b/target/product/profile_boot_common.mk
@@ -20,29 +20,11 @@
# Ideally we would just generate an empty boot.art but we don't have the build
# support to separate the image from the compile code.
PRODUCT_DEX_PREOPT_BOOT_IMAGE_PROFILE_LOCATION := build/make/target/product/empty-profile
-PRODUCT_DEX_PREOPT_BOOT_FLAGS := --count-hotness-in-compiled-code
DEX_PREOPT_DEFAULT := nostripping
-# Disable uncompressing priv apps so that there is enough space to build the system partition.
-DONT_UNCOMPRESS_PRIV_APPS_DEXS := true
-
-# Use an empty preloaded-classes list.
-PRODUCT_COPY_FILES += \
- build/make/target/product/empty-preloaded-classes:system/etc/preloaded-classes
-
# Boot image property overrides.
PRODUCT_PROPERTY_OVERRIDES += \
- dalvik.vm.jitinitialsize=32m \
- dalvik.vm.jitmaxsize=32m \
- dalvik.vm.usejitprofiles=true \
- dalvik.vm.hot-startup-method-samples=256 \
dalvik.vm.profilesystemserver=true \
- dalvik.vm.profilebootimage=true
-
-# Use speed compiler filter since system server doesn't have JIT.
-PRODUCT_DEX_PREOPT_BOOT_FLAGS += --compiler-filter=speed
-# System server is speed compiled and doesn't have a separate preopt flag,
-# so we enable hotness in compiled code for everything.
-PRODUCT_DEX_PREOPT_DEFAULT_FLAGS := --count-hotness-in-compiled-code
+ dalvik.vm.profilebootclasspath=true
PRODUCT_DIST_BOOT_AND_SYSTEM_JARS := true
diff --git a/target/product/runtime_libart.mk b/target/product/runtime_libart.mk
index 5db32f2..581a72b 100644
--- a/target/product/runtime_libart.mk
+++ b/target/product/runtime_libart.mk
@@ -33,9 +33,12 @@
PRODUCT_PACKAGES += \
ext \
-# Android Runtime APEX module.
+# Runtime (Bionic) APEX module.
PRODUCT_PACKAGES += com.android.runtime
-PRODUCT_HOST_PACKAGES += com.android.runtime
+
+# ART APEX module.
+PRODUCT_PACKAGES += com.android.art
+PRODUCT_HOST_PACKAGES += com.android.art
# Certificates.
PRODUCT_PACKAGES += \
diff --git a/target/product/sdk_phone_arm64.mk b/target/product/sdk_phone_arm64.mk
index 96f0bfd..ad72633 100644
--- a/target/product/sdk_phone_arm64.mk
+++ b/target/product/sdk_phone_arm64.mk
@@ -13,6 +13,7 @@
# See the License for the specific language governing permissions and
# limitations under the License.
#
+QEMU_USE_SYSTEM_EXT_PARTITIONS := true
$(call inherit-product, $(SRC_TARGET_DIR)/product/aosp_arm64.mk)
diff --git a/target/product/sdk_phone_armv7.mk b/target/product/sdk_phone_armv7.mk
index 04d8d6a..77b8b50 100644
--- a/target/product/sdk_phone_armv7.mk
+++ b/target/product/sdk_phone_armv7.mk
@@ -13,6 +13,7 @@
# See the License for the specific language governing permissions and
# limitations under the License.
#
+QEMU_USE_SYSTEM_EXT_PARTITIONS := true
$(call inherit-product, $(SRC_TARGET_DIR)/product/aosp_arm.mk)
diff --git a/target/product/sdk_phone_x86.mk b/target/product/sdk_phone_x86.mk
index b34e5b6..efb3c6e 100644
--- a/target/product/sdk_phone_x86.mk
+++ b/target/product/sdk_phone_x86.mk
@@ -13,6 +13,30 @@
# See the License for the specific language governing permissions and
# limitations under the License.
#
+QEMU_USE_SYSTEM_EXT_PARTITIONS := true
+PRODUCT_USE_DYNAMIC_PARTITIONS := true
+
+#
+# All components inherited here go to system image
+#
+$(call inherit-product, $(SRC_TARGET_DIR)/product/mainline_system.mk)
+
+# Enable mainline checking for exactly this product name
+ifeq (sdk_phone_x86,$(TARGET_PRODUCT))
+PRODUCT_ENFORCE_ARTIFACT_PATH_REQUIREMENTS := relaxed
+endif
+
+#
+# All components inherited here go to product image
+#
+$(call inherit-product, $(SRC_TARGET_DIR)/product/aosp_product.mk)
+
+#
+# All components inherited here go to vendor image
+#
+$(call inherit-product-if-exists, device/generic/goldfish/x86-vendor.mk)
+$(call inherit-product, $(SRC_TARGET_DIR)/product/emulator_vendor.mk)
+$(call inherit-product, $(SRC_TARGET_DIR)/board/generic_x86/device.mk)
$(call inherit-product, $(SRC_TARGET_DIR)/product/aosp_x86.mk)
diff --git a/target/product/sdk_phone_x86_64.mk b/target/product/sdk_phone_x86_64.mk
index 37c078e..267796f 100644
--- a/target/product/sdk_phone_x86_64.mk
+++ b/target/product/sdk_phone_x86_64.mk
@@ -13,8 +13,34 @@
# See the License for the specific language governing permissions and
# limitations under the License.
#
+QEMU_USE_SYSTEM_EXT_PARTITIONS := true
+PRODUCT_USE_DYNAMIC_PARTITIONS := true
-$(call inherit-product, $(SRC_TARGET_DIR)/product/aosp_x86_64.mk)
+#
+# All components inherited here go to system image
+#
+$(call inherit-product, $(SRC_TARGET_DIR)/product/core_64_bit.mk)
+$(call inherit-product, $(SRC_TARGET_DIR)/product/mainline_system.mk)
+
+# Enable mainline checking for exactly this product name
+ifeq (sdk_phone_x86_64,$(TARGET_PRODUCT))
+PRODUCT_ENFORCE_ARTIFACT_PATH_REQUIREMENTS := relaxed
+endif
+
+PRODUCT_ARTIFACT_PATH_REQUIREMENT_WHITELIST += \
+ root/init.zygote64_32.rc \
+
+#
+# All components inherited here go to product image
+#
+$(call inherit-product, $(SRC_TARGET_DIR)/product/aosp_product.mk)
+
+#
+# All components inherited here go to vendor image
+#
+$(call inherit-product-if-exists, device/generic/goldfish/x86_64-vendor.mk)
+$(call inherit-product, $(SRC_TARGET_DIR)/product/emulator_vendor.mk)
+$(call inherit-product, $(SRC_TARGET_DIR)/board/generic_x86_64/device.mk)
# Define the host tools and libs that are parts of the SDK.
-include sdk/build/product_sdk.mk
diff --git a/target/product/telephony_system.mk b/target/product/telephony_system.mk
index 584cf1e..4da9bdf 100644
--- a/target/product/telephony_system.mk
+++ b/target/product/telephony_system.mk
@@ -21,6 +21,6 @@
ONS \
CarrierDefaultApp \
CallLogBackup \
- CellBroadcastReceiver \
+ CellBroadcastAppPlatform \
PRODUCT_COPY_FILES := \
diff --git a/target/product/updatable_apex.mk b/target/product/updatable_apex.mk
index 038f66e..a9f4baf 100644
--- a/target/product/updatable_apex.mk
+++ b/target/product/updatable_apex.mk
@@ -17,5 +17,4 @@
# Inherit this when the target needs to support updating APEXes
PRODUCT_PROPERTY_OVERRIDES := ro.apex.updatable=true
-PRODUCT_PACKAGES := com.android.apex.cts.shim.v1_prebuilt
TARGET_FLATTEN_APEX := false
diff --git a/target/product/virtual_ab_ota.mk b/target/product/virtual_ab_ota.mk
new file mode 100644
index 0000000..c00b0ed
--- /dev/null
+++ b/target/product/virtual_ab_ota.mk
@@ -0,0 +1,19 @@
+#
+# Copyright (C) 2019 The Android Open-Source Project
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+PRODUCT_VIRTUAL_AB_OTA := true
+
+PRODUCT_PRODUCT_PROPERTIES += ro.virtual_ab.enabled=true
diff --git a/target/product/virtual_ab_ota_retrofit.mk b/target/product/virtual_ab_ota_retrofit.mk
new file mode 100644
index 0000000..b492fad
--- /dev/null
+++ b/target/product/virtual_ab_ota_retrofit.mk
@@ -0,0 +1,21 @@
+#
+# Copyright (C) 2019 The Android Open-Source Project
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+$(call inherit-product, $(SRC_TARGET_DIR)/product/virtual_ab_ota.mk)
+
+PRODUCT_VIRTUAL_AB_OTA_RETROFIT := true
+
+PRODUCT_PRODUCT_PROPERTIES += ro.virtual_ab.retrofit=true
diff --git a/tools/auto_gen_test_config_test.py b/tools/auto_gen_test_config_test.py
index e68c27f..51a8583 100644
--- a/tools/auto_gen_test_config_test.py
+++ b/tools/auto_gen_test_config_test.py
@@ -34,7 +34,7 @@
<manifest xmlns:android="http://schemas.android.com/apk/res/android"
package="com.android.my.tests.x">
<instrumentation
- android:name="android.support.test.runner.AndroidJUnitRunner"
+ android:name="androidx.test.runner.AndroidJUnitRunner"
android:targetPackage="com.android.my.tests" />
</manifest>
"""
@@ -72,7 +72,7 @@
<test class="com.android.tradefed.testtype.AndroidJUnitTest" >
<option name="package" value="com.android.my.tests.x" />
- <option name="runner" value="android.support.test.runner.AndroidJUnitRunner" />
+ <option name="runner" value="androidx.test.runner.AndroidJUnitRunner" />
</test>
</configuration>
"""
diff --git a/tools/check_builds.sh b/tools/check_builds.sh
deleted file mode 100644
index 7e4ea7c..0000000
--- a/tools/check_builds.sh
+++ /dev/null
@@ -1,92 +0,0 @@
-# Copyright (C) 2009 The Android Open Source Project
-#
-# Licensed under the Apache License, Version 2.0 (the 'License');
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an 'AS IS' BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-#
-# Usage:
-#
-# Source this file into your environment. Then:
-#
-# $ golden_builds sdk-sdk generic-eng generic-userdebug dream-eng
-#
-# will build a set of combos. This might take a while. Then you can
-# go make changes, and run:
-#
-# $ check_builds sdk-sdk generic-eng generic-userdebug dream-eng
-#
-# Go get dinner, and when you get back, there will be a file
-# test-builds/sizes.html that has a pretty chart of which files are
-# in which tree, and how big they are. In that chart, cells for files
-# that are missing are red, and rows where the file sizes are not all
-# the same will be blue.
-#
-
-TEST_BUILD_DIR=test-builds
-
-function do_builds
-{
- PREFIX=$1
- shift
- while [ -n "$1" ]
- do
- rm -rf $TEST_BUILD_DIR/$PREFIX-$1
- make PRODUCT-$(echo $1 | sed "s/-.*//" )-installclean
- make -j16 PRODUCT-$1 dist DIST_DIR=$TEST_BUILD_DIR/$PREFIX-$1
- if [ $? -ne 0 ] ; then
- echo FAILED
- return
- fi
- shift
- done
-}
-
-function golden_builds
-{
- rm -rf $TEST_BUILD_DIR/golden-* $TEST_BUILD_DIR/dist-*
- do_builds golden "$@"
-}
-
-function compare_builds
-{
- local inputs=
- while [ -n "$1" ]
- do
- inputs="$inputs $TEST_BUILD_DIR/golden-$1/installed-files.txt"
- inputs="$inputs $TEST_BUILD_DIR/dist-$1/installed-files.txt"
- shift
- done
- build/make/tools/compare_fileslist.py $inputs > $TEST_BUILD_DIR/sizes.html
-}
-
-function check_builds
-{
- rm -rf $TEST_BUILD_DIR/dist-*
- do_builds dist "$@"
- compare_builds "$@"
-}
-
-function diff_builds
-{
- local inputs=
- while [ -n "$1" ]
- do
- diff $TEST_BUILD_DIR/golden-$1/installed-files.txt $TEST_BUILD_DIR/dist-$1/installed-files.txt &> /dev/null
- if [ $? != 0 ]; then
- echo =========== $1 ===========
- diff $TEST_BUILD_DIR/golden-$1/installed-files.txt $TEST_BUILD_DIR/dist-$1/installed-files.txt
- fi
- shift
- done
- build/make/tools/compare_fileslist.py $inputs > $TEST_BUILD_DIR/sizes.html
-}
-
diff --git a/tools/check_link_type.py b/tools/check_link_type.py
deleted file mode 100755
index 40754ad..0000000
--- a/tools/check_link_type.py
+++ /dev/null
@@ -1,80 +0,0 @@
-#!/usr/bin/env python
-#
-# Copyright (C) 2016 The Android Open Source Project
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-"""Utility to verify modules link against acceptable module types"""
-
-from __future__ import print_function
-import argparse
-import os
-import sys
-
-WARNING_MSG = ('\033[1m%(makefile)s: \033[35mwarning:\033[0m\033[1m '
- '%(module)s (%(type)s) should not link to %(dep_name)s (%(dep_type)s)'
- '\033[0m')
-ERROR_MSG = ('\033[1m%(makefile)s: \033[31merror:\033[0m\033[1m '
- '%(module)s (%(type)s) should not link to %(dep_name)s (%(dep_type)s)'
- '\033[0m')
-
-def parse_args():
- """Parse commandline arguments."""
- parser = argparse.ArgumentParser(description='Check link types')
- parser.add_argument('--makefile', help='Makefile defining module')
- parser.add_argument('--module', help='The module being checked')
- parser.add_argument('--type', help='The link type of module')
- parser.add_argument('--allowed', help='Allow deps to use these types',
- action='append', default=[], metavar='TYPE')
- parser.add_argument('--warn', help='Warn if deps use these types',
- action='append', default=[], metavar='TYPE')
- parser.add_argument('deps', help='The dependencies to check',
- metavar='DEP', nargs='*')
- return parser.parse_args()
-
-def print_msg(msg, args, dep_name, dep_type):
- """Print a warning or error message"""
- print(msg % {
- "makefile": args.makefile,
- "module": args.module,
- "type": args.type,
- "dep_name": dep_name,
- "dep_type": dep_type}, file=sys.stderr)
-
-def main():
- """Program entry point."""
- args = parse_args()
-
- failed = False
- for dep in args.deps:
- dep_name = os.path.basename(os.path.dirname(dep))
- if dep_name.endswith('_intermediates'):
- dep_name = dep_name[:len(dep_name)-len('_intermediates')]
-
- with open(dep, 'r') as dep_file:
- dep_types = dep_file.read().strip().split(' ')
-
- for dep_type in dep_types:
- if dep_type in args.allowed:
- continue
- if dep_type in args.warn:
- print_msg(WARNING_MSG, args, dep_name, dep_type)
- else:
- print_msg(ERROR_MSG, args, dep_name, dep_type)
- failed = True
-
- if failed:
- sys.exit(1)
-
-if __name__ == '__main__':
- main()
diff --git a/tools/dump-package-stats b/tools/dump-package-stats
deleted file mode 100755
index 7814368..0000000
--- a/tools/dump-package-stats
+++ /dev/null
@@ -1,142 +0,0 @@
-#!/bin/bash
-#
-# Copyright (C) 2007 The Android Open Source Project
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-#
-
-PROGNAME=`basename $0`
-
-function fail ()
-{
- if [ ! -z "$@" ]
- then
- echo "$PROGNAME: ERROR: $@" >&2
- fi
- echo "$PROGNAME: ERROR: failed." >&2
- exit 1
-}
-
-function usage ()
-{
- cat << HERE
-usage: $PROGNAME <.jar/.apk-file-list>
- Dumps a summary of the compressed and uncompressed sizes of various
- types of files in each package. Emits one line per package.
- Packages must be zipfiles, readable using "unzip".
-
- Example output line:
-
- filesize=642684 all=603288/919304 dex=119529/353815 name="out/App.apk"
-
- filesize: the size of the package on disk
- name: the name of the package as passed to $PROGNAME
-
- These fields are presented as <uncompressed bytes>/<compressed bytes>:
-
- all: the sum of all entries in the package
- dex: the sum of all "*.dex" entries in the package
-HERE
- exit 1
-}
-
-if [ $# -lt 1 ]
-then
- usage
-fi
-
-UNAME=`uname`
-if [ "x$UNAME" = "xDarwin" ]
-then
- statArgs="-f %z"
-elif [ "x$UNAME" = "xLinux" ]
-then
- statArgs="-c %s"
-else
- fail "Unknown uname $UNAME"
-fi
-
-function printFileSize ()
-{
- stat $statArgs $1
-}
-
-for file
-do
- if [ ! -f "$file" ]
- then
- fail "$file doesn't exist or isn't a file"
- fi
- unzip -lvq "$file" | awk '
- BEGIN {
- total_compressed = 0;
- total_uncompressed = 0;
- dex_compressed = 0;
- dex_uncompressed = 0;
- }
-
- # Make sure the output of unzip -lv looks like something we expect.
- #
- NR == "1" {
- if (NF != "8" ||
- $1 != "Length" ||
- $2 != "Method" ||
- $3 != "Size" ||
- ($4 != "Ratio" && $4 != "Cmpr") ||
- $5 != "Date" ||
- $6 != "Time" ||
- $7 != "CRC-32" ||
- $8 != "Name")
- {
- print "'$PROGNAME': ERROR: Unexpected zip listing format" > \
- "/dev/stderr";
- print "'$PROGNAME': ERROR: Line 2 is \"" $0 "\"" > \
- "/dev/stderr";
- failed = 1;
- exit 1;
- } else {
- saw_listing = 1;
- }
- }
-
- # Only look for lines where the ratio is the fourth column;
- # this filters out the header and footer.
- #
- $4 ~ /%$/ {
- uncompressed = $1;
- compressed = $3;
- if ($0 ~ /.dex$/) {
- dex_compressed += compressed;
- dex_uncompressed += uncompressed;
- }
- total_compressed += compressed;
- total_uncompressed += uncompressed;
- }
- { next }
-
- END {
- if (!failed && saw_listing) {
- print "filesize='$(printFileSize "$file")'",
- "all=" total_compressed "/" total_uncompressed,
- "dex=" dex_compressed "/" dex_uncompressed,
- "name=\"'"$file"'\"";
- } else {
- exit 1;
- }
- }
- '
- if [ $? -ne 0 ]
- then
- fail "Could not get stats for $file"
- fi
-done
diff --git a/tools/extract_kernel.py b/tools/extract_kernel.py
index 16ccb22..42561cf 100755
--- a/tools/extract_kernel.py
+++ b/tools/extract_kernel.py
@@ -47,7 +47,10 @@
null_idx = input_bytes.find('\x00', start_idx)
if null_idx < 0:
return None
- linux_banner = input_bytes[start_idx:null_idx].decode()
+ try:
+ linux_banner = input_bytes[start_idx:null_idx].decode()
+ except UnicodeDecodeError:
+ return None
mo = re.match(LINUX_BANNER_REGEX, linux_banner)
if mo:
return mo.group(1)
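The try/except added above guards against banner candidates that are not valid text: the bytes between a "Linux version " marker and the next NUL come from an arbitrary binary image, so decode() can raise UnicodeDecodeError. A standalone sketch of the guarded behaviour (the example byte strings are made up):

```python
#!/usr/bin/env python3
"""Why the try/except above is needed: the scanned span is arbitrary binary
data and is not guaranteed to be valid text.  Standalone illustration only."""


def banner_or_none(input_bytes, start_idx):
    null_idx = input_bytes.find(b'\x00', start_idx)
    if null_idx < 0:
        return None
    try:
        return input_bytes[start_idx:null_idx].decode()
    except UnicodeDecodeError:
        return None


good = b'Linux version 4.19.81 (gcc)\x00'
bad = b'Linux version \xff\xfe garbage\x00'   # invalid UTF-8 in the banner span
print(banner_or_none(good, 0))   # -> 'Linux version 4.19.81 (gcc)'
print(banner_or_none(bad, 0))    # -> None instead of UnicodeDecodeError
```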
diff --git a/tools/fs_config/Android.bp b/tools/fs_config/Android.bp
index d9a48d7..8c69417 100644
--- a/tools/fs_config/Android.bp
+++ b/tools/fs_config/Android.bp
@@ -20,7 +20,7 @@
"soong-genrule",
],
srcs: [
- "fs_config.go"
+ "fs_config.go",
],
pluginFor: ["soong_build"],
}
@@ -56,13 +56,13 @@
export_generated_headers: ["oemaids_header_gen"],
}
-// Generate the vendor/etc/passwd text file for the target
-// This file may be empty if no AIDs are defined in
+// Generate the */etc/passwd text files for the target
+// These files may be empty if no AIDs are defined in
// TARGET_FS_CONFIG_GEN files.
genrule {
- name: "passwd_gen",
+ name: "passwd_gen_system",
tool_files: ["fs_config_generator.py"],
- cmd: "$(location fs_config_generator.py) passwd --required-prefix=vendor_ --aid-header=$(location :android_filesystem_config_header) $(locations :target_fs_config_gen) >$(out)",
+ cmd: "$(location fs_config_generator.py) passwd --partition=system --aid-header=$(location :android_filesystem_config_header) $(locations :target_fs_config_gen) >$(out)",
srcs: [
":target_fs_config_gen",
":android_filesystem_config_header",
@@ -71,18 +71,90 @@
}
prebuilt_etc {
- name: "passwd",
- vendor: true,
- src: ":passwd_gen",
+ name: "passwd_system",
+ filename: "passwd",
+ src: ":passwd_gen_system",
}
-// Generate the vendor/etc/group text file for the target
-// This file may be empty if no AIDs are defined in
+genrule {
+ name: "passwd_gen_vendor",
+ tool_files: ["fs_config_generator.py"],
+ cmd: "$(location fs_config_generator.py) passwd --partition=vendor --aid-header=$(location :android_filesystem_config_header) $(locations :target_fs_config_gen) >$(out)",
+ srcs: [
+ ":target_fs_config_gen",
+ ":android_filesystem_config_header",
+ ],
+ out: ["passwd"],
+}
+
+prebuilt_etc {
+ name: "passwd_vendor",
+ filename: "passwd",
+ vendor: true,
+ src: ":passwd_gen_vendor",
+}
+
+genrule {
+ name: "passwd_gen_odm",
+ tool_files: ["fs_config_generator.py"],
+ cmd: "$(location fs_config_generator.py) passwd --partition=odm --aid-header=$(location :android_filesystem_config_header) $(locations :target_fs_config_gen) >$(out)",
+ srcs: [
+ ":target_fs_config_gen",
+ ":android_filesystem_config_header",
+ ],
+ out: ["passwd"],
+}
+
+prebuilt_etc {
+ name: "passwd_odm",
+ filename: "passwd",
+ device_specific: true,
+ src: ":passwd_gen_odm",
+}
+
+genrule {
+ name: "passwd_gen_product",
+ tool_files: ["fs_config_generator.py"],
+ cmd: "$(location fs_config_generator.py) passwd --partition=product --aid-header=$(location :android_filesystem_config_header) $(locations :target_fs_config_gen) >$(out)",
+ srcs: [
+ ":target_fs_config_gen",
+ ":android_filesystem_config_header",
+ ],
+ out: ["passwd"],
+}
+
+prebuilt_etc {
+ name: "passwd_product",
+ filename: "passwd",
+ product_specific: true,
+ src: ":passwd_gen_product",
+}
+
+genrule {
+ name: "passwd_gen_system_ext",
+ tool_files: ["fs_config_generator.py"],
+ cmd: "$(location fs_config_generator.py) passwd --partition=system_ext --aid-header=$(location :android_filesystem_config_header) $(locations :target_fs_config_gen) >$(out)",
+ srcs: [
+ ":target_fs_config_gen",
+ ":android_filesystem_config_header",
+ ],
+ out: ["passwd"],
+}
+
+prebuilt_etc {
+ name: "passwd_system_ext",
+ filename: "passwd",
+ system_ext_specific: true,
+ src: ":passwd_gen_system_ext",
+}
+
+// Generate the */etc/group text files for the target
+// These files may be empty if no AIDs are defined in
// TARGET_FS_CONFIG_GEN files.
genrule {
- name: "group_gen",
+ name: "group_gen_system",
tool_files: ["fs_config_generator.py"],
- cmd: "$(location fs_config_generator.py) group --required-prefix=vendor_ --aid-header=$(location :android_filesystem_config_header) $(locations :target_fs_config_gen) >$(out)",
+ cmd: "$(location fs_config_generator.py) group --partition=system --aid-header=$(location :android_filesystem_config_header) $(locations :target_fs_config_gen) >$(out)",
srcs: [
":target_fs_config_gen",
":android_filesystem_config_header",
@@ -91,7 +163,79 @@
}
prebuilt_etc {
- name: "group",
+ name: "group_system",
+ filename: "group",
+ src: ":group_gen_system",
+}
+
+genrule {
+ name: "group_gen_vendor",
+ tool_files: ["fs_config_generator.py"],
+ cmd: "$(location fs_config_generator.py) group --partition=vendor --aid-header=$(location :android_filesystem_config_header) $(locations :target_fs_config_gen) >$(out)",
+ srcs: [
+ ":target_fs_config_gen",
+ ":android_filesystem_config_header",
+ ],
+ out: ["group"],
+}
+
+prebuilt_etc {
+ name: "group_vendor",
+ filename: "group",
vendor: true,
- src: ":group_gen",
+ src: ":group_gen_vendor",
+}
+
+genrule {
+ name: "group_gen_odm",
+ tool_files: ["fs_config_generator.py"],
+ cmd: "$(location fs_config_generator.py) group --partition=odm --aid-header=$(location :android_filesystem_config_header) $(locations :target_fs_config_gen) >$(out)",
+ srcs: [
+ ":target_fs_config_gen",
+ ":android_filesystem_config_header",
+ ],
+ out: ["group"],
+}
+
+prebuilt_etc {
+ name: "group_odm",
+ filename: "group",
+ device_specific: true,
+ src: ":group_gen_odm",
+}
+
+genrule {
+ name: "group_gen_product",
+ tool_files: ["fs_config_generator.py"],
+ cmd: "$(location fs_config_generator.py) group --partition=product --aid-header=$(location :android_filesystem_config_header) $(locations :target_fs_config_gen) >$(out)",
+ srcs: [
+ ":target_fs_config_gen",
+ ":android_filesystem_config_header",
+ ],
+ out: ["group"],
+}
+
+prebuilt_etc {
+ name: "group_product",
+ filename: "group",
+ product_specific: true,
+ src: ":group_gen_product",
+}
+
+genrule {
+ name: "group_gen_system_ext",
+ tool_files: ["fs_config_generator.py"],
+ cmd: "$(location fs_config_generator.py) group --partition=system_ext --aid-header=$(location :android_filesystem_config_header) $(locations :target_fs_config_gen) >$(out)",
+ srcs: [
+ ":target_fs_config_gen",
+ ":android_filesystem_config_header",
+ ],
+ out: ["group"],
+}
+
+prebuilt_etc {
+ name: "group_system_ext",
+ filename: "group",
+ system_ext_specific: true,
+ src: ":group_gen_system_ext",
}
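The passwd/group rules added above repeat one genrule + prebuilt_etc pair per partition, differing only in the --partition flag and the install-location attribute. The Python sketch below just prints stanzas following that pattern to make the repetition explicit; it is an illustration, not part of the build:

```python
#!/usr/bin/env python3
"""Prints genrule + prebuilt_etc pairs following the pattern added above.
Illustration only; the output formatting is approximate."""

INSTALL_ATTR = {
    'system': None,
    'vendor': 'vendor: true,',
    'odm': 'device_specific: true,',
    'product': 'product_specific: true,',
    'system_ext': 'system_ext_specific: true,',
}


def stanzas(kind, partition):
    attr = INSTALL_ATTR[partition]
    yield 'genrule {'
    yield '    name: "%s_gen_%s",' % (kind, partition)
    yield '    tool_files: ["fs_config_generator.py"],'
    yield ('    cmd: "$(location fs_config_generator.py) %s --partition=%s '
           '--aid-header=$(location :android_filesystem_config_header) '
           '$(locations :target_fs_config_gen) >$(out)",' % (kind, partition))
    yield '    srcs: [":target_fs_config_gen", ":android_filesystem_config_header"],'
    yield '    out: ["%s"],' % kind
    yield '}'
    yield 'prebuilt_etc {'
    yield '    name: "%s_%s",' % (kind, partition)
    yield '    filename: "%s",' % kind
    if attr:
        yield '    ' + attr
    yield '    src: ":%s_gen_%s",' % (kind, partition)
    yield '}'


for kind in ('passwd', 'group'):
    for partition in INSTALL_ATTR:
        print('\n'.join(stanzas(kind, partition)))
```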
diff --git a/tools/fs_config/Android.mk b/tools/fs_config/Android.mk
index af0da46..64fabe6 100644
--- a/tools/fs_config/Android.mk
+++ b/tools/fs_config/Android.mk
@@ -27,13 +27,13 @@
system_android_filesystem_config := system/core/include/private/android_filesystem_config.h
system_capability_header := bionic/libc/kernel/uapi/linux/capability.h
-# List of supported vendor, oem, odm, product and product_services Partitions
+# List of supported vendor, oem, odm, product and system_ext partitions
fs_config_generate_extra_partition_list := $(strip \
$(if $(BOARD_USES_VENDORIMAGE)$(BOARD_VENDORIMAGE_FILE_SYSTEM_TYPE),vendor) \
$(if $(BOARD_USES_OEMIMAGE)$(BOARD_OEMIMAGE_FILE_SYSTEM_TYPE),oem) \
$(if $(BOARD_USES_ODMIMAGE)$(BOARD_ODMIMAGE_FILE_SYSTEM_TYPE),odm) \
$(if $(BOARD_PRODUCTIMAGE_FILE_SYSTEM_TYPE),product) \
- $(if $(BOARD_PRODUCT_SERVICESIMAGE_FILE_SYSTEM_TYPE),product_services) \
+ $(if $(BOARD_SYSTEM_EXTIMAGE_FILE_SYSTEM_TYPE),system_ext) \
)
##################################
@@ -332,17 +332,17 @@
$(or $(PRIVATE_TARGET_FS_CONFIG_GEN),/dev/null)
endif
-ifneq ($(filter product_services,$(fs_config_generate_extra_partition_list)),)
+ifneq ($(filter system_ext,$(fs_config_generate_extra_partition_list)),)
##################################
-# Generate the product_services/etc/fs_config_dirs binary file for the target
-# Add fs_config_dirs or fs_config_dirs_product_services to PRODUCT_PACKAGES in
+# Generate the system_ext/etc/fs_config_dirs binary file for the target
+# Add fs_config_dirs or fs_config_dirs_system_ext to PRODUCT_PACKAGES in
# the device make file to enable
include $(CLEAR_VARS)
-LOCAL_MODULE := fs_config_dirs_product_services
+LOCAL_MODULE := fs_config_dirs_system_ext
LOCAL_MODULE_CLASS := ETC
LOCAL_INSTALLED_MODULE_STEM := fs_config_dirs
-LOCAL_MODULE_PATH := $(TARGET_OUT_PRODUCT_SERVICES)/etc
+LOCAL_MODULE_PATH := $(TARGET_OUT_SYSTEM_EXT)/etc
include $(BUILD_SYSTEM)/base_rules.mk
$(LOCAL_BUILT_MODULE): PRIVATE_ANDROID_FS_HDR := $(system_android_filesystem_config)
$(LOCAL_BUILT_MODULE): PRIVATE_ANDROID_CAP_HDR := $(system_capability_header)
@@ -352,21 +352,21 @@
$< fsconfig \
--aid-header $(PRIVATE_ANDROID_FS_HDR) \
--capability-header $(PRIVATE_ANDROID_CAP_HDR) \
- --partition product_services \
+ --partition system_ext \
--dirs \
--out_file $@ \
$(or $(PRIVATE_TARGET_FS_CONFIG_GEN),/dev/null)
##################################
-# Generate the product_services/etc/fs_config_files binary file for the target
-# Add fs_config_files of fs_config_files_product_services to PRODUCT_PACKAGES in
+# Generate the system_ext/etc/fs_config_files binary file for the target
+# Add fs_config_files or fs_config_files_system_ext to PRODUCT_PACKAGES in
# the device make file to enable
include $(CLEAR_VARS)
-LOCAL_MODULE := fs_config_files_product_services
+LOCAL_MODULE := fs_config_files_system_ext
LOCAL_MODULE_CLASS := ETC
LOCAL_INSTALLED_MODULE_STEM := fs_config_files
-LOCAL_MODULE_PATH := $(TARGET_OUT_PRODUCT_SERVICES)/etc
+LOCAL_MODULE_PATH := $(TARGET_OUT_SYSTEM_EXT)/etc
include $(BUILD_SYSTEM)/base_rules.mk
$(LOCAL_BUILT_MODULE): PRIVATE_ANDROID_FS_HDR := $(system_android_filesystem_config)
$(LOCAL_BUILT_MODULE): PRIVATE_ANDROID_CAP_HDR := $(system_capability_header)
@@ -376,7 +376,7 @@
$< fsconfig \
--aid-header $(PRIVATE_ANDROID_FS_HDR) \
--capability-header $(PRIVATE_ANDROID_CAP_HDR) \
- --partition product_services \
+ --partition system_ext \
--files \
--out_file $@ \
$(or $(PRIVATE_TARGET_FS_CONFIG_GEN),/dev/null)
diff --git a/tools/fs_config/fs_config_generator.py b/tools/fs_config/fs_config_generator.py
index 4400466..1405fd3 100755
--- a/tools/fs_config/fs_config_generator.py
+++ b/tools/fs_config/fs_config_generator.py
@@ -312,13 +312,12 @@
re.compile(r'%sUSER' % AID.PREFIX)
]
_AID_DEFINE = re.compile(r'\s*#define\s+%s.*' % AID.PREFIX)
- _OEM_START_KW = 'START'
- _OEM_END_KW = 'END'
- _OEM_RANGE = re.compile('%sOEM_RESERVED_[0-9]*_{0,1}(%s|%s)' %
- (AID.PREFIX, _OEM_START_KW, _OEM_END_KW))
+ _RESERVED_RANGE = re.compile(
+ r'#define AID_(.+)_RESERVED_\d*_*(START|END)\s+(\d+)')
+
# AID lines cannot end with _START or _END, ie AID_FOO is OK
# but AID_FOO_START is skiped. Note that AID_FOOSTART is NOT skipped.
- _AID_SKIP_RANGE = ['_' + _OEM_START_KW, '_' + _OEM_END_KW]
+ _AID_SKIP_RANGE = ['_START', '_END']
_COLLISION_OK = ['AID_APP', 'AID_APP_START', 'AID_USER', 'AID_USER_OFFSET']
def __init__(self, aid_header):
@@ -330,7 +329,7 @@
self._aid_header = aid_header
self._aid_name_to_value = {}
self._aid_value_to_name = {}
- self._oem_ranges = {}
+ self._ranges = {}
with open(aid_header) as open_file:
self._parse(open_file)
@@ -355,6 +354,23 @@
return 'Error "{}" in file: "{}" on line: {}'.format(
msg, self._aid_header, str(lineno))
+ range_match = self._RESERVED_RANGE.match(line)
+ if range_match:
+ partition = range_match.group(1).lower()
+ value = int(range_match.group(3), 0)
+
+ if partition == 'oem':
+ partition = 'vendor'
+
+ if partition in self._ranges:
+ if isinstance(self._ranges[partition][-1], int):
+ self._ranges[partition][-1] = (
+ self._ranges[partition][-1], value)
+ else:
+ self._ranges[partition].append(value)
+ else:
+ self._ranges[partition] = [value]
+
if AIDHeaderParser._AID_DEFINE.match(line):
chunks = line.split()
identifier = chunks[1]
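Taken together, the new _RESERVED_RANGE regex and the block above turn every AID_<PARTITION>_RESERVED_*_START/END pair in the header into a (start, end) tuple in a per-partition dictionary, with legacy oem ranges folded into vendor. A standalone sketch of that parsing, using hypothetical #define lines:

```python
#!/usr/bin/env python3
"""Standalone sketch of the reserved-range parsing above; the #define lines
are hypothetical examples, not the real header contents."""
import re

RESERVED_RANGE = re.compile(
    r'#define AID_(.+)_RESERVED_\d*_*(START|END)\s+(\d+)')

ranges = {}
header = [
    '#define AID_OEM_RESERVED_START 2900',
    '#define AID_OEM_RESERVED_END 2999',
    '#define AID_SYSTEM_EXT_RESERVED_2_START 6100',
    '#define AID_SYSTEM_EXT_RESERVED_2_END 6199',
]
for line in header:
    m = RESERVED_RANGE.match(line)
    if not m:
        continue
    partition = m.group(1).lower()
    value = int(m.group(3), 0)
    if partition == 'oem':          # legacy OEM ranges count as vendor
        partition = 'vendor'
    if partition in ranges:
        if isinstance(ranges[partition][-1], int):
            # Pair the pending START with this END.
            ranges[partition][-1] = (ranges[partition][-1], value)
        else:
            ranges[partition].append(value)
    else:
        ranges[partition] = [value]

print(ranges)  # {'vendor': [(2900, 2999)], 'system_ext': [(6100, 6199)]}
```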
@@ -366,9 +382,7 @@
continue
try:
- if AIDHeaderParser._is_oem_range(identifier):
- self._handle_oem_range(identifier, value)
- elif not any(
+ if not any(
identifier.endswith(x)
for x in AIDHeaderParser._AID_SKIP_RANGE):
self._handle_aid(identifier, value)
@@ -404,67 +418,6 @@
self._aid_name_to_value[aid.friendly] = aid
self._aid_value_to_name[value] = aid.friendly
- def _handle_oem_range(self, identifier, value):
- """Handle an OEM range C #define.
-
- When encountering special AID defines, notably for the OEM ranges
- this method handles sanity checking and adding them to the internal
- maps. For internal use only.
-
- Args:
- identifier (str): The name of the #define identifier.
- ie AID_OEM_RESERVED_START/END.
- value (str): The value associated with the identifier.
-
- Raises:
- ValueError: With message set to indicate the error.
- """
-
- try:
- int_value = int(value, 0)
- except ValueError:
- raise ValueError(
- 'Could not convert "%s" to integer value, got: "%s"' %
- (identifier, value))
-
- # convert AID_OEM_RESERVED_START or AID_OEM_RESERVED_<num>_START
- # to AID_OEM_RESERVED or AID_OEM_RESERVED_<num>
- is_start = identifier.endswith(AIDHeaderParser._OEM_START_KW)
-
- if is_start:
- tostrip = len(AIDHeaderParser._OEM_START_KW)
- else:
- tostrip = len(AIDHeaderParser._OEM_END_KW)
-
- # ending _
- tostrip = tostrip + 1
-
- strip = identifier[:-tostrip]
- if strip not in self._oem_ranges:
- self._oem_ranges[strip] = []
-
- if len(self._oem_ranges[strip]) > 2:
- raise ValueError('Too many same OEM Ranges "%s"' % identifier)
-
- if len(self._oem_ranges[strip]) == 1:
- tmp = self._oem_ranges[strip][0]
-
- if tmp == int_value:
- raise ValueError('START and END values equal %u' % int_value)
- elif is_start and tmp < int_value:
- raise ValueError(
- 'END value %u less than START value %u' % (tmp, int_value))
- elif not is_start and tmp > int_value:
- raise ValueError(
- 'END value %u less than START value %u' % (int_value, tmp))
-
- # Add START values to the head of the list and END values at the end.
- # Thus, the list is ordered with index 0 as START and index 1 as END.
- if is_start:
- self._oem_ranges[strip].insert(0, int_value)
- else:
- self._oem_ranges[strip].append(int_value)
-
def _process_and_check(self):
"""Process, check and populate internal data structures.
@@ -475,36 +428,32 @@
ValueError: With the message set to indicate the specific error.
"""
- # tuplefy the lists since range() does not like them mutable.
- self._oem_ranges = [
- AIDHeaderParser._convert_lst_to_tup(k, v)
- for k, v in self._oem_ranges.iteritems()
- ]
-
# Check for overlapping ranges
- for i, range1 in enumerate(self._oem_ranges):
- for range2 in self._oem_ranges[i + 1:]:
- if AIDHeaderParser._is_overlap(range1, range2):
- raise ValueError("Overlapping OEM Ranges found %s and %s" %
- (str(range1), str(range2)))
+ for ranges in self._ranges.values():
+ for i, range1 in enumerate(ranges):
+ for range2 in ranges[i + 1:]:
+ if AIDHeaderParser._is_overlap(range1, range2):
+ raise ValueError(
+ "Overlapping OEM Ranges found %s and %s" %
+ (str(range1), str(range2)))
# No core AIDs should be within any oem range.
for aid in self._aid_value_to_name:
-
- if Utils.in_any_range(aid, self._oem_ranges):
- name = self._aid_value_to_name[aid]
- raise ValueError(
- 'AID "%s" value: %u within reserved OEM Range: "%s"' %
- (name, aid, str(self._oem_ranges)))
+ for ranges in self._ranges.values():
+ if Utils.in_any_range(aid, ranges):
+ name = self._aid_value_to_name[aid]
+ raise ValueError(
+ 'AID "%s" value: %u within reserved OEM Range: "%s"' %
+ (name, aid, str(ranges)))
@property
- def oem_ranges(self):
+ def ranges(self):
"""Retrieves the OEM closed ranges as a list of tuples.
Returns:
A list of closed range tuples: [ (0, 42), (50, 105) ... ]
"""
- return self._oem_ranges
+ return self._ranges
@property
def aids(self):
@@ -516,39 +465,6 @@
return self._aid_name_to_value.values()
@staticmethod
- def _convert_lst_to_tup(name, lst):
- """Converts a mutable list to a non-mutable tuple.
-
- Used ONLY for ranges and thus enforces a length of 2.
-
- Args:
- lst (List): list that should be "tuplefied".
-
- Raises:
- ValueError if lst is not a list or len is not 2.
-
- Returns:
- Tuple(lst)
- """
- if not lst or len(lst) != 2:
- raise ValueError('Mismatched range for "%s"' % name)
-
- return tuple(lst)
-
- @staticmethod
- def _is_oem_range(aid):
- """Detects if a given aid is within the reserved OEM range.
-
- Args:
- aid (int): The aid to test
-
- Returns:
- True if it is within the range, False otherwise.
- """
-
- return AIDHeaderParser._OEM_RANGE.match(aid)
-
- @staticmethod
def _is_overlap(range_a, range_b):
"""Calculates the overlap of two range tuples.
@@ -588,12 +504,12 @@
_SECTIONS = [('_handle_aid', ('value', )),
('_handle_path', ('mode', 'user', 'group', 'caps'))]
- def __init__(self, config_files, oem_ranges):
+ def __init__(self, config_files, ranges):
"""
Args:
config_files ([str]): The list of config.fs files to parse.
Note the filename is not important.
- oem_ranges ([(),()]): range tuples indicating reserved OEM ranges.
+          ranges ({str: [()]}): Dictionary mapping each partition to a list of (start, end) tuples for its reserved ranges.
"""
self._files = []
@@ -604,7 +520,7 @@
# (name to file, value to aid)
self._seen_aids = ({}, {})
- self._oem_ranges = oem_ranges
+ self._ranges = ranges
self._config_files = config_files
@@ -669,6 +585,27 @@
# within the generated file.
self._aids.sort(key=lambda item: item.normalized_value)
+ def _verify_valid_range(self, aid):
+        """Verifies that an AID entry is in a valid reserved range."""
+
+ ranges = None
+
+ partitions = self._ranges.keys()
+ partitions.sort(key=len, reverse=True)
+ for partition in partitions:
+ if aid.friendly.startswith(partition):
+ ranges = self._ranges[partition]
+ break
+
+ if ranges is None:
+ sys.exit('AID "%s" must be prefixed with a partition name' %
+ aid.friendly)
+
+ if not Utils.in_any_range(int(aid.value, 0), ranges):
+ emsg = '"value" for aid "%s" not in valid range %s, got: %s'
+ emsg = emsg % (aid.friendly, str(ranges), aid.value)
+ sys.exit(emsg)
+
def _handle_aid(self, file_name, section_name, config):
"""Verifies an AID entry and adds it to the aid list.
@@ -702,15 +639,11 @@
sys.exit(error_message('Found specified but unset "value"'))
try:
- aid = AID(section_name, value, file_name, '/vendor/bin/sh')
+ aid = AID(section_name, value, file_name, '/bin/sh')
except ValueError as exception:
sys.exit(error_message(exception))
- # Values must be within OEM range
- if not Utils.in_any_range(int(aid.value, 0), self._oem_ranges):
- emsg = '"value" not in valid range %s, got: %s'
- emsg = emsg % (str(self._oem_ranges), value)
- sys.exit(error_message(emsg))
+ self._verify_valid_range(aid)
# use the normalized int value in the dict and detect
# duplicate definitions of the same value
@@ -1000,7 +933,7 @@
args['capability_header'])
self._base_parser = AIDHeaderParser(args['aid_header'])
self._oem_parser = FSConfigFileParser(args['fsconfig'],
- self._base_parser.oem_ranges)
+ self._base_parser.ranges)
self._partition = args['partition']
self._all_partitions = args['all_partitions']
@@ -1104,7 +1037,7 @@
caps_split = caps.split(',')
for cap in caps_split:
if cap not in caps_dict:
- sys.exit('Unkonwn cap "%s" found!' % cap)
+ sys.exit('Unknown cap "%s" found!' % cap)
caps_value += 1 << caps_dict[cap]
path_length_with_null = len(path) + 1
@@ -1133,6 +1066,8 @@
path = fs_config.path
if self._partition == 'system':
+ if not self._all_partitions:
+ return True
for skip_partition in self._all_partitions.split(','):
if path.startswith(skip_partition) or path.startswith(
'system/' + skip_partition):
@@ -1265,7 +1200,7 @@
hdr_parser = AIDHeaderParser(args['aid_header'])
- parser = FSConfigFileParser(args['fsconfig'], hdr_parser.oem_ranges)
+ parser = FSConfigFileParser(args['fsconfig'], hdr_parser.ranges)
print OEMAidGen._GENERATED
@@ -1313,17 +1248,19 @@
'to parse AIDs and OEM Ranges from')
opt_group.add_argument(
- '--required-prefix',
- required=False,
- help='A prefix that the names are required to contain.')
+ '--partition',
+ required=True,
+ help=
+ 'Filter the input file and only output entries for the given partition.'
+ )
def __call__(self, args):
hdr_parser = AIDHeaderParser(args['aid_header'])
- parser = FSConfigFileParser(args['fsconfig'], hdr_parser.oem_ranges)
+ parser = FSConfigFileParser(args['fsconfig'], hdr_parser.ranges)
- required_prefix = args['required_prefix']
+ filter_partition = args['partition']
aids = parser.aids
@@ -1331,13 +1268,22 @@
if not aids:
return
+ aids_by_partition = {}
+ partitions = hdr_parser.ranges.keys()
+ partitions.sort(key=len, reverse=True)
+
for aid in aids:
- if required_prefix is None or aid.friendly.startswith(
- required_prefix):
+ for partition in partitions:
+ if aid.friendly.startswith(partition):
+ if partition in aids_by_partition:
+ aids_by_partition[partition].append(aid)
+ else:
+ aids_by_partition[partition] = [aid]
+ break
+
+ if filter_partition in aids_by_partition:
+ for aid in aids_by_partition[filter_partition]:
self._print_formatted_line(aid)
- else:
- sys.exit("%s: AID '%s' must start with '%s'" %
- (args['fsconfig'], aid.friendly, required_prefix))
def _print_formatted_line(self, aid):
"""Prints the aid to stdout in the passwd format. Internal use only.
diff --git a/tools/generate-self-extracting-archive.py b/tools/generate-self-extracting-archive.py
new file mode 100755
index 0000000..f0b7568
--- /dev/null
+++ b/tools/generate-self-extracting-archive.py
@@ -0,0 +1,165 @@
+#!/usr/bin/env python
+#
+# Copyright (C) 2019 The Android Open Source Project
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""
+Generates a self extracting archive with a license click through.
+
+Usage:
+ generate-self-extracting-archive.py $OUTPUT_FILE $INPUT_ARCHIVE $COMMENT $LICENSE_FILE
+
+ The comment will be included at the beginning of the output archive file.
+
+Output:
+ The output of the script is a single executable file that when run will
+  display the provided license and, if the user accepts, extract the wrapped
+ archive.
+
+ The layout of the output file is roughly:
+ * Executable shell script that extracts the archive
+ * Actual archive contents
+ * Zip file containing the license
+"""
+
+import tempfile
+import sys
+import os
+import zipfile
+
+_HEADER_TEMPLATE = """#!/bin/sh
+#
+{comment_line}
+#
+# Usage is subject to the enclosed license agreement
+
+echo
+echo The license for this software will now be displayed.
+echo You must agree to this license before using this software.
+echo
+echo -n Press Enter to view the license
+read dummy
+echo
+more << EndOfLicense
+{license}
+EndOfLicense
+
+if test $? != 0
+then
+ echo "ERROR: Couldn't display license file" 1>&2
+ exit 1
+fi
+echo
+echo -n 'Type "I ACCEPT" if you agree to the terms of the license: '
+read typed
+if test "$typed" != "I ACCEPT"
+then
+ echo
+ echo "You didn't accept the license. Extraction aborted."
+ exit 2
+fi
+echo
+{extract_command}
+if test $? != 0
+then
+ echo
+ echo "ERROR: Couldn't extract files." 1>&2
+ exit 3
+else
+ echo
+ echo "Files extracted successfully."
+fi
+exit 0
+"""
+
+_PIPE_CHUNK_SIZE = 1048576
+def _pipe_bytes(src, dst):
+ while True:
+ b = src.read(_PIPE_CHUNK_SIZE)
+ if not b:
+ break
+ dst.write(b)
+
+_MAX_OFFSET_WIDTH = 8
+def _generate_extract_command(start, end, extract_name):
+ """Generate the extract command.
+
+ The length of this string must be constant no matter what the start and end
+ offsets are so that its length can be computed before the actual command is
+ generated.
+
+ Args:
+ start: offset in bytes of the start of the wrapped file
+ end: offset in bytes of the end of the wrapped file
+    extract_name: name of the file to create when extracted
+
+ """
+ # start gets an extra character for the '+'
+ # for tail +1 is the start of the file, not +0
+ start_str = ('+%d' % (start + 1)).rjust(_MAX_OFFSET_WIDTH + 1)
+ if len(start_str) != _MAX_OFFSET_WIDTH + 1:
+ raise Exception('Start offset too large (%d)' % start)
+
+ end_str = ('%d' % end).rjust(_MAX_OFFSET_WIDTH)
+ if len(end_str) != _MAX_OFFSET_WIDTH:
+ raise Exception('End offset too large (%d)' % end)
+
+ return "tail -c %s $0 | head -c %s > %s\n" % (start_str, end_str, extract_name)
+
+
+def main(argv):
+ output_filename = argv[1]
+ input_archive_filename = argv[2]
+ comment = argv[3]
+ license_filename = argv[4]
+
+ input_archive_size = os.stat(input_archive_filename).st_size
+
+ with open(license_filename, 'r') as license_file:
+ license = license_file.read()
+
+ comment_line = '# %s\n' % comment
+ extract_name = os.path.basename(input_archive_filename)
+
+ # Compute the size of the header before writing the file out. This is required
+ # so that the extract command, which uses the contents offset, can be created
+ # and included inside the header.
+ header_for_size = _HEADER_TEMPLATE.format(
+ comment_line=comment_line,
+ license=license,
+ extract_command=_generate_extract_command(0, 0, extract_name),
+ )
+ header_size = len(header_for_size.encode('utf-8'))
+
+ # write the final output
+ with open(output_filename, 'wb') as output:
+ output.write(_HEADER_TEMPLATE.format(
+ comment_line=comment_line,
+ license=license,
+ extract_command=_generate_extract_command(header_size, input_archive_size, extract_name),
+ ).encode('utf-8'))
+
+ with open(input_archive_filename, 'rb') as input_file:
+ _pipe_bytes(input_file, output)
+
+ with tempfile.TemporaryFile() as trailing_zip:
+ with zipfile.ZipFile(trailing_zip, 'w') as myzip:
+ myzip.writestr('license.txt', license, compress_type=zipfile.ZIP_STORED)
+
+ # append the trailing zip to the end of the file
+ trailing_zip.seek(0)
+ _pipe_bytes(trailing_zip, output)
+
+if __name__ == "__main__":
+ main(sys.argv)
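
The new generate-self-extracting-archive.py has a chicken-and-egg problem: the shell header must state the byte offset of the payload, yet that offset depends on the header's own length. The script sidesteps it by right-justifying the offsets to a fixed width, so the extract command, and therefore the header, has a constant length that can be measured once with dummy offsets. A small standalone sketch of that invariant, mirroring `_generate_extract_command` above:

```python
# The offsets are padded to a constant width, so the command string has the
# same length no matter which start/end values are eventually substituted.
_MAX_OFFSET_WIDTH = 8

def extract_command(start, end, name):
    # tail is 1-based, so the first byte of the payload is at start + 1
    start_str = ('+%d' % (start + 1)).rjust(_MAX_OFFSET_WIDTH + 1)
    end_str = ('%d' % end).rjust(_MAX_OFFSET_WIDTH)
    return 'tail -c %s $0 | head -c %s > %s\n' % (start_str, end_str, name)

placeholder = extract_command(0, 0, 'payload.zip')
real = extract_command(123456, 7890123, 'payload.zip')
assert len(placeholder) == len(real)  # header size is stable, so it can be
                                      # computed first with dummy offsets
```
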
diff --git a/tools/makeparallel/.gitignore b/tools/makeparallel/.gitignore
deleted file mode 100644
index a7d6181..0000000
--- a/tools/makeparallel/.gitignore
+++ /dev/null
@@ -1,4 +0,0 @@
-makeparallel
-*.o
-*.d
-test.out
diff --git a/tools/makeparallel/Android.bp b/tools/makeparallel/Android.bp
deleted file mode 100644
index 898db68..0000000
--- a/tools/makeparallel/Android.bp
+++ /dev/null
@@ -1,21 +0,0 @@
-// Copyright 2016 Google Inc. All rights reserved
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-cc_binary_host {
- name: "makeparallel",
- srcs: [
- "makeparallel.cpp",
- ],
- cflags: ["-Wall", "-Werror"],
-}
diff --git a/tools/makeparallel/Makefile b/tools/makeparallel/Makefile
deleted file mode 100644
index 82a4abf..0000000
--- a/tools/makeparallel/Makefile
+++ /dev/null
@@ -1,93 +0,0 @@
-# Copyright 2015 Google Inc. All rights reserved
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-# Find source file location from path to this Makefile
-MAKEPARALLEL_SRC_PATH := $(patsubst %/,%,$(dir $(lastword $(MAKEFILE_LIST))))
-ifndef MAKEPARALLEL_SRC_PATH
- MAKEPARALLEL_SRC_PATH := .
-endif
-
-# Set defaults if they weren't set by the including Makefile
-MAKEPARALLEL_CXX ?= $(CXX)
-MAKEPARALLEL_LD ?= $(CXX)
-MAKEPARALLEL_INTERMEDIATES_PATH ?= .
-MAKEPARALLEL_BIN_PATH ?= .
-
-MAKEPARALLEL_CXX_SRCS := \
- makeparallel.cpp
-
-MAKEPARALLEL_CXXFLAGS := -Wall -Werror -MMD -MP
-
-MAKEPARALLEL_CXX_SRCS := $(addprefix $(MAKEPARALLEL_SRC_PATH)/,\
- $(MAKEPARALLEL_CXX_SRCS))
-
-MAKEPARALLEL_CXX_OBJS := $(patsubst $(MAKEPARALLEL_SRC_PATH)/%.cpp,$(MAKEPARALLEL_INTERMEDIATES_PATH)/%.o,$(MAKEPARALLEL_CXX_SRCS))
-
-MAKEPARALLEL := $(MAKEPARALLEL_BIN_PATH)/makeparallel
-
-ifeq ($(shell uname),Linux)
-MAKEPARALLEL_LIBS := -lrt -lpthread
-endif
-
-# Rule to build makeparallel into MAKEPARALLEL_BIN_PATH
-$(MAKEPARALLEL): $(MAKEPARALLEL_CXX_OBJS)
- @mkdir -p $(dir $@)
- $(MAKEPARALLEL_LD) -std=c++11 $(MAKEPARALLEL_CXXFLAGS) -o $@ $^ $(MAKEPARALLEL_LIBS)
-
-# Rule to build source files into object files in MAKEPARALLEL_INTERMEDIATES_PATH
-$(MAKEPARALLEL_CXX_OBJS): $(MAKEPARALLEL_INTERMEDIATES_PATH)/%.o: $(MAKEPARALLEL_SRC_PATH)/%.cpp
- @mkdir -p $(dir $@)
- $(MAKEPARALLEL_CXX) -c -std=c++11 $(MAKEPARALLEL_CXXFLAGS) -o $@ $<
-
-makeparallel_clean:
- rm -rf $(MAKEPARALLEL)
- rm -rf $(MAKEPARALLEL_INTERMEDIATES_PATH)/*.o
- rm -rf $(MAKEPARALLEL_INTERMEDIATES_PATH)/*.d
-
-.PHONY: makeparallel_clean
-
--include $(MAKEPARALLEL_INTERMEDIATES_PATH)/*.d
-
-.PHONY: makeparallel_test
-MAKEPARALLEL_TEST := MAKEFLAGS= MAKELEVEL= MAKEPARALLEL=$(MAKEPARALLEL) $(MAKE) -f Makefile.test test
-MAKEPARALLEL_NINJA_TEST := MAKEFLAGS= MAKELEVEL= MAKEPARALLEL="$(MAKEPARALLEL) --ninja" $(MAKE) -f Makefile.test test
-makeparallel_test: $(MAKEPARALLEL)
- @EXPECTED="-j1234" $(MAKEPARALLEL_TEST) -j1234
- @EXPECTED="-j123" $(MAKEPARALLEL_TEST) -j123
- @EXPECTED="" $(MAKEPARALLEL_TEST) -j1
- @EXPECTED="-j$$(($$(nproc) + 2))" $(MAKEPARALLEL_TEST) -j
- @EXPECTED="" $(MAKEPARALLEL_TEST)
-
- @EXPECTED="-j1234" $(MAKEPARALLEL_NINJA_TEST) -j1234
- @EXPECTED="-j123" $(MAKEPARALLEL_NINJA_TEST) -j123
- @EXPECTED="-j1" $(MAKEPARALLEL_NINJA_TEST) -j1
- @EXPECTED="-j1" $(MAKEPARALLEL_NINJA_TEST)
- @EXPECTED="" $(MAKEPARALLEL_NINJA_TEST) -j
- @EXPECTED="" $(MAKEPARALLEL_NINJA_TEST) -j -l
-
- @EXPECTED="-j1234" $(MAKEPARALLEL_TEST) --no-print-directory -j1234
- @EXPECTED="-j1234" $(MAKEPARALLEL_TEST) --no-print-directory -k -j1234
- @EXPECTED="-j1234" $(MAKEPARALLEL_TEST) -k -j1234
- @EXPECTED="-j1234" $(MAKEPARALLEL_TEST) -j1234 -k
- @EXPECTED="-j1234" $(MAKEPARALLEL_TEST) -kt -j1234
-
- @EXPECTED="-j1234" $(MAKEPARALLEL_NINJA_TEST) --no-print-directory -j1234
- @EXPECTED="-j1234 -k0" $(MAKEPARALLEL_NINJA_TEST) --no-print-directory -k -j1234
- @EXPECTED="-j1234 -k0" $(MAKEPARALLEL_NINJA_TEST) -k -j1234
- @EXPECTED="-j1234 -k0" $(MAKEPARALLEL_NINJA_TEST) -j1234 -k
- @EXPECTED="-j1234 -k0" $(MAKEPARALLEL_NINJA_TEST) -kt -j1234
-
- @EXPECTED="" $(MAKEPARALLEL_TEST) A=-j1234
-
- @EXPECTED="-j1234 args" ARGS="args" $(MAKEPARALLEL_TEST) -j1234
diff --git a/tools/makeparallel/Makefile.test b/tools/makeparallel/Makefile.test
deleted file mode 100644
index cf53684..0000000
--- a/tools/makeparallel/Makefile.test
+++ /dev/null
@@ -1,12 +0,0 @@
-MAKEPARALLEL ?= ./makeparallel
-
-.PHONY: test
-test:
- @+echo MAKEFLAGS=$${MAKEFLAGS}; \
- result=$$($(MAKEPARALLEL) echo $(ARGS)); \
- echo result: $${result}; \
- if [ "$${result}" = "$(EXPECTED)" ]; then \
- echo SUCCESS && echo; \
- else \
- echo FAILED expected $(EXPECTED) && false; \
- fi
diff --git a/tools/makeparallel/README.md b/tools/makeparallel/README.md
deleted file mode 100644
index 2e5fbf9..0000000
--- a/tools/makeparallel/README.md
+++ /dev/null
@@ -1,54 +0,0 @@
-<!---
-Copyright (C) 2015 The Android Open Source Project
-
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
- http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
--->
-
-makeparallel
-============
-makeparallel communicates with the [GNU make jobserver](http://make.mad-scientist.net/papers/jobserver-implementation/)
-in order claim all available jobs, and then passes the number of jobs
-claimed to a subprocess with `-j<jobs>`.
-
-The number of available jobs is determined by reading tokens from the jobserver
-until a read would block. If the makeparallel rule is the only one running the
-number of jobs will be the total size of the jobserver pool, i.e. the value
-passed to make with `-j`. Any jobs running in parallel with with the
-makeparellel rule will reduce the measured value, and thus reduce the
-parallelism available to the subprocess.
-
-To run a multi-thread or multi-process binary inside GNU make using
-makeparallel, add
-```Makefile
- +makeparallel subprocess arguments
-```
-to a rule. For example, to wrap ninja in make, use something like:
-```Makefile
- +makeparallel ninja -f build.ninja
-```
-
-To determine the size of the jobserver pool, add
-```Makefile
- +makeparallel echo > make.jobs
-```
-to a rule that is guarantee to run alone (i.e. all other rules are either
-dependencies of the makeparallel rule, or the depend on the makeparallel
-rule. The output file will contain the `-j<num>` flag passed to the parent
-make process, or `-j1` if no flag was found. Since GNU make will run
-makeparallel during the execution phase, after all variables have been
-set and evaluated, it is not possible to get the output of makeparallel
-into a make variable. Instead, use a shell substitution to read the output
-file directly in a recipe. For example:
-```Makefile
- echo Make was started with $$(cat make.jobs)
-```
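
The deleted README describes what makeparallel did: drain the GNU make jobserver pipe to count the available job tokens, pass `-j<count + 1>` to the subprocess, then return the tokens to the pool. For illustration only, here is a minimal Python sketch of that token-counting idea; it ignores the race the C++ implementation guarded against with its SIGALRM-based read timeout, and it is not part of the build system:

```python
import os
import select

def count_jobserver_tokens(in_fd):
    """Drain the jobserver pipe without blocking and return the tokens read.

    The caller implicitly holds one more token (its own job slot) and must
    write every token read here back to the pipe when it is done.
    """
    tokens = []
    while True:
        readable, _, _ = select.select([in_fd], [], [], 0)  # poll, don't block
        if not readable:
            break                       # no more tokens available right now
        token = os.read(in_fd, 1)
        if not token:
            break                       # EOF on the jobserver pipe
        tokens.append(token)
    return tokens

def put_jobserver_tokens(out_fd, tokens):
    for token in tokens:
        os.write(out_fd, token)         # return every claimed token to the pool
```
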
diff --git a/tools/makeparallel/makeparallel.cpp b/tools/makeparallel/makeparallel.cpp
deleted file mode 100644
index 66babdf..0000000
--- a/tools/makeparallel/makeparallel.cpp
+++ /dev/null
@@ -1,421 +0,0 @@
-// Copyright (C) 2015 The Android Open Source Project
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-// makeparallel communicates with the GNU make jobserver
-// (http://make.mad-scientist.net/papers/jobserver-implementation/)
-// in order claim all available jobs, and then passes the number of jobs
-// claimed to a subprocess with -j<jobs>.
-
-#include <errno.h>
-#include <fcntl.h>
-#include <getopt.h>
-#include <poll.h>
-#include <signal.h>
-#include <stdio.h>
-#include <stdlib.h>
-#include <string.h>
-#include <unistd.h>
-#include <sys/resource.h>
-#include <sys/time.h>
-#include <sys/types.h>
-#include <sys/wait.h>
-
-#include <string>
-#include <vector>
-
-#ifdef __linux__
-#include <error.h>
-#endif
-
-#ifdef __APPLE__
-#include <err.h>
-#define error(code, eval, fmt, ...) errc(eval, code, fmt, ##__VA_ARGS__)
-// Darwin does not interrupt syscalls by default.
-#define TEMP_FAILURE_RETRY(exp) (exp)
-#endif
-
-// Throw an error if fd is not valid.
-static void CheckFd(int fd) {
- int ret = fcntl(fd, F_GETFD);
- if (ret < 0) {
- if (errno == EBADF) {
- error(errno, 0, "no jobserver pipe, prefix recipe command with '+'");
- } else {
- error(errno, errno, "fnctl failed");
- }
- }
-}
-
-// Extract flags from MAKEFLAGS that need to be propagated to subproccess
-static std::vector<std::string> ReadMakeflags() {
- std::vector<std::string> args;
-
- const char* makeflags_env = getenv("MAKEFLAGS");
- if (makeflags_env == nullptr) {
- return args;
- }
-
- // The MAKEFLAGS format is pretty useless. The first argument might be empty
- // (starts with a leading space), or it might be a set of one-character flags
- // merged together with no leading space, or it might be a variable
- // definition.
-
- std::string makeflags = makeflags_env;
-
- // Split makeflags into individual args on spaces. Multiple spaces are
- // elided, but an initial space will result in a blank arg.
- size_t base = 0;
- size_t found;
- do {
- found = makeflags.find_first_of(" ", base);
- args.push_back(makeflags.substr(base, found - base));
- base = found + 1;
- } while (found != makeflags.npos);
-
- // Drop the first argument if it is empty
- while (args.size() > 0 && args[0].size() == 0) {
- args.erase(args.begin());
- }
-
- // Prepend a - to the first argument if it does not have one and is not a
- // variable definition
- if (args.size() > 0 && args[0][0] != '-') {
- if (args[0].find('=') == makeflags.npos) {
- args[0] = '-' + args[0];
- }
- }
-
- return args;
-}
-
-static bool ParseMakeflags(std::vector<std::string>& args,
- int* in_fd, int* out_fd, bool* parallel, bool* keep_going) {
-
- std::vector<char*> getopt_argv;
- // getopt starts reading at argv[1]
- getopt_argv.reserve(args.size() + 1);
- getopt_argv.push_back(strdup(""));
- for (std::string& v : args) {
- getopt_argv.push_back(strdup(v.c_str()));
- }
-
- opterr = 0;
- optind = 1;
- while (1) {
- const static option longopts[] = {
- {"jobserver-fds", required_argument, 0, 0},
- {0, 0, 0, 0},
- };
- int longopt_index = 0;
-
- int c = getopt_long(getopt_argv.size(), getopt_argv.data(), "kj",
- longopts, &longopt_index);
-
- if (c == -1) {
- break;
- }
-
- switch (c) {
- case 0:
- switch (longopt_index) {
- case 0:
- {
- // jobserver-fds
- if (sscanf(optarg, "%d,%d", in_fd, out_fd) != 2) {
- error(EXIT_FAILURE, 0, "incorrect format for --jobserver-fds: %s", optarg);
- }
- // TODO: propagate in_fd, out_fd
- break;
- }
- default:
- abort();
- }
- break;
- case 'j':
- *parallel = true;
- break;
- case 'k':
- *keep_going = true;
- break;
- case '?':
- // ignore unknown arguments
- break;
- default:
- abort();
- }
- }
-
- for (char *v : getopt_argv) {
- free(v);
- }
-
- return true;
-}
-
-// Read a single byte from fd, with timeout in milliseconds. Returns true if
-// a byte was read, false on timeout. Throws away the read value.
-// Non-reentrant, uses timer and signal handler global state, plus static
-// variable to communicate with signal handler.
-//
-// Uses a SIGALRM timer to fire a signal after timeout_ms that will interrupt
-// the read syscall if it hasn't yet completed. If the timer fires before the
-// read the read could block forever, so read from a dup'd fd and close it from
-// the signal handler, which will cause the read to return EBADF if it occurs
-// after the signal.
-// The dup/read/close combo is very similar to the system described to avoid
-// a deadlock between SIGCHLD and read at
-// http://make.mad-scientist.net/papers/jobserver-implementation/
-static bool ReadByteTimeout(int fd, int timeout_ms) {
- // global variable to communicate with the signal handler
- static int dup_fd = -1;
-
- // dup the fd so the signal handler can close it without losing the real one
- dup_fd = dup(fd);
- if (dup_fd < 0) {
- error(errno, errno, "dup failed");
- }
-
- // set up a signal handler that closes dup_fd on SIGALRM
- struct sigaction action = {};
- action.sa_flags = SA_SIGINFO,
- action.sa_sigaction = [](int, siginfo_t*, void*) {
- close(dup_fd);
- };
- struct sigaction oldaction = {};
- int ret = sigaction(SIGALRM, &action, &oldaction);
- if (ret < 0) {
- error(errno, errno, "sigaction failed");
- }
-
- // queue a SIGALRM after timeout_ms
- const struct itimerval timeout = {{}, {0, timeout_ms * 1000}};
- ret = setitimer(ITIMER_REAL, &timeout, NULL);
- if (ret < 0) {
- error(errno, errno, "setitimer failed");
- }
-
- // start the blocking read
- char buf;
- int read_ret = read(dup_fd, &buf, 1);
- int read_errno = errno;
-
- // cancel the alarm in case it hasn't fired yet
- const struct itimerval cancel = {};
- ret = setitimer(ITIMER_REAL, &cancel, NULL);
- if (ret < 0) {
- error(errno, errno, "reset setitimer failed");
- }
-
- // remove the signal handler
- ret = sigaction(SIGALRM, &oldaction, NULL);
- if (ret < 0) {
- error(errno, errno, "reset sigaction failed");
- }
-
- // clean up the dup'd fd in case the signal never fired
- close(dup_fd);
- dup_fd = -1;
-
- if (read_ret == 0) {
- error(EXIT_FAILURE, 0, "EOF on jobserver pipe");
- } else if (read_ret > 0) {
- return true;
- } else if (read_errno == EINTR || read_errno == EBADF) {
- return false;
- } else {
- error(read_errno, read_errno, "read failed");
- }
- abort();
-}
-
-// Measure the size of the jobserver pool by reading from in_fd until it blocks
-static int GetJobserverTokens(int in_fd) {
- int tokens = 0;
- pollfd pollfds[] = {{in_fd, POLLIN, 0}};
- int ret;
- while ((ret = TEMP_FAILURE_RETRY(poll(pollfds, 1, 0))) != 0) {
- if (ret < 0) {
- error(errno, errno, "poll failed");
- } else if (pollfds[0].revents != POLLIN) {
- error(EXIT_FAILURE, 0, "unexpected event %d\n", pollfds[0].revents);
- }
-
- // There is probably a job token in the jobserver pipe. There is a chance
- // another process reads it first, which would cause a blocking read to
- // block forever (or until another process put a token back in the pipe).
- // The file descriptor can't be set to O_NONBLOCK as that would affect
- // all users of the pipe, including the parent make process.
- // ReadByteTimeout emulates a non-blocking read on a !O_NONBLOCK socket
- // using a SIGALRM that fires after a short timeout.
- bool got_token = ReadByteTimeout(in_fd, 10);
- if (!got_token) {
- // No more tokens
- break;
- } else {
- tokens++;
- }
- }
-
- // This process implicitly gets a token, so pool size is measured size + 1
- return tokens;
-}
-
-// Return tokens to the jobserver pool.
-static void PutJobserverTokens(int out_fd, int tokens) {
- // Return all the tokens to the pipe
- char buf = '+';
- for (int i = 0; i < tokens; i++) {
- int ret = TEMP_FAILURE_RETRY(write(out_fd, &buf, 1));
- if (ret < 0) {
- error(errno, errno, "write failed");
- } else if (ret == 0) {
- error(EXIT_FAILURE, 0, "EOF on jobserver pipe");
- }
- }
-}
-
-int main(int argc, char* argv[]) {
- int in_fd = -1;
- int out_fd = -1;
- bool parallel = false;
- bool keep_going = false;
- bool ninja = false;
- int tokens = 0;
-
- if (argc > 1 && strcmp(argv[1], "--ninja") == 0) {
- ninja = true;
- argv++;
- argc--;
- }
-
- if (argc < 2) {
- error(EXIT_FAILURE, 0, "expected command to run");
- }
-
- const char* path = argv[1];
- std::vector<char*> args({argv[1]});
-
- std::vector<std::string> makeflags = ReadMakeflags();
- if (ParseMakeflags(makeflags, &in_fd, &out_fd, ¶llel, &keep_going)) {
- if (in_fd >= 0 && out_fd >= 0) {
- CheckFd(in_fd);
- CheckFd(out_fd);
- fcntl(in_fd, F_SETFD, FD_CLOEXEC);
- fcntl(out_fd, F_SETFD, FD_CLOEXEC);
- tokens = GetJobserverTokens(in_fd);
- }
- }
-
- std::string jarg;
- if (parallel) {
- if (tokens == 0) {
- if (ninja) {
- // ninja is parallel by default
- jarg = "";
- } else {
- // make -j with no argument, guess a reasonable parallelism like ninja does
- jarg = "-j" + std::to_string(sysconf(_SC_NPROCESSORS_ONLN) + 2);
- }
- } else {
- jarg = "-j" + std::to_string(tokens + 1);
- }
- }
-
-
- if (ninja) {
- if (!parallel) {
- // ninja is parallel by default, pass -j1 to disable parallelism if make wasn't parallel
- args.push_back(strdup("-j1"));
- } else {
- if (jarg != "") {
- args.push_back(strdup(jarg.c_str()));
- }
- }
- if (keep_going) {
- args.push_back(strdup("-k0"));
- }
- } else {
- if (jarg != "") {
- args.push_back(strdup(jarg.c_str()));
- }
- }
-
- args.insert(args.end(), &argv[2], &argv[argc]);
-
- args.push_back(nullptr);
-
- static pid_t pid;
-
- // Set up signal handlers to forward SIGTERM to child.
- // Assume that all other signals are sent to the entire process group,
- // and that we'll wait for our child to exit instead of handling them.
- struct sigaction action = {};
- action.sa_flags = SA_RESTART;
- action.sa_handler = [](int signal) {
- if (signal == SIGTERM && pid > 0) {
- kill(pid, signal);
- }
- };
-
- int ret = 0;
- if (!ret) ret = sigaction(SIGHUP, &action, NULL);
- if (!ret) ret = sigaction(SIGINT, &action, NULL);
- if (!ret) ret = sigaction(SIGQUIT, &action, NULL);
- if (!ret) ret = sigaction(SIGTERM, &action, NULL);
- if (!ret) ret = sigaction(SIGALRM, &action, NULL);
- if (ret < 0) {
- error(errno, errno, "sigaction failed");
- }
-
- pid = fork();
- if (pid < 0) {
- error(errno, errno, "fork failed");
- } else if (pid == 0) {
- // child
- unsetenv("MAKEFLAGS");
- unsetenv("MAKELEVEL");
-
- // make 3.81 sets the stack ulimit to unlimited, which may cause problems
- // for child processes
- struct rlimit rlim{};
- if (getrlimit(RLIMIT_STACK, &rlim) == 0 && rlim.rlim_cur == RLIM_INFINITY) {
- rlim.rlim_cur = 8*1024*1024;
- setrlimit(RLIMIT_STACK, &rlim);
- }
-
- int ret = execvp(path, args.data());
- if (ret < 0) {
- error(errno, errno, "exec %s failed", path);
- }
- abort();
- }
-
- // parent
-
- siginfo_t status = {};
- int exit_status = 0;
- ret = waitid(P_PID, pid, &status, WEXITED);
- if (ret < 0) {
- error(errno, errno, "waitpid failed");
- } else if (status.si_code == CLD_EXITED) {
- exit_status = status.si_status;
- } else {
- exit_status = -(status.si_status);
- }
-
- if (tokens > 0) {
- PutJobserverTokens(out_fd, tokens);
- }
- exit(exit_status);
-}
diff --git a/tools/mktarball.sh b/tools/mktarball.sh
deleted file mode 100755
index ced7e17..0000000
--- a/tools/mktarball.sh
+++ /dev/null
@@ -1,59 +0,0 @@
-#!/bin/bash
-
-# $1: path to fs_get_stats program
-# $2: start dir
-# $3: subdir to tar up (from $2)
-# $4: target tar name
-# $5: target tarball name (usually $(3).bz2)
-# $6: TARGET_OUT path to query device specific FS configs
-
-if [ $# -ne 6 ]; then
- echo "Error: wrong number of arguments in cmd: $0 $* "
- exit 1
-fi
-
-fs_get_stats=`readlink -f $1`
-start_dir=`readlink -f $2`
-dir_to_tar=$3
-target_tar=`readlink -f $4`
-target_tarball=`readlink -f $5`
-target_out=`readlink -f $6`
-
-cd $2
-
-#tar --no-recursion -cvf ${target_tar} ${dir_to_tar}
-rm ${target_tar} > /dev/null 2>&1
-
-# do dirs first
-subdirs=`find ${dir_to_tar} -type d -print`
-files=`find ${dir_to_tar} \! -type d -print`
-for f in ${subdirs} ${files} ; do
- curr_perms=`stat -c 0%a $f`
- [ -d "$f" ] && is_dir=1 || is_dir=0
- new_info=`${fs_get_stats} ${curr_perms} ${is_dir} ${f} ${target_out}`
- new_uid=`echo ${new_info} | awk '{print $1;}'`
- new_gid=`echo ${new_info} | awk '{print $2;}'`
- new_perms=`echo ${new_info} | awk '{print $3;}'`
-# echo "$f: dir: $is_dir curr: $curr_perms uid: $new_uid gid: $new_gid "\
-# "perms: $new_perms"
- tar --no-recursion --numeric-owner --owner $new_uid \
- --group $new_gid --mode $new_perms -rf ${target_tar} ${f}
-done
-
-if [ $? -eq 0 ] ; then
- case "${target_tarball}" in
- *.bz2 )
- bzip2 -c ${target_tar} > ${target_tarball}
- ;;
- *.gz )
- gzip -c ${target_tar} > ${target_tarball}
- ;;
- esac
- success=$?
- [ $success -eq 0 ] || rm -f ${target_tarball}
- rm -f ${target_tar}
- exit $success
-fi
-
-rm -f ${target_tar}
-exit 1
diff --git a/tools/releasetools/Android.bp b/tools/releasetools/Android.bp
index 8cf3fab..6cde77e 100644
--- a/tools/releasetools/Android.bp
+++ b/tools/releasetools/Android.bp
@@ -12,44 +12,439 @@
// See the License for the specific language governing permissions and
// limitations under the License.
+//
+// Module-specific defaults.
+//
+// For module X, if we need to build it both as a library and an executable:
+// - A default rule `releasetools_X_defaults` is created, which lists `srcs`, `libs` and
+// `required` properties.
+// - `python_library_host` and `python_binary_host` are created by listing
+// `releasetools_X_defaults` in their defaults.
+//
+
python_defaults {
- name: "releasetools_test_defaults",
+ name: "releasetools_add_img_to_target_files_defaults",
+ srcs: [
+ "add_img_to_target_files.py",
+ ],
+ libs: [
+ "releasetools_build_image",
+ "releasetools_build_super_image",
+ "releasetools_common",
+ ],
+ required: [
+ "care_map_generator",
+ ],
+}
+
+python_defaults {
+ name: "releasetools_build_image_defaults",
+ srcs: [
+ "build_image.py",
+ ],
+ libs: [
+ "releasetools_common",
+ "releasetools_verity_utils",
+ ],
+ required: [
+ "blk_alloc_to_base_fs",
+ "e2fsck",
+ "simg2img",
+ "tune2fs",
+ ],
+}
+
+python_defaults {
+ name: "releasetools_build_super_image_defaults",
+ srcs: [
+ "build_super_image.py",
+ ],
+ libs: [
+ "releasetools_common",
+ ],
+}
+
+python_defaults {
+ name: "releasetools_img_from_target_files_defaults",
+ srcs: [
+ "img_from_target_files.py",
+ ],
+ libs: [
+ "releasetools_build_super_image",
+ "releasetools_common",
+ ],
+ required: [
+ "zip2zip",
+ ],
+}
+
+python_defaults {
+ name: "releasetools_check_target_files_vintf_defaults",
+ srcs: [
+ "check_target_files_vintf.py",
+ ],
+ libs: [
+ "releasetools_common",
+ ],
+ required: [
+ "checkvintf",
+ ],
+}
+
+python_defaults {
+ name: "releasetools_ota_from_target_files_defaults",
+ srcs: [
+ "edify_generator.py",
+ "ota_from_target_files.py",
+ "target_files_diff.py",
+ ],
+ libs: [
+ "releasetools_check_target_files_vintf",
+ "releasetools_common",
+ "releasetools_verity_utils",
+ ],
+ required: [
+ "brillo_update_payload",
+ "checkvintf",
+ ],
+}
+
+//
+// Host libraries.
+//
+
+python_defaults {
+ name: "releasetools_library_defaults",
version: {
py2: {
enabled: true,
- embedded_launcher: false,
},
py3: {
- enabled: false,
+ enabled: true,
},
},
}
python_library_host {
- name: "releasetools_lib",
- defaults: ["releasetools_test_defaults"],
+ name: "releasetools_add_img_to_target_files",
+ defaults: [
+ "releasetools_library_defaults",
+ "releasetools_add_img_to_target_files_defaults",
+ ],
+}
+
+python_library_host {
+ name: "releasetools_apex_utils",
+ defaults: ["releasetools_library_defaults"],
srcs: [
- "add_img_to_target_files.py",
"apex_utils.py",
+ ],
+ libs: [
+ "releasetools_common",
+ ],
+}
+
+python_library_host {
+ name: "releasetools_build_image",
+ defaults: [
+ "releasetools_library_defaults",
+ "releasetools_build_image_defaults",
+ ],
+}
+
+python_library_host {
+ name: "releasetools_build_super_image",
+ defaults: [
+ "releasetools_library_defaults",
+ "releasetools_build_super_image_defaults",
+ ],
+}
+
+python_library_host {
+ name: "releasetools_check_target_files_vintf",
+ defaults: [
+ "releasetools_library_defaults",
+ "releasetools_check_target_files_vintf_defaults",
+ ],
+}
+
+python_library_host {
+ name: "releasetools_common",
+ defaults: ["releasetools_library_defaults"],
+ srcs: [
"blockimgdiff.py",
- "build_image.py",
- "build_super_image.py",
- "check_ota_package_signature.py",
- "check_target_files_signatures.py",
"common.py",
- "edify_generator.py",
- "img_from_target_files.py",
+ "images.py",
+ "rangelib.py",
+ "sparse_img.py",
+ ],
+ // Only the tools that are referenced directly are listed as required modules. For example,
+ // `avbtool` is not here, as the script always uses the one from info_dict['avb_avbtool'].
+ required: [
+ "aapt2",
+ "boot_signer",
+ "brotli",
+ "bsdiff",
+ "imgdiff",
+ "minigzip",
+ "mkbootfs",
+ ],
+}
+
+python_library_host {
+ name: "releasetools_img_from_target_files",
+ defaults: [
+ "releasetools_library_defaults",
+ "releasetools_img_from_target_files_defaults",
+ ],
+}
+
+python_library_host {
+ name: "releasetools_ota_from_target_files",
+ defaults: [
+ "releasetools_library_defaults",
+ "releasetools_ota_from_target_files_defaults",
+ ],
+}
+
+python_library_host {
+ name: "releasetools_verity_utils",
+ defaults: ["releasetools_library_defaults"],
+ srcs: [
+ "verity_utils.py",
+ ],
+ required: [
+ "append2simg",
+ "build_verity_metadata",
+ "build_verity_tree",
+ "fec",
+ ],
+}
+
+//
+// Host binaries.
+//
+
+python_defaults {
+ name: "releasetools_binary_defaults",
+ version: {
+ py2: {
+ enabled: true,
+ embedded_launcher: true,
+ },
+ py3: {
+ enabled: false,
+ embedded_launcher: false,
+ },
+ },
+}
+
+python_binary_host {
+ name: "add_img_to_target_files",
+ defaults: [
+ "releasetools_binary_defaults",
+ "releasetools_add_img_to_target_files_defaults",
+ ],
+}
+
+python_binary_host {
+ name: "build_image",
+ defaults: [
+ "releasetools_binary_defaults",
+ "releasetools_build_image_defaults",
+ ],
+}
+
+python_binary_host {
+ name: "build_super_image",
+ defaults: [
+ "releasetools_binary_defaults",
+ "releasetools_build_super_image_defaults",
+ ],
+}
+
+python_binary_host {
+ name: "check_ota_package_signature",
+ defaults: ["releasetools_binary_defaults"],
+ srcs: [
+ "check_ota_package_signature.py",
+ ],
+ libs: [
+ "releasetools_common",
+ ],
+ required: [
+ "delta_generator",
+ ],
+}
+
+python_binary_host {
+ name: "check_target_files_signatures",
+ defaults: ["releasetools_binary_defaults"],
+ srcs: [
+ "check_target_files_signatures.py",
+ ],
+ libs: [
+ "releasetools_common",
+ ],
+ required: [
+ "aapt",
+ ],
+}
+
+python_binary_host {
+ name: "check_target_files_vintf",
+ defaults: [
+ "releasetools_binary_defaults",
+ "releasetools_check_target_files_vintf_defaults"
+ ],
+}
+
+python_binary_host {
+ name: "img_from_target_files",
+ defaults: [
+ "releasetools_binary_defaults",
+ "releasetools_img_from_target_files_defaults",
+ ],
+}
+
+python_binary_host {
+ name: "make_recovery_patch",
+ defaults: ["releasetools_binary_defaults"],
+ srcs: [
"make_recovery_patch.py",
+ ],
+ libs: [
+ "releasetools_common",
+ ],
+}
+
+python_binary_host {
+ name: "merge_builds",
+ defaults: ["releasetools_binary_defaults"],
+ srcs: [
+ "merge_builds.py",
+ ],
+ libs: [
+ "releasetools_build_super_image",
+ "releasetools_common",
+ ],
+}
+
+python_binary_host {
+ name: "merge_target_files",
+ defaults: ["releasetools_binary_defaults"],
+ srcs: [
"merge_target_files.py",
- "ota_from_target_files.py",
+ ],
+ libs: [
+ "releasetools_add_img_to_target_files",
+ "releasetools_build_super_image",
+ "releasetools_check_target_files_vintf",
+ "releasetools_common",
+ "releasetools_img_from_target_files",
+ "releasetools_ota_from_target_files",
+ ],
+ required: [
+ "checkvintf",
+ ],
+}
+
+python_binary_host {
+ name: "ota_from_target_files",
+ defaults: [
+ "releasetools_binary_defaults",
+ "releasetools_ota_from_target_files_defaults",
+ ],
+}
+
+python_binary_host {
+ name: "ota_package_parser",
+ defaults: ["releasetools_binary_defaults"],
+ srcs: [
"ota_package_parser.py",
"rangelib.py",
+ ],
+}
+
+python_binary_host {
+ name: "sparse_img",
+ defaults: ["releasetools_binary_defaults"],
+ srcs: [
+ "rangelib.py",
+ "sparse_img.py",
+ ],
+}
+
+python_binary_host {
+ name: "sign_apex",
+ defaults: ["releasetools_binary_defaults"],
+ srcs: [
+ "sign_apex.py",
+ ],
+ libs: [
+ "releasetools_apex_utils",
+ "releasetools_common",
+ ],
+}
+
+python_binary_host {
+ name: "sign_target_files_apks",
+ defaults: ["releasetools_binary_defaults"],
+ srcs: [
+ "sign_target_files_apks.py",
+ ],
+ libs: [
+ "releasetools_add_img_to_target_files",
+ "releasetools_apex_utils",
+ "releasetools_common",
+ ],
+}
+
+python_binary_host {
+ name: "validate_target_files",
+ defaults: ["releasetools_binary_defaults"],
+ srcs: [
+ "validate_target_files.py",
+ ],
+ libs: [
+ "releasetools_common",
+ ],
+}
+
+//
+// Tests.
+//
+
+python_defaults {
+ name: "releasetools_test_defaults",
+ srcs: [
+ "check_ota_package_signature.py",
+ "check_target_files_signatures.py",
+ "make_recovery_patch.py",
+ "merge_target_files.py",
+ "ota_package_parser.py",
"sign_apex.py",
"sign_target_files_apks.py",
- "sparse_img.py",
- "target_files_diff.py",
"validate_target_files.py",
- "verity_utils.py",
+
+ "test_*.py",
+ ],
+ libs: [
+ "releasetools_add_img_to_target_files",
+ "releasetools_apex_utils",
+ "releasetools_build_image",
+ "releasetools_build_super_image",
+ "releasetools_check_target_files_vintf",
+ "releasetools_common",
+ "releasetools_img_from_target_files",
+ "releasetools_ota_from_target_files",
+ "releasetools_verity_utils",
+ ],
+ data: [
+ "testdata/**/*",
+ ],
+ required: [
+ "otatools",
],
}
@@ -57,17 +452,35 @@
name: "releasetools_test",
defaults: ["releasetools_test_defaults"],
main: "test_utils.py",
- srcs: [
- "test_*.py",
- ],
- libs: [
- "releasetools_lib",
- ],
- data: [
- "testdata/*",
- ],
- required: [
- "otatools",
- ],
+ version: {
+ py2: {
+ enabled: true,
+ // When using embedded launcher, atest will try (but may fail) to load libc++.so from
+ // host, because the test executable won't be able to find the needed libs via its
+ // runpath.
+ embedded_launcher: false,
+ },
+ py3: {
+ enabled: false,
+ embedded_launcher: false,
+ },
+ },
+ test_suites: ["general-tests"],
+}
+
+python_test_host {
+ name: "releasetools_py3_test",
+ defaults: ["releasetools_test_defaults"],
+ main: "test_utils.py",
+ version: {
+ py2: {
+ enabled: false,
+ embedded_launcher: false,
+ },
+ py3: {
+ enabled: true,
+ embedded_launcher: false,
+ },
+ },
test_suites: ["general-tests"],
}
diff --git a/tools/releasetools/TEST_MAPPING b/tools/releasetools/TEST_MAPPING
index 77cef07..0af0f04 100644
--- a/tools/releasetools/TEST_MAPPING
+++ b/tools/releasetools/TEST_MAPPING
@@ -3,6 +3,10 @@
{
"name": "releasetools_test",
"host": true
+ },
+ {
+ "name": "releasetools_py3_test",
+ "host": true
}
]
}
diff --git a/tools/releasetools/add_img_to_target_files.py b/tools/releasetools/add_img_to_target_files.py
index 98386a6..23ae29f 100755
--- a/tools/releasetools/add_img_to_target_files.py
+++ b/tools/releasetools/add_img_to_target_files.py
@@ -222,20 +222,20 @@
return img.name
-def AddProductServices(output_zip):
-  """Turn the contents of PRODUCT_SERVICES into a product_services image and
-  store it in output_zip."""
+def AddSystemExt(output_zip):
+ """Turn the contents of SYSTEM_EXT into a system_ext image and store it in
+ output_zip."""
img = OutputFile(output_zip, OPTIONS.input_tmp, "IMAGES",
- "product_services.img")
+ "system_ext.img")
if os.path.exists(img.name):
- logger.info("product_services.img already exists; no need to rebuild...")
+ logger.info("system_ext.img already exists; no need to rebuild...")
return img.name
block_list = OutputFile(
- output_zip, OPTIONS.input_tmp, "IMAGES", "product_services.map")
+ output_zip, OPTIONS.input_tmp, "IMAGES", "system_ext.map")
CreateImage(
- OPTIONS.input_tmp, OPTIONS.info_dict, "product_services", img,
+ OPTIONS.input_tmp, OPTIONS.info_dict, "system_ext", img,
block_list=block_list)
return img.name
@@ -391,28 +391,6 @@
img.Write()
-def AppendVBMetaArgsForPartition(cmd, partition, image):
- """Appends the VBMeta arguments for partition.
-
- It sets up the VBMeta argument by including the partition descriptor from the
- given 'image', or by configuring the partition as a chained partition.
-
- Args:
- cmd: A list of command args that will be used to generate the vbmeta image.
- The argument for the partition will be appended to the list.
- partition: The name of the partition (e.g. "system").
- image: The path to the partition image.
- """
- # Check if chain partition is used.
- key_path = OPTIONS.info_dict.get("avb_" + partition + "_key_path")
- if key_path:
- chained_partition_arg = common.GetAvbChainedPartitionArg(
- partition, OPTIONS.info_dict)
- cmd.extend(["--chain_partition", chained_partition_arg])
- else:
- cmd.extend(["--include_descriptors_from_image", image])
-
-
def AddVBMeta(output_zip, partitions, name, needed_partitions):
"""Creates a VBMeta image and stores it in output_zip.
@@ -442,45 +420,7 @@
logger.info("%s.img already exists; not rebuilding...", name)
return img.name
- avbtool = OPTIONS.info_dict["avb_avbtool"]
- cmd = [avbtool, "make_vbmeta_image", "--output", img.name]
- common.AppendAVBSigningArgs(cmd, name)
-
- for partition, path in partitions.items():
- if partition not in needed_partitions:
- continue
- assert (partition in common.AVB_PARTITIONS or
- partition in common.AVB_VBMETA_PARTITIONS), \
- 'Unknown partition: {}'.format(partition)
- assert os.path.exists(path), \
- 'Failed to find {} for {}'.format(path, partition)
- AppendVBMetaArgsForPartition(cmd, partition, path)
-
- args = OPTIONS.info_dict.get("avb_{}_args".format(name))
- if args and args.strip():
- split_args = shlex.split(args)
- for index, arg in enumerate(split_args[:-1]):
- # Sanity check that the image file exists. Some images might be defined
- # as a path relative to source tree, which may not be available at the
- # same location when running this script (we have the input target_files
- # zip only). For such cases, we additionally scan other locations (e.g.
- # IMAGES/, RADIO/, etc) before bailing out.
- if arg == '--include_descriptors_from_image':
- image_path = split_args[index + 1]
- if os.path.exists(image_path):
- continue
- found = False
- for dir_name in ['IMAGES', 'RADIO', 'PREBUILT_IMAGES']:
- alt_path = os.path.join(
- OPTIONS.input_tmp, dir_name, os.path.basename(image_path))
- if os.path.exists(alt_path):
- split_args[index + 1] = alt_path
- found = True
- break
- assert found, 'Failed to find {}'.format(image_path)
- cmd.extend(split_args)
-
- common.RunAndCheckOutput(cmd)
+ common.BuildVBMeta(img.name, partitions, name, needed_partitions)
img.Write()
return img.name
@@ -734,10 +674,10 @@
has_recovery = OPTIONS.info_dict.get("no_recovery") != "true"
has_boot = OPTIONS.info_dict.get("no_boot") != "true"
- # {vendor,odm,product,product_services}.img are unlike system.img or
+ # {vendor,odm,product,system_ext}.img are unlike system.img or
# system_other.img. Because it could be built from source, or dropped into
# target_files.zip as a prebuilt blob. We consider either of them as
- # {vendor,product,product_services}.img being available, which could be
+ # {vendor,product,system_ext}.img being available, which could be
# used when generating vbmeta.img for AVB.
has_vendor = (os.path.isdir(os.path.join(OPTIONS.input_tmp, "VENDOR")) or
os.path.exists(os.path.join(OPTIONS.input_tmp, "IMAGES",
@@ -748,11 +688,11 @@
has_product = (os.path.isdir(os.path.join(OPTIONS.input_tmp, "PRODUCT")) or
os.path.exists(os.path.join(OPTIONS.input_tmp, "IMAGES",
"product.img")))
- has_product_services = (os.path.isdir(os.path.join(OPTIONS.input_tmp,
- "PRODUCT_SERVICES")) or
- os.path.exists(os.path.join(OPTIONS.input_tmp,
- "IMAGES",
- "product_services.img")))
+ has_system_ext = (os.path.isdir(os.path.join(OPTIONS.input_tmp,
+ "SYSTEM_EXT")) or
+ os.path.exists(os.path.join(OPTIONS.input_tmp,
+ "IMAGES",
+ "system_ext.img")))
has_system = os.path.isdir(os.path.join(OPTIONS.input_tmp, "SYSTEM"))
has_system_other = os.path.isdir(os.path.join(OPTIONS.input_tmp,
"SYSTEM_OTHER"))
@@ -810,11 +750,11 @@
banner("recovery (two-step image)")
# The special recovery.img for two-step package use.
recovery_two_step_image = common.GetBootableImage(
- "IMAGES/recovery-two-step.img", "recovery-two-step.img",
+ "OTA/recovery-two-step.img", "recovery-two-step.img",
OPTIONS.input_tmp, "RECOVERY", two_step_image=True)
assert recovery_two_step_image, "Failed to create recovery-two-step.img."
recovery_two_step_image_path = os.path.join(
- OPTIONS.input_tmp, "IMAGES", "recovery-two-step.img")
+ OPTIONS.input_tmp, "OTA", "recovery-two-step.img")
if not os.path.exists(recovery_two_step_image_path):
recovery_two_step_image.WriteToDir(OPTIONS.input_tmp)
if output_zip:
@@ -833,9 +773,9 @@
banner("product")
partitions['product'] = AddProduct(output_zip)
- if has_product_services:
- banner("product_services")
- partitions['product_services'] = AddProductServices(output_zip)
+ if has_system_ext:
+ banner("system_ext")
+ partitions['system_ext'] = AddSystemExt(output_zip)
if has_odm:
banner("odm")
diff --git a/tools/releasetools/apex_utils.py b/tools/releasetools/apex_utils.py
index 6eaa12f..18ad8ce 100644
--- a/tools/releasetools/apex_utils.py
+++ b/tools/releasetools/apex_utils.py
@@ -177,6 +177,7 @@
payload_dir = common.MakeTempDir(prefix='apex-payload-')
with zipfile.ZipFile(apex_file) as apex_fd:
payload_file = apex_fd.extract(APEX_PAYLOAD_IMAGE, payload_dir)
+ zip_items = apex_fd.namelist()
payload_info = ParseApexPayloadInfo(avbtool, payload_file)
SignApexPayload(
@@ -192,7 +193,8 @@
payload_public_key = common.ExtractAvbPublicKey(avbtool, payload_key)
common.ZipDelete(apex_file, APEX_PAYLOAD_IMAGE)
- common.ZipDelete(apex_file, APEX_PUBKEY)
+ if APEX_PUBKEY in zip_items:
+ common.ZipDelete(apex_file, APEX_PUBKEY)
apex_zip = zipfile.ZipFile(apex_file, 'a')
common.ZipWrite(apex_zip, payload_file, arcname=APEX_PAYLOAD_IMAGE)
common.ZipWrite(apex_zip, payload_public_key, arcname=APEX_PUBKEY)
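
The apex_utils.py fix above only deletes the pubkey entry from the container when it actually exists, since an APEX signed without an embedded pubkey would otherwise make `ZipDelete` fail. A tiny standalone sketch of the membership check (`common.ZipDelete` in the comment is the releasetools helper used above; the file and entry names are illustrative):

```python
import zipfile

def zip_has_entry(zip_path, arcname):
    """Return True if arcname is present in the zip's central directory."""
    with zipfile.ZipFile(zip_path) as zf:
        return arcname in zf.namelist()

# Guarded deletion, as in the hunk above:
# if zip_has_entry('foo.apex', 'apex_pubkey'):
#     common.ZipDelete('foo.apex', 'apex_pubkey')
```
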
diff --git a/tools/releasetools/blockimgdiff.py b/tools/releasetools/blockimgdiff.py
index ecb1d31..72f065d 100644
--- a/tools/releasetools/blockimgdiff.py
+++ b/tools/releasetools/blockimgdiff.py
@@ -28,12 +28,12 @@
import threading
import zlib
from collections import deque, namedtuple, OrderedDict
-from hashlib import sha1
import common
+from images import EmptyImage
from rangelib import RangeSet
-__all__ = ["EmptyImage", "DataImage", "BlockImageDiff"]
+__all__ = ["BlockImageDiff"]
logger = logging.getLogger(__name__)
@@ -60,209 +60,6 @@
return PatchInfo(imgdiff, f.read())
-class Image(object):
- def RangeSha1(self, ranges):
- raise NotImplementedError
-
- def ReadRangeSet(self, ranges):
- raise NotImplementedError
-
- def TotalSha1(self, include_clobbered_blocks=False):
- raise NotImplementedError
-
- def WriteRangeDataToFd(self, ranges, fd):
- raise NotImplementedError
-
-
-class EmptyImage(Image):
- """A zero-length image."""
-
- def __init__(self):
- self.blocksize = 4096
- self.care_map = RangeSet()
- self.clobbered_blocks = RangeSet()
- self.extended = RangeSet()
- self.total_blocks = 0
- self.file_map = {}
- self.hashtree_info = None
-
- def RangeSha1(self, ranges):
- return sha1().hexdigest()
-
- def ReadRangeSet(self, ranges):
- return ()
-
- def TotalSha1(self, include_clobbered_blocks=False):
- # EmptyImage always carries empty clobbered_blocks, so
- # include_clobbered_blocks can be ignored.
- assert self.clobbered_blocks.size() == 0
- return sha1().hexdigest()
-
- def WriteRangeDataToFd(self, ranges, fd):
- raise ValueError("Can't write data from EmptyImage to file")
-
-
-class DataImage(Image):
- """An image wrapped around a single string of data."""
-
- def __init__(self, data, trim=False, pad=False):
- self.data = data
- self.blocksize = 4096
-
- assert not (trim and pad)
-
- partial = len(self.data) % self.blocksize
- padded = False
- if partial > 0:
- if trim:
- self.data = self.data[:-partial]
- elif pad:
- self.data += '\0' * (self.blocksize - partial)
- padded = True
- else:
- raise ValueError(("data for DataImage must be multiple of %d bytes "
- "unless trim or pad is specified") %
- (self.blocksize,))
-
- assert len(self.data) % self.blocksize == 0
-
- self.total_blocks = len(self.data) // self.blocksize
- self.care_map = RangeSet(data=(0, self.total_blocks))
- # When the last block is padded, we always write the whole block even for
- # incremental OTAs. Because otherwise the last block may get skipped if
- # unchanged for an incremental, but would fail the post-install
- # verification if it has non-zero contents in the padding bytes.
- # Bug: 23828506
- if padded:
- clobbered_blocks = [self.total_blocks-1, self.total_blocks]
- else:
- clobbered_blocks = []
- self.clobbered_blocks = clobbered_blocks
- self.extended = RangeSet()
-
- zero_blocks = []
- nonzero_blocks = []
- reference = '\0' * self.blocksize
-
- for i in range(self.total_blocks-1 if padded else self.total_blocks):
- d = self.data[i*self.blocksize : (i+1)*self.blocksize]
- if d == reference:
- zero_blocks.append(i)
- zero_blocks.append(i+1)
- else:
- nonzero_blocks.append(i)
- nonzero_blocks.append(i+1)
-
- assert zero_blocks or nonzero_blocks or clobbered_blocks
-
- self.file_map = dict()
- if zero_blocks:
- self.file_map["__ZERO"] = RangeSet(data=zero_blocks)
- if nonzero_blocks:
- self.file_map["__NONZERO"] = RangeSet(data=nonzero_blocks)
- if clobbered_blocks:
- self.file_map["__COPY"] = RangeSet(data=clobbered_blocks)
-
- def _GetRangeData(self, ranges):
- for s, e in ranges:
- yield self.data[s*self.blocksize:e*self.blocksize]
-
- def RangeSha1(self, ranges):
- h = sha1()
- for data in self._GetRangeData(ranges): # pylint: disable=not-an-iterable
- h.update(data)
- return h.hexdigest()
-
- def ReadRangeSet(self, ranges):
- return list(self._GetRangeData(ranges))
-
- def TotalSha1(self, include_clobbered_blocks=False):
- if not include_clobbered_blocks:
- return self.RangeSha1(self.care_map.subtract(self.clobbered_blocks))
- return sha1(self.data).hexdigest()
-
- def WriteRangeDataToFd(self, ranges, fd):
- for data in self._GetRangeData(ranges): # pylint: disable=not-an-iterable
- fd.write(data)
-
-
-class FileImage(Image):
- """An image wrapped around a raw image file."""
-
- def __init__(self, path, hashtree_info_generator=None):
- self.path = path
- self.blocksize = 4096
- self._file_size = os.path.getsize(self.path)
- self._file = open(self.path, 'rb')
-
- if self._file_size % self.blocksize != 0:
- raise ValueError("Size of file %s must be multiple of %d bytes, but is %d"
- % self.path, self.blocksize, self._file_size)
-
- self.total_blocks = self._file_size // self.blocksize
- self.care_map = RangeSet(data=(0, self.total_blocks))
- self.clobbered_blocks = RangeSet()
- self.extended = RangeSet()
-
- self.generator_lock = threading.Lock()
-
- self.hashtree_info = None
- if hashtree_info_generator:
- self.hashtree_info = hashtree_info_generator.Generate(self)
-
- zero_blocks = []
- nonzero_blocks = []
- reference = '\0' * self.blocksize
-
- for i in range(self.total_blocks):
- d = self._file.read(self.blocksize)
- if d == reference:
- zero_blocks.append(i)
- zero_blocks.append(i+1)
- else:
- nonzero_blocks.append(i)
- nonzero_blocks.append(i+1)
-
- assert zero_blocks or nonzero_blocks
-
- self.file_map = {}
- if zero_blocks:
- self.file_map["__ZERO"] = RangeSet(data=zero_blocks)
- if nonzero_blocks:
- self.file_map["__NONZERO"] = RangeSet(data=nonzero_blocks)
- if self.hashtree_info:
- self.file_map["__HASHTREE"] = self.hashtree_info.hashtree_range
-
- def __del__(self):
- self._file.close()
-
- def _GetRangeData(self, ranges):
- # Use a lock to protect the generator so that we will not run two
- # instances of this generator on the same object simultaneously.
- with self.generator_lock:
- for s, e in ranges:
- self._file.seek(s * self.blocksize)
- for _ in range(s, e):
- yield self._file.read(self.blocksize)
-
- def RangeSha1(self, ranges):
- h = sha1()
- for data in self._GetRangeData(ranges): # pylint: disable=not-an-iterable
- h.update(data)
- return h.hexdigest()
-
- def ReadRangeSet(self, ranges):
- return list(self._GetRangeData(ranges))
-
- def TotalSha1(self, include_clobbered_blocks=False):
- assert not self.clobbered_blocks
- return self.RangeSha1(self.care_map)
-
- def WriteRangeDataToFd(self, ranges, fd):
- for data in self._GetRangeData(ranges): # pylint: disable=not-an-iterable
- fd.write(data)
-
-
class Transfer(object):
def __init__(self, tgt_name, src_name, tgt_ranges, src_ranges, tgt_sha1,
src_sha1, style, by_id):
diff --git a/tools/releasetools/build_image.py b/tools/releasetools/build_image.py
index ba04651..af508fe 100755
--- a/tools/releasetools/build_image.py
+++ b/tools/releasetools/build_image.py
@@ -18,7 +18,7 @@
Builds output_image from the given input_directory, properties_file,
and writes the image to target_output_directory.
-Usage: build_image.py input_directory properties_file output_image \\
+Usage: build_image input_directory properties_file output_image \\
target_output_directory
"""
@@ -644,30 +644,30 @@
d["extfs_rsv_pct"] = "0"
copy_prop("product_reserved_size", "partition_reserved_size")
copy_prop("product_selinux_fc", "selinux_fc")
- elif mount_point == "product_services":
- copy_prop("avb_product_services_hashtree_enable", "avb_hashtree_enable")
- copy_prop("avb_product_services_add_hashtree_footer_args",
+ elif mount_point == "system_ext":
+ copy_prop("avb_system_ext_hashtree_enable", "avb_hashtree_enable")
+ copy_prop("avb_system_ext_add_hashtree_footer_args",
"avb_add_hashtree_footer_args")
- copy_prop("avb_product_services_key_path", "avb_key_path")
- copy_prop("avb_product_services_algorithm", "avb_algorithm")
- copy_prop("product_services_fs_type", "fs_type")
- copy_prop("product_services_size", "partition_size")
- if not copy_prop("product_services_journal_size", "journal_size"):
+ copy_prop("avb_system_ext_key_path", "avb_key_path")
+ copy_prop("avb_system_ext_algorithm", "avb_algorithm")
+ copy_prop("system_ext_fs_type", "fs_type")
+ copy_prop("system_ext_size", "partition_size")
+ if not copy_prop("system_ext_journal_size", "journal_size"):
d["journal_size"] = "0"
- copy_prop("product_services_verity_block_device", "verity_block_device")
+ copy_prop("system_ext_verity_block_device", "verity_block_device")
copy_prop("ext4_share_dup_blocks", "ext4_share_dup_blocks")
- copy_prop("product_services_squashfs_compressor", "squashfs_compressor")
- copy_prop("product_services_squashfs_compressor_opt",
+ copy_prop("system_ext_squashfs_compressor", "squashfs_compressor")
+ copy_prop("system_ext_squashfs_compressor_opt",
"squashfs_compressor_opt")
- copy_prop("product_services_squashfs_block_size", "squashfs_block_size")
- copy_prop("product_services_squashfs_disable_4k_align",
+ copy_prop("system_ext_squashfs_block_size", "squashfs_block_size")
+ copy_prop("system_ext_squashfs_disable_4k_align",
"squashfs_disable_4k_align")
- copy_prop("product_services_base_fs_file", "base_fs_file")
- copy_prop("product_services_extfs_inode_count", "extfs_inode_count")
- if not copy_prop("product_services_extfs_rsv_pct", "extfs_rsv_pct"):
+ copy_prop("system_ext_base_fs_file", "base_fs_file")
+ copy_prop("system_ext_extfs_inode_count", "extfs_inode_count")
+ if not copy_prop("system_ext_extfs_rsv_pct", "extfs_rsv_pct"):
d["extfs_rsv_pct"] = "0"
- copy_prop("product_services_reserved_size", "partition_reserved_size")
- copy_prop("product_services_selinux_fc", "selinux_fc")
+ copy_prop("system_ext_reserved_size", "partition_reserved_size")
+ copy_prop("system_ext_selinux_fc", "selinux_fc")
elif mount_point == "odm":
copy_prop("avb_odm_hashtree_enable", "avb_hashtree_enable")
copy_prop("avb_odm_add_hashtree_footer_args",
@@ -736,8 +736,8 @@
copy_prop("partition_size", "odm_size")
elif mount_point == "product":
copy_prop("partition_size", "product_size")
- elif mount_point == "product_services":
- copy_prop("partition_size", "product_services_size")
+ elif mount_point == "system_ext":
+ copy_prop("partition_size", "system_ext_size")
return d
@@ -777,8 +777,8 @@
mount_point = "oem"
elif image_filename == "product.img":
mount_point = "product"
- elif image_filename == "product_services.img":
- mount_point = "product_services"
+ elif image_filename == "system_ext.img":
+ mount_point = "system_ext"
else:
logger.error("Unknown image file name %s", image_filename)
sys.exit(1)
diff --git a/tools/releasetools/build_super_image.py b/tools/releasetools/build_super_image.py
index 045ad55..f63453d 100755
--- a/tools/releasetools/build_super_image.py
+++ b/tools/releasetools/build_super_image.py
@@ -80,10 +80,12 @@
block_devices = shlex.split(info_dict.get("super_block_devices", "").strip())
groups = shlex.split(info_dict.get("super_partition_groups", "").strip())
- if ab_update:
+ if ab_update and retrofit:
cmd += ["--metadata-slots", "2"]
+ elif ab_update:
+ cmd += ["--metadata-slots", "3"]
else:
- cmd += ["--metadata-slots", "1"]
+ cmd += ["--metadata-slots", "2"]
if ab_update and retrofit:
cmd.append("--auto-slot-suffixing")
diff --git a/tools/releasetools/check_target_files_signatures.py b/tools/releasetools/check_target_files_signatures.py
index 60200a3..8c1bb9a 100755
--- a/tools/releasetools/check_target_files_signatures.py
+++ b/tools/releasetools/check_target_files_signatures.py
@@ -213,7 +213,7 @@
self.certs = frozenset(out)
def ReadManifest(self, full_filename):
- p = common.Run(["aapt", "dump", "xmltree", full_filename,
+ p = common.Run(["aapt2", "dump", "xmltree", full_filename, "--file",
"AndroidManifest.xml"],
stdout=subprocess.PIPE)
manifest, err = p.communicate()
diff --git a/tools/releasetools/check_target_files_vintf.py b/tools/releasetools/check_target_files_vintf.py
new file mode 100755
index 0000000..543147c
--- /dev/null
+++ b/tools/releasetools/check_target_files_vintf.py
@@ -0,0 +1,232 @@
+#!/usr/bin/env python
+#
+# Copyright (C) 2019 The Android Open Source Project
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""
+Check VINTF compatibility from a target files package.
+
+Usage: check_target_files_vintf target_files
+
+target_files can be a ZIP file or an extracted target files directory.
+"""
+
+import logging
+import subprocess
+import sys
+import os
+import zipfile
+
+import common
+
+logger = logging.getLogger(__name__)
+
+OPTIONS = common.OPTIONS
+
+# Keys are paths that VINTF searches. Must keep in sync with libvintf's search
+# paths (VintfObject.cpp).
+# These paths are stored in different directories in target files package, so
+# we have to search for the correct path and tell checkvintf to remap them.
+DIR_SEARCH_PATHS = {
+ '/system': ('SYSTEM',),
+ '/vendor': ('VENDOR', 'SYSTEM/vendor'),
+ '/product': ('PRODUCT', 'SYSTEM/product'),
+ '/odm': ('ODM', 'VENDOR/odm'),
+}
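+# For example (illustrative), files for '/odm' may live under either ODM/ or
+# VENDOR/odm/ in the target files package; GetDirmap() below picks the first
+# directory that exists.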
+
+UNZIP_PATTERN = ['META/*', '*/build.prop']
+
+
+def GetDirmap(input_tmp):
+ dirmap = {}
+ for device_path, target_files_rel_paths in DIR_SEARCH_PATHS.items():
+ for target_files_rel_path in target_files_rel_paths:
+ target_files_path = os.path.join(input_tmp, target_files_rel_path)
+ if os.path.isdir(target_files_path):
+ dirmap[device_path] = target_files_path
+ break
+ if device_path not in dirmap:
+ raise ValueError("Can't determine path for device path " + device_path +
+ ". Searched the following:" +
+ ("\n".join(target_files_rel_paths)))
+ return dirmap
+
+
+def GetArgsForSkus(info_dict):
+ skus = info_dict.get('vintf_odm_manifest_skus', '').strip().split()
+ if not skus:
+ logger.info("ODM_MANIFEST_SKUS is not defined. Check once without SKUs.")
+ skus = ['']
+ return [['--property', 'ro.boot.product.hardware.sku=' + sku]
+ for sku in skus]
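+  # Illustrative: with vintf_odm_manifest_skus set to "sku1 sku2" (hypothetical
+  # SKU names), this returns
+  # [['--property', 'ro.boot.product.hardware.sku=sku1'],
+  #  ['--property', 'ro.boot.product.hardware.sku=sku2']].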
+
+
+def GetArgsForShippingApiLevel(info_dict):
+ shipping_api_level = info_dict['vendor.build.prop'].get(
+ 'ro.product.first_api_level')
+ if not shipping_api_level:
+ logger.warning('Cannot determine ro.product.first_api_level')
+ return []
+ return ['--property', 'ro.product.first_api_level=' + shipping_api_level]
+
+
+def GetArgsForKernel(input_tmp):
+ version_path = os.path.join(input_tmp, 'META/kernel_version.txt')
+ config_path = os.path.join(input_tmp, 'META/kernel_configs.txt')
+
+ if not os.path.isfile(version_path) or not os.path.isfile(config_path):
+ logger.info('Skipping kernel config checks because ' +
+ 'PRODUCT_OTA_ENFORCE_VINTF_KERNEL_REQUIREMENTS is not set')
+ return []
+
+ with open(version_path) as f:
+ version = f.read().strip()
+
+ return ['--kernel', '{}:{}'.format(version, config_path)]
+
+
+def CheckVintfFromExtractedTargetFiles(input_tmp, info_dict=None):
+ """
+ Checks VINTF metadata of an extracted target files directory.
+
+ Args:
+    input_tmp: path to the directory that contains the extracted target files
+      archive.
+    info_dict: The build-time info dict. If None, it will be loaded from
+      input_tmp.
+
+ Returns:
+    True if VINTF check is skipped or compatible, False if incompatible. Raises
+    a RuntimeError if any error occurs.
+ """
+
+ if info_dict is None:
+ info_dict = common.LoadInfoDict(input_tmp)
+
+ if info_dict.get('vintf_enforce') != 'true':
+ logger.warning('PRODUCT_ENFORCE_VINTF_MANIFEST is not set, skipping checks')
+ return True
+
+ dirmap = GetDirmap(input_tmp)
+ args_for_skus = GetArgsForSkus(info_dict)
+ shipping_api_level_args = GetArgsForShippingApiLevel(info_dict)
+ kernel_args = GetArgsForKernel(input_tmp)
+
+ common_command = [
+ 'checkvintf',
+ '--check-compat',
+ ]
+ for device_path, real_path in dirmap.items():
+ common_command += ['--dirmap', '{}:{}'.format(device_path, real_path)]
+ common_command += kernel_args
+ common_command += shipping_api_level_args
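+  # The assembled command line looks roughly like this (paths and values are
+  # illustrative):
+  #   checkvintf --check-compat --dirmap /system:<tmp>/SYSTEM \
+  #       --dirmap /vendor:<tmp>/VENDOR \
+  #       --kernel <version>:<tmp>/META/kernel_configs.txt \
+  #       --property ro.product.first_api_level=<level>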
+
+ success = True
+ for sku_args in args_for_skus:
+ command = common_command + sku_args
+ proc = common.Run(command, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
+ out, err = proc.communicate()
+ if proc.returncode == 0:
+ logger.info("Command `%s` returns 'compatible'", ' '.join(command))
+ elif out.strip() == "INCOMPATIBLE":
+ logger.info("Command `%s` returns 'incompatible'", ' '.join(command))
+ success = False
+ else:
+ raise common.ExternalError(
+ "Failed to run command '{}' (exit code {}):\nstdout:{}\nstderr:{}"
+ .format(' '.join(command), proc.returncode, out, err))
+ logger.info("stdout: %s", out)
+ logger.info("stderr: %s", err)
+
+ return success
+
+
+def GetVintfFileList():
+ """
+ Returns a list of VINTF metadata files that should be read from a target files
+ package before executing checkvintf.
+ """
+ def PathToPatterns(path):
+ if path[-1] == '/':
+ path += '*'
+ for device_path, target_files_rel_paths in DIR_SEARCH_PATHS.items():
+ if path.startswith(device_path):
+ suffix = path[len(device_path):]
+ return [rel_path + suffix for rel_path in target_files_rel_paths]
+ raise RuntimeError('Unrecognized path from checkvintf --dump-file-list: ' +
+ path)
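+  # e.g. (illustrative) PathToPatterns('/vendor/etc/vintf/') returns
+  # ['VENDOR/etc/vintf/*', 'SYSTEM/vendor/etc/vintf/*'].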
+
+ out = common.RunAndCheckOutput(['checkvintf', '--dump-file-list'])
+ paths = out.strip().split('\n')
+ paths = sum((PathToPatterns(path) for path in paths if path), [])
+ return paths
+
+
+def CheckVintfFromTargetFiles(inp, info_dict=None):
+ """
+ Checks VINTF metadata of a target files zip.
+
+ Args:
+ inp: path to the target files archive.
+ info_dict: The build-time info dict. If None, it will be loaded from inp.
+
+ Returns:
+    True if VINTF check is skipped or compatible, False if incompatible. Raises
+    a RuntimeError if any error occurs.
+ """
+ input_tmp = common.UnzipTemp(inp, GetVintfFileList() + UNZIP_PATTERN)
+ return CheckVintfFromExtractedTargetFiles(input_tmp, info_dict)
+
+
+def CheckVintf(inp, info_dict=None):
+ """
+ Checks VINTF metadata of a target files zip or extracted target files
+ directory.
+
+ Args:
+ inp: path to the (possibly extracted) target files archive.
+ info_dict: The build-time info dict. If None, it will be loaded from inp.
+
+ Returns:
+    True if VINTF check is skipped or compatible, False if incompatible. Raises
+    a RuntimeError if any error occurs.
+ """
+ if os.path.isdir(inp):
+    logger.info('Checking VINTF compatibility of extracted target files...')
+ return CheckVintfFromExtractedTargetFiles(inp, info_dict)
+
+ if zipfile.is_zipfile(inp):
+    logger.info('Checking VINTF compatibility of target files...')
+ return CheckVintfFromTargetFiles(inp, info_dict)
+
+ raise ValueError('{} is not a valid directory or zip file'.format(inp))
+
+
+def main(argv):
+ args = common.ParseOptions(argv, __doc__)
+ if len(args) != 1:
+ common.Usage(__doc__)
+ sys.exit(1)
+ common.InitLogging()
+ if not CheckVintf(args[0]):
+ sys.exit(1)
+
+
+if __name__ == '__main__':
+ try:
+ common.CloseInheritedPipes()
+ main(sys.argv[1:])
+ except common.ExternalError:
+ logger.exception('\n ERROR:\n')
+ sys.exit(1)
+ finally:
+ common.Cleanup()
diff --git a/tools/releasetools/common.py b/tools/releasetools/common.py
index edde89c..6756049 100644
--- a/tools/releasetools/common.py
+++ b/tools/releasetools/common.py
@@ -39,8 +39,9 @@
import zipfile
from hashlib import sha1, sha256
-import blockimgdiff
+import images
import sparse_img
+from blockimgdiff import BlockImageDiff
logger = logging.getLogger(__name__)
@@ -98,15 +99,14 @@
# The partitions allowed to be signed by AVB (Android Verified Boot 2.0). Note
# that system_other is not in the list because we don't want to include its
# descriptor into vbmeta.img.
-AVB_PARTITIONS = ('boot', 'dtbo', 'odm', 'product', 'product_services',
- 'recovery', 'system', 'vendor')
+AVB_PARTITIONS = ('boot', 'dtbo', 'odm', 'product', 'recovery', 'system',
+ 'system_ext', 'vendor')
# Chained VBMeta partitions.
AVB_VBMETA_PARTITIONS = ('vbmeta_system', 'vbmeta_vendor')
# Partitions that should have their care_map added to META/care_map.pb
-PARTITIONS_WITH_CARE_MAP = ('system', 'vendor', 'product', 'product_services',
- 'odm')
+PARTITIONS_WITH_CARE_MAP = ('system', 'vendor', 'product', 'system_ext', 'odm')
class ErrorCode(object):
@@ -251,6 +251,8 @@
"""
proc = Run(args, verbose=verbose, **kwargs)
output, _ = proc.communicate()
+ if output is None:
+ output = ""
# Don't log any if caller explicitly says so.
if verbose != False:
logger.info("%s", output.rstrip())
@@ -461,10 +463,13 @@
return LoadDictionaryFromLines(data.split("\n"))
-def LoadDictionaryFromFile(file_path):
+def LoadListFromFile(file_path):
with open(file_path) as f:
- lines = list(f.read().splitlines())
+ return f.read().splitlines()
+
+def LoadDictionaryFromFile(file_path):
+ lines = LoadListFromFile(file_path)
return LoadDictionaryFromLines(lines)
@@ -549,10 +554,72 @@
logger.info("%-25s = (%s) %s", k, type(v).__name__, v)
+def MergeDynamicPartitionInfoDicts(framework_dict,
+ vendor_dict,
+ include_dynamic_partition_list=True,
+ size_prefix="",
+ size_suffix="",
+ list_prefix="",
+ list_suffix=""):
+ """Merges dynamic partition info variables.
+
+ Args:
+ framework_dict: The dictionary of dynamic partition info variables from the
+ partial framework target files.
+ vendor_dict: The dictionary of dynamic partition info variables from the
+ partial vendor target files.
+ include_dynamic_partition_list: If true, merges the dynamic_partition_list
+ variable. Not all use cases need this variable merged.
+ size_prefix: The prefix in partition group size variables that precedes the
+ name of the partition group. For example, partition group 'group_a' with
+ corresponding size variable 'super_group_a_group_size' would have the
+ size_prefix 'super_'.
+ size_suffix: Similar to size_prefix but for the variable's suffix. For
+ example, 'super_group_a_group_size' would have size_suffix '_group_size'.
+ list_prefix: Similar to size_prefix but for the partition group's
+ partition_list variable.
+ list_suffix: Similar to size_suffix but for the partition group's
+ partition_list variable.
+
+ Returns:
+ The merged dynamic partition info dictionary.
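+
+  Example (illustrative; partition names and sizes are hypothetical):
+    MergeDynamicPartitionInfoDicts(
+        framework_dict={"dynamic_partition_list": "system product",
+                        "super_group_a_partition_list": "system product"},
+        vendor_dict={"super_partition_groups": "group_a",
+                     "dynamic_partition_list": "vendor",
+                     "super_group_a_group_size": "4294967296",
+                     "super_group_a_partition_list": "vendor"},
+        size_prefix="super_", size_suffix="_group_size",
+        list_prefix="super_", list_suffix="_partition_list")
+    returns
+    {"super_partition_groups": "group_a",
+     "dynamic_partition_list": "system product vendor",
+     "super_group_a_group_size": "4294967296",
+     "super_group_a_partition_list": "system product vendor"}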
+ """
+ merged_dict = {}
+ # Partition groups and group sizes are defined by the vendor dict because
+ # these values may vary for each board that uses a shared system image.
+ merged_dict["super_partition_groups"] = vendor_dict["super_partition_groups"]
+ if include_dynamic_partition_list:
+ framework_dynamic_partition_list = framework_dict.get(
+ "dynamic_partition_list", "")
+ vendor_dynamic_partition_list = vendor_dict.get("dynamic_partition_list",
+ "")
+ merged_dict["dynamic_partition_list"] = (
+ "%s %s" % (framework_dynamic_partition_list,
+ vendor_dynamic_partition_list)).strip()
+ for partition_group in merged_dict["super_partition_groups"].split(" "):
+ # Set the partition group's size using the value from the vendor dict.
+ key = "%s%s%s" % (size_prefix, partition_group, size_suffix)
+ if key not in vendor_dict:
+ raise ValueError("Vendor dict does not contain required key %s." % key)
+ merged_dict[key] = vendor_dict[key]
+
+ # Set the partition group's partition list using a concatenation of the
+ # framework and vendor partition lists.
+ key = "%s%s%s" % (list_prefix, partition_group, list_suffix)
+ merged_dict[key] = (
+ "%s %s" %
+ (framework_dict.get(key, ""), vendor_dict.get(key, ""))).strip()
+ return merged_dict
+
+
def AppendAVBSigningArgs(cmd, partition):
"""Append signing arguments for avbtool."""
# e.g., "--key path/to/signing_key --algorithm SHA256_RSA4096"
key_path = OPTIONS.info_dict.get("avb_" + partition + "_key_path")
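+  # The configured key path may be relative to the tool search directory
+  # rather than the current directory; if the key is not found as-is, fall
+  # back to looking it up under OPTIONS.search_path.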
+ if key_path and not os.path.exists(key_path) and OPTIONS.search_path:
+ new_key_path = os.path.join(OPTIONS.search_path, key_path)
+ if os.path.exists(new_key_path):
+ key_path = new_key_path
algorithm = OPTIONS.info_dict.get("avb_" + partition + "_algorithm")
if key_path and algorithm:
cmd.extend(["--key", key_path, "--algorithm", algorithm])
@@ -562,6 +629,33 @@
cmd.extend(["--salt", avb_salt])
+def GetAvbPartitionArg(partition, image, info_dict=None):
+ """Returns the VBMeta arguments for partition.
+
+ It sets up the VBMeta argument by including the partition descriptor from the
+ given 'image', or by configuring the partition as a chained partition.
+
+ Args:
+ partition: The name of the partition (e.g. "system").
+ image: The path to the partition image.
+ info_dict: A dict returned by common.LoadInfoDict(). Will use
+ OPTIONS.info_dict if None has been given.
+
+ Returns:
+ A list of VBMeta arguments.
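+    For example (illustrative), ["--chain_partition", "system:1:<pubkey.bin>"]
+    when the partition uses a chained descriptor, or
+    ["--include_descriptors_from_image", "IMAGES/system.img"] otherwise.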
+ """
+ if info_dict is None:
+ info_dict = OPTIONS.info_dict
+
+ # Check if chain partition is used.
+ key_path = info_dict.get("avb_" + partition + "_key_path")
+ if key_path:
+ chained_partition_arg = GetAvbChainedPartitionArg(partition, info_dict)
+ return ["--chain_partition", chained_partition_arg]
+ else:
+ return ["--include_descriptors_from_image", image]
+
+
def GetAvbChainedPartitionArg(partition, info_dict, key=None):
"""Constructs and returns the arg to build or verify a chained partition.
@@ -578,12 +672,75 @@
"""
if key is None:
key = info_dict["avb_" + partition + "_key_path"]
+ if key and not os.path.exists(key) and OPTIONS.search_path:
+ new_key_path = os.path.join(OPTIONS.search_path, key)
+ if os.path.exists(new_key_path):
+ key = new_key_path
pubkey_path = ExtractAvbPublicKey(info_dict["avb_avbtool"], key)
rollback_index_location = info_dict[
"avb_" + partition + "_rollback_index_location"]
return "{}:{}:{}".format(partition, rollback_index_location, pubkey_path)
+def BuildVBMeta(image_path, partitions, name, needed_partitions):
+ """Creates a VBMeta image.
+
+ It generates the requested VBMeta image. The requested image could be for
+ top-level or chained VBMeta image, which is determined based on the name.
+
+ Args:
+ image_path: The output path for the new VBMeta image.
+ partitions: A dict that's keyed by partition names with image paths as
+ values. Only valid partition names are accepted, as listed in
+ common.AVB_PARTITIONS.
+ name: Name of the VBMeta partition, e.g. 'vbmeta', 'vbmeta_system'.
+ needed_partitions: Partitions whose descriptors should be included into the
+ generated VBMeta image.
+
+ Raises:
+ AssertionError: On invalid input args.
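+
+  Example (illustrative paths):
+    BuildVBMeta("/tmp/vbmeta.img",
+                {"system": "IMAGES/system.img", "vendor": "IMAGES/vendor.img"},
+                "vbmeta", ["system", "vendor"])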
+ """
+ avbtool = OPTIONS.info_dict["avb_avbtool"]
+ cmd = [avbtool, "make_vbmeta_image", "--output", image_path]
+ AppendAVBSigningArgs(cmd, name)
+
+ for partition, path in partitions.items():
+ if partition not in needed_partitions:
+ continue
+ assert (partition in AVB_PARTITIONS or
+ partition in AVB_VBMETA_PARTITIONS), \
+ 'Unknown partition: {}'.format(partition)
+ assert os.path.exists(path), \
+ 'Failed to find {} for {}'.format(path, partition)
+ cmd.extend(GetAvbPartitionArg(partition, path))
+
+ args = OPTIONS.info_dict.get("avb_{}_args".format(name))
+ if args and args.strip():
+ split_args = shlex.split(args)
+ for index, arg in enumerate(split_args[:-1]):
+ # Sanity check that the image file exists. Some images might be defined
+ # as a path relative to source tree, which may not be available at the
+ # same location when running this script (we have the input target_files
+ # zip only). For such cases, we additionally scan other locations (e.g.
+ # IMAGES/, RADIO/, etc) before bailing out.
+ if arg == '--include_descriptors_from_image':
+ image_path = split_args[index + 1]
+ if os.path.exists(image_path):
+ continue
+ found = False
+ for dir_name in ['IMAGES', 'RADIO', 'PREBUILT_IMAGES']:
+ alt_path = os.path.join(
+ OPTIONS.input_tmp, dir_name, os.path.basename(image_path))
+ if os.path.exists(alt_path):
+ split_args[index + 1] = alt_path
+ found = True
+ break
+ assert found, 'Failed to find {}'.format(image_path)
+ cmd.extend(split_args)
+
+ RunAndCheckOutput(cmd)
+
+
def _BuildBootableImage(sourcedir, fs_config_file, info_dict=None,
has_ramdisk=False, two_step_image=False):
"""Build a bootable image from the specified sourcedir.
@@ -916,8 +1073,8 @@
# ota_from_target_files.py (since LMP).
assert os.path.exists(path) and os.path.exists(mappath)
- return blockimgdiff.FileImage(path, hashtree_info_generator=
- hashtree_info_generator)
+ return images.FileImage(path, hashtree_info_generator=hashtree_info_generator)
+
def GetSparseImage(which, tmpdir, input_zip, allow_shared_blocks,
hashtree_info_generator=None):
@@ -1043,7 +1200,7 @@
def GetMinSdkVersion(apk_name):
"""Gets the minSdkVersion declared in the APK.
- It calls 'aapt' to query the embedded minSdkVersion from the given APK file.
+ It calls 'aapt2' to query the embedded minSdkVersion from the given APK file.
This can be both a decimal number (API Level) or a codename.
Args:
@@ -1056,12 +1213,12 @@
ExternalError: On failing to obtain the min SDK version.
"""
proc = Run(
- ["aapt", "dump", "badging", apk_name], stdout=subprocess.PIPE,
+ ["aapt2", "dump", "badging", apk_name], stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
stdoutdata, stderrdata = proc.communicate()
if proc.returncode != 0:
raise ExternalError(
- "Failed to obtain minSdkVersion: aapt return code {}:\n{}\n{}".format(
+ "Failed to obtain minSdkVersion: aapt2 return code {}:\n{}\n{}".format(
proc.returncode, stdoutdata, stderrdata))
for line in stdoutdata.split("\n"):
@@ -1069,7 +1226,7 @@
m = re.match(r'sdkVersion:\'([^\']*)\'', line)
if m:
return m.group(1)
- raise ExternalError("No minSdkVersion returned by aapt")
+ raise ExternalError("No minSdkVersion returned by aapt2")
def GetMinSdkVersionInt(apk_name, codename_to_api_level_map):
@@ -1461,7 +1618,7 @@
values.
"""
result = {}
- for k, v in sorted(current.iteritems()):
+ for k, v in sorted(current.items()):
if v:
result[k] = v
else:
@@ -1482,7 +1639,7 @@
f.write("# (Additional spaces are harmless.)\n\n")
first_line = None
- sorted_list = sorted([(not v, k, v) for (k, v) in current.iteritems()])
+ sorted_list = sorted([(not v, k, v) for (k, v) in current.items()])
for i, (_, k, v) in enumerate(sorted_list):
f.write("[[[ %s ]]] %s\n" % (v, k))
if not v and first_line is None:
@@ -1648,7 +1805,7 @@
"""Keyword arguments to the constructor become attributes of this
object, which is passed to all functions in the device-specific
module."""
- for k, v in kwargs.iteritems():
+ for k, v in kwargs.items():
setattr(self, k, v)
self.extras = OPTIONS.extras
@@ -1917,9 +2074,9 @@
assert version >= 3
self.version = version
- b = blockimgdiff.BlockImageDiff(tgt, src, threads=OPTIONS.worker_threads,
- version=self.version,
- disable_imgdiff=self.disable_imgdiff)
+ b = BlockImageDiff(tgt, src, threads=OPTIONS.worker_threads,
+ version=self.version,
+ disable_imgdiff=self.disable_imgdiff)
self.path = os.path.join(MakeTempDir(), partition)
b.Compute(self.path)
self._required_cache = b.max_stashed_size
@@ -2173,8 +2330,10 @@
return ctx.hexdigest()
-DataImage = blockimgdiff.DataImage
-EmptyImage = blockimgdiff.EmptyImage
+# Expose these two classes to support vendor-specific scripts
+DataImage = images.DataImage
+EmptyImage = images.EmptyImage
+
# map recovery.fstab's fs_types to mount/format "partition types"
PARTITION_TYPES = {
diff --git a/tools/releasetools/images.py b/tools/releasetools/images.py
new file mode 100644
index 0000000..a24148a
--- /dev/null
+++ b/tools/releasetools/images.py
@@ -0,0 +1,223 @@
+# Copyright (C) 2019 The Android Open Source Project
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import os
+import threading
+from hashlib import sha1
+
+from rangelib import RangeSet
+
+__all__ = ["EmptyImage", "DataImage", "FileImage"]
+
+
+class Image(object):
+ def RangeSha1(self, ranges):
+ raise NotImplementedError
+
+ def ReadRangeSet(self, ranges):
+ raise NotImplementedError
+
+ def TotalSha1(self, include_clobbered_blocks=False):
+ raise NotImplementedError
+
+ def WriteRangeDataToFd(self, ranges, fd):
+ raise NotImplementedError
+
+
+class EmptyImage(Image):
+ """A zero-length image."""
+
+ def __init__(self):
+ self.blocksize = 4096
+ self.care_map = RangeSet()
+ self.clobbered_blocks = RangeSet()
+ self.extended = RangeSet()
+ self.total_blocks = 0
+ self.file_map = {}
+ self.hashtree_info = None
+
+ def RangeSha1(self, ranges):
+ return sha1().hexdigest()
+
+ def ReadRangeSet(self, ranges):
+ return ()
+
+ def TotalSha1(self, include_clobbered_blocks=False):
+ # EmptyImage always carries empty clobbered_blocks, so
+ # include_clobbered_blocks can be ignored.
+ assert self.clobbered_blocks.size() == 0
+ return sha1().hexdigest()
+
+ def WriteRangeDataToFd(self, ranges, fd):
+ raise ValueError("Can't write data from EmptyImage to file")
+
+
+class DataImage(Image):
+ """An image wrapped around a single string of data."""
+
+ def __init__(self, data, trim=False, pad=False):
+ self.data = data
+ self.blocksize = 4096
+
+ assert not (trim and pad)
+
+ partial = len(self.data) % self.blocksize
+ padded = False
+ if partial > 0:
+ if trim:
+ self.data = self.data[:-partial]
+ elif pad:
+ self.data += '\0' * (self.blocksize - partial)
+ padded = True
+ else:
+ raise ValueError(("data for DataImage must be multiple of %d bytes "
+ "unless trim or pad is specified") %
+ (self.blocksize,))
+
+ assert len(self.data) % self.blocksize == 0
+
+ self.total_blocks = len(self.data) // self.blocksize
+ self.care_map = RangeSet(data=(0, self.total_blocks))
+ # When the last block is padded, we always write the whole block even for
+ # incremental OTAs. Because otherwise the last block may get skipped if
+ # unchanged for an incremental, but would fail the post-install
+ # verification if it has non-zero contents in the padding bytes.
+ # Bug: 23828506
+ if padded:
+ clobbered_blocks = [self.total_blocks-1, self.total_blocks]
+ else:
+ clobbered_blocks = []
+ self.clobbered_blocks = clobbered_blocks
+ self.extended = RangeSet()
+
+ zero_blocks = []
+ nonzero_blocks = []
+ reference = '\0' * self.blocksize
+
+ for i in range(self.total_blocks-1 if padded else self.total_blocks):
+ d = self.data[i*self.blocksize : (i+1)*self.blocksize]
+ if d == reference:
+ zero_blocks.append(i)
+ zero_blocks.append(i+1)
+ else:
+ nonzero_blocks.append(i)
+ nonzero_blocks.append(i+1)
+
+ assert zero_blocks or nonzero_blocks or clobbered_blocks
+
+ self.file_map = dict()
+ if zero_blocks:
+ self.file_map["__ZERO"] = RangeSet(data=zero_blocks)
+ if nonzero_blocks:
+ self.file_map["__NONZERO"] = RangeSet(data=nonzero_blocks)
+ if clobbered_blocks:
+ self.file_map["__COPY"] = RangeSet(data=clobbered_blocks)
+
+ def _GetRangeData(self, ranges):
+ for s, e in ranges:
+ yield self.data[s*self.blocksize:e*self.blocksize]
+
+ def RangeSha1(self, ranges):
+ h = sha1()
+ for data in self._GetRangeData(ranges): # pylint: disable=not-an-iterable
+ h.update(data)
+ return h.hexdigest()
+
+ def ReadRangeSet(self, ranges):
+ return list(self._GetRangeData(ranges))
+
+ def TotalSha1(self, include_clobbered_blocks=False):
+ if not include_clobbered_blocks:
+ return self.RangeSha1(self.care_map.subtract(self.clobbered_blocks))
+ return sha1(self.data).hexdigest()
+
+ def WriteRangeDataToFd(self, ranges, fd):
+ for data in self._GetRangeData(ranges): # pylint: disable=not-an-iterable
+ fd.write(data)
+
+
+class FileImage(Image):
+ """An image wrapped around a raw image file."""
+
+ def __init__(self, path, hashtree_info_generator=None):
+ self.path = path
+ self.blocksize = 4096
+ self._file_size = os.path.getsize(self.path)
+ self._file = open(self.path, 'rb')
+
+ if self._file_size % self.blocksize != 0:
+ raise ValueError("Size of file %s must be multiple of %d bytes, but is %d"
+ % self.path, self.blocksize, self._file_size)
+
+ self.total_blocks = self._file_size // self.blocksize
+ self.care_map = RangeSet(data=(0, self.total_blocks))
+ self.clobbered_blocks = RangeSet()
+ self.extended = RangeSet()
+
+ self.generator_lock = threading.Lock()
+
+ self.hashtree_info = None
+ if hashtree_info_generator:
+ self.hashtree_info = hashtree_info_generator.Generate(self)
+
+ zero_blocks = []
+ nonzero_blocks = []
+ reference = '\0' * self.blocksize
+
+ for i in range(self.total_blocks):
+ d = self._file.read(self.blocksize)
+ if d == reference:
+ zero_blocks.append(i)
+ zero_blocks.append(i+1)
+ else:
+ nonzero_blocks.append(i)
+ nonzero_blocks.append(i+1)
+
+ assert zero_blocks or nonzero_blocks
+
+ self.file_map = {}
+ if zero_blocks:
+ self.file_map["__ZERO"] = RangeSet(data=zero_blocks)
+ if nonzero_blocks:
+ self.file_map["__NONZERO"] = RangeSet(data=nonzero_blocks)
+ if self.hashtree_info:
+ self.file_map["__HASHTREE"] = self.hashtree_info.hashtree_range
+
+ def __del__(self):
+ self._file.close()
+
+ def _GetRangeData(self, ranges):
+ # Use a lock to protect the generator so that we will not run two
+ # instances of this generator on the same object simultaneously.
+ with self.generator_lock:
+ for s, e in ranges:
+ self._file.seek(s * self.blocksize)
+ for _ in range(s, e):
+ yield self._file.read(self.blocksize)
+
+ def RangeSha1(self, ranges):
+ h = sha1()
+ for data in self._GetRangeData(ranges): # pylint: disable=not-an-iterable
+ h.update(data)
+ return h.hexdigest()
+
+ def ReadRangeSet(self, ranges):
+ return list(self._GetRangeData(ranges))
+
+ def TotalSha1(self, include_clobbered_blocks=False):
+ assert not self.clobbered_blocks
+ return self.RangeSha1(self.care_map)
+
+ def WriteRangeDataToFd(self, ranges, fd):
+ for data in self._GetRangeData(ranges): # pylint: disable=not-an-iterable
+ fd.write(data)
diff --git a/tools/releasetools/img_from_target_files.py b/tools/releasetools/img_from_target_files.py
index 8fb9871..ab38d0d 100755
--- a/tools/releasetools/img_from_target_files.py
+++ b/tools/releasetools/img_from_target_files.py
@@ -15,30 +15,32 @@
# limitations under the License.
"""
-Given target-files, produces an image zipfile suitable for use
-with 'fastboot update'.
+Given an input target-files, produces an image zipfile suitable for use with
+'fastboot update'.
Usage: img_from_target_files [flags] input_target_files output_image_zip
-input_target_files: one of the following:
- - directory containing extracted target files. It will load info from
- OTA/android-info.txt, META/misc_info.txt and build the image zipfile using
- images from IMAGES/.
- - target files package. Same as above, but extracts the archive before
- building the image zipfile.
+input_target_files: Path to the input target_files zip.
Flags:
-z (--bootable_zip)
Include only the bootable images (eg 'boot' and 'recovery') in
the output.
+  --additional <filespec>
+    Include an additional entry in the generated zip file. The filespec is in
+    a format that's accepted by zip2zip (e.g.
+    'OTA/android-info.txt:android-info.txt', which copies `OTA/android-info.txt`
+    from input_file into output_file as `android-info.txt`; refer to the
+    `filespec` arg in zip2zip's help message). The option can be repeated to
+    include multiple entries.
+
"""
from __future__ import print_function
import logging
import os
-import shutil
import sys
import zipfile
@@ -46,160 +48,196 @@
from build_super_image import BuildSuperImage
if sys.hexversion < 0x02070000:
- print("Python 2.7 or newer is required.", file=sys.stderr)
+ print('Python 2.7 or newer is required.', file=sys.stderr)
sys.exit(1)
logger = logging.getLogger(__name__)
OPTIONS = common.OPTIONS
+OPTIONS.additional_entries = []
+OPTIONS.bootable_only = False
+OPTIONS.put_super = None
+OPTIONS.dynamic_partition_list = None
+OPTIONS.super_device_list = None
+OPTIONS.retrofit_dap = None
+OPTIONS.build_super = None
+OPTIONS.sparse_userimages = None
+
def LoadOptions(input_file):
- """
- Load information from input_file to OPTIONS.
+ """Loads information from input_file to OPTIONS.
Args:
- input_file: A Zipfile instance of input zip file, or path to the directory
- of extracted zip.
+ input_file: Path to the input target_files zip file.
"""
- info = OPTIONS.info_dict = common.LoadInfoDict(input_file)
+ with zipfile.ZipFile(input_file) as input_zip:
+ info = OPTIONS.info_dict = common.LoadInfoDict(input_zip)
- OPTIONS.put_super = info.get("super_image_in_update_package") == "true"
- OPTIONS.dynamic_partition_list = info.get("dynamic_partition_list",
- "").strip().split()
- OPTIONS.super_device_list = info.get("super_block_devices",
- "").strip().split()
- OPTIONS.retrofit_dap = info.get("dynamic_partition_retrofit") == "true"
- OPTIONS.build_super = info.get("build_super_partition") == "true"
- OPTIONS.sparse_userimages = bool(info.get("extfs_sparse_flag"))
+ OPTIONS.put_super = info.get('super_image_in_update_package') == 'true'
+ OPTIONS.dynamic_partition_list = info.get('dynamic_partition_list',
+ '').strip().split()
+ OPTIONS.super_device_list = info.get('super_block_devices',
+ '').strip().split()
+ OPTIONS.retrofit_dap = info.get('dynamic_partition_retrofit') == 'true'
+ OPTIONS.build_super = info.get('build_super_partition') == 'true'
+ OPTIONS.sparse_userimages = bool(info.get('extfs_sparse_flag'))
-def CopyInfo(input_tmp, output_zip):
- """Copy the android-info.txt file from the input to the output."""
- common.ZipWrite(
- output_zip, os.path.join(input_tmp, "OTA", "android-info.txt"),
- "android-info.txt")
-
-
-def CopyUserImages(input_tmp, output_zip):
- """
- Copy user images from the unzipped input and write to output_zip.
+def CopyZipEntries(input_file, output_file, entries):
+ """Copies ZIP entries between input and output files.
Args:
- input_tmp: path to the unzipped input.
- output_zip: a ZipFile instance to write images to.
+ input_file: Path to the input target_files zip.
+ output_file: Output filename.
+ entries: A list of entries to copy, in a format that's accepted by zip2zip
+ (e.g. 'OTA/android-info.txt:android-info.txt', which copies
+ `OTA/android-info.txt` from input_file into output_file as
+ `android-info.txt`. Refer to the `filespec` arg in zip2zip's help
+ message).
"""
- dynamic_images = [p + ".img" for p in OPTIONS.dynamic_partition_list]
+ logger.info('Writing %d entries to archive...', len(entries))
+ cmd = ['zip2zip', '-i', input_file, '-o', output_file]
+ cmd.extend(entries)
+ common.RunAndCheckOutput(cmd)
+
+
+def EntriesForUserImages(input_file):
+ """Returns the user images entries to be copied.
+
+ Args:
+ input_file: Path to the input target_files zip file.
+ """
+ dynamic_images = [p + '.img' for p in OPTIONS.dynamic_partition_list]
# Filter out system_other for launch DAP devices because it is in super image.
- if not OPTIONS.retrofit_dap and "system" in OPTIONS.dynamic_partition_list:
- dynamic_images.append("system_other.img")
+ if not OPTIONS.retrofit_dap and 'system' in OPTIONS.dynamic_partition_list:
+ dynamic_images.append('system_other.img')
- images_path = os.path.join(input_tmp, "IMAGES")
- # A target-files zip must contain the images since Lollipop.
- assert os.path.exists(images_path)
- for image in sorted(os.listdir(images_path)):
- if OPTIONS.bootable_only and image not in ("boot.img", "recovery.img"):
+ entries = [
+ 'OTA/android-info.txt:android-info.txt',
+ ]
+ with zipfile.ZipFile(input_file) as input_zip:
+ namelist = input_zip.namelist()
+
+ for image_path in [name for name in namelist if name.startswith('IMAGES/')]:
+ image = os.path.basename(image_path)
+ if OPTIONS.bootable_only and image not in ('boot.img', 'recovery.img'):
continue
- if not image.endswith(".img"):
+ if not image.endswith('.img'):
continue
- if image == "recovery-two-step.img":
- continue
+    # Filter out super_empty and the images that are already in the super
+    # partition.
if OPTIONS.put_super:
- if image == "super_empty.img":
+ if image == 'super_empty.img':
continue
if image in dynamic_images:
continue
- logger.info("writing %s to archive...", os.path.join("IMAGES", image))
- common.ZipWrite(output_zip, os.path.join(images_path, image), image)
+ entries.append('{}:{}'.format(image_path, image))
+ return entries
-def WriteSuperImages(input_tmp, output_zip):
- """
- Write super images from the unzipped input and write to output_zip. This is
- only done if super_image_in_update_package is set to "true".
+def EntriesForSplitSuperImages(input_file):
+ """Returns the entries for split super images.
- - For retrofit dynamic partition devices, copy split super images from target
- files package.
- - For devices launched with dynamic partitions, build super image from target
- files package.
+ This is only done for retrofit dynamic partition devices.
Args:
- input_tmp: path to the unzipped input.
- output_zip: a ZipFile instance to write images to.
+ input_file: Path to the input target_files zip file.
"""
- if not OPTIONS.build_super or not OPTIONS.put_super:
- return
+ with zipfile.ZipFile(input_file) as input_zip:
+ namelist = input_zip.namelist()
+ entries = []
+ for device in OPTIONS.super_device_list:
+ image = 'OTA/super_{}.img'.format(device)
+ assert image in namelist, 'Failed to find {}'.format(image)
+ entries.append('{}:{}'.format(image, os.path.basename(image)))
+ return entries
- if OPTIONS.retrofit_dap:
- # retrofit devices already have split super images under OTA/
- images_path = os.path.join(input_tmp, "OTA")
- for device in OPTIONS.super_device_list:
- image = "super_%s.img" % device
- image_path = os.path.join(images_path, image)
- assert os.path.exists(image_path)
- logger.info("writing %s to archive...", os.path.join("OTA", image))
- common.ZipWrite(output_zip, image_path, image)
- else:
- # super image for non-retrofit devices aren't in target files package,
- # so build it.
- super_file = common.MakeTempFile("super_", ".img")
- logger.info("building super image %s...", super_file)
- BuildSuperImage(input_tmp, super_file)
- logger.info("writing super.img to archive...")
- common.ZipWrite(output_zip, super_file, "super.img")
+
+def RebuildAndWriteSuperImages(input_file, output_file):
+ """Builds and writes super images to the output file."""
+ logger.info('Building super image...')
+
+ # We need files under IMAGES/, OTA/, META/ for img_from_target_files.py.
+ # However, common.LoadInfoDict() may read additional files under BOOT/,
+ # RECOVERY/ and ROOT/. So unzip everything from the target_files.zip.
+ input_tmp = common.UnzipTemp(input_file)
+
+ super_file = common.MakeTempFile('super_', '.img')
+ BuildSuperImage(input_tmp, super_file)
+
+ logger.info('Writing super.img to archive...')
+ with zipfile.ZipFile(
+ output_file, 'a', compression=zipfile.ZIP_DEFLATED,
+ allowZip64=not OPTIONS.sparse_userimages) as output_zip:
+ common.ZipWrite(output_zip, super_file, 'super.img')
+
+
+def ImgFromTargetFiles(input_file, output_file):
+ """Creates an image archive from the input target_files zip.
+
+ Args:
+ input_file: Path to the input target_files zip.
+ output_file: Output filename.
+
+ Raises:
+ ValueError: On invalid input.
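+
+  Example (illustrative paths):
+    ImgFromTargetFiles('dist/aosp_arm64-target_files.zip',
+                       'dist/aosp_arm64-img.zip')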
+ """
+ if not zipfile.is_zipfile(input_file):
+ raise ValueError('%s is not a valid zipfile' % input_file)
+
+ logger.info('Building image zip from target files zip.')
+
+ LoadOptions(input_file)
+
+ # Entries to be copied into the output file.
+ entries = EntriesForUserImages(input_file)
+
+  # Split super images are only available in the target_files.zip for devices
+  # that retrofit dynamic partitions.
+ rebuild_super = False
+ if OPTIONS.build_super and OPTIONS.put_super:
+ if OPTIONS.retrofit_dap:
+ entries += EntriesForSplitSuperImages(input_file)
+ else:
+ rebuild_super = True
+
+ # Any additional entries provided by caller.
+ entries += OPTIONS.additional_entries
+
+ CopyZipEntries(input_file, output_file, entries)
+
+ if rebuild_super:
+ RebuildAndWriteSuperImages(input_file, output_file)
def main(argv):
- # This allows modifying the value from inner function.
- bootable_only_array = [False]
- def option_handler(o, _):
- if o in ("-z", "--bootable_zip"):
- bootable_only_array[0] = True
+ def option_handler(o, a):
+ if o in ('-z', '--bootable_zip'):
+ OPTIONS.bootable_only = True
+ elif o == '--additional':
+ OPTIONS.additional_entries.append(a)
else:
return False
return True
args = common.ParseOptions(argv, __doc__,
- extra_opts="z",
- extra_long_opts=["bootable_zip"],
+ extra_opts='z',
+ extra_long_opts=[
+ 'additional=',
+ 'bootable_zip',
+ ],
extra_option_handler=option_handler)
-
- OPTIONS.bootable_only = bootable_only_array[0]
-
if len(args) != 2:
common.Usage(__doc__)
sys.exit(1)
common.InitLogging()
- target_files = args[0]
- if os.path.isdir(target_files):
- logger.info("Building image zip from extracted target files.")
- OPTIONS.input_tmp = target_files
- elif zipfile.is_zipfile(target_files):
- logger.info("Building image zip from target files zip.")
- # We need files under IMAGES/, OTA/, META/ for img_from_target_files.py.
- # However, common.LoadInfoDict() may read additional files under BOOT/,
- # RECOVERY/ and ROOT/. So unzip everything from the target_files.zip.
- OPTIONS.input_tmp = common.UnzipTemp(target_files)
- else:
- raise ValueError("%s is not a valid path." % target_files)
+ ImgFromTargetFiles(args[0], args[1])
- LoadOptions(OPTIONS.input_tmp)
- output_zip = zipfile.ZipFile(args[1], "w", compression=zipfile.ZIP_DEFLATED,
- allowZip64=not OPTIONS.sparse_userimages)
-
- try:
- CopyInfo(OPTIONS.input_tmp, output_zip)
- CopyUserImages(OPTIONS.input_tmp, output_zip)
- WriteSuperImages(OPTIONS.input_tmp, output_zip)
- finally:
- logger.info("cleaning up...")
- common.ZipClose(output_zip)
-
- logger.info("done.")
+ logger.info('done.')
if __name__ == '__main__':
@@ -207,7 +245,7 @@
common.CloseInheritedPipes()
main(sys.argv[1:])
except common.ExternalError as e:
- logger.exception("\n ERROR:\n")
+ logger.exception('\n ERROR:\n')
sys.exit(1)
finally:
common.Cleanup()
diff --git a/tools/releasetools/make_recovery_patch b/tools/releasetools/make_recovery_patch
deleted file mode 120000
index 45cec08..0000000
--- a/tools/releasetools/make_recovery_patch
+++ /dev/null
@@ -1 +0,0 @@
-make_recovery_patch.py
\ No newline at end of file
diff --git a/tools/releasetools/make_recovery_patch.py b/tools/releasetools/make_recovery_patch.py
old mode 100755
new mode 100644
diff --git a/tools/releasetools/merge_builds.py b/tools/releasetools/merge_builds.py
new file mode 100644
index 0000000..ca348cf
--- /dev/null
+++ b/tools/releasetools/merge_builds.py
@@ -0,0 +1,197 @@
+#!/usr/bin/env python
+#
+# Copyright (C) 2019 The Android Open Source Project
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may not
+# use this file except in compliance with the License. You may obtain a copy of
+# the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations under
+# the License.
+#
+"""Merges two non-dist partial builds together.
+
+Given two partial builds, a framework build and a vendor build, merge the builds
+together so that the images can be flashed using 'fastboot flashall'.
+
+To support both DAP and non-DAP vendor builds with a single framework partial
+build, the framework partial build should always be built with DAP enabled. The
+vendor partial build determines whether the merged result supports DAP.
+
+This script does not require builds to be built with 'make dist'.
+This script regenerates super_empty.img and vbmeta.img if necessary. Other
+images are assumed to not require regeneration.
+
+Usage: merge_builds.py [args]
+
+ --framework_images comma_separated_image_list
+ Comma-separated list of image names that should come from the framework
+ build.
+
+ --product_out_framework product_out_framework_path
+ Path to out/target/product/<framework build>.
+
+ --product_out_vendor product_out_vendor_path
+ Path to out/target/product/<vendor build>.
+
+ --build_vbmeta
+ If provided, vbmeta.img will be regenerated in out/target/product/<vendor
+ build>.
+
+ --framework_misc_info_keys
+ The optional path to a newline-separated config file containing keys to
+ obtain from the framework instance of misc_info.txt, used for creating
+ vbmeta.img. The remaining keys come from the vendor instance.
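+
+Example invocation (paths are illustrative):
+
+  merge_builds.py \
+    --framework_images system \
+    --product_out_framework out/target/product/framework_build \
+    --product_out_vendor out/target/product/vendor_build \
+    --build_vbmeta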
+"""
+from __future__ import print_function
+
+import logging
+import os
+import sys
+
+import build_super_image
+import common
+
+logger = logging.getLogger(__name__)
+
+OPTIONS = common.OPTIONS
+OPTIONS.framework_images = ("system",)
+OPTIONS.product_out_framework = None
+OPTIONS.product_out_vendor = None
+OPTIONS.build_vbmeta = False
+OPTIONS.framework_misc_info_keys = None
+
+
+def CreateImageSymlinks():
+ for image in OPTIONS.framework_images:
+ image_path = os.path.join(OPTIONS.product_out_framework, "%s.img" % image)
+ symlink_path = os.path.join(OPTIONS.product_out_vendor, "%s.img" % image)
+ if os.path.exists(symlink_path):
+ if os.path.islink(symlink_path):
+ os.remove(symlink_path)
+ else:
+ raise ValueError("Attempting to overwrite built image: %s" %
+ symlink_path)
+ os.symlink(image_path, symlink_path)
+
+
+def BuildSuperEmpty():
+ framework_dict = common.LoadDictionaryFromFile(
+ os.path.join(OPTIONS.product_out_framework, "misc_info.txt"))
+ vendor_dict = common.LoadDictionaryFromFile(
+ os.path.join(OPTIONS.product_out_vendor, "misc_info.txt"))
+  # Regenerate super_empty.img if both partial builds enable DAP. If only the
+  # vendor build enables DAP, the vendor build's existing super_empty.img will
+  # be reused. If only the framework build enables DAP, super_empty should be
+  # included in the --framework_images flag to copy the existing
+  # super_empty.img from the framework build.
+ if (framework_dict.get("use_dynamic_partitions") == "true") and (
+ vendor_dict.get("use_dynamic_partitions") == "true"):
+ logger.info("Building super_empty.img.")
+ merged_dict = dict(vendor_dict)
+ merged_dict.update(
+ common.MergeDynamicPartitionInfoDicts(
+ framework_dict=framework_dict,
+ vendor_dict=vendor_dict,
+ size_prefix="super_",
+ size_suffix="_group_size",
+ list_prefix="super_",
+ list_suffix="_partition_list"))
+ output_super_empty_path = os.path.join(OPTIONS.product_out_vendor,
+ "super_empty.img")
+ build_super_image.BuildSuperImage(merged_dict, output_super_empty_path)
+
+
+def BuildVBMeta():
+ logger.info("Building vbmeta.img.")
+
+ framework_dict = common.LoadDictionaryFromFile(
+ os.path.join(OPTIONS.product_out_framework, "misc_info.txt"))
+ vendor_dict = common.LoadDictionaryFromFile(
+ os.path.join(OPTIONS.product_out_vendor, "misc_info.txt"))
+ merged_dict = dict(vendor_dict)
+ if OPTIONS.framework_misc_info_keys:
+ for key in common.LoadListFromFile(OPTIONS.framework_misc_info_keys):
+ merged_dict[key] = framework_dict[key]
+
+ # Build vbmeta.img using partitions in product_out_vendor.
+ partitions = {}
+ for partition in common.AVB_PARTITIONS:
+ partition_path = os.path.join(OPTIONS.product_out_vendor,
+ "%s.img" % partition)
+ if os.path.exists(partition_path):
+ partitions[partition] = partition_path
+
+ # vbmeta_partitions includes the partitions that should be included into
+ # top-level vbmeta.img, which are the ones that are not included in any
+ # chained VBMeta image plus the chained VBMeta images themselves.
+ vbmeta_partitions = common.AVB_PARTITIONS[:]
+ for partition in common.AVB_VBMETA_PARTITIONS:
+ chained_partitions = merged_dict.get("avb_%s" % partition, "").strip()
+ if chained_partitions:
+ partitions[partition] = os.path.join(OPTIONS.product_out_vendor,
+ "%s.img" % partition)
+ vbmeta_partitions = [
+ item for item in vbmeta_partitions
+ if item not in chained_partitions.split()
+ ]
+ vbmeta_partitions.append(partition)
+
+ output_vbmeta_path = os.path.join(OPTIONS.product_out_vendor, "vbmeta.img")
+ OPTIONS.info_dict = merged_dict
+ common.BuildVBMeta(output_vbmeta_path, partitions, "vbmeta",
+ vbmeta_partitions)
+
+
+def MergeBuilds():
+ CreateImageSymlinks()
+ BuildSuperEmpty()
+ if OPTIONS.build_vbmeta:
+ BuildVBMeta()
+
+
+def main():
+ common.InitLogging()
+
+ def option_handler(o, a):
+ if o == "--framework_images":
+ OPTIONS.framework_images = [i.strip() for i in a.split(",")]
+ elif o == "--product_out_framework":
+ OPTIONS.product_out_framework = a
+ elif o == "--product_out_vendor":
+ OPTIONS.product_out_vendor = a
+ elif o == "--build_vbmeta":
+ OPTIONS.build_vbmeta = True
+ elif o == "--framework_misc_info_keys":
+ OPTIONS.framework_misc_info_keys = a
+ else:
+ return False
+ return True
+
+ args = common.ParseOptions(
+ sys.argv[1:],
+ __doc__,
+ extra_long_opts=[
+ "framework_images=",
+ "product_out_framework=",
+ "product_out_vendor=",
+ "build_vbmeta",
+ "framework_misc_info_keys=",
+ ],
+ extra_option_handler=option_handler)
+
+ if (args or OPTIONS.product_out_framework is None or
+ OPTIONS.product_out_vendor is None):
+ common.Usage(__doc__)
+ sys.exit(1)
+
+ MergeBuilds()
+
+
+if __name__ == "__main__":
+ main()
diff --git a/tools/releasetools/merge_target_files.py b/tools/releasetools/merge_target_files.py
index a9e9151..61c4f4e 100755
--- a/tools/releasetools/merge_target_files.py
+++ b/tools/releasetools/merge_target_files.py
@@ -13,6 +13,7 @@
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations under
# the License.
+#
"""This script merges two partial target files packages.
One package contains framework files, and the other contains vendor files.
@@ -86,13 +87,14 @@
import add_img_to_target_files
import build_super_image
+import check_target_files_vintf
import common
import img_from_target_files
import ota_from_target_files
logger = logging.getLogger(__name__)
+
OPTIONS = common.OPTIONS
-OPTIONS.verbose = True
OPTIONS.framework_target_files = None
OPTIONS.framework_item_list = None
OPTIONS.framework_misc_info_keys = None
@@ -116,8 +118,6 @@
'META/apkcerts.txt',
'META/filesystem_config.txt',
'META/root_filesystem_config.txt',
- 'META/system_manifest.xml',
- 'META/system_matrix.xml',
'META/update_engine_config.txt',
'PRODUCT/*',
'ROOT/*',
@@ -143,8 +143,8 @@
'avb_system_rollback_index_location',
'avb_product_hashtree_enable',
'avb_product_add_hashtree_footer_args',
- 'avb_product_services_hashtree_enable',
- 'avb_product_services_add_hashtree_footer_args',
+ 'avb_system_ext_hashtree_enable',
+ 'avb_system_ext_add_hashtree_footer_args',
'system_root_image',
'root_dir',
'ab_update',
@@ -162,8 +162,6 @@
'META/otakeys.txt',
'META/releasetools.py',
'META/vendor_filesystem_config.txt',
- 'META/vendor_manifest.xml',
- 'META/vendor_matrix.xml',
'BOOT/*',
'DATA/*',
'ODM/*',
@@ -190,7 +188,7 @@
'DATA/',
'ODM/',
'PRODUCT/',
- 'PRODUCT_SERVICES/',
+ 'SYSTEM_EXT/',
'RADIO/',
'RECOVERY/',
'ROOT/',
@@ -201,10 +199,10 @@
def write_sorted_data(data, path):
- """Write the sorted contents of either a list or dict to file.
+ """Writes the sorted contents of either a list or dict to file.
- This function sorts the contents of the list or dict and then
- writes the resulting sorted contents to a file specified by path.
+ This function sorts the contents of the list or dict and then writes the
+ resulting sorted contents to a file specified by path.
Args:
data: The list or dict to sort and write.
@@ -219,7 +217,7 @@
def extract_items(target_files, target_files_temp_dir, extract_item_list):
- """Extract items from target files to temporary directory.
+ """Extracts items from target files to temporary directory.
This function extracts from the specified target files zip archive into the
specified temporary directory, the items specified in the extract item list.
@@ -236,8 +234,7 @@
# Filter the extract_item_list to remove any items that do not exist in the
# zip file. Otherwise, the extraction step will fail.
- with zipfile.ZipFile(
- target_files, 'r', allowZip64=True) as target_files_zipfile:
+ with zipfile.ZipFile(target_files, allowZip64=True) as target_files_zipfile:
target_files_namelist = target_files_zipfile.namelist()
filtered_extract_item_list = []
@@ -279,21 +276,6 @@
shutil.copyfile(original_file_path, copied_file_path)
-def read_config_list(config_file_path):
- """Reads a config file into a list of strings.
-
- Expects the file to be newline-separated.
-
- Args:
- config_file_path: The path to the config file to open and read.
-
- Returns:
- The list of strings in the config file.
- """
- with open(config_file_path) as config_file:
- return config_file.read().splitlines()
-
-
def validate_config_lists(framework_item_list, framework_misc_info_keys,
vendor_item_list):
"""Performs validations on the merge config lists.
@@ -334,9 +316,9 @@
in_vendor = any(item.startswith(partition) for item in vendor_item_list)
if in_framework and in_vendor:
logger.error(
- 'Cannot extract items from {0} for both the framework and vendor'
+ 'Cannot extract items from %s for both the framework and vendor'
' builds. Please ensure only one merge config item list'
- ' includes {0}.'.format(partition))
+ ' includes %s.', partition, partition)
has_error = True
if ('dynamic_partition_list' in framework_misc_info_keys) or (
@@ -351,15 +333,14 @@
def process_ab_partitions_txt(framework_target_files_temp_dir,
vendor_target_files_temp_dir,
output_target_files_temp_dir):
- """Perform special processing for META/ab_partitions.txt.
+ """Performs special processing for META/ab_partitions.txt.
- This function merges the contents of the META/ab_partitions.txt files from
- the framework directory and the vendor directory, placing the merged result in
- the output directory. The precondition in that the files are already
- extracted. The post condition is that the output META/ab_partitions.txt
- contains the merged content. The format for each ab_partitions.txt a one
- partition name per line. The output file contains the union of the parition
- names.
+  This function merges the contents of the META/ab_partitions.txt files from the
+  framework directory and the vendor directory, placing the merged result in the
+  output directory. The precondition is that the files are already extracted.
+  The post condition is that the output META/ab_partitions.txt contains the
+  merged content. The format for each ab_partitions.txt is one partition name
+  per line. The output file contains the union of the partition names.
Args:
framework_target_files_temp_dir: The name of a directory containing the
@@ -392,10 +373,10 @@
def append_recovery_to_filesystem_config(output_target_files_temp_dir):
- """Perform special processing for META/filesystem_config.txt.
+ """Performs special processing for META/filesystem_config.txt.
- This function appends recovery information to META/filesystem_config.txt
- so that recovery patch regeneration will succeed.
+ This function appends recovery information to META/filesystem_config.txt so
+ that recovery patch regeneration will succeed.
Args:
output_target_files_temp_dir: The name of a directory that will be used to
@@ -417,69 +398,11 @@
'selabel=u:object_r:install_recovery_exec:s0 capabilities=0x0\n')
-def merge_dynamic_partition_info_dicts(framework_dict,
- vendor_dict,
- include_dynamic_partition_list=True,
- size_prefix='',
- size_suffix='',
- list_prefix='',
- list_suffix=''):
- """Merges dynamic partition info variables.
-
- Args:
- framework_dict: The dictionary of dynamic partition info variables from the
- partial framework target files.
- vendor_dict: The dictionary of dynamic partition info variables from the
- partial vendor target files.
- include_dynamic_partition_list: If true, merges the dynamic_partition_list
- variable. Not all use cases need this variable merged.
- size_prefix: The prefix in partition group size variables that precedes the
- name of the partition group. For example, partition group 'group_a' with
- corresponding size variable 'super_group_a_group_size' would have the
- size_prefix 'super_'.
- size_suffix: Similar to size_prefix but for the variable's suffix. For
- example, 'super_group_a_group_size' would have size_suffix '_group_size'.
- list_prefix: Similar to size_prefix but for the partition group's
- partition_list variable.
- list_suffix: Similar to size_suffix but for the partition group's
- partition_list variable.
-
- Returns:
- The merged dynamic partition info dictionary.
- """
- merged_dict = {}
- # Partition groups and group sizes are defined by the vendor dict because
- # these values may vary for each board that uses a shared system image.
- merged_dict['super_partition_groups'] = vendor_dict['super_partition_groups']
- if include_dynamic_partition_list:
- framework_dynamic_partition_list = framework_dict.get(
- 'dynamic_partition_list', '')
- vendor_dynamic_partition_list = vendor_dict.get('dynamic_partition_list',
- '')
- merged_dict['dynamic_partition_list'] = (
- '%s %s' % (framework_dynamic_partition_list,
- vendor_dynamic_partition_list)).strip()
- for partition_group in merged_dict['super_partition_groups'].split(' '):
- # Set the partition group's size using the value from the vendor dict.
- key = '%s%s%s' % (size_prefix, partition_group, size_suffix)
- if key not in vendor_dict:
- raise ValueError('Vendor dict does not contain required key %s.' % key)
- merged_dict[key] = vendor_dict[key]
-
- # Set the partition group's partition list using a concatenation of the
- # framework and vendor partition lists.
- key = '%s%s%s' % (list_prefix, partition_group, list_suffix)
- merged_dict[key] = (
- '%s %s' %
- (framework_dict.get(key, ''), vendor_dict.get(key, ''))).strip()
- return merged_dict
-
-
def process_misc_info_txt(framework_target_files_temp_dir,
vendor_target_files_temp_dir,
output_target_files_temp_dir,
framework_misc_info_keys):
- """Perform special processing for META/misc_info.txt.
+ """Performs special processing for META/misc_info.txt.
This function merges the contents of the META/misc_info.txt files from the
framework directory and the vendor directory, placing the merged result in the
@@ -518,7 +441,7 @@
# Merge misc info keys used for Dynamic Partitions.
if (merged_dict.get('use_dynamic_partitions') == 'true') and (
framework_dict.get('use_dynamic_partitions') == 'true'):
- merged_dynamic_partitions_dict = merge_dynamic_partition_info_dicts(
+ merged_dynamic_partitions_dict = common.MergeDynamicPartitionInfoDicts(
framework_dict=framework_dict,
vendor_dict=merged_dict,
size_prefix='super_',
@@ -551,7 +474,7 @@
def process_dynamic_partitions_info_txt(framework_target_files_dir,
vendor_target_files_dir,
output_target_files_dir):
- """Perform special processing for META/dynamic_partitions_info.txt.
+ """Performs special processing for META/dynamic_partitions_info.txt.
This function merges the contents of the META/dynamic_partitions_info.txt
files from the framework directory and the vendor directory, placing the
@@ -581,7 +504,7 @@
vendor_dynamic_partitions_dict = common.LoadDictionaryFromFile(
os.path.join(vendor_target_files_dir, *dynamic_partitions_info_path))
- merged_dynamic_partitions_dict = merge_dynamic_partition_info_dicts(
+ merged_dynamic_partitions_dict = common.MergeDynamicPartitionInfoDicts(
framework_dict=framework_dynamic_partitions_dict,
vendor_dict=vendor_dynamic_partitions_dict,
# META/dynamic_partitions_info.txt does not use dynamic_partition_list.
@@ -599,14 +522,13 @@
def process_apex_keys_apk_certs_common(framework_target_files_dir,
vendor_target_files_dir,
output_target_files_dir, file_name):
- """Perform special processing for META/apexkeys.txt or META/apkcerts.txt.
+ """Performs special processing for META/apexkeys.txt or META/apkcerts.txt.
This function merges the contents of the META/apexkeys.txt or
- META/apkcerts.txt files from the framework directory and the vendor
- directory, placing the merged result in the output directory. The
- precondition in that the files are already extracted. The post condition
- is that the output META/apexkeys.txt or META/apkcerts.txt contains the
- merged content.
+ META/apkcerts.txt files from the framework directory and the vendor directory,
+  placing the merged result in the output directory. The precondition is that
+ the files are already extracted. The post condition is that the output
+ META/apexkeys.txt or META/apkcerts.txt contains the merged content.
Args:
framework_target_files_dir: The name of a directory containing the special
@@ -673,7 +595,7 @@
vendor_target_files_temp_dir,
output_target_files_temp_dir,
framework_misc_info_keys, rebuild_recovery):
- """Perform special-case processing for certain target files items.
+ """Performs special-case processing for certain target files items.
Certain files in the output target files package require special-case
processing. This function performs all that special-case processing.
@@ -732,16 +654,30 @@
file_name='apexkeys.txt')
-def merge_target_files(temp_dir, framework_target_files, framework_item_list,
- framework_misc_info_keys, vendor_target_files,
- vendor_item_list, output_target_files, output_dir,
- output_item_list, output_ota, output_img,
- output_super_empty, rebuild_recovery):
- """Merge two target files packages together.
+def files_from_path(target_path, extra_args=None):
+  """Gets files under the given path.
- This function takes framework and vendor target files packages as input,
- performs various file extractions, special case processing, and finally
- creates a merged zip archive as output.
+  Gets (sub)files from the given target path and returns a sorted list.
+
+  Args:
+    target_path: Target path to get subfiles from.
+    extra_args: List of extra arguments for the find command. Optional.
+
+  Returns:
+    A sorted list of files and directories.
+ """
+
+ find_command = ['find', target_path] + (extra_args or [])
+ find_process = common.Run(find_command, stdout=subprocess.PIPE, verbose=False)
+ return common.RunAndCheckOutput(['sort'],
+ stdin=find_process.stdout,
+ verbose=False)
+
+
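For context, files_from_path above shells out to find and sort through common.Run and common.RunAndCheckOutput. A rough pure-Python equivalent for the no-extra-arguments case, shown only as an illustration (list_files_sorted is a hypothetical helper, not part of this change):

import os

def list_files_sorted(target_path):
  # 'find <path>' prints the path itself plus every descendant; piping through
  # 'sort' then orders the listing, which this sketch approximates in Python.
  entries = [target_path]
  for root, dirs, files in os.walk(target_path):
    entries.extend(os.path.join(root, name) for name in dirs + files)
  return ''.join(path + '\n' for path in sorted(entries))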
+def create_merged_package(temp_dir, framework_target_files, framework_item_list,
+ vendor_target_files, vendor_item_list,
+ framework_misc_info_keys, rebuild_recovery):
+ """Merges two target files packages into one target files structure.
Args:
temp_dir: The name of a directory we use when we extract items from the
@@ -753,29 +689,21 @@
target files package as is, meaning these items will land in the output
target files package exactly as they appear in the input partial framework
target files package.
- framework_misc_info_keys: The list of keys to obtain from the framework
- instance of META/misc_info.txt. The remaining keys from the vendor
- instance.
vendor_target_files: The name of the zip archive containing the vendor
partial target files package.
vendor_item_list: The list of items to extract from the partial vendor
target files package as is, meaning these items will land in the output
target files package exactly as they appear in the input partial vendor
target files package.
- output_target_files: The name of the output zip archive target files package
- created by merging framework and vendor.
- output_dir: The destination directory for saving merged files.
- output_item_list: The list of items to copy into the output_dir.
- output_ota: The name of the output zip archive ota package.
- output_img: The name of the output zip archive img package.
- output_super_empty: If provided, creates a super_empty.img file from the
- merged target files package and saves it at this path.
+ framework_misc_info_keys: The list of keys to obtain from the framework
+ instance of META/misc_info.txt. The remaining keys from the vendor
+ instance.
rebuild_recovery: If true, rebuild the recovery patch used by non-A/B
devices and write it to the system image.
- """
- logger.info('starting: merge framework %s and vendor %s into output %s',
- framework_target_files, vendor_target_files, output_target_files)
+ Returns:
+ Path to merged package under temp directory.
+ """
# Create directory names that we'll use when we extract files from framework,
# and vendor, and for zipping the final output.
@@ -832,19 +760,44 @@
framework_misc_info_keys=framework_misc_info_keys,
rebuild_recovery=rebuild_recovery)
- # Regenerate IMAGES in the temporary directory.
+ return output_target_files_temp_dir
+
+
+def generate_images(target_files_dir, rebuild_recovery):
+  """Generates images from target files.
+
+  This function takes the merged output temporary directory and creates images
+  from it.
+
+ Args:
+ target_files_dir: Path to merged temp directory.
+ rebuild_recovery: If true, rebuild the recovery patch used by non-A/B
+ devices and write it to the system image.
+ """
+
+ # Regenerate IMAGES in the target directory.
add_img_args = ['--verbose']
+ add_img_args.append('--add_missing')
if rebuild_recovery:
add_img_args.append('--rebuild_recovery')
- add_img_args.append(output_target_files_temp_dir)
+ add_img_args.append(target_files_dir)
add_img_to_target_files.main(add_img_args)
+
+def generate_super_empty_image(target_dir, output_super_empty):
+ """Generates super_empty image from target package.
+
+ Args:
+    target_dir: Path to the target files package, which contains misc_info.txt
+      with detailed information for the super image.
+ output_super_empty: If provided, copies a super_empty.img file from the
+ target files package to this path.
+ """
# Create super_empty.img using the merged misc_info.txt.
- misc_info_txt = os.path.join(output_target_files_temp_dir, 'META',
- 'misc_info.txt')
+ misc_info_txt = os.path.join(target_dir, 'META', 'misc_info.txt')
use_dynamic_partitions = common.LoadDictionaryFromFile(misc_info_txt).get(
'use_dynamic_partitions')
@@ -853,8 +806,7 @@
raise ValueError(
'Building super_empty.img requires use_dynamic_partitions=true.')
elif use_dynamic_partitions == 'true':
- super_empty_img = os.path.join(output_target_files_temp_dir, 'IMAGES',
- 'super_empty.img')
+ super_empty_img = os.path.join(target_dir, 'IMAGES', 'super_empty.img')
build_super_image_args = [
misc_info_txt,
super_empty_img,
@@ -865,15 +817,107 @@
if output_super_empty:
shutil.copyfile(super_empty_img, output_super_empty)
- # Create the IMG package from the merged target files (before zipping, in
- # order to avoid an unnecessary unzip and copy).
+
+def create_target_files_archive(output_file, source_dir, temp_dir):
+ """Creates archive from target package.
+
+ Args:
+ output_file: The name of the zip archive target files package.
+    source_dir: The directory containing the target files package to be
+      archived.
+ temp_dir: Path to temporary directory for any intermediate files.
+ """
+ output_target_files_list = os.path.join(temp_dir, 'output.list')
+ output_zip = os.path.abspath(output_file)
+ output_target_files_meta_dir = os.path.join(source_dir, 'META')
+
+ meta_content = files_from_path(output_target_files_meta_dir)
+ other_content = files_from_path(
+ source_dir,
+ ['-path', output_target_files_meta_dir, '-prune', '-o', '-print'])
+
+ with open(output_target_files_list, 'w') as f:
+ f.write(meta_content)
+ f.write(other_content)
+
+ command = [
+ 'soong_zip',
+ '-d',
+ '-o',
+ output_zip,
+ '-C',
+ source_dir,
+ '-l',
+ output_target_files_list,
+ ]
+
+ logger.info('creating %s', output_file)
+ common.RunAndWait(command, verbose=True)
+ logger.info('finished creating %s', output_file)
+
+ return output_zip
+
+
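The file list written by create_target_files_archive above records META/* before the rest of the tree, presumably so those entries land near the front of the archive that soong_zip builds from the -l list. A small sketch of that ordering on plain path lists (order_meta_first is a hypothetical helper used only for illustration):

import os

def order_meta_first(paths, source_dir):
  # Everything under <source_dir>/META sorts ahead of the remaining entries.
  meta_prefix = os.path.join(source_dir, 'META') + os.sep
  meta = sorted(p for p in paths if p.startswith(meta_prefix))
  other = sorted(p for p in paths if not p.startswith(meta_prefix))
  return meta + other

# order_meta_first(['out/IMAGES/system.img', 'out/META/misc_info.txt'], 'out')
#   returns ['out/META/misc_info.txt', 'out/IMAGES/system.img']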
+def merge_target_files(temp_dir, framework_target_files, framework_item_list,
+ framework_misc_info_keys, vendor_target_files,
+ vendor_item_list, output_target_files, output_dir,
+ output_item_list, output_ota, output_img,
+ output_super_empty, rebuild_recovery):
+ """Merges two target files packages together.
+
+ This function takes framework and vendor target files packages as input,
+ performs various file extractions, special case processing, and finally
+ creates a merged zip archive as output.
+
+ Args:
+ temp_dir: The name of a directory we use when we extract items from the
+ input target files packages, and also a scratch directory that we use for
+ temporary files.
+ framework_target_files: The name of the zip archive containing the framework
+ partial target files package.
+ framework_item_list: The list of items to extract from the partial framework
+ target files package as is, meaning these items will land in the output
+ target files package exactly as they appear in the input partial framework
+ target files package.
+ framework_misc_info_keys: The list of keys to obtain from the framework
+ instance of META/misc_info.txt. The remaining keys from the vendor
+ instance.
+ vendor_target_files: The name of the zip archive containing the vendor
+ partial target files package.
+ vendor_item_list: The list of items to extract from the partial vendor
+ target files package as is, meaning these items will land in the output
+ target files package exactly as they appear in the input partial vendor
+ target files package.
+ output_target_files: The name of the output zip archive target files package
+ created by merging framework and vendor.
+ output_dir: The destination directory for saving merged files.
+ output_item_list: The list of items to copy into the output_dir.
+ output_ota: The name of the output zip archive ota package.
+ output_img: The name of the output zip archive img package.
+ output_super_empty: If provided, creates a super_empty.img file from the
+ merged target files package and saves it at this path.
+ rebuild_recovery: If true, rebuild the recovery patch used by non-A/B
+ devices and write it to the system image.
+ """
+
+ logger.info('starting: merge framework %s and vendor %s into output %s',
+ framework_target_files, vendor_target_files, output_target_files)
+
+ output_target_files_temp_dir = create_merged_package(
+ temp_dir, framework_target_files, framework_item_list,
+ vendor_target_files, vendor_item_list, framework_misc_info_keys,
+ rebuild_recovery)
+
+ if not check_target_files_vintf.CheckVintf(output_target_files_temp_dir):
+ raise RuntimeError("Incompatible VINTF metadata")
+
+ generate_images(output_target_files_temp_dir, rebuild_recovery)
+
+ generate_super_empty_image(output_target_files_temp_dir, output_super_empty)
if output_img:
- img_from_target_files_args = [
- output_target_files_temp_dir,
- output_img,
- ]
- img_from_target_files.main(img_from_target_files_args)
+ # Create the IMG package from the merged target files (before zipping, in
+ # order to avoid an unnecessary unzip and copy).
+ img_from_target_files.main([output_target_files_temp_dir, output_img])
# Finally, create the output target files zip archive and/or copy the
# output items to the output target files directory.
@@ -884,59 +928,18 @@
if not output_target_files:
return
- output_zip = os.path.abspath(output_target_files)
- output_target_files_list = os.path.join(temp_dir, 'output.list')
- output_target_files_meta_dir = os.path.join(output_target_files_temp_dir,
- 'META')
-
- find_command = [
- 'find',
- output_target_files_meta_dir,
- ]
- find_process = common.Run(find_command, stdout=subprocess.PIPE, verbose=False)
- meta_content = common.RunAndCheckOutput(['sort'],
- stdin=find_process.stdout,
- verbose=False)
-
- find_command = [
- 'find', output_target_files_temp_dir, '-path',
- output_target_files_meta_dir, '-prune', '-o', '-print'
- ]
- find_process = common.Run(find_command, stdout=subprocess.PIPE, verbose=False)
- other_content = common.RunAndCheckOutput(['sort'],
- stdin=find_process.stdout,
- verbose=False)
-
- with open(output_target_files_list, 'wb') as f:
- f.write(meta_content)
- f.write(other_content)
-
- command = [
- 'soong_zip',
- '-d',
- '-o',
- output_zip,
- '-C',
- output_target_files_temp_dir,
- '-l',
- output_target_files_list,
- ]
- logger.info('creating %s', output_target_files)
- common.RunAndWait(command, verbose=True)
- logger.info('finished creating %s', output_target_files)
+ output_zip = create_target_files_archive(output_target_files,
+ output_target_files_temp_dir,
+ temp_dir)
# Create the OTA package from the merged target files package.
if output_ota:
- ota_from_target_files_args = [
- output_zip,
- output_ota,
- ]
- ota_from_target_files.main(ota_from_target_files_args)
+ ota_from_target_files.main([output_zip, output_ota])
def call_func_with_temp_dir(func, keep_tmp):
- """Manage the creation and cleanup of the temporary directory.
+ """Manages the creation and cleanup of the temporary directory.
This function calls the given function after first creating a temporary
directory. It also cleans up the temporary directory.
@@ -955,8 +958,6 @@
try:
func(temp_dir)
- except:
- raise
finally:
if keep_tmp:
logger.info('keeping %s', temp_dir)
@@ -987,10 +988,8 @@
elif o == '--framework-item-list':
OPTIONS.framework_item_list = a
elif o == '--system-misc-info-keys':
- logger.warning(
- '--system-misc-info-keys has been renamed to '
- '--framework-misc-info-keys'
- )
+ logger.warning('--system-misc-info-keys has been renamed to '
+ '--framework-misc-info-keys')
OPTIONS.framework_misc_info_keys = a
elif o == '--framework-misc-info-keys':
OPTIONS.framework_misc_info_keys = a
@@ -1050,6 +1049,7 @@
],
extra_option_handler=option_handler)
+ # pylint: disable=too-many-boolean-expressions
if (args or OPTIONS.framework_target_files is None or
OPTIONS.vendor_target_files is None or
(OPTIONS.output_target_files is None and OPTIONS.output_dir is None) or
@@ -1057,24 +1057,27 @@
common.Usage(__doc__)
sys.exit(1)
+ # Always turn on verbose logging.
+ OPTIONS.verbose = True
+
if OPTIONS.framework_item_list:
- framework_item_list = read_config_list(OPTIONS.framework_item_list)
+ framework_item_list = common.LoadListFromFile(OPTIONS.framework_item_list)
else:
framework_item_list = DEFAULT_FRAMEWORK_ITEM_LIST
if OPTIONS.framework_misc_info_keys:
- framework_misc_info_keys = read_config_list(
+ framework_misc_info_keys = common.LoadListFromFile(
OPTIONS.framework_misc_info_keys)
else:
framework_misc_info_keys = DEFAULT_FRAMEWORK_MISC_INFO_KEYS
if OPTIONS.vendor_item_list:
- vendor_item_list = read_config_list(OPTIONS.vendor_item_list)
+ vendor_item_list = common.LoadListFromFile(OPTIONS.vendor_item_list)
else:
vendor_item_list = DEFAULT_VENDOR_ITEM_LIST
if OPTIONS.output_item_list:
- output_item_list = read_config_list(OPTIONS.output_item_list)
+ output_item_list = common.LoadListFromFile(OPTIONS.output_item_list)
else:
output_item_list = None
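For context, the removed read_config_list helper earlier in this diff read a newline-separated config file into a list of strings; the calls above assume common.LoadListFromFile behaves the same way. A minimal sketch of that assumed behaviour (not the actual common.py implementation):

def load_list_from_file(file_path):
  # Reads a newline-separated file into a list of strings, mirroring the
  # removed read_config_list helper.
  with open(file_path) as f:
    return f.read().splitlines()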
diff --git a/tools/releasetools/ota_from_target_files.py b/tools/releasetools/ota_from_target_files.py
index 0e84327..dc75ce2 100755
--- a/tools/releasetools/ota_from_target_files.py
+++ b/tools/releasetools/ota_from_target_files.py
@@ -72,7 +72,7 @@
--skip_postinstall is implied.
--skip_compatibility_check
- Skip adding the compatibility package to the generated OTA package.
+ Skip checking compatibility of the input target files package.
--output_metadata_path
Write a copy of the metadata to a separate file. Therefore, users can
@@ -139,6 +139,9 @@
A/B OTA specific options
+ --disable_fec_computation
+    Disable the on-device FEC data computation for incremental updates.
+
--include_secondary
Additionally include the payload for secondary slot images (default:
False). Only meaningful when generating A/B OTAs.
@@ -181,6 +184,7 @@
from __future__ import print_function
+import collections
import logging
import multiprocessing
import os.path
@@ -188,9 +192,9 @@
import shutil
import struct
import sys
-import tempfile
import zipfile
+import check_target_files_vintf
import common
import edify_generator
import verity_utils
@@ -234,14 +238,19 @@
OPTIONS.retrofit_dynamic_partitions = False
OPTIONS.skip_compatibility_check = False
OPTIONS.output_metadata_path = None
+OPTIONS.disable_fec_computation = False
METADATA_NAME = 'META-INF/com/android/metadata'
POSTINSTALL_CONFIG = 'META/postinstall_config.txt'
DYNAMIC_PARTITION_INFO = 'META/dynamic_partitions_info.txt'
AB_PARTITIONS = 'META/ab_partitions.txt'
-UNZIP_PATTERN = ['IMAGES/*', 'META/*', 'RADIO/*']
+UNZIP_PATTERN = ['IMAGES/*', 'META/*', 'OTA/*', 'RADIO/*']
+# Files to be unzipped for target diffing purpose.
+TARGET_DIFFING_UNZIP_PATTERN = ['BOOT', 'RECOVERY', 'SYSTEM/*', 'VENDOR/*',
+ 'PRODUCT/*', 'SYSTEM_EXT/*', 'ODM/*']
RETROFIT_DAP_UNZIP_PATTERN = ['OTA/super_*.img', AB_PARTITIONS]
+SECONDARY_IMAGES_SKIP_PARTITIONS = ['odm', 'product', 'system_ext', 'vendor']
class BuildInfo(object):
@@ -276,8 +285,8 @@
_RO_PRODUCT_RESOLVE_PROPS = ["ro.product.brand", "ro.product.device",
"ro.product.manufacturer", "ro.product.model",
"ro.product.name"]
- _RO_PRODUCT_PROPS_DEFAULT_SOURCE_ORDER = ["product", "product_services",
- "odm", "vendor", "system"]
+ _RO_PRODUCT_PROPS_DEFAULT_SOURCE_ORDER = ["product", "odm", "vendor",
+ "system_ext", "system"]
def __init__(self, info_dict, oem_dicts):
"""Initializes a BuildInfo instance with the given dicts.
@@ -539,6 +548,15 @@
self.payload_properties = None
self.secondary = secondary
+ def _Run(self, cmd): # pylint: disable=no-self-use
+ # Don't pipe (buffer) the output if verbose is set. Let
+ # brillo_update_payload write to stdout/stderr directly, so its progress can
+ # be monitored.
+ if OPTIONS.verbose:
+ common.RunAndCheckOutput(cmd, stdout=None, stderr=None)
+ else:
+ common.RunAndCheckOutput(cmd)
+
def Generate(self, target_file, source_file=None, additional_args=None):
"""Generates a payload from the given target-files zip(s).
@@ -558,8 +576,10 @@
"--target_image", target_file]
if source_file is not None:
cmd.extend(["--source_image", source_file])
+ if OPTIONS.disable_fec_computation:
+ cmd.extend(["--disable_fec_computation", "true"])
cmd.extend(additional_args)
- common.RunAndCheckOutput(cmd)
+ self._Run(cmd)
self.payload_file = payload_file
self.payload_properties = None
@@ -583,7 +603,7 @@
"--signature_size", str(payload_signer.key_size),
"--metadata_hash_file", metadata_sig_file,
"--payload_hash_file", payload_sig_file]
- common.RunAndCheckOutput(cmd)
+ self._Run(cmd)
# 2. Sign the hashes.
signed_payload_sig_file = payload_signer.Sign(payload_sig_file)
@@ -598,7 +618,7 @@
"--signature_size", str(payload_signer.key_size),
"--metadata_signature_file", signed_metadata_sig_file,
"--payload_signature_file", signed_payload_sig_file]
- common.RunAndCheckOutput(cmd)
+ self._Run(cmd)
# 4. Dump the signed payload properties.
properties_file = common.MakeTempFile(prefix="payload-properties-",
@@ -606,7 +626,7 @@
cmd = ["brillo_update_payload", "properties",
"--payload", signed_payload_file,
"--properties_file", properties_file]
- common.RunAndCheckOutput(cmd)
+ self._Run(cmd)
if self.secondary:
with open(properties_file, "a") as f:
@@ -681,13 +701,12 @@
recovery_two_step_img_name = "recovery-two-step.img"
recovery_two_step_img_path = os.path.join(
- OPTIONS.input_tmp, "IMAGES", recovery_two_step_img_name)
+ OPTIONS.input_tmp, "OTA", recovery_two_step_img_name)
if os.path.exists(recovery_two_step_img_path):
- recovery_two_step_img = common.GetBootableImage(
- recovery_two_step_img_name, recovery_two_step_img_name,
- OPTIONS.input_tmp, "RECOVERY")
- common.ZipWriteStr(
- output_zip, recovery_two_step_img_name, recovery_two_step_img.data)
+ common.ZipWrite(
+ output_zip,
+ recovery_two_step_img_path,
+ arcname=recovery_two_step_img_name)
logger.info(
"two-step package: using %s in stage 1/3", recovery_two_step_img_name)
script.WriteRawImage("/boot", recovery_two_step_img_name)
@@ -711,20 +730,15 @@
return False
-def HasVendorPartition(target_files_zip):
- return HasPartition(target_files_zip, "vendor")
+def HasTrebleEnabled(target_files, target_info):
+ def HasVendorPartition(target_files):
+ if os.path.isdir(target_files):
+ return os.path.isdir(os.path.join(target_files, "VENDOR"))
+ if zipfile.is_zipfile(target_files):
+ return HasPartition(zipfile.ZipFile(target_files), "vendor")
+ raise ValueError("Unknown target_files argument")
-
-def HasProductPartition(target_files_zip):
- return HasPartition(target_files_zip, "product")
-
-
-def HasOdmPartition(target_files_zip):
- return HasPartition(target_files_zip, "odm")
-
-
-def HasTrebleEnabled(target_files_zip, target_info):
- return (HasVendorPartition(target_files_zip) and
+ return (HasVendorPartition(target_files) and
target_info.GetBuildProp("ro.treble.enabled") == "true")
@@ -749,74 +763,23 @@
source_info.GetBuildProp("ro.build.thumbprint"))
-def AddCompatibilityArchiveIfTrebleEnabled(target_zip, output_zip, target_info,
- source_info=None):
- """Adds compatibility info into the output zip if it's Treble-enabled target.
+def CheckVintfIfTrebleEnabled(target_files, target_info):
+ """Checks compatibility info of the input target files.
- Metadata used for on-device compatibility verification is retrieved from
- target_zip then added to compatibility.zip which is added to the output_zip
- archive.
+  Metadata used for compatibility verification is retrieved from target_files.
- Compatibility archive should only be included for devices that have enabled
+ Compatibility should only be checked for devices that have enabled
Treble support.
Args:
- target_zip: Zip file containing the source files to be included for OTA.
- output_zip: Zip file that will be sent for OTA.
+ target_files: Path to zip file containing the source files to be included
+ for OTA. Can also be the path to extracted directory.
target_info: The BuildInfo instance that holds the target build info.
- source_info: The BuildInfo instance that holds the source build info, if
- generating an incremental OTA; None otherwise.
"""
- def AddCompatibilityArchive(framework_updated, device_updated):
- """Adds compatibility info based on update status of both sides of Treble
- boundary.
-
- Args:
- framework_updated: If True, the system / product image will be updated
- and therefore their metadata should be included.
- device_updated: If True, the vendor / odm image will be updated and
- therefore their metadata should be included.
- """
- # Determine what metadata we need. Files are names relative to META/.
- compatibility_files = []
- device_metadata = ("vendor_manifest.xml", "vendor_matrix.xml")
- framework_metadata = ("system_manifest.xml", "system_matrix.xml")
- if device_updated:
- compatibility_files += device_metadata
- if framework_updated:
- compatibility_files += framework_metadata
-
- # Create new archive.
- compatibility_archive = tempfile.NamedTemporaryFile()
- compatibility_archive_zip = zipfile.ZipFile(
- compatibility_archive, "w", compression=zipfile.ZIP_DEFLATED)
-
- # Add metadata.
- for file_name in compatibility_files:
- target_file_name = "META/" + file_name
-
- if target_file_name in target_zip.namelist():
- data = target_zip.read(target_file_name)
- common.ZipWriteStr(compatibility_archive_zip, file_name, data)
-
- # Ensure files are written before we copy into output_zip.
- compatibility_archive_zip.close()
-
- # Only add the archive if we have any compatibility info.
- if compatibility_archive_zip.namelist():
- common.ZipWrite(output_zip, compatibility_archive.name,
- arcname="compatibility.zip",
- compress_type=zipfile.ZIP_STORED)
-
- def FingerprintChanged(source_fp, target_fp):
- if source_fp is None or target_fp is None:
- return True
- return source_fp != target_fp
-
# Will only proceed if the target has enabled the Treble support (as well as
# having a /vendor partition).
- if not HasTrebleEnabled(target_zip, target_info):
+ if not HasTrebleEnabled(target_files, target_info):
return
# Skip adding the compatibility package as a workaround for b/114240221. The
@@ -824,28 +787,96 @@
if OPTIONS.skip_compatibility_check:
return
- # Full OTA carries the info for system/vendor/product/odm
- if source_info is None:
- AddCompatibilityArchive(True, True)
- return
+ if not check_target_files_vintf.CheckVintf(target_files, target_info):
+ raise RuntimeError("VINTF compatibility check failed")
- source_fp = source_info.fingerprint
- target_fp = target_info.fingerprint
- system_updated = source_fp != target_fp
- # other build fingerprints could be possibly blacklisted at build time. For
- # such a case, we consider those images being changed.
- vendor_updated = FingerprintChanged(source_info.vendor_fingerprint,
- target_info.vendor_fingerprint)
- product_updated = HasProductPartition(target_zip) and \
- FingerprintChanged(source_info.product_fingerprint,
- target_info.product_fingerprint)
- odm_updated = HasOdmPartition(target_zip) and \
- FingerprintChanged(source_info.odm_fingerprint,
- target_info.odm_fingerprint)
+def GetBlockDifferences(target_zip, source_zip, target_info, source_info,
+ device_specific):
+  """Returns an ordered dict of block differences with partition name as key."""
- AddCompatibilityArchive(system_updated or product_updated,
- vendor_updated or odm_updated)
+ def GetIncrementalBlockDifferenceForPartition(name):
+ if not HasPartition(source_zip, name):
+ raise RuntimeError("can't generate incremental that adds {}".format(name))
+
+ partition_src = common.GetUserImage(name, OPTIONS.source_tmp, source_zip,
+ info_dict=source_info,
+ allow_shared_blocks=allow_shared_blocks)
+
+ hashtree_info_generator = verity_utils.CreateHashtreeInfoGenerator(
+ name, 4096, target_info)
+ partition_tgt = common.GetUserImage(name, OPTIONS.target_tmp, target_zip,
+ info_dict=target_info,
+ allow_shared_blocks=allow_shared_blocks,
+ hashtree_info_generator=
+ hashtree_info_generator)
+
+ # Check the first block of the source system partition for remount R/W only
+ # if the filesystem is ext4.
+ partition_source_info = source_info["fstab"]["/" + name]
+ check_first_block = partition_source_info.fs_type == "ext4"
+ # Disable using imgdiff for squashfs. 'imgdiff -z' expects input files to be
+ # in zip formats. However with squashfs, a) all files are compressed in LZ4;
+ # b) the blocks listed in block map may not contain all the bytes for a
+ # given file (because they're rounded to be 4K-aligned).
+ partition_target_info = target_info["fstab"]["/" + name]
+ disable_imgdiff = (partition_source_info.fs_type == "squashfs" or
+ partition_target_info.fs_type == "squashfs")
+ return common.BlockDifference(name, partition_src, partition_tgt,
+ check_first_block,
+ version=blockimgdiff_version,
+ disable_imgdiff=disable_imgdiff)
+
+ if source_zip:
+ # See notes in common.GetUserImage()
+ allow_shared_blocks = (source_info.get('ext4_share_dup_blocks') == "true" or
+ target_info.get('ext4_share_dup_blocks') == "true")
+ blockimgdiff_version = max(
+ int(i) for i in target_info.get(
+ "blockimgdiff_versions", "1").split(","))
+ assert blockimgdiff_version >= 3
+
+ block_diff_dict = collections.OrderedDict()
+ partition_names = ["system", "vendor", "product", "odm", "system_ext"]
+ for partition in partition_names:
+ if not HasPartition(target_zip, partition):
+ continue
+ # Full OTA update.
+ if not source_zip:
+ tgt = common.GetUserImage(partition, OPTIONS.input_tmp, target_zip,
+ info_dict=target_info,
+ reset_file_map=True)
+ block_diff_dict[partition] = common.BlockDifference(partition, tgt,
+ src=None)
+ # Incremental OTA update.
+ else:
+ block_diff_dict[partition] = GetIncrementalBlockDifferenceForPartition(
+ partition)
+ assert "system" in block_diff_dict
+
+ # Get the block diffs from the device specific script. If there is a
+ # duplicate block diff for a partition, ignore the diff in the generic script
+ # and use the one in the device specific script instead.
+ if source_zip:
+ device_specific_diffs = device_specific.IncrementalOTA_GetBlockDifferences()
+ function_name = "IncrementalOTA_GetBlockDifferences"
+ else:
+ device_specific_diffs = device_specific.FullOTA_GetBlockDifferences()
+ function_name = "FullOTA_GetBlockDifferences"
+
+ if device_specific_diffs:
+ assert all(isinstance(diff, common.BlockDifference)
+ for diff in device_specific_diffs), \
+ "{} is not returning a list of BlockDifference objects".format(
+ function_name)
+ for diff in device_specific_diffs:
+ if diff.partition in block_diff_dict:
+ logger.warning("Duplicate block difference found. Device specific block"
+ " diff for partition '%s' overrides the one in generic"
+ " script.", diff.partition)
+ block_diff_dict[diff.partition] = diff
+
+ return block_diff_dict
def WriteFullOTAPackage(input_zip, output_file):
@@ -889,6 +920,11 @@
target_info.WriteDeviceAssertions(script, OPTIONS.oem_no_mount)
device_specific.FullOTA_Assertions()
+ block_diff_dict = GetBlockDifferences(target_zip=input_zip, source_zip=None,
+ target_info=target_info,
+ source_info=None,
+ device_specific=device_specific)
+
# Two-step package strategy (in chronological order, which is *not*
# the order in which the generated script has things):
#
@@ -940,67 +976,39 @@
device_specific.FullOTA_InstallBegin()
- system_progress = 0.75
-
+ # All other partitions as well as the data wipe use 10% of the progress, and
+ # the update of the system partition takes the remaining progress.
+ system_progress = 0.9 - (len(block_diff_dict) - 1) * 0.1
if OPTIONS.wipe_user_data:
system_progress -= 0.1
- if HasVendorPartition(input_zip):
- system_progress -= 0.1
-
- script.ShowProgress(system_progress, 0)
-
- def GetBlockDifference(partition):
- # Full OTA is done as an "incremental" against an empty source image. This
- # has the effect of writing new data from the package to the entire
- # partition, but lets us reuse the updater code that writes incrementals to
- # do it.
- tgt = common.GetUserImage(partition, OPTIONS.input_tmp, input_zip,
- info_dict=target_info,
- reset_file_map=True)
- diff = common.BlockDifference(partition, tgt, src=None)
- return diff
-
- device_specific_diffs = device_specific.FullOTA_GetBlockDifferences()
- if device_specific_diffs:
- assert all(isinstance(diff, common.BlockDifference)
- for diff in device_specific_diffs), \
- "FullOTA_GetBlockDifferences is not returning a list of " \
- "BlockDifference objects"
-
- progress_dict = dict()
- block_diffs = [GetBlockDifference("system")]
- if HasVendorPartition(input_zip):
- block_diffs.append(GetBlockDifference("vendor"))
- progress_dict["vendor"] = 0.1
- if device_specific_diffs:
- block_diffs += device_specific_diffs
+ progress_dict = {partition: 0.1 for partition in block_diff_dict}
+ progress_dict["system"] = system_progress
if target_info.get('use_dynamic_partitions') == "true":
# Use empty source_info_dict to indicate that all partitions / groups must
# be re-added.
dynamic_partitions_diff = common.DynamicPartitionsDifference(
info_dict=OPTIONS.info_dict,
- block_diffs=block_diffs,
+ block_diffs=block_diff_dict.values(),
progress_dict=progress_dict)
dynamic_partitions_diff.WriteScript(script, output_zip,
write_verify_script=OPTIONS.verify)
else:
- for block_diff in block_diffs:
+ for block_diff in block_diff_dict.values():
block_diff.WriteScript(script, output_zip,
progress=progress_dict.get(block_diff.partition),
write_verify_script=OPTIONS.verify)
- AddCompatibilityArchiveIfTrebleEnabled(input_zip, output_zip, target_info)
+ CheckVintfIfTrebleEnabled(OPTIONS.input_tmp, target_info)
boot_img = common.GetBootableImage(
"boot.img", "boot.img", OPTIONS.input_tmp, "BOOT")
common.CheckSize(boot_img.data, "boot.img", target_info)
common.ZipWriteStr(output_zip, "boot.img", boot_img.data)
- script.ShowProgress(0.05, 5)
script.WriteRawImage("/boot", "boot.img")
- script.ShowProgress(0.2, 10)
+ script.ShowProgress(0.1, 10)
device_specific.FullOTA_InstallEnd()
if OPTIONS.extra_script is not None:
@@ -1560,69 +1568,13 @@
target_recovery = common.GetBootableImage(
"/tmp/recovery.img", "recovery.img", OPTIONS.target_tmp, "RECOVERY")
- # See notes in common.GetUserImage()
- allow_shared_blocks = (source_info.get('ext4_share_dup_blocks') == "true" or
- target_info.get('ext4_share_dup_blocks') == "true")
- system_src = common.GetUserImage("system", OPTIONS.source_tmp, source_zip,
- info_dict=source_info,
- allow_shared_blocks=allow_shared_blocks)
+ block_diff_dict = GetBlockDifferences(target_zip=target_zip,
+ source_zip=source_zip,
+ target_info=target_info,
+ source_info=source_info,
+ device_specific=device_specific)
- hashtree_info_generator = verity_utils.CreateHashtreeInfoGenerator(
- "system", 4096, target_info)
- system_tgt = common.GetUserImage("system", OPTIONS.target_tmp, target_zip,
- info_dict=target_info,
- allow_shared_blocks=allow_shared_blocks,
- hashtree_info_generator=
- hashtree_info_generator)
-
- blockimgdiff_version = max(
- int(i) for i in target_info.get("blockimgdiff_versions", "1").split(","))
- assert blockimgdiff_version >= 3
-
- # Check the first block of the source system partition for remount R/W only
- # if the filesystem is ext4.
- system_src_partition = source_info["fstab"]["/system"]
- check_first_block = system_src_partition.fs_type == "ext4"
- # Disable using imgdiff for squashfs. 'imgdiff -z' expects input files to be
- # in zip formats. However with squashfs, a) all files are compressed in LZ4;
- # b) the blocks listed in block map may not contain all the bytes for a given
- # file (because they're rounded to be 4K-aligned).
- system_tgt_partition = target_info["fstab"]["/system"]
- disable_imgdiff = (system_src_partition.fs_type == "squashfs" or
- system_tgt_partition.fs_type == "squashfs")
- system_diff = common.BlockDifference("system", system_tgt, system_src,
- check_first_block,
- version=blockimgdiff_version,
- disable_imgdiff=disable_imgdiff)
-
- if HasVendorPartition(target_zip):
- if not HasVendorPartition(source_zip):
- raise RuntimeError("can't generate incremental that adds /vendor")
- vendor_src = common.GetUserImage("vendor", OPTIONS.source_tmp, source_zip,
- info_dict=source_info,
- allow_shared_blocks=allow_shared_blocks)
- hashtree_info_generator = verity_utils.CreateHashtreeInfoGenerator(
- "vendor", 4096, target_info)
- vendor_tgt = common.GetUserImage(
- "vendor", OPTIONS.target_tmp, target_zip,
- info_dict=target_info,
- allow_shared_blocks=allow_shared_blocks,
- hashtree_info_generator=hashtree_info_generator)
-
- # Check first block of vendor partition for remount R/W only if
- # disk type is ext4
- vendor_partition = source_info["fstab"]["/vendor"]
- check_first_block = vendor_partition.fs_type == "ext4"
- disable_imgdiff = vendor_partition.fs_type == "squashfs"
- vendor_diff = common.BlockDifference("vendor", vendor_tgt, vendor_src,
- check_first_block,
- version=blockimgdiff_version,
- disable_imgdiff=disable_imgdiff)
- else:
- vendor_diff = None
-
- AddCompatibilityArchiveIfTrebleEnabled(
- target_zip, output_zip, target_info, source_info)
+ CheckVintfIfTrebleEnabled(OPTIONS.target_tmp, target_info)
# Assertions (e.g. device properties check).
target_info.WriteDeviceAssertions(script, OPTIONS.oem_no_mount)
@@ -1686,12 +1638,8 @@
WriteFingerprintAssertion(script, target_info, source_info)
# Check the required cache size (i.e. stashed blocks).
- size = []
- if system_diff:
- size.append(system_diff.required_cache)
- if vendor_diff:
- size.append(vendor_diff.required_cache)
-
+ required_cache_sizes = [diff.required_cache for diff in
+ block_diff_dict.values()]
if updating_boot:
boot_type, boot_device = common.GetTypeAndDevice("/boot", source_info)
d = common.Difference(target_boot, source_boot)
@@ -1714,10 +1662,14 @@
"{}:{}:{}:{}".format(
boot_type, boot_device, source_boot.size, source_boot.sha1))
- size.append(target_boot.size)
+ required_cache_sizes.append(target_boot.size)
- if size:
- script.CacheFreeSpaceCheck(max(size))
+ if required_cache_sizes:
+ script.CacheFreeSpaceCheck(max(required_cache_sizes))
+
+ # Verify the existing partitions.
+ for diff in block_diff_dict.values():
+ diff.WriteVerifyScript(script, touched_blocks_only=True)
device_specific.IncrementalOTA_VerifyEnd()
@@ -1734,30 +1686,12 @@
# Stage 3/3: Make changes.
script.Comment("Stage 3/3")
- # Verify the existing partitions.
- system_diff.WriteVerifyScript(script, touched_blocks_only=True)
- if vendor_diff:
- vendor_diff.WriteVerifyScript(script, touched_blocks_only=True)
- device_specific_diffs = device_specific.IncrementalOTA_GetBlockDifferences()
- if device_specific_diffs:
- assert all(isinstance(diff, common.BlockDifference)
- for diff in device_specific_diffs), \
- "IncrementalOTA_GetBlockDifferences is not returning a list of " \
- "BlockDifference objects"
- for diff in device_specific_diffs:
- diff.WriteVerifyScript(script, touched_blocks_only=True)
-
script.Comment("---- start making changes here ----")
device_specific.IncrementalOTA_InstallBegin()
- block_diffs = [system_diff]
- progress_dict = {"system": 0.8 if vendor_diff else 0.9}
- if vendor_diff:
- block_diffs.append(vendor_diff)
- progress_dict["vendor"] = 0.1
- if device_specific_diffs:
- block_diffs += device_specific_diffs
+ progress_dict = {partition: 0.1 for partition in block_diff_dict}
+ progress_dict["system"] = 1 - len(block_diff_dict) * 0.1
if OPTIONS.source_info_dict.get("use_dynamic_partitions") == "true":
if OPTIONS.target_info_dict.get("use_dynamic_partitions") != "true":
@@ -1766,12 +1700,12 @@
dynamic_partitions_diff = common.DynamicPartitionsDifference(
info_dict=OPTIONS.target_info_dict,
source_info_dict=OPTIONS.source_info_dict,
- block_diffs=block_diffs,
+ block_diffs=block_diff_dict.values(),
progress_dict=progress_dict)
dynamic_partitions_diff.WriteScript(
script, output_zip, write_verify_script=OPTIONS.verify)
else:
- for block_diff in block_diffs:
+ for block_diff in block_diff_dict.values():
block_diff.WriteScript(script, output_zip,
progress=progress_dict.get(block_diff.partition),
write_verify_script=OPTIONS.verify)
@@ -1859,6 +1793,43 @@
Returns:
The filename of the target-files.zip for generating secondary payload.
"""
+
+ def GetInfoForSecondaryImages(info_file):
+ """Updates info file for secondary payload generation.
+
+ Scan each line in the info file, and remove the unwanted partitions from
+ the dynamic partition list in the related properties. e.g.
+ "super_google_dynamic_partitions_partition_list=system vendor product"
+ will become "super_google_dynamic_partitions_partition_list=system".
+
+ Args:
+ info_file: The input info file. e.g. misc_info.txt.
+
+ Returns:
+ A string of the updated info content.
+ """
+
+ output_list = []
+ with open(info_file) as f:
+ lines = f.read().splitlines()
+
+ # The suffix in partition_list variables that follows the name of the
+ # partition group.
+ LIST_SUFFIX = 'partition_list'
+ for line in lines:
+ if line.startswith('#') or '=' not in line:
+ output_list.append(line)
+ continue
+ key, value = line.strip().split('=', 1)
+ if key == 'dynamic_partition_list' or key.endswith(LIST_SUFFIX):
+ partitions = value.split()
+ partitions = [partition for partition in partitions if partition
+ not in SECONDARY_IMAGES_SKIP_PARTITIONS]
+ output_list.append('{}={}'.format(key, ' '.join(partitions)))
+ else:
+ output_list.append(line)
+ return '\n'.join(output_list)
+
target_file = common.MakeTempFile(prefix="targetfiles-", suffix=".zip")
target_zip = zipfile.ZipFile(target_file, 'w', allowZip64=True)
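The filtering done by GetInfoForSecondaryImages above can be seen on a single line; SECONDARY_IMAGES_SKIP_PARTITIONS is the module-level list added earlier in this change, and strip_skipped is only an illustrative stand-in for the per-line logic:

SECONDARY_IMAGES_SKIP_PARTITIONS = ['odm', 'product', 'system_ext', 'vendor']

def strip_skipped(line):
  # Drops the skipped partitions from a 'key=a b c' style property line.
  key, value = line.strip().split('=', 1)
  kept = [p for p in value.split()
          if p not in SECONDARY_IMAGES_SKIP_PARTITIONS]
  return '{}={}'.format(key, ' '.join(kept))

# strip_skipped('super_google_dynamic_partitions_partition_list=system vendor product')
#   returns 'super_google_dynamic_partitions_partition_list=system'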
@@ -1875,12 +1846,32 @@
elif info.filename in ('IMAGES/system.img',
'IMAGES/system.map'):
pass
+ # Images like vendor and product are not needed in the secondary payload.
+ elif info.filename in ['IMAGES/{}.img'.format(partition) for partition in
+ SECONDARY_IMAGES_SKIP_PARTITIONS]:
+ pass
# Skip copying the postinstall config if requested.
elif skip_postinstall and info.filename == POSTINSTALL_CONFIG:
pass
- elif info.filename.startswith(('META/', 'IMAGES/', 'RADIO/')):
+ elif info.filename.startswith('META/'):
+ # Remove the unnecessary partitions for secondary images from the
+ # ab_partitions file.
+ if info.filename == AB_PARTITIONS:
+ with open(unzipped_file) as f:
+ partition_list = f.read().splitlines()
+ partition_list = [partition for partition in partition_list if partition
+ and partition not in SECONDARY_IMAGES_SKIP_PARTITIONS]
+ common.ZipWriteStr(target_zip, info.filename, '\n'.join(partition_list))
+ # Remove the unnecessary partitions from the dynamic partitions list.
+ elif (info.filename == 'META/misc_info.txt' or
+ info.filename == DYNAMIC_PARTITION_INFO):
+ modified_info = GetInfoForSecondaryImages(unzipped_file)
+ common.ZipWriteStr(target_zip, info.filename, modified_info)
+ else:
+ common.ZipWrite(target_zip, unzipped_file, arcname=info.filename)
+ elif info.filename.startswith(('IMAGES/', 'RADIO/')):
common.ZipWrite(target_zip, unzipped_file, arcname=info.filename)
common.ZipClose(target_zip)
@@ -1997,8 +1988,7 @@
return target_file
-def WriteABOTAPackageWithBrilloScript(target_file, output_file,
- source_file=None):
+def GenerateAbOtaPackage(target_file, output_file, source_file=None):
"""Generates an Android OTA package that has A/B update payload."""
# Stage the output zip package for package signing.
if not OPTIONS.no_signing:
@@ -2076,11 +2066,10 @@
else:
logger.warning("Cannot find care map file in target_file package")
- AddCompatibilityArchiveIfTrebleEnabled(
- target_zip, output_zip, target_info, source_info)
-
common.ZipClose(target_zip)
+ CheckVintfIfTrebleEnabled(target_file, target_info)
+
# We haven't written the metadata entry yet, which will be handled in
# FinalizeMetadata().
common.ZipClose(output_zip)
@@ -2096,6 +2085,66 @@
FinalizeMetadata(metadata, staging_file, output_file, needed_property_files)
+def GenerateNonAbOtaPackage(target_file, output_file, source_file=None):
+ """Generates a non-A/B OTA package."""
+ # Sanity check the loaded info dicts first.
+ if OPTIONS.info_dict.get("no_recovery") == "true":
+ raise common.ExternalError(
+ "--- target build has specified no recovery ---")
+
+ # Non-A/B OTAs rely on /cache partition to store temporary files.
+ cache_size = OPTIONS.info_dict.get("cache_size")
+ if cache_size is None:
+ logger.warning("--- can't determine the cache partition size ---")
+ OPTIONS.cache_size = cache_size
+
+ if OPTIONS.extra_script is not None:
+ with open(OPTIONS.extra_script) as fp:
+ OPTIONS.extra_script = fp.read()
+
+ if OPTIONS.extracted_input is not None:
+ OPTIONS.input_tmp = OPTIONS.extracted_input
+ else:
+ logger.info("unzipping target target-files...")
+ OPTIONS.input_tmp = common.UnzipTemp(target_file, UNZIP_PATTERN)
+ OPTIONS.target_tmp = OPTIONS.input_tmp
+
+ # If the caller explicitly specified the device-specific extensions path via
+ # -s / --device_specific, use that. Otherwise, use META/releasetools.py if it
+ # is present in the target target_files. Otherwise, take the path of the file
+ # from 'tool_extensions' in the info dict and look for that in the local
+ # filesystem, relative to the current directory.
+ if OPTIONS.device_specific is None:
+ from_input = os.path.join(OPTIONS.input_tmp, "META", "releasetools.py")
+ if os.path.exists(from_input):
+ logger.info("(using device-specific extensions from target_files)")
+ OPTIONS.device_specific = from_input
+ else:
+ OPTIONS.device_specific = OPTIONS.info_dict.get("tool_extensions")
+
+ if OPTIONS.device_specific is not None:
+ OPTIONS.device_specific = os.path.abspath(OPTIONS.device_specific)
+
+ # Generate a full OTA.
+ if source_file is None:
+ with zipfile.ZipFile(target_file) as input_zip:
+ WriteFullOTAPackage(
+ input_zip,
+ output_file)
+
+ # Generate an incremental OTA.
+ else:
+ logger.info("unzipping source target-files...")
+ OPTIONS.source_tmp = common.UnzipTemp(
+ OPTIONS.incremental_source, UNZIP_PATTERN)
+ with zipfile.ZipFile(target_file) as input_zip, \
+ zipfile.ZipFile(source_file) as source_zip:
+ WriteBlockIncrementalOTAPackage(
+ input_zip,
+ source_zip,
+ output_file)
+
+
def main(argv):
def option_handler(o, a):
@@ -2162,6 +2211,8 @@
OPTIONS.skip_compatibility_check = True
elif o == "--output_metadata_path":
OPTIONS.output_metadata_path = a
+ elif o == "--disable_fec_computation":
+ OPTIONS.disable_fec_computation = True
else:
return False
return True
@@ -2196,6 +2247,7 @@
"retrofit_dynamic_partitions",
"skip_compatibility_check",
"output_metadata_path=",
+ "disable_fec_computation",
], extra_option_handler=option_handler)
if len(args) != 2:
@@ -2270,76 +2322,29 @@
OPTIONS.key_passwords = common.GetKeyPasswords([OPTIONS.package_key])
if ab_update:
- WriteABOTAPackageWithBrilloScript(
+ GenerateAbOtaPackage(
target_file=args[0],
output_file=args[1],
source_file=OPTIONS.incremental_source)
- logger.info("done.")
- return
-
- # Sanity check the loaded info dicts first.
- if OPTIONS.info_dict.get("no_recovery") == "true":
- raise common.ExternalError(
- "--- target build has specified no recovery ---")
-
- # Non-A/B OTAs rely on /cache partition to store temporary files.
- cache_size = OPTIONS.info_dict.get("cache_size")
- if cache_size is None:
- logger.warning("--- can't determine the cache partition size ---")
- OPTIONS.cache_size = cache_size
-
- if OPTIONS.extra_script is not None:
- with open(OPTIONS.extra_script) as fp:
- OPTIONS.extra_script = fp.read()
-
- if OPTIONS.extracted_input is not None:
- OPTIONS.input_tmp = OPTIONS.extracted_input
else:
- logger.info("unzipping target target-files...")
- OPTIONS.input_tmp = common.UnzipTemp(args[0], UNZIP_PATTERN)
- OPTIONS.target_tmp = OPTIONS.input_tmp
+ GenerateNonAbOtaPackage(
+ target_file=args[0],
+ output_file=args[1],
+ source_file=OPTIONS.incremental_source)
- # If the caller explicitly specified the device-specific extensions path via
- # -s / --device_specific, use that. Otherwise, use META/releasetools.py if it
- # is present in the target target_files. Otherwise, take the path of the file
- # from 'tool_extensions' in the info dict and look for that in the local
- # filesystem, relative to the current directory.
- if OPTIONS.device_specific is None:
- from_input = os.path.join(OPTIONS.input_tmp, "META", "releasetools.py")
- if os.path.exists(from_input):
- logger.info("(using device-specific extensions from target_files)")
- OPTIONS.device_specific = from_input
- else:
- OPTIONS.device_specific = OPTIONS.info_dict.get("tool_extensions")
+ # Post OTA generation works.
+ if OPTIONS.incremental_source is not None and OPTIONS.log_diff:
+ logger.info("Generating diff logs...")
+ logger.info("Unzipping target-files for diffing...")
+ target_dir = common.UnzipTemp(args[0], TARGET_DIFFING_UNZIP_PATTERN)
+ source_dir = common.UnzipTemp(
+ OPTIONS.incremental_source, TARGET_DIFFING_UNZIP_PATTERN)
- if OPTIONS.device_specific is not None:
- OPTIONS.device_specific = os.path.abspath(OPTIONS.device_specific)
-
- # Generate a full OTA.
- if OPTIONS.incremental_source is None:
- with zipfile.ZipFile(args[0], 'r') as input_zip:
- WriteFullOTAPackage(
- input_zip,
- output_file=args[1])
-
- # Generate an incremental OTA.
- else:
- logger.info("unzipping source target-files...")
- OPTIONS.source_tmp = common.UnzipTemp(
- OPTIONS.incremental_source, UNZIP_PATTERN)
- with zipfile.ZipFile(args[0], 'r') as input_zip, \
- zipfile.ZipFile(OPTIONS.incremental_source, 'r') as source_zip:
- WriteBlockIncrementalOTAPackage(
- input_zip,
- source_zip,
- output_file=args[1])
-
- if OPTIONS.log_diff:
- with open(OPTIONS.log_diff, 'w') as out_file:
- import target_files_diff
- target_files_diff.recursiveDiff(
- '', OPTIONS.source_tmp, OPTIONS.input_tmp, out_file)
+ with open(OPTIONS.log_diff, 'w') as out_file:
+ import target_files_diff
+ target_files_diff.recursiveDiff(
+ '', source_dir, target_dir, out_file)
logger.info("done.")
diff --git a/tools/releasetools/sign_target_files_apks.py b/tools/releasetools/sign_target_files_apks.py
index a7e6bb0..3119afa 100755
--- a/tools/releasetools/sign_target_files_apks.py
+++ b/tools/releasetools/sign_target_files_apks.py
@@ -111,6 +111,7 @@
import copy
import errno
import gzip
+import io
import itertools
import logging
import os
@@ -188,6 +189,9 @@
for apex, key in OPTIONS.extra_apex_payload_keys.items():
if not key:
key = 'PRESIGNED'
+ if apex not in keys_info:
+ logger.warning('Failed to find %s in target_files; Ignored', apex)
+ continue
keys_info[apex] = (key, keys_info[apex][1])
# Apply the key remapping to container keys.
@@ -419,7 +423,8 @@
if filename.startswith("IMAGES/"):
continue
- # Skip split super images, which will be re-generated during signing.
+ # Skip OTA-specific images (e.g. split super images), which will be
+ # re-generated during signing.
if filename.startswith("OTA/") and filename.endswith(".img"):
continue
@@ -501,8 +506,8 @@
"PRODUCT/build.prop",
"SYSTEM/product/build.prop",
- "PRODUCT_SERVICES/build.prop",
- "SYSTEM/product_services/build.prop",
+ "SYSTEM_EXT/build.prop",
+ "SYSTEM/system_ext/build.prop",
"SYSTEM/etc/prop.default",
"BOOT/RAMDISK/prop.default",
@@ -743,12 +748,7 @@
filename: The archive name in the output zip.
keys: A list of public keys to use during OTA package verification.
"""
-
- try:
- from StringIO import StringIO
- except ImportError:
- from io import StringIO
- temp_file = StringIO()
+ temp_file = io.BytesIO()
certs_zip = zipfile.ZipFile(temp_file, "w")
for k in keys:
common.ZipWrite(certs_zip, k)
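The io.BytesIO switch above matters because zipfile writes binary data, so the in-memory buffer must hold bytes under Python 3. A standalone version of the same pattern using plain zipfile calls (zip_keys_in_memory is illustrative only; common.ZipWrite is the releasetools wrapper used above):

import io
import zipfile

def zip_keys_in_memory(key_paths):
  # Packs the given key files into an in-memory zip and returns its bytes.
  buf = io.BytesIO()
  with zipfile.ZipFile(buf, 'w') as certs_zip:
    for path in key_paths:
      certs_zip.write(path)
  return buf.getvalue()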
diff --git a/tools/releasetools/sparse_img.py b/tools/releasetools/sparse_img.py
old mode 100755
new mode 100644
diff --git a/tools/releasetools/test_add_img_to_target_files.py b/tools/releasetools/test_add_img_to_target_files.py
index 08e0190..3d0766f 100644
--- a/tools/releasetools/test_add_img_to_target_files.py
+++ b/tools/releasetools/test_add_img_to_target_files.py
@@ -21,7 +21,7 @@
import common
import test_utils
from add_img_to_target_files import (
- AddCareMapForAbOta, AddPackRadioImages, AppendVBMetaArgsForPartition,
+ AddCareMapForAbOta, AddPackRadioImages,
CheckAbOtaImages, GetCareMap)
from rangelib import RangeSet
@@ -379,32 +379,6 @@
# The existing entry should be scheduled to be replaced.
self.assertIn('META/care_map.pb', OPTIONS.replace_updated_files_list)
- def test_AppendVBMetaArgsForPartition(self):
- OPTIONS.info_dict = {}
- cmd = []
- AppendVBMetaArgsForPartition(cmd, 'system', '/path/to/system.img')
- self.assertEqual(
- ['--include_descriptors_from_image', '/path/to/system.img'], cmd)
-
- @test_utils.SkipIfExternalToolsUnavailable()
- def test_AppendVBMetaArgsForPartition_vendorAsChainedPartition(self):
- testdata_dir = test_utils.get_testdata_dir()
- pubkey = os.path.join(testdata_dir, 'testkey.pubkey.pem')
- OPTIONS.info_dict = {
- 'avb_avbtool': 'avbtool',
- 'avb_vendor_key_path': pubkey,
- 'avb_vendor_rollback_index_location': 5,
- }
- cmd = []
- AppendVBMetaArgsForPartition(cmd, 'vendor', '/path/to/vendor.img')
- self.assertEqual(2, len(cmd))
- self.assertEqual('--chain_partition', cmd[0])
- chained_partition_args = cmd[1].split(':')
- self.assertEqual(3, len(chained_partition_args))
- self.assertEqual('vendor', chained_partition_args[0])
- self.assertEqual('5', chained_partition_args[1])
- self.assertTrue(os.path.exists(chained_partition_args[2]))
-
def test_GetCareMap(self):
sparse_image = test_utils.construct_sparse_image([
(0xCAC1, 6),
diff --git a/tools/releasetools/test_blockimgdiff.py b/tools/releasetools/test_blockimgdiff.py
index 4c86933..0987dcf 100644
--- a/tools/releasetools/test_blockimgdiff.py
+++ b/tools/releasetools/test_blockimgdiff.py
@@ -18,9 +18,8 @@
from hashlib import sha1
import common
-from blockimgdiff import (
- BlockImageDiff, DataImage, EmptyImage, FileImage, HeapItem, ImgdiffStats,
- Transfer)
+from blockimgdiff import BlockImageDiff, HeapItem, ImgdiffStats, Transfer
+from images import DataImage, EmptyImage, FileImage
from rangelib import RangeSet
from test_utils import ReleaseToolsTestCase
diff --git a/tools/releasetools/test_check_target_files_vintf.py b/tools/releasetools/test_check_target_files_vintf.py
new file mode 100644
index 0000000..a1328c2
--- /dev/null
+++ b/tools/releasetools/test_check_target_files_vintf.py
@@ -0,0 +1,143 @@
+#
+# Copyright (C) 2019 The Android Open Source Project
+#
+# Licensed under the Apache License, Version 2.0 (the 'License');
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an 'AS IS' BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+import os.path
+
+import common
+import test_utils
+from check_target_files_vintf import CheckVintf
+
+# A skeleton target files directory structure. This is VINTF compatible.
+SKELETON_TARGET_FILE_STRUCTURE = {
+ # Empty files
+ 'PRODUCT/build.prop': '',
+ 'PRODUCT/etc/build.prop': '',
+ 'VENDOR/etc/build.prop': '',
+ 'ODM/build.prop': '',
+ 'ODM/etc/build.prop': '',
+ 'RECOVERY/RAMDISK/etc/recovery.fstab': '',
+ 'SYSTEM/build.prop': '',
+ 'SYSTEM/etc/build.prop': '',
+ 'SYSTEM_EXT/build.prop': '',
+ 'SYSTEM_EXT/etc/build.prop': '',
+
+ # Non-empty files
+ 'SYSTEM/compatibility_matrix.xml':"""
+ <compatibility-matrix version="1.0" type="framework">
+ <sepolicy>
+ <sepolicy-version>0.0</sepolicy-version>
+ <kernel-sepolicy-version>0</kernel-sepolicy-version>
+ </sepolicy>
+ </compatibility-matrix>""",
+ 'SYSTEM/manifest.xml':
+ '<manifest version="1.0" type="framework" />',
+ 'VENDOR/build.prop': 'ro.product.first_api_level=29\n',
+ 'VENDOR/compatibility_matrix.xml':
+ '<compatibility-matrix version="1.0" type="device" />',
+ 'VENDOR/manifest.xml':
+ '<manifest version="1.0" type="device"/>',
+ 'META/misc_info.txt':
+ 'recovery_api_version=3\nfstab_version=2\nvintf_enforce=true\n',
+}
+
+
+def write_string_to_file(content, path, mode='w'):
+ if not os.path.isdir(os.path.dirname(path)):
+ os.makedirs(os.path.dirname(path))
+ with open(path, mode=mode) as f:
+ f.write(content)
+
+
+class CheckTargetFilesVintfTest(test_utils.ReleaseToolsTestCase):
+
+ def setUp(self):
+ self.testdata_dir = test_utils.get_testdata_dir()
+
+ def prepare_test_dir(self, test_delta_rel_path):
+ test_delta_dir = os.path.join(self.testdata_dir, test_delta_rel_path)
+ test_dir = common.MakeTempDir(prefix='check_target_files_vintf')
+
+ # Create a skeleton directory structure of target files
+ for rel_path, content in SKELETON_TARGET_FILE_STRUCTURE.items():
+ write_string_to_file(content, os.path.join(test_dir, rel_path))
+
+ # Overwrite with files from test_delta_rel_path
+ for root, _, files in os.walk(test_delta_dir):
+ rel_root = os.path.relpath(root, test_delta_dir)
+ for f in files:
+ output_file = os.path.join(test_dir, rel_root, f)
+ with open(os.path.join(root, f)) as inp:
+ write_string_to_file(inp.read(), output_file)
+
+ return test_dir
+
+ @test_utils.SkipIfExternalToolsUnavailable()
+ def test_CheckVintf_sanity(self):
+ msg = 'Sanity check with skeleton target files failed.'
+ test_dir = self.prepare_test_dir('does-not-exist')
+ self.assertTrue(CheckVintf(test_dir), msg=msg)
+
+ @test_utils.SkipIfExternalToolsUnavailable()
+ def test_CheckVintf_matrix_incompat(self):
+ msg = 'vintf/matrix_incompat should be incompatible because sepolicy ' \
+ 'version fails to match'
+ test_dir = self.prepare_test_dir('vintf/matrix_incompat')
+ self.assertFalse(CheckVintf(test_dir), msg=msg)
+
+ @test_utils.SkipIfExternalToolsUnavailable()
+ def test_CheckVintf_kernel_compat(self):
+ msg = 'vintf/kernel with 4.14.1 kernel version should be compatible'
+ test_dir = self.prepare_test_dir('vintf/kernel')
+ write_string_to_file('', os.path.join(test_dir, 'META/kernel_configs.txt'))
+ write_string_to_file('4.14.1',
+ os.path.join(test_dir, 'META/kernel_version.txt'))
+ self.assertTrue(CheckVintf(test_dir), msg=msg)
+
+ @test_utils.SkipIfExternalToolsUnavailable()
+ def test_CheckVintf_kernel_incompat(self):
+ msg = 'vintf/kernel with 4.14.0 kernel version should be incompatible ' \
+ 'because 4.14.1 kernel version is required'
+ test_dir = self.prepare_test_dir('vintf/kernel')
+ write_string_to_file('', os.path.join(test_dir, 'META/kernel_configs.txt'))
+ write_string_to_file('4.14.0',
+ os.path.join(test_dir, 'META/kernel_version.txt'))
+ self.assertFalse(CheckVintf(test_dir), msg=msg)
+
+ @test_utils.SkipIfExternalToolsUnavailable()
+ def test_CheckVintf_sku_compat(self):
+ msg = 'vintf/sku_compat should be compatible because ' \
+ 'ODM/etc/vintf/manifest_sku.xml has the required HALs'
+ test_dir = self.prepare_test_dir('vintf/sku_compat')
+ write_string_to_file('vintf_odm_manifest_skus=sku',
+ os.path.join(test_dir, 'META/misc_info.txt'), mode='a')
+ self.assertTrue(CheckVintf(test_dir), msg=msg)
+
+ @test_utils.SkipIfExternalToolsUnavailable()
+ def test_CheckVintf_sku_incompat(self):
+ msg = 'vintf/sku_incompat should be incompatible because ' \
+ 'ODM/etc/vintf/manifest_sku.xml does not have the required HALs'
+ test_dir = self.prepare_test_dir('vintf/sku_incompat')
+ write_string_to_file('vintf_odm_manifest_skus=sku',
+ os.path.join(test_dir, 'META/misc_info.txt'), mode='a')
+ self.assertFalse(CheckVintf(test_dir), msg=msg)
+
+ @test_utils.SkipIfExternalToolsUnavailable()
+ def test_CheckVintf_bad_xml(self):
+ test_dir = self.prepare_test_dir('does-not-exist')
+ write_string_to_file('not an XML',
+ os.path.join(test_dir, 'VENDOR/manifest.xml'))
+ # Should raise an error because a file has invalid format.
+ self.assertRaises(common.ExternalError, CheckVintf, test_dir)
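Every test in this new file follows the same recipe: materialize SKELETON_TARGET_FILE_STRUCTURE, overlay a delta directory from testdata/vintf/, optionally append keys to META/misc_info.txt, then assert on CheckVintf. A hypothetical extra case written against that recipe (the 'vintf/example' delta directory and its expected outcome are illustrative only, not part of this change):

    @test_utils.SkipIfExternalToolsUnavailable()
    def test_CheckVintf_exampleDelta(self):
      # 'vintf/example' stands in for a new delta directory under testdata/;
      # its files overlay the skeleton laid down by prepare_test_dir().
      test_dir = self.prepare_test_dir('vintf/example')
      # Extra keys can be appended to the skeleton's misc_info.txt, as the
      # sku tests above do with vintf_odm_manifest_skus.
      write_string_to_file('vintf_odm_manifest_skus=sku',
                           os.path.join(test_dir, 'META/misc_info.txt'), mode='a')
      self.assertTrue(CheckVintf(test_dir), msg='example delta should be compatible')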
diff --git a/tools/releasetools/test_common.py b/tools/releasetools/test_common.py
index 287cf0a..ceb023f 100644
--- a/tools/releasetools/test_common.py
+++ b/tools/releasetools/test_common.py
@@ -25,9 +25,9 @@
import common
import test_utils
import validate_target_files
+from images import EmptyImage, DataImage
from rangelib import RangeSet
-from blockimgdiff import EmptyImage, DataImage
KiB = 1024
MiB = 1024 * KiB
@@ -912,6 +912,23 @@
'recovery_as_boot': 'true',
}
+ def test_LoadListFromFile(self):
+ file_path = os.path.join(self.testdata_dir,
+ 'merge_config_framework_item_list')
+ contents = common.LoadListFromFile(file_path)
+ expected_contents = [
+ 'META/apkcerts.txt',
+ 'META/filesystem_config.txt',
+ 'META/root_filesystem_config.txt',
+ 'META/system_manifest.xml',
+ 'META/system_matrix.xml',
+ 'META/update_engine_config.txt',
+ 'PRODUCT/*',
+ 'ROOT/*',
+ 'SYSTEM/*',
+ ]
+ self.assertEqual(sorted(contents), sorted(expected_contents))
+
@staticmethod
def _test_LoadInfoDict_createTargetFiles(info_dict, fstab_path):
target_files = common.MakeTempFile(prefix='target_files-', suffix='.zip')
@@ -1057,6 +1074,93 @@
self.assertRaises(
AssertionError, common.LoadInfoDict, target_files_zip, True)
+ def test_MergeDynamicPartitionInfoDicts_ReturnsMergedDict(self):
+ framework_dict = {
+ 'super_partition_groups': 'group_a',
+ 'dynamic_partition_list': 'system',
+ 'super_group_a_list': 'system',
+ }
+ vendor_dict = {
+ 'super_partition_groups': 'group_a group_b',
+ 'dynamic_partition_list': 'vendor product',
+ 'super_group_a_list': 'vendor',
+ 'super_group_a_size': '1000',
+ 'super_group_b_list': 'product',
+ 'super_group_b_size': '2000',
+ }
+ merged_dict = common.MergeDynamicPartitionInfoDicts(
+ framework_dict=framework_dict,
+ vendor_dict=vendor_dict,
+ size_prefix='super_',
+ size_suffix='_size',
+ list_prefix='super_',
+ list_suffix='_list')
+ expected_merged_dict = {
+ 'super_partition_groups': 'group_a group_b',
+ 'dynamic_partition_list': 'system vendor product',
+ 'super_group_a_list': 'system vendor',
+ 'super_group_a_size': '1000',
+ 'super_group_b_list': 'product',
+ 'super_group_b_size': '2000',
+ }
+ self.assertEqual(merged_dict, expected_merged_dict)
+
+ def test_MergeDynamicPartitionInfoDicts_IgnoringFrameworkGroupSize(self):
+ framework_dict = {
+ 'super_partition_groups': 'group_a',
+ 'dynamic_partition_list': 'system',
+ 'super_group_a_list': 'system',
+ 'super_group_a_size': '5000',
+ }
+ vendor_dict = {
+ 'super_partition_groups': 'group_a group_b',
+ 'dynamic_partition_list': 'vendor product',
+ 'super_group_a_list': 'vendor',
+ 'super_group_a_size': '1000',
+ 'super_group_b_list': 'product',
+ 'super_group_b_size': '2000',
+ }
+ merged_dict = common.MergeDynamicPartitionInfoDicts(
+ framework_dict=framework_dict,
+ vendor_dict=vendor_dict,
+ size_prefix='super_',
+ size_suffix='_size',
+ list_prefix='super_',
+ list_suffix='_list')
+ expected_merged_dict = {
+ 'super_partition_groups': 'group_a group_b',
+ 'dynamic_partition_list': 'system vendor product',
+ 'super_group_a_list': 'system vendor',
+ 'super_group_a_size': '1000',
+ 'super_group_b_list': 'product',
+ 'super_group_b_size': '2000',
+ }
+ self.assertEqual(merged_dict, expected_merged_dict)
+
+ def test_GetAvbPartitionArg(self):
+ info_dict = {}
+ cmd = common.GetAvbPartitionArg('system', '/path/to/system.img', info_dict)
+ self.assertEqual(
+ ['--include_descriptors_from_image', '/path/to/system.img'], cmd)
+
+ @test_utils.SkipIfExternalToolsUnavailable()
+ def test_AppendVBMetaArgsForPartition_vendorAsChainedPartition(self):
+ testdata_dir = test_utils.get_testdata_dir()
+ pubkey = os.path.join(testdata_dir, 'testkey.pubkey.pem')
+ info_dict = {
+ 'avb_avbtool': 'avbtool',
+ 'avb_vendor_key_path': pubkey,
+ 'avb_vendor_rollback_index_location': 5,
+ }
+ cmd = common.GetAvbPartitionArg('vendor', '/path/to/vendor.img', info_dict)
+ self.assertEqual(2, len(cmd))
+ self.assertEqual('--chain_partition', cmd[0])
+ chained_partition_args = cmd[1].split(':')
+ self.assertEqual(3, len(chained_partition_args))
+ self.assertEqual('vendor', chained_partition_args[0])
+ self.assertEqual('5', chained_partition_args[1])
+ self.assertTrue(os.path.exists(chained_partition_args[2]))
+
class InstallRecoveryScriptFormatTest(test_utils.ReleaseToolsTestCase):
"""Checks the format of install-recovery.sh.
@@ -1254,10 +1358,10 @@
def test_incremental(self):
source_info = common.LoadDictionaryFromLines("""
-dynamic_partition_list=system vendor product product_services
+dynamic_partition_list=system vendor product system_ext
super_partition_groups=group_foo
super_group_foo_group_size={group_foo_size}
-super_group_foo_partition_list=system vendor product product_services
+super_group_foo_partition_list=system vendor product system_ext
""".format(group_foo_size=4 * GiB).split("\n"))
target_info = common.LoadDictionaryFromLines("""
dynamic_partition_list=system vendor product odm
@@ -1274,7 +1378,7 @@
src=FakeSparseImage(1024 * MiB)),
MockBlockDifference("product", FakeSparseImage(1024 * MiB),
src=FakeSparseImage(1024 * MiB)),
- MockBlockDifference("product_services", None,
+ MockBlockDifference("system_ext", None,
src=FakeSparseImage(1024 * MiB)),
MockBlockDifference("odm", FakeSparseImage(1024 * MiB),
src=None)]
@@ -1297,11 +1401,11 @@
self.assertLess(patch_idx, verify_idx,
"Should verify {} after patching".format(p))
- self.assertNotIn("patch(product_services);", self.script.lines)
+ self.assertNotIn("patch(system_ext);", self.script.lines)
lines = self.get_op_list(self.output_path)
- remove = lines.index("remove product_services")
+ remove = lines.index("remove system_ext")
move_product_out = lines.index("move product default")
shrink = lines.index("resize vendor 536870912")
shrink_group = lines.index("resize_group group_foo 3221225472")
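The two MergeDynamicPartitionInfoDicts tests added above pin down the merge semantics: group names and per-group partition lists are unioned (framework entries first), while group sizes are always taken from the vendor dict, even when the framework dict supplies one. A rough, illustration-only re-implementation of that behavior (the real function lives in common.py and takes configurable prefix/suffix arguments; 'super_' and '_list' are hard-coded here):

    def merge_dynamic_partition_info(framework, vendor):
      """Illustrative merge matching the expectations of the tests above."""
      def union(a, b):
        # Order-preserving union of space-separated token lists.
        seen = []
        for token in (a or '').split() + (b or '').split():
          if token not in seen:
            seen.append(token)
        return ' '.join(seen)

      merged = dict(vendor)  # group sizes always come from the vendor dict
      for key in ('super_partition_groups', 'dynamic_partition_list'):
        merged[key] = union(framework.get(key), vendor.get(key))
      for group in merged['super_partition_groups'].split():
        list_key = 'super_' + group + '_list'
        merged[list_key] = union(framework.get(list_key), vendor.get(list_key))
      return merged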
diff --git a/tools/releasetools/test_merge_target_files.py b/tools/releasetools/test_merge_target_files.py
index b90a2e7..1abe83c 100644
--- a/tools/releasetools/test_merge_target_files.py
+++ b/tools/releasetools/test_merge_target_files.py
@@ -18,11 +18,10 @@
import common
import test_utils
-from merge_target_files import (read_config_list, validate_config_lists,
+from merge_target_files import (validate_config_lists,
DEFAULT_FRAMEWORK_ITEM_LIST,
DEFAULT_VENDOR_ITEM_LIST,
DEFAULT_FRAMEWORK_MISC_INFO_KEYS, copy_items,
- merge_dynamic_partition_info_dicts,
process_apex_keys_apk_certs_common)
@@ -83,23 +82,6 @@
self.assertEqual(
os.readlink(os.path.join(output_dir, 'a_link.cpp')), 'a.cpp')
- def test_read_config_list(self):
- framework_item_list_file = os.path.join(self.testdata_dir,
- 'merge_config_framework_item_list')
- framework_item_list = read_config_list(framework_item_list_file)
- expected_framework_item_list = [
- 'META/apkcerts.txt',
- 'META/filesystem_config.txt',
- 'META/root_filesystem_config.txt',
- 'META/system_manifest.xml',
- 'META/system_matrix.xml',
- 'META/update_engine_config.txt',
- 'PRODUCT/*',
- 'ROOT/*',
- 'SYSTEM/*',
- ]
- self.assertItemsEqual(framework_item_list, expected_framework_item_list)
-
def test_validate_config_lists_ReturnsFalseIfMissingDefaultItem(self):
framework_item_list = list(DEFAULT_FRAMEWORK_ITEM_LIST)
framework_item_list.remove('SYSTEM/*')
@@ -143,69 +125,6 @@
framework_misc_info_keys,
DEFAULT_VENDOR_ITEM_LIST))
- def test_merge_dynamic_partition_info_dicts_ReturnsMergedDict(self):
- framework_dict = {
- 'super_partition_groups': 'group_a',
- 'dynamic_partition_list': 'system',
- 'super_group_a_list': 'system',
- }
- vendor_dict = {
- 'super_partition_groups': 'group_a group_b',
- 'dynamic_partition_list': 'vendor product',
- 'super_group_a_list': 'vendor',
- 'super_group_a_size': '1000',
- 'super_group_b_list': 'product',
- 'super_group_b_size': '2000',
- }
- merged_dict = merge_dynamic_partition_info_dicts(
- framework_dict=framework_dict,
- vendor_dict=vendor_dict,
- size_prefix='super_',
- size_suffix='_size',
- list_prefix='super_',
- list_suffix='_list')
- expected_merged_dict = {
- 'super_partition_groups': 'group_a group_b',
- 'dynamic_partition_list': 'system vendor product',
- 'super_group_a_list': 'system vendor',
- 'super_group_a_size': '1000',
- 'super_group_b_list': 'product',
- 'super_group_b_size': '2000',
- }
- self.assertEqual(merged_dict, expected_merged_dict)
-
- def test_merge_dynamic_partition_info_dicts_IgnoringFrameworkGroupSize(self):
- framework_dict = {
- 'super_partition_groups': 'group_a',
- 'dynamic_partition_list': 'system',
- 'super_group_a_list': 'system',
- 'super_group_a_size': '5000',
- }
- vendor_dict = {
- 'super_partition_groups': 'group_a group_b',
- 'dynamic_partition_list': 'vendor product',
- 'super_group_a_list': 'vendor',
- 'super_group_a_size': '1000',
- 'super_group_b_list': 'product',
- 'super_group_b_size': '2000',
- }
- merged_dict = merge_dynamic_partition_info_dicts(
- framework_dict=framework_dict,
- vendor_dict=vendor_dict,
- size_prefix='super_',
- size_suffix='_size',
- list_prefix='super_',
- list_suffix='_list')
- expected_merged_dict = {
- 'super_partition_groups': 'group_a group_b',
- 'dynamic_partition_list': 'system vendor product',
- 'super_group_a_list': 'system vendor',
- 'super_group_a_size': '1000',
- 'super_group_b_list': 'product',
- 'super_group_b_size': '2000',
- }
- self.assertEqual(merged_dict, expected_merged_dict)
-
def test_process_apex_keys_apk_certs_ReturnsTrueIfNoConflicts(self):
output_dir = common.MakeTempDir()
os.makedirs(os.path.join(output_dir, 'META'))
diff --git a/tools/releasetools/test_ota_from_target_files.py b/tools/releasetools/test_ota_from_target_files.py
index ee831e3..0846d87 100644
--- a/tools/releasetools/test_ota_from_target_files.py
+++ b/tools/releasetools/test_ota_from_target_files.py
@@ -588,11 +588,11 @@
with zipfile.ZipFile(target_file) as verify_zip:
namelist = verify_zip.namelist()
+ ab_partitions = verify_zip.read('META/ab_partitions.txt')
self.assertIn('META/ab_partitions.txt', namelist)
self.assertIn('IMAGES/boot.img', namelist)
self.assertIn('IMAGES/system.img', namelist)
- self.assertIn('IMAGES/vendor.img', namelist)
self.assertIn('RADIO/bootloader.img', namelist)
self.assertIn('RADIO/modem.img', namelist)
self.assertIn(POSTINSTALL_CONFIG, namelist)
@@ -600,6 +600,9 @@
self.assertNotIn('IMAGES/system_other.img', namelist)
self.assertNotIn('IMAGES/system.map', namelist)
+ expected_ab_partitions = ['boot', 'system', 'bootloader', 'modem']
+ self.assertEqual('\n'.join(expected_ab_partitions), ab_partitions)
+
@test_utils.SkipIfExternalToolsUnavailable()
def test_GetTargetFilesZipForSecondaryImages_skipPostinstall(self):
input_file = construct_target_files(secondary=True)
@@ -612,7 +615,6 @@
self.assertIn('META/ab_partitions.txt', namelist)
self.assertIn('IMAGES/boot.img', namelist)
self.assertIn('IMAGES/system.img', namelist)
- self.assertIn('IMAGES/vendor.img', namelist)
self.assertIn('RADIO/bootloader.img', namelist)
self.assertIn('RADIO/modem.img', namelist)
@@ -633,7 +635,6 @@
self.assertIn('META/ab_partitions.txt', namelist)
self.assertIn('IMAGES/boot.img', namelist)
self.assertIn('IMAGES/system.img', namelist)
- self.assertIn('IMAGES/vendor.img', namelist)
self.assertIn(POSTINSTALL_CONFIG, namelist)
self.assertNotIn('IMAGES/system_other.img', namelist)
@@ -642,6 +643,55 @@
self.assertNotIn('RADIO/modem.img', namelist)
@test_utils.SkipIfExternalToolsUnavailable()
+ def test_GetTargetFilesZipForSecondaryImages_dynamicPartitions(self):
+ input_file = construct_target_files(secondary=True)
+ misc_info = '\n'.join([
+ 'use_dynamic_partition_size=true',
+ 'use_dynamic_partitions=true',
+ 'dynamic_partition_list=system vendor product',
+ 'super_partition_groups=google_dynamic_partitions',
+ 'super_google_dynamic_partitions_group_size=4873781248',
+ 'super_google_dynamic_partitions_partition_list=system vendor product',
+ ])
+ dynamic_partitions_info = '\n'.join([
+ 'super_partition_groups=google_dynamic_partitions',
+ 'super_google_dynamic_partitions_group_size=4873781248',
+ 'super_google_dynamic_partitions_partition_list=system vendor product',
+ ])
+
+ with zipfile.ZipFile(input_file, 'a') as append_zip:
+ common.ZipWriteStr(append_zip, 'META/misc_info.txt', misc_info)
+ common.ZipWriteStr(append_zip, 'META/dynamic_partitions_info.txt',
+ dynamic_partitions_info)
+
+ target_file = GetTargetFilesZipForSecondaryImages(input_file)
+
+ with zipfile.ZipFile(target_file) as verify_zip:
+ namelist = verify_zip.namelist()
+ updated_misc_info = verify_zip.read('META/misc_info.txt')
+ updated_dynamic_partitions_info = verify_zip.read(
+ 'META/dynamic_partitions_info.txt')
+
+ self.assertIn('META/ab_partitions.txt', namelist)
+ self.assertIn('IMAGES/boot.img', namelist)
+ self.assertIn('IMAGES/system.img', namelist)
+ self.assertIn(POSTINSTALL_CONFIG, namelist)
+ self.assertIn('META/misc_info.txt', namelist)
+ self.assertIn('META/dynamic_partitions_info.txt', namelist)
+
+ self.assertNotIn('IMAGES/system_other.img', namelist)
+ self.assertNotIn('IMAGES/system.map', namelist)
+
+ # Check the vendor & product are removed from the partitions list.
+ expected_misc_info = misc_info.replace('system vendor product',
+ 'system')
+ expected_dynamic_partitions_info = dynamic_partitions_info.replace(
+ 'system vendor product', 'system')
+ self.assertEqual(expected_misc_info, updated_misc_info)
+ self.assertEqual(expected_dynamic_partitions_info,
+ updated_dynamic_partitions_info)
+
+ @test_utils.SkipIfExternalToolsUnavailable()
def test_GetTargetFilesZipWithoutPostinstallConfig(self):
input_file = construct_target_files()
target_file = GetTargetFilesZipWithoutPostinstallConfig(input_file)
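The dynamicPartitions test above expects GetTargetFilesZipForSecondaryImages to drop every non-system partition from both META/misc_info.txt and META/dynamic_partitions_info.txt. A stand-alone sketch of that filtering step, keeping only a hypothetical set of secondary partitions; it illustrates the expected output, not the function's actual implementation:

    SECONDARY_PARTITIONS = {'system'}  # assumption for this sketch

    def filter_dynamic_partition_info(content):
      """Strip non-secondary partitions from *_list values, line by line."""
      filtered = []
      for line in content.splitlines():
        key, _, value = line.partition('=')
        if key.endswith('_list'):
          value = ' '.join(p for p in value.split() if p in SECONDARY_PARTITIONS)
          line = key + '=' + value
        filtered.append(line)
      return '\n'.join(filtered)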
diff --git a/tools/releasetools/test_sign_target_files_apks.py b/tools/releasetools/test_sign_target_files_apks.py
index 9a1d163..e0a635a 100644
--- a/tools/releasetools/test_sign_target_files_apks.py
+++ b/tools/releasetools/test_sign_target_files_apks.py
@@ -15,6 +15,7 @@
#
import base64
+import io
import os.path
import zipfile
@@ -22,7 +23,7 @@
import test_utils
from sign_target_files_apks import (
CheckApkAndApexKeysAvailable, EditTags, GetApkFileInfo, ReadApexKeysInfo,
- ReplaceCerts, ReplaceVerityKeyId, RewriteProps)
+ ReplaceCerts, ReplaceVerityKeyId, RewriteProps, WriteOtacerts)
class SignTargetFilesApksTest(test_utils.ReleaseToolsTestCase):
@@ -70,10 +71,10 @@
'ro.product.build.fingerprint=foo/bar/release-keys'),
('ro.product.build.thumbprint=foo/bar/dev-keys',
'ro.product.build.thumbprint=foo/bar/release-keys'),
- ('ro.product_services.build.fingerprint=foo/bar/test-keys',
- 'ro.product_services.build.fingerprint=foo/bar/release-keys'),
- ('ro.product_services.build.thumbprint=foo/bar/test-keys',
- 'ro.product_services.build.thumbprint=foo/bar/release-keys'),
+ ('ro.system_ext.build.fingerprint=foo/bar/test-keys',
+ 'ro.system_ext.build.fingerprint=foo/bar/release-keys'),
+ ('ro.system_ext.build.thumbprint=foo/bar/test-keys',
+ 'ro.system_ext.build.thumbprint=foo/bar/release-keys'),
('# comment line 1', '# comment line 1'),
('ro.bootimage.build.fingerprint=foo/bar/dev-keys',
'ro.bootimage.build.fingerprint=foo/bar/release-keys'),
@@ -91,8 +92,8 @@
'ro.odm.build.tags=release-keys'),
('ro.product.build.tags=dev-keys',
'ro.product.build.tags=release-keys'),
- ('ro.product_services.build.tags=dev-keys',
- 'ro.product_services.build.tags=release-keys'),
+ ('ro.system_ext.build.tags=dev-keys',
+ 'ro.system_ext.build.tags=release-keys'),
('# comment line 2', '# comment line 2'),
('ro.build.display.id=OPR6.170623.012 dev-keys',
'ro.build.display.id=OPR6.170623.012'),
@@ -236,6 +237,22 @@
}
self.assertEqual(output_xml, ReplaceCerts(input_xml))
+ def test_WriteOtacerts(self):
+ certs = [
+ os.path.join(self.testdata_dir, 'platform.x509.pem'),
+ os.path.join(self.testdata_dir, 'media.x509.pem'),
+ os.path.join(self.testdata_dir, 'testkey.x509.pem'),
+ ]
+ entry_name = 'SYSTEM/etc/security/otacerts.zip'
+ output_file = common.MakeTempFile(suffix='.zip')
+ with zipfile.ZipFile(output_file, 'w') as output_zip:
+ WriteOtacerts(output_zip, entry_name, certs)
+ with zipfile.ZipFile(output_file) as input_zip:
+ self.assertIn(entry_name, input_zip.namelist())
+ otacerts_file = io.BytesIO(input_zip.read(entry_name))
+ with zipfile.ZipFile(otacerts_file) as otacerts_zip:
+ self.assertEqual(3, len(otacerts_zip.namelist()))
+
def test_CheckApkAndApexKeysAvailable(self):
input_file = common.MakeTempFile(suffix='.zip')
with zipfile.ZipFile(input_file, 'w') as input_zip:
@@ -508,3 +525,26 @@
'system/apex/apexd/apexd_testdata/com.android.apex.test_package_2.pem',
'build/make/target/product/security/testkey'),
}, keys_info)
+
+ def test_ReadApexKeysInfo_presignedKeys(self):
+ apex_keys = self.APEX_KEYS_TXT + (
+ 'name="apex.apexd_test_different_app2.apex" '
+ 'private_key="PRESIGNED" '
+ 'public_key="PRESIGNED" '
+ 'container_certificate="PRESIGNED" '
+ 'container_private_key="PRESIGNED"')
+ target_files = common.MakeTempFile(suffix='.zip')
+ with zipfile.ZipFile(target_files, 'w') as target_files_zip:
+ target_files_zip.writestr('META/apexkeys.txt', apex_keys)
+
+ with zipfile.ZipFile(target_files) as target_files_zip:
+ keys_info = ReadApexKeysInfo(target_files_zip)
+
+ self.assertEqual({
+ 'apex.apexd_test.apex': (
+ 'system/apex/apexd/apexd_testdata/com.android.apex.test_package.pem',
+ 'build/make/target/product/security/testkey'),
+ 'apex.apexd_test_different_app.apex': (
+ 'system/apex/apexd/apexd_testdata/com.android.apex.test_package_2.pem',
+ 'build/make/target/product/security/testkey'),
+ }, keys_info)
diff --git a/tools/releasetools/test_utils.py b/tools/releasetools/test_utils.py
index 1e919f7..2445671 100755
--- a/tools/releasetools/test_utils.py
+++ b/tools/releasetools/test_utils.py
@@ -32,9 +32,12 @@
logging.basicConfig(stream=sys.stdout)
# Use ANDROID_BUILD_TOP as an indicator to tell if the needed tools (e.g.
-# avbtool, mke2fs) are available while running the tests. Not having the var or
-# having empty string means we can't run the tests that require external tools.
-EXTERNAL_TOOLS_UNAVAILABLE = not os.environ.get("ANDROID_BUILD_TOP")
+# avbtool, mke2fs) are available while running the tests. Setting
+# FORCE_RUN_RELEASETOOLS to '1' forces these tests to run regardless; otherwise
+# a missing or empty ANDROID_BUILD_TOP means such tests are skipped.
+EXTERNAL_TOOLS_UNAVAILABLE = (
+ not os.environ.get('ANDROID_BUILD_TOP') and
+ os.environ.get('FORCE_RUN_RELEASETOOLS') != '1')
def SkipIfExternalToolsUnavailable():
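The decorator body itself is outside this hunk; judging from the flag above, it presumably wraps unittest's skip machinery. A minimal sketch of how such a decorator can be built on top of EXTERNAL_TOOLS_UNAVAILABLE (assuming unittest; the real implementation in test_utils.py may differ):

    import os
    import unittest

    EXTERNAL_TOOLS_UNAVAILABLE = (
        not os.environ.get('ANDROID_BUILD_TOP') and
        os.environ.get('FORCE_RUN_RELEASETOOLS') != '1')

    def SkipIfExternalToolsUnavailable():
      """Skip the decorated test when host tools (avbtool, mke2fs) are missing."""
      if EXTERNAL_TOOLS_UNAVAILABLE:
        return unittest.skip('External tools (avbtool, mke2fs) not available')
      return lambda func: func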
diff --git a/tools/releasetools/testdata/vintf/kernel/SYSTEM/compatibility_matrix.xml b/tools/releasetools/testdata/vintf/kernel/SYSTEM/compatibility_matrix.xml
new file mode 100644
index 0000000..ed46b6b
--- /dev/null
+++ b/tools/releasetools/testdata/vintf/kernel/SYSTEM/compatibility_matrix.xml
@@ -0,0 +1,7 @@
+<compatibility-matrix version="1.0" type="framework">
+ <kernel version="4.14.1" />
+ <sepolicy>
+ <sepolicy-version>0.0</sepolicy-version>
+ <kernel-sepolicy-version>0</kernel-sepolicy-version>
+ </sepolicy>
+</compatibility-matrix>
diff --git a/tools/releasetools/testdata/vintf/matrix_incompat/SYSTEM/compatibility_matrix.xml b/tools/releasetools/testdata/vintf/matrix_incompat/SYSTEM/compatibility_matrix.xml
new file mode 100644
index 0000000..5d891fa
--- /dev/null
+++ b/tools/releasetools/testdata/vintf/matrix_incompat/SYSTEM/compatibility_matrix.xml
@@ -0,0 +1,6 @@
+<compatibility-matrix version="1.0" type="framework">
+ <sepolicy>
+ <sepolicy-version>1.0</sepolicy-version>
+ <kernel-sepolicy-version>0</kernel-sepolicy-version>
+ </sepolicy>
+</compatibility-matrix>
diff --git a/tools/releasetools/testdata/vintf/sku_compat/ODM/etc/vintf/manifest_sku.xml b/tools/releasetools/testdata/vintf/sku_compat/ODM/etc/vintf/manifest_sku.xml
new file mode 100644
index 0000000..bcd7ce4
--- /dev/null
+++ b/tools/releasetools/testdata/vintf/sku_compat/ODM/etc/vintf/manifest_sku.xml
@@ -0,0 +1,7 @@
+<manifest version="1.0" type="device">
+ <hal format="hidl">
+ <name>foo</name>
+ <transport>hwbinder</transport>
+ <fqname>@1.0::IFoo/default</fqname>
+ </hal>
+</manifest>
diff --git a/tools/releasetools/testdata/vintf/sku_compat/SYSTEM/compatibility_matrix.xml b/tools/releasetools/testdata/vintf/sku_compat/SYSTEM/compatibility_matrix.xml
new file mode 100644
index 0000000..19a9b6a
--- /dev/null
+++ b/tools/releasetools/testdata/vintf/sku_compat/SYSTEM/compatibility_matrix.xml
@@ -0,0 +1,14 @@
+<compatibility-matrix version="1.0" type="framework">
+ <hal format="hidl" optional="false">
+ <name>foo</name>
+ <version>1.0</version>
+ <interface>
+ <name>IFoo</name>
+ <instance>default</instance>
+ </interface>
+ </hal>
+ <sepolicy>
+ <sepolicy-version>0.0</sepolicy-version>
+ <kernel-sepolicy-version>0</kernel-sepolicy-version>
+ </sepolicy>
+</compatibility-matrix>
diff --git a/tools/releasetools/testdata/vintf/sku_incompat/ODM/etc/vintf/manifest_sku.xml b/tools/releasetools/testdata/vintf/sku_incompat/ODM/etc/vintf/manifest_sku.xml
new file mode 100644
index 0000000..bcd7ce4
--- /dev/null
+++ b/tools/releasetools/testdata/vintf/sku_incompat/ODM/etc/vintf/manifest_sku.xml
@@ -0,0 +1,7 @@
+<manifest version="1.0" type="device">
+ <hal format="hidl">
+ <name>foo</name>
+ <transport>hwbinder</transport>
+ <fqname>@1.0::IFoo/default</fqname>
+ </hal>
+</manifest>
diff --git a/tools/releasetools/testdata/vintf/sku_incompat/SYSTEM/compatibility_matrix.xml b/tools/releasetools/testdata/vintf/sku_incompat/SYSTEM/compatibility_matrix.xml
new file mode 100644
index 0000000..e0e0d6c
--- /dev/null
+++ b/tools/releasetools/testdata/vintf/sku_incompat/SYSTEM/compatibility_matrix.xml
@@ -0,0 +1,14 @@
+<compatibility-matrix version="1.0" type="framework">
+ <hal format="hidl" optional="false">
+ <name>foo</name>
+ <version>1.1</version>
+ <interface>
+ <name>IFoo</name>
+ <instance>default</instance>
+ </interface>
+ </hal>
+ <sepolicy>
+ <sepolicy-version>0.0</sepolicy-version>
+ <kernel-sepolicy-version>0</kernel-sepolicy-version>
+ </sepolicy>
+</compatibility-matrix>
diff --git a/tools/releasetools/validate_target_files.py b/tools/releasetools/validate_target_files.py
index 435e7f2..d189499 100755
--- a/tools/releasetools/validate_target_files.py
+++ b/tools/releasetools/validate_target_files.py
@@ -257,7 +257,10 @@
if verity_key is None:
verity_key = info_dict['verity_key'] + '.x509.pem'
for image in ('boot.img', 'recovery.img', 'recovery-two-step.img'):
- image_path = os.path.join(input_tmp, 'IMAGES', image)
+ if image == 'recovery-two-step.img':
+ image_path = os.path.join(input_tmp, 'OTA', image)
+ else:
+ image_path = os.path.join(input_tmp, 'IMAGES', image)
if not os.path.exists(image_path):
continue
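The hunk above special-cases recovery-two-step.img, which is now looked up under OTA/ rather than IMAGES/ inside the extracted target files. A table-driven equivalent of the same lookup, shown only to make the mapping explicit (not the script's actual code):

    import os

    # Every verified image sits under IMAGES/ except recovery-two-step.img.
    IMAGE_DIRS = {'recovery-two-step.img': 'OTA'}

    def image_path(input_tmp, image):
      return os.path.join(input_tmp, IMAGE_DIRS.get(image, 'IMAGES'), image)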
diff --git a/tools/signapk/src/com/android/signapk/SignApk.java b/tools/signapk/src/com/android/signapk/SignApk.java
index 57973ec..9809ed4 100644
--- a/tools/signapk/src/com/android/signapk/SignApk.java
+++ b/tools/signapk/src/com/android/signapk/SignApk.java
@@ -381,9 +381,8 @@
byte[] buffer = new byte[4096];
int num;
- List<Pattern> pinPatterns = extractPinPatterns(in);
+ List<Hints.PatternWithRange> pinPatterns = extractPinPatterns(in);
ArrayList<Hints.ByteRange> pinByteRanges = pinPatterns == null ? null : new ArrayList<>();
- HashSet<String> namesToPin = new HashSet<>();
ArrayList<String> names = new ArrayList<String>();
for (Enumeration<JarEntry> e = in.entries(); e.hasMoreElements();) {
@@ -399,13 +398,6 @@
if (Hints.PIN_BYTE_RANGE_ZIP_ENTRY_NAME.equals(entryName)) {
continue; // We regenerate it below.
}
- if (pinPatterns != null) {
- for (Pattern pinPattern : pinPatterns) {
- if (pinPattern.matcher(entryName).matches()) {
- namesToPin.add(entryName);
- }
- }
- }
names.add(entryName);
}
Collections.sort(names);
@@ -485,6 +477,7 @@
DataSink entryDataSink =
(inspectEntryRequest != null) ? inspectEntryRequest.getDataSink() : null;
+ long entryDataStart = outCounter.getWrittenBytes();
try (InputStream data = in.getInputStream(inEntry)) {
while ((num = data.read(buffer)) > 0) {
out.write(buffer, 0, num);
@@ -500,11 +493,27 @@
inspectEntryRequest.done();
}
- if (namesToPin.contains(name)) {
- pinByteRanges.add(
- new Hints.ByteRange(
- entryHeaderStart,
- outCounter.getWrittenBytes()));
+ if (pinPatterns != null) {
+ boolean pinFileHeader = false;
+ for (Hints.PatternWithRange pinPattern : pinPatterns) {
+ if (!pinPattern.matcher(name).matches()) {
+ continue;
+ }
+ Hints.ByteRange dataRange =
+ new Hints.ByteRange(
+ entryDataStart,
+ outCounter.getWrittenBytes());
+ Hints.ByteRange pinRange =
+ pinPattern.ClampToAbsoluteByteRange(dataRange);
+ if (pinRange != null) {
+ pinFileHeader = true;
+ pinByteRanges.add(pinRange);
+ }
+ }
+ if (pinFileHeader) {
+ pinByteRanges.add(new Hints.ByteRange(entryHeaderStart,
+ entryDataStart));
+ }
}
}
@@ -528,6 +537,7 @@
DataSink entryDataSink =
(inspectEntryRequest != null) ? inspectEntryRequest.getDataSink() : null;
+ long entryDataStart = outCounter.getWrittenBytes();
InputStream data = in.getInputStream(inEntry);
while ((num = data.read(buffer)) > 0) {
out.write(buffer, 0, num);
@@ -541,11 +551,27 @@
inspectEntryRequest.done();
}
- if (namesToPin.contains(name)) {
- pinByteRanges.add(
- new Hints.ByteRange(
- entryHeaderStart,
- outCounter.getWrittenBytes()));
+ if (pinPatterns != null) {
+ boolean pinFileHeader = false;
+ for (Hints.PatternWithRange pinPattern : pinPatterns) {
+ if (!pinPattern.matcher(name).matches()) {
+ continue;
+ }
+ Hints.ByteRange dataRange =
+ new Hints.ByteRange(
+ entryDataStart,
+ outCounter.getWrittenBytes());
+ Hints.ByteRange pinRange =
+ pinPattern.ClampToAbsoluteByteRange(dataRange);
+ if (pinRange != null) {
+ pinFileHeader = true;
+ pinByteRanges.add(pinRange);
+ }
+ }
+ if (pinFileHeader) {
+ pinByteRanges.add(new Hints.ByteRange(entryHeaderStart,
+ entryDataStart));
+ }
}
}
@@ -558,7 +584,7 @@
}
}
- private static List<Pattern> extractPinPatterns(JarFile in) throws IOException {
+ private static List<Hints.PatternWithRange> extractPinPatterns(JarFile in) throws IOException {
ZipEntry pinMetaEntry = in.getEntry(Hints.PIN_HINT_ASSET_ZIP_ENTRY_NAME);
if (pinMetaEntry == null) {
return null;
diff --git a/tools/warn.py b/tools/warn.py
index 9389b7d..48feb49 100755
--- a/tools/warn.py
+++ b/tools/warn.py
@@ -2816,7 +2816,6 @@
simple_project_pattern('system/extras/memory_replay'),
simple_project_pattern('system/extras/mmap-perf'),
simple_project_pattern('system/extras/multinetwork'),
- simple_project_pattern('system/extras/perfprofd'),
simple_project_pattern('system/extras/procrank'),
simple_project_pattern('system/extras/runconuid'),
simple_project_pattern('system/extras/showmap'),