Merge "Add new core-icu4j after separating icu4j from core-libart"
diff --git a/README.md b/README.md
index 60d7d5a..18422ea 100644
--- a/README.md
+++ b/README.md
@@ -355,6 +355,18 @@
dlv connect :1234
```
+If you see an error:
+```
+Could not attach to pid 593: this could be caused by a kernel
+security setting, try writing "0" to /proc/sys/kernel/yama/ptrace_scope
+```
+you can temporarily disable
+[Yama's ptrace protection](https://www.kernel.org/doc/Documentation/security/Yama.txt)
+using:
+```bash
+sudo sysctl -w kernel.yama.ptrace_scope=0
+```
+
## Contact
Email android-building@googlegroups.com (external) for any questions, or see
diff --git a/android/config.go b/android/config.go
index 074dfc7..72372ef 100644
--- a/android/config.go
+++ b/android/config.go
@@ -689,10 +689,6 @@
return c.Targets[Android][0].Arch.ArchType
}
-func (c *config) SkipDeviceInstall() bool {
- return c.EmbeddedInMake()
-}
-
func (c *config) SkipMegaDeviceInstall(path string) bool {
return Bool(c.Mega_device) &&
strings.HasPrefix(path, filepath.Join(c.buildDir, "target", "product"))
@@ -852,6 +848,10 @@
return ExistentPathForSource(ctx, "frameworks", "base").Valid()
}
+func (c *config) VndkSnapshotBuildArtifacts() bool {
+ return Bool(c.productVariables.VndkSnapshotBuildArtifacts)
+}
+
func (c *deviceConfig) Arches() []Arch {
var arches []Arch
for _, target := range c.config.Targets[Android] {
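As background for the `VndkSnapshotBuildArtifacts()` getter added above: product variables are stored as `*bool` so that "unset" and an explicit `false` stay distinguishable, and the `Bool()` helper reads them nil-safely. A minimal standalone sketch of that pattern (hypothetical names, not the actual Soong types):
```go
package main

import "fmt"

// productVariables mimics Soong's pointer-valued product variables:
// a nil pointer means "not set", which is distinct from an explicit false.
type productVariables struct {
	VndkSnapshotBuildArtifacts *bool
}

// boolVar is the nil-safe accessor pattern that getters such as
// config.VndkSnapshotBuildArtifacts() rely on.
func boolVar(p *bool) bool {
	return p != nil && *p
}

func main() {
	enabled := true
	unset := productVariables{}
	set := productVariables{VndkSnapshotBuildArtifacts: &enabled}

	fmt.Println(boolVar(unset.VndkSnapshotBuildArtifacts)) // false (variable not set)
	fmt.Println(boolVar(set.VndkSnapshotBuildArtifacts))   // true
}
```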
diff --git a/android/module.go b/android/module.go
index 138b9cd..990a893 100644
--- a/android/module.go
+++ b/android/module.go
@@ -155,6 +155,7 @@
InstallInData() bool
InstallInSanitizerDir() bool
InstallInRecovery() bool
+ InstallBypassMake() bool
RequiredModuleNames() []string
HostRequiredModuleNames() []string
@@ -192,6 +193,7 @@
InstallInData() bool
InstallInSanitizerDir() bool
InstallInRecovery() bool
+ InstallBypassMake() bool
SkipInstall()
ExportedToMake() bool
NoticeFile() OptionalPath
@@ -837,6 +839,10 @@
return Bool(m.commonProperties.Recovery)
}
+func (m *ModuleBase) InstallBypassMake() bool {
+ return false
+}
+
func (m *ModuleBase) Owner() string {
return String(m.commonProperties.Owner)
}
@@ -1493,6 +1499,10 @@
return m.module.InstallInRecovery()
}
+func (m *moduleContext) InstallBypassMake() bool {
+ return m.module.InstallBypassMake()
+}
+
func (m *moduleContext) skipInstall(fullInstallPath OutputPath) bool {
if m.module.base().commonProperties.SkipInstall {
return true
@@ -1506,7 +1516,7 @@
}
if m.Device() {
- if m.Config().SkipDeviceInstall() {
+ if m.Config().EmbeddedInMake() && !m.InstallBypassMake() {
return true
}
diff --git a/android/mutator.go b/android/mutator.go
index 82376e4..8e4343d 100644
--- a/android/mutator.go
+++ b/android/mutator.go
@@ -143,6 +143,7 @@
CreateVariations(...string) []blueprint.Module
CreateLocalVariations(...string) []blueprint.Module
SetDependencyVariation(string)
+ SetDefaultDependencyVariation(*string)
AddVariationDependencies([]blueprint.Variation, blueprint.DependencyTag, ...string)
AddFarVariationDependencies([]blueprint.Variation, blueprint.DependencyTag, ...string)
AddInterVariantDependency(tag blueprint.DependencyTag, from, to blueprint.Module)
@@ -292,6 +293,10 @@
b.bp.SetDependencyVariation(variation)
}
+func (b *bottomUpMutatorContext) SetDefaultDependencyVariation(variation *string) {
+ b.bp.SetDefaultDependencyVariation(variation)
+}
+
func (b *bottomUpMutatorContext) AddVariationDependencies(variations []blueprint.Variation, tag blueprint.DependencyTag,
names ...string) {
diff --git a/android/paths.go b/android/paths.go
index e3f0544..0d99918 100644
--- a/android/paths.go
+++ b/android/paths.go
@@ -46,6 +46,7 @@
InstallInData() bool
InstallInSanitizerDir() bool
InstallInRecovery() bool
+ InstallBypassMake() bool
}
var _ ModuleInstallPathContext = ModuleContext(nil)
@@ -818,6 +819,17 @@
return OutputPath{basePath{path, ctx.Config(), ""}}
}
+// pathForInstallInMakeDir is used by PathForModuleInstall when the module returns true
+// for InstallBypassMake to produce an OutputPath that installs to $OUT_DIR instead of
+// $OUT_DIR/soong.
+func pathForInstallInMakeDir(ctx PathContext, pathComponents ...string) OutputPath {
+ path, err := validatePath(pathComponents...)
+ if err != nil {
+ reportPathError(ctx, err)
+ }
+ return OutputPath{basePath{"../" + path, ctx.Config(), ""}}
+}
+
// PathsForOutput returns Paths rooted from buildDir
func PathsForOutput(ctx PathContext, paths []string) WritablePaths {
ret := make(WritablePaths, len(paths))
@@ -1123,6 +1135,9 @@
outPaths = append([]string{"debug"}, outPaths...)
}
outPaths = append(outPaths, pathComponents...)
+ if ctx.InstallBypassMake() && ctx.Config().EmbeddedInMake() {
+ return pathForInstallInMakeDir(ctx, outPaths...)
+ }
return PathForOutput(ctx, outPaths...)
}
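To make the effect of the new `InstallBypassMake` hook concrete, here is a minimal standalone sketch of the path selection above using plain strings (not the real `OutputPath` type); treating `out/soong` as the Soong build dir is an assumption for the example:
```go
package main

import (
	"fmt"
	"path/filepath"
)

// modulePath mirrors the decision added to PathForModuleInstall: when the
// build is driven by Make (EmbeddedInMake) and the module opts in via
// InstallBypassMake, the path escapes the Soong build dir with "../" so the
// file is installed under $OUT_DIR instead of $OUT_DIR/soong.
func modulePath(soongBuildDir string, embeddedInMake, bypassMake bool, components ...string) string {
	rel := filepath.Join(components...)
	if embeddedInMake && bypassMake {
		return filepath.Join(soongBuildDir, "..", rel)
	}
	return filepath.Join(soongBuildDir, rel)
}

func main() {
	// out/target/product/... when bypassing Make's install step
	fmt.Println(modulePath("out/soong", true, true, "target", "product", "generic", "system", "bin", "foo"))
	// out/soong/target/product/... otherwise
	fmt.Println(modulePath("out/soong", true, false, "target", "product", "generic", "system", "bin", "foo"))
}
```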
diff --git a/android/paths_test.go b/android/paths_test.go
index 8286e9a..f2996bf 100644
--- a/android/paths_test.go
+++ b/android/paths_test.go
@@ -227,6 +227,10 @@
return m.inRecovery
}
+func (m moduleInstallPathContextImpl) InstallBypassMake() bool {
+ return false
+}
+
func TestPathForModuleInstall(t *testing.T) {
testConfig := TestConfig("", nil)
diff --git a/android/variable.go b/android/variable.go
index bfff81c..8886bae 100644
--- a/android/variable.go
+++ b/android/variable.go
@@ -261,7 +261,8 @@
PgoAdditionalProfileDirs []string `json:",omitempty"`
- VndkUseCoreVariant *bool `json:",omitempty"`
+ VndkUseCoreVariant *bool `json:",omitempty"`
+ VndkSnapshotBuildArtifacts *bool `json:",omitempty"`
BoardVendorSepolicyDirs []string `json:",omitempty"`
BoardOdmSepolicyDirs []string `json:",omitempty"`
diff --git a/apex/apex_test.go b/apex/apex_test.go
index 38d2bf2..f034f2a 100644
--- a/apex/apex_test.go
+++ b/apex/apex_test.go
@@ -30,6 +30,16 @@
var buildDir string
+// names returns the list of names in a space-separated string, ignoring empty fields
+func names(s string) (ns []string) {
+ for _, n := range strings.Split(s, " ") {
+ if len(n) > 0 {
+ ns = append(ns, n)
+ }
+ }
+ return
+}
+
func testApexError(t *testing.T, pattern, bp string) {
ctx, config := testApexContext(t, bp)
_, errs := ctx.ParseFileList(".", []string{"Android.bp"})
@@ -626,6 +636,73 @@
ensureNotContains(t, libFooStubsLdFlags, "libbar.so")
}
+func TestApexWithRuntimeLibsDependency(t *testing.T) {
+ /*
+ myapex
+ |
+ v (runtime_libs)
+ mylib ------+------> libfoo [provides stub]
+ |
+ `------> libbar
+ */
+ ctx, _ := testApex(t, `
+ apex {
+ name: "myapex",
+ key: "myapex.key",
+ native_shared_libs: ["mylib"],
+ }
+
+ apex_key {
+ name: "myapex.key",
+ public_key: "testkey.avbpubkey",
+ private_key: "testkey.pem",
+ }
+
+ cc_library {
+ name: "mylib",
+ srcs: ["mylib.cpp"],
+ runtime_libs: ["libfoo", "libbar"],
+ system_shared_libs: [],
+ stl: "none",
+ }
+
+ cc_library {
+ name: "libfoo",
+ srcs: ["mylib.cpp"],
+ system_shared_libs: [],
+ stl: "none",
+ stubs: {
+ versions: ["10", "20", "30"],
+ },
+ }
+
+ cc_library {
+ name: "libbar",
+ srcs: ["mylib.cpp"],
+ system_shared_libs: [],
+ stl: "none",
+ }
+
+ `)
+
+ apexRule := ctx.ModuleForTests("myapex", "android_common_myapex").Rule("apexRule")
+ copyCmds := apexRule.Args["copy_commands"]
+
+ // Ensure that direct non-stubs dep is always included
+ ensureContains(t, copyCmds, "image.apex/lib64/mylib.so")
+
+ // Ensure that indirect stubs dep is not included
+ ensureNotContains(t, copyCmds, "image.apex/lib64/libfoo.so")
+
+	// Ensure that runtime_libs dep is included
+ ensureContains(t, copyCmds, "image.apex/lib64/libbar.so")
+
+ injectRule := ctx.ModuleForTests("myapex", "android_common_myapex").Rule("injectApexDependency")
+ ensureListEmpty(t, names(injectRule.Args["provideNativeLibs"]))
+ ensureListContains(t, names(injectRule.Args["requireNativeLibs"]), "libfoo.so")
+
+}
+
func TestApexWithSystemLibsStubs(t *testing.T) {
ctx, _ := testApex(t, `
apex {
@@ -1133,15 +1210,6 @@
}
`)
- names := func(s string) (ns []string) {
- for _, n := range strings.Split(s, " ") {
- if len(n) > 0 {
- ns = append(ns, n)
- }
- }
- return
- }
-
var injectRule android.TestingBuildParams
var provideNativeLibs, requireNativeLibs []string
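For reference, the hoisted `names` helper simply drops empty fields so rule arguments such as `requireNativeLibs` can be compared as lists; a small standalone example of the same splitting behavior:
```go
package main

import (
	"fmt"
	"strings"
)

// names mirrors the test helper above: split on spaces and drop empty fields.
func names(s string) (ns []string) {
	for _, n := range strings.Split(s, " ") {
		if len(n) > 0 {
			ns = append(ns, n)
		}
	}
	return
}

func main() {
	fmt.Println(names("  libfoo.so   libbar.so ")) // [libfoo.so libbar.so]
	fmt.Println(len(names("")))                    // 0
}
```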
diff --git a/cc/builder.go b/cc/builder.go
index 2909d51..89c418b 100644
--- a/cc/builder.go
+++ b/cc/builder.go
@@ -498,7 +498,9 @@
Input: srcFile,
// We must depend on objFile, since clang-tidy doesn't
// support exporting dependencies.
- Implicit: objFile,
+ Implicit: objFile,
+ Implicits: cFlagsDeps,
+ OrderOnly: pathDeps,
Args: map[string]string{
"cFlags": moduleToolingCflags,
"tidyFlags": flags.tidyFlags,
@@ -516,6 +518,8 @@
Output: sAbiDumpFile,
Input: srcFile,
Implicit: objFile,
+ Implicits: cFlagsDeps,
+ OrderOnly: pathDeps,
Args: map[string]string{
"cFlags": moduleToolingCflags,
"exportDirs": flags.sAbiFlags,
diff --git a/cc/cc.go b/cc/cc.go
index 2bde2d3..b637d3e 100644
--- a/cc/cc.go
+++ b/cc/cc.go
@@ -56,6 +56,8 @@
ctx.TopDown("fuzzer_deps", sanitizerDepsMutator(fuzzer))
ctx.BottomUp("fuzzer", sanitizerMutator(fuzzer)).Parallel()
+ // cfi mutator shouldn't run before sanitizers that return true for
+ // incompatibleWithCfi()
ctx.TopDown("cfi_deps", sanitizerDepsMutator(cfi))
ctx.BottomUp("cfi", sanitizerMutator(cfi)).Parallel()
@@ -255,6 +257,7 @@
type ModuleContextIntf interface {
static() bool
staticBinary() bool
+ header() bool
toolchain() config.Toolchain
useSdk() bool
sdkVersion() string
@@ -715,6 +718,10 @@
return ctx.mod.staticBinary()
}
+func (ctx *moduleContextImpl) header() bool {
+ return ctx.mod.header()
+}
+
func (ctx *moduleContextImpl) useSdk() bool {
if ctx.ctx.Device() && !ctx.useVndk() && !ctx.inRecovery() && !ctx.ctx.Fuchsia() {
return String(ctx.mod.Properties.Sdk_version) != ""
@@ -2023,6 +2030,15 @@
return false
}
+func (c *Module) header() bool {
+ if h, ok := c.linker.(interface {
+ header() bool
+ }); ok {
+ return h.header()
+ }
+ return false
+}
+
func (c *Module) getMakeLinkType(actx android.ModuleContext) string {
name := actx.ModuleName()
if c.useVndk() {
diff --git a/cc/compiler.go b/cc/compiler.go
index ffb6ad2..85ff400 100644
--- a/cc/compiler.go
+++ b/cc/compiler.go
@@ -309,6 +309,7 @@
flags.SystemIncludeFlags = append(flags.SystemIncludeFlags,
"-isystem "+getCurrentIncludePath(ctx).String(),
"-isystem "+getCurrentIncludePath(ctx).Join(ctx, config.NDKTriple(tc)).String())
+ flags.GlobalFlags = append(flags.GlobalFlags, "-D__ANDROID_NDK__")
}
if ctx.useVndk() {
diff --git a/cc/library.go b/cc/library.go
index b193ab7..893fc66 100644
--- a/cc/library.go
+++ b/cc/library.go
@@ -508,6 +508,7 @@
type libraryInterface interface {
getWholeStaticMissingDeps() []string
static() bool
+ shared() bool
objs() Objects
reuseObjs() (Objects, exportedFlagsProducer)
toc() android.OptionalPath
diff --git a/cc/llndk_library.go b/cc/llndk_library.go
index 8290103..4d59975 100644
--- a/cc/llndk_library.go
+++ b/cc/llndk_library.go
@@ -232,7 +232,7 @@
&library.MutatedProperties,
&library.flagExporter.Properties)
- android.InitAndroidArchModule(module, android.DeviceSupported, android.MultilibBoth)
+ module.Init()
return module
}
diff --git a/cc/sanitize.go b/cc/sanitize.go
index 261ca88..a017824 100644
--- a/cc/sanitize.go
+++ b/cc/sanitize.go
@@ -124,6 +124,10 @@
}
}
+func (t sanitizerType) incompatibleWithCfi() bool {
+ return t == asan || t == fuzzer || t == hwasan
+}
+
type SanitizeProperties struct {
// enable AddressSanitizer, ThreadSanitizer, or UndefinedBehaviorSanitizer
Sanitize struct {
@@ -556,16 +560,18 @@
}
func (sanitize *sanitize) AndroidMk(ctx AndroidMkContext, ret *android.AndroidMkData) {
- // Add a suffix for CFI-enabled static libraries to allow surfacing both to make without a
- // name conflict.
- if ret.Class == "STATIC_LIBRARIES" && Bool(sanitize.Properties.Sanitize.Cfi) {
- ret.SubName += ".cfi"
- }
- if ret.Class == "STATIC_LIBRARIES" && Bool(sanitize.Properties.Sanitize.Hwaddress) {
- ret.SubName += ".hwasan"
- }
- if ret.Class == "STATIC_LIBRARIES" && Bool(sanitize.Properties.Sanitize.Scs) {
- ret.SubName += ".scs"
+ // Add a suffix for cfi/hwasan/scs-enabled static/header libraries to allow surfacing
+ // both the sanitized and non-sanitized variants to make without a name conflict.
+ if ret.Class == "STATIC_LIBRARIES" || ret.Class == "HEADER_LIBRARIES" {
+ if Bool(sanitize.Properties.Sanitize.Cfi) {
+ ret.SubName += ".cfi"
+ }
+ if Bool(sanitize.Properties.Sanitize.Hwaddress) {
+ ret.SubName += ".hwasan"
+ }
+ if Bool(sanitize.Properties.Sanitize.Scs) {
+ ret.SubName += ".scs"
+ }
}
}
@@ -870,7 +876,7 @@
{Mutator: "image", Variation: c.imageVariation()},
{Mutator: "arch", Variation: mctx.Target().String()},
}, staticDepTag, runtimeLibrary)
- } else if !c.static() {
+ } else if !c.static() && !c.header() {
// dynamic executable and shared libs get shared runtime libs
mctx.AddFarVariationDependencies([]blueprint.Variation{
{Mutator: "link", Variation: "shared"},
@@ -899,108 +905,69 @@
modules := mctx.CreateVariations(t.variationName())
modules[0].(*Module).sanitize.SetSanitizer(t, true)
} else if c.sanitize.isSanitizerEnabled(t) || c.sanitize.Properties.SanitizeDep {
- // Save original sanitizer status before we assign values to variant
- // 0 as that overwrites the original.
isSanitizerEnabled := c.sanitize.isSanitizerEnabled(t)
+ if mctx.Device() && t.incompatibleWithCfi() {
+ // TODO: Make sure that cfi mutator runs "after" any of the sanitizers that
+ // are incompatible with cfi
+ c.sanitize.SetSanitizer(cfi, false)
+ }
+ if c.static() || c.header() || t == asan || t == fuzzer {
+ // Static and header libs are split into non-sanitized and sanitized variants.
+ // Shared libs are not split. However, for asan and fuzzer, we split even for shared
+ // libs because a library sanitized for asan/fuzzer can't be linked from a library
+ // that isn't sanitized for asan/fuzzer.
+ //
+			// Note for defaultVariation: since we split static/header libs but not shared libs, it is
+			// possible for the sanitized variant of a static/header lib to depend on the non-sanitized
+			// variant of a shared lib. Such an unfulfilled variation causes an error when the module is
+			// split. defaultVariation is the name of the variation that will be used when such a dangling
+			// dependency occurs during the split of the current module. By setting it to the name of the
+			// sanitized variation, the dangling dependency is redirected to the sanitized variant of the
+			// dependent module.
+ defaultVariation := t.variationName()
+ mctx.SetDefaultDependencyVariation(&defaultVariation)
+ modules := mctx.CreateVariations("", t.variationName())
+ modules[0].(*Module).sanitize.SetSanitizer(t, false)
+ modules[1].(*Module).sanitize.SetSanitizer(t, true)
+ modules[0].(*Module).sanitize.Properties.SanitizeDep = false
+ modules[1].(*Module).sanitize.Properties.SanitizeDep = false
- modules := mctx.CreateVariations("", t.variationName())
- modules[0].(*Module).sanitize.SetSanitizer(t, false)
- modules[1].(*Module).sanitize.SetSanitizer(t, true)
+			// For cfi/scs/hwasan, we can export both the sanitized and un-sanitized variants
+			// to Make, because the sanitized variant has a different name suffix.
+			// For other types of sanitizers, suppress the variation that is disabled.
+ if t != cfi && t != scs && t != hwasan {
+ if isSanitizerEnabled {
+ modules[0].(*Module).Properties.PreventInstall = true
+ modules[0].(*Module).Properties.HideFromMake = true
+ } else {
+ modules[1].(*Module).Properties.PreventInstall = true
+ modules[1].(*Module).Properties.HideFromMake = true
+ }
+ }
- modules[0].(*Module).sanitize.Properties.SanitizeDep = false
- modules[1].(*Module).sanitize.Properties.SanitizeDep = false
-
- // We don't need both variants active for anything but CFI-enabled
- // target static libraries, so suppress the appropriate variant in
- // all other cases.
- if t == cfi {
+ // Export the static lib name to make
if c.static() {
- if !mctx.Device() {
- if isSanitizerEnabled {
- modules[0].(*Module).Properties.PreventInstall = true
- modules[0].(*Module).Properties.HideFromMake = true
+ if t == cfi {
+ appendStringSync(c.Name(), cfiStaticLibs(mctx.Config()), &cfiStaticLibsMutex)
+ } else if t == hwasan {
+ if c.useVndk() {
+ appendStringSync(c.Name(), hwasanVendorStaticLibs(mctx.Config()),
+ &hwasanStaticLibsMutex)
} else {
- modules[1].(*Module).Properties.PreventInstall = true
- modules[1].(*Module).Properties.HideFromMake = true
+ appendStringSync(c.Name(), hwasanStaticLibs(mctx.Config()),
+ &hwasanStaticLibsMutex)
}
- } else {
- cfiStaticLibs := cfiStaticLibs(mctx.Config())
+ }
+ }
+ } else {
+ // Shared libs are not split. Only the sanitized variant is created.
+ modules := mctx.CreateVariations(t.variationName())
+ modules[0].(*Module).sanitize.SetSanitizer(t, true)
+ modules[0].(*Module).sanitize.Properties.SanitizeDep = false
- cfiStaticLibsMutex.Lock()
- *cfiStaticLibs = append(*cfiStaticLibs, c.Name())
- cfiStaticLibsMutex.Unlock()
- }
- } else {
- modules[0].(*Module).Properties.PreventInstall = true
- modules[0].(*Module).Properties.HideFromMake = true
- }
- } else if t == asan {
- if mctx.Device() {
- // CFI and ASAN are currently mutually exclusive so disable
- // CFI if this is an ASAN variant.
- modules[1].(*Module).sanitize.Properties.InSanitizerDir = true
- modules[1].(*Module).sanitize.SetSanitizer(cfi, false)
- }
- if isSanitizerEnabled {
- modules[0].(*Module).Properties.PreventInstall = true
- modules[0].(*Module).Properties.HideFromMake = true
- } else {
- modules[1].(*Module).Properties.PreventInstall = true
- modules[1].(*Module).Properties.HideFromMake = true
- }
- } else if t == scs {
- // We don't currently link any static libraries built with make into
- // libraries built with SCS, so we don't need logic for propagating
- // SCSness of dependencies into make.
- if !c.static() {
- if isSanitizerEnabled {
- modules[0].(*Module).Properties.PreventInstall = true
- modules[0].(*Module).Properties.HideFromMake = true
- } else {
- modules[1].(*Module).Properties.PreventInstall = true
- modules[1].(*Module).Properties.HideFromMake = true
- }
- }
- } else if t == fuzzer {
- // TODO(b/131771163): CFI and fuzzer support are mutually incompatible
- // as CFI pulls in LTO.
- if mctx.Device() {
- modules[1].(*Module).sanitize.SetSanitizer(cfi, false)
- }
- if isSanitizerEnabled {
- modules[0].(*Module).Properties.PreventInstall = true
- modules[0].(*Module).Properties.HideFromMake = true
- } else {
- modules[1].(*Module).Properties.PreventInstall = true
- modules[1].(*Module).Properties.HideFromMake = true
- }
- } else if t == hwasan {
- if mctx.Device() {
- // CFI and HWASAN are currently mutually exclusive so disable
- // CFI if this is an HWASAN variant.
- modules[1].(*Module).sanitize.SetSanitizer(cfi, false)
- }
-
- if c.static() {
- if c.useVndk() {
- hwasanVendorStaticLibs := hwasanVendorStaticLibs(mctx.Config())
- hwasanStaticLibsMutex.Lock()
- *hwasanVendorStaticLibs = append(*hwasanVendorStaticLibs, c.Name())
- hwasanStaticLibsMutex.Unlock()
- } else {
- hwasanStaticLibs := hwasanStaticLibs(mctx.Config())
- hwasanStaticLibsMutex.Lock()
- *hwasanStaticLibs = append(*hwasanStaticLibs, c.Name())
- hwasanStaticLibsMutex.Unlock()
- }
- } else {
- if isSanitizerEnabled {
- modules[0].(*Module).Properties.PreventInstall = true
- modules[0].(*Module).Properties.HideFromMake = true
- } else {
- modules[1].(*Module).Properties.PreventInstall = true
- modules[1].(*Module).Properties.HideFromMake = true
- }
+ // locate the asan libraries under /data/asan
+ if mctx.Device() && t == asan && isSanitizerEnabled {
+ modules[0].(*Module).sanitize.Properties.InSanitizerDir = true
}
}
}
@@ -1036,6 +1003,12 @@
}).(*[]string)
}
+func appendStringSync(item string, list *[]string, mutex *sync.Mutex) {
+ mutex.Lock()
+ *list = append(*list, item)
+ mutex.Unlock()
+}
+
func enableMinimalRuntime(sanitize *sanitize) bool {
if !Bool(sanitize.Properties.Sanitize.Address) &&
!Bool(sanitize.Properties.Sanitize.Hwaddress) &&
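To illustrate the `SetDefaultDependencyVariation` behavior described in the comment inside `sanitizerMutator` above, here is a minimal standalone sketch of the lookup rule with plain maps (hypothetical module data, not the blueprint API):
```go
package main

import "fmt"

// resolve models how a dependency variation is chosen after the split: use
// the requested variation if the dependency provides it, otherwise fall back
// to the default variation registered via SetDefaultDependencyVariation
// (which the diff sets to the sanitized variation name).
func resolve(depVariants map[string]bool, requested, defaultVariation string) (string, error) {
	if depVariants[requested] {
		return requested, nil
	}
	if depVariants[defaultVariation] {
		return defaultVariation, nil
	}
	return "", fmt.Errorf("no variation %q and no default %q", requested, defaultVariation)
}

func main() {
	// A shared lib sanitized for hwasan is not split, so it only has the
	// "hwasan" variation.
	sharedDep := map[string]bool{"hwasan": true}

	// A variant of the current module asking for the missing "" variation
	// would dangle; with the default set to "hwasan" it is redirected instead.
	v, err := resolve(sharedDep, "", "hwasan")
	fmt.Println(v, err) // hwasan <nil>
}
```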
diff --git a/cc/vndk.go b/cc/vndk.go
index 2c78047..2a86f5b 100644
--- a/cc/vndk.go
+++ b/cc/vndk.go
@@ -15,8 +15,8 @@
package cc
import (
+ "encoding/json"
"errors"
- "fmt"
"path/filepath"
"sort"
"strings"
@@ -206,16 +206,9 @@
modulePathsKey = android.NewOnceKey("modulePaths")
vndkSnapshotOutputsKey = android.NewOnceKey("vndkSnapshotOutputs")
vndkLibrariesLock sync.Mutex
-)
-type vndkSnapshotOutputPaths struct {
- configs android.Paths
- notices android.Paths
- vndkCoreLibs android.Paths
- vndkCoreLibs2nd android.Paths
- vndkSpLibs android.Paths
- vndkSpLibs2nd android.Paths
-}
+ headerExts = []string{".h", ".hh", ".hpp", ".hxx", ".h++", ".inl", ".inc", ".ipp", ".h.generic"}
+)
func vndkCoreLibraries(config android.Config) *[]string {
return config.Once(vndkCoreLibrariesKey, func() interface{} {
@@ -253,10 +246,10 @@
}).(map[string]string)
}
-func vndkSnapshotOutputs(config android.Config) *vndkSnapshotOutputPaths {
+func vndkSnapshotOutputs(config android.Config) *android.RuleBuilderInstalls {
return config.Once(vndkSnapshotOutputsKey, func() interface{} {
- return &vndkSnapshotOutputPaths{}
- }).(*vndkSnapshotOutputPaths)
+ return &android.RuleBuilderInstalls{}
+ }).(*android.RuleBuilderInstalls)
}
func processLlndkLibrary(mctx android.BottomUpMutatorContext, m *Module) {
@@ -357,13 +350,7 @@
android.RegisterSingletonType("vndk-snapshot", VndkSnapshotSingleton)
android.RegisterMakeVarsProvider(pctx, func(ctx android.MakeVarsContext) {
outputs := vndkSnapshotOutputs(ctx.Config())
-
- ctx.Strict("SOONG_VNDK_SNAPSHOT_CONFIGS", strings.Join(outputs.configs.Strings(), " "))
- ctx.Strict("SOONG_VNDK_SNAPSHOT_NOTICES", strings.Join(outputs.notices.Strings(), " "))
- ctx.Strict("SOONG_VNDK_SNAPSHOT_CORE_LIBS", strings.Join(outputs.vndkCoreLibs.Strings(), " "))
- ctx.Strict("SOONG_VNDK_SNAPSHOT_SP_LIBS", strings.Join(outputs.vndkSpLibs.Strings(), " "))
- ctx.Strict("SOONG_VNDK_SNAPSHOT_CORE_LIBS_2ND", strings.Join(outputs.vndkCoreLibs2nd.Strings(), " "))
- ctx.Strict("SOONG_VNDK_SNAPSHOT_SP_LIBS_2ND", strings.Join(outputs.vndkSpLibs2nd.Strings(), " "))
+ ctx.Strict("SOONG_VNDK_SNAPSHOT_FILES", outputs.String())
})
}
@@ -373,26 +360,6 @@
type vndkSnapshotSingleton struct{}
-func installVndkSnapshotLib(ctx android.SingletonContext, name string, module *Module, dir string) android.Path {
- if !module.outputFile.Valid() {
- panic(fmt.Errorf("module %s has no outputFile\n", name))
- }
-
- out := android.PathForOutput(ctx, dir, name+".so")
-
- ctx.Build(pctx, android.BuildParams{
- Rule: android.Cp,
- Input: module.outputFile.Path(),
- Output: out,
- Description: "vndk snapshot " + dir + "/" + name + ".so",
- Args: map[string]string{
- "cpFlags": "-f -L",
- },
- })
-
- return out
-}
-
func (c *vndkSnapshotSingleton) GenerateBuildActions(ctx android.SingletonContext) {
// BOARD_VNDK_VERSION must be set to 'current' in order to generate a VNDK snapshot.
if ctx.DeviceConfig().VndkVersion() != "current" {
@@ -411,30 +378,58 @@
snapshotDir := "vndk-snapshot"
- var vndkLibPath, vndkLib2ndPath string
+ vndkLibDir := make(map[android.ArchType]string)
- snapshotVariantPath := filepath.Join(snapshotDir, ctx.DeviceConfig().DeviceArch())
- if ctx.DeviceConfig().BinderBitness() == "32" {
- vndkLibPath = filepath.Join(snapshotVariantPath, "binder32", fmt.Sprintf(
- "arch-%s-%s", ctx.DeviceConfig().DeviceArch(), ctx.DeviceConfig().DeviceArchVariant()))
- vndkLib2ndPath = filepath.Join(snapshotVariantPath, "binder32", fmt.Sprintf(
- "arch-%s-%s", ctx.DeviceConfig().DeviceSecondaryArch(), ctx.DeviceConfig().DeviceSecondaryArchVariant()))
- } else {
- vndkLibPath = filepath.Join(snapshotVariantPath, fmt.Sprintf(
- "arch-%s-%s", ctx.DeviceConfig().DeviceArch(), ctx.DeviceConfig().DeviceArchVariant()))
- vndkLib2ndPath = filepath.Join(snapshotVariantPath, fmt.Sprintf(
- "arch-%s-%s", ctx.DeviceConfig().DeviceSecondaryArch(), ctx.DeviceConfig().DeviceSecondaryArchVariant()))
+ snapshotVariantDir := ctx.DeviceConfig().DeviceArch()
+ for _, target := range ctx.Config().Targets[android.Android] {
+ dir := snapshotVariantDir
+ if ctx.DeviceConfig().BinderBitness() == "32" {
+ dir = filepath.Join(dir, "binder32")
+ }
+ arch := "arch-" + target.Arch.ArchType.String()
+ if target.Arch.ArchVariant != "" {
+ arch += "-" + target.Arch.ArchVariant
+ }
+ dir = filepath.Join(dir, arch)
+ vndkLibDir[target.Arch.ArchType] = dir
}
-
- vndkCoreLibPath := filepath.Join(vndkLibPath, "shared", "vndk-core")
- vndkSpLibPath := filepath.Join(vndkLibPath, "shared", "vndk-sp")
- vndkCoreLib2ndPath := filepath.Join(vndkLib2ndPath, "shared", "vndk-core")
- vndkSpLib2ndPath := filepath.Join(vndkLib2ndPath, "shared", "vndk-sp")
- noticePath := filepath.Join(snapshotVariantPath, "NOTICE_FILES")
+ configsDir := filepath.Join(snapshotVariantDir, "configs")
+ noticeDir := filepath.Join(snapshotVariantDir, "NOTICE_FILES")
+ includeDir := filepath.Join(snapshotVariantDir, "include")
noticeBuilt := make(map[string]bool)
+ installSnapshotFileFromPath := func(path android.Path, out string) {
+ ctx.Build(pctx, android.BuildParams{
+ Rule: android.Cp,
+ Input: path,
+ Output: android.PathForOutput(ctx, snapshotDir, out),
+ Description: "vndk snapshot " + out,
+ Args: map[string]string{
+ "cpFlags": "-f -L",
+ },
+ })
+ *outputs = append(*outputs, android.RuleBuilderInstall{
+ From: android.PathForOutput(ctx, snapshotDir, out),
+ To: out,
+ })
+ }
+ installSnapshotFileFromContent := func(content, out string) {
+ ctx.Build(pctx, android.BuildParams{
+ Rule: android.WriteFile,
+ Output: android.PathForOutput(ctx, snapshotDir, out),
+ Description: "vndk snapshot " + out,
+ Args: map[string]string{
+ "content": content,
+ },
+ })
+ *outputs = append(*outputs, android.RuleBuilderInstall{
+ From: android.PathForOutput(ctx, snapshotDir, out),
+ To: out,
+ })
+ }
+
tryBuildNotice := func(m *Module) {
- name := ctx.ModuleName(m)
+ name := ctx.ModuleName(m) + ".so.txt"
if _, ok := noticeBuilt[name]; ok {
return
@@ -443,17 +438,7 @@
noticeBuilt[name] = true
if m.NoticeFile().Valid() {
- out := android.PathForOutput(ctx, noticePath, name+".so.txt")
- ctx.Build(pctx, android.BuildParams{
- Rule: android.Cp,
- Input: m.NoticeFile().Path(),
- Output: out,
- Description: "vndk snapshot notice " + name + ".so.txt",
- Args: map[string]string{
- "cpFlags": "-f -L",
- },
- })
- outputs.notices = append(outputs.notices, out)
+ installSnapshotFileFromPath(m.NoticeFile().Path(), filepath.Join(noticeDir, name))
}
}
@@ -461,84 +446,160 @@
vndkSpLibraries := vndkSpLibraries(ctx.Config())
vndkPrivateLibraries := vndkPrivateLibraries(ctx.Config())
+ var generatedHeaders android.Paths
+ includeDirs := make(map[string]bool)
+
+ type vndkSnapshotLibraryInterface interface {
+ exportedFlagsProducer
+ libraryInterface
+ }
+
+ var _ vndkSnapshotLibraryInterface = (*prebuiltLibraryLinker)(nil)
+ var _ vndkSnapshotLibraryInterface = (*libraryDecorator)(nil)
+
+ installVndkSnapshotLib := func(m *Module, l vndkSnapshotLibraryInterface, dir string) bool {
+ name := ctx.ModuleName(m)
+ libOut := filepath.Join(dir, name+".so")
+
+ installSnapshotFileFromPath(m.outputFile.Path(), libOut)
+ tryBuildNotice(m)
+
+ if ctx.Config().VndkSnapshotBuildArtifacts() {
+ prop := struct {
+ ExportedDirs []string `json:",omitempty"`
+ ExportedSystemDirs []string `json:",omitempty"`
+ ExportedFlags []string `json:",omitempty"`
+ RelativeInstallPath string `json:",omitempty"`
+ }{}
+ prop.ExportedFlags = l.exportedFlags()
+ prop.ExportedDirs = l.exportedDirs()
+ prop.ExportedSystemDirs = l.exportedSystemDirs()
+ prop.RelativeInstallPath = m.RelativeInstallPath()
+
+ propOut := libOut + ".json"
+
+ j, err := json.Marshal(prop)
+ if err != nil {
+ ctx.Errorf("json marshal to %q failed: %#v", propOut, err)
+ return false
+ }
+
+ installSnapshotFileFromContent(string(j), propOut)
+ }
+ return true
+ }
+
+ isVndkSnapshotLibrary := func(m *Module) (i vndkSnapshotLibraryInterface, libDir string, isVndkSnapshotLib bool) {
+ if m.Target().NativeBridge == android.NativeBridgeEnabled {
+ return nil, "", false
+ }
+ if !m.useVndk() || !m.IsForPlatform() || !m.installable() {
+ return nil, "", false
+ }
+ l, ok := m.linker.(vndkSnapshotLibraryInterface)
+ if !ok || !l.shared() {
+ return nil, "", false
+ }
+ name := ctx.ModuleName(m)
+ if inList(name, *vndkCoreLibraries) {
+ return l, filepath.Join("shared", "vndk-core"), true
+ } else if inList(name, *vndkSpLibraries) {
+ return l, filepath.Join("shared", "vndk-sp"), true
+ } else {
+ return nil, "", false
+ }
+ }
+
ctx.VisitAllModules(func(module android.Module) {
m, ok := module.(*Module)
- if !ok || !m.Enabled() || !m.useVndk() || !m.installable() {
+ if !ok || !m.Enabled() {
return
}
- if m.Target().NativeBridge == android.NativeBridgeEnabled {
+ baseDir, ok := vndkLibDir[m.Target().Arch.ArchType]
+ if !ok {
return
}
- lib, is_lib := m.linker.(*libraryDecorator)
- prebuilt_lib, is_prebuilt_lib := m.linker.(*prebuiltLibraryLinker)
-
- if !(is_lib && lib.shared()) && !(is_prebuilt_lib && prebuilt_lib.shared()) {
+ l, libDir, ok := isVndkSnapshotLibrary(m)
+ if !ok {
return
}
- is_2nd := m.Target().Arch.ArchType != ctx.Config().DevicePrimaryArchType()
+ if !installVndkSnapshotLib(m, l, filepath.Join(baseDir, libDir)) {
+ return
+ }
- name := ctx.ModuleName(module)
+ generatedHeaders = append(generatedHeaders, l.exportedDeps()...)
+ for _, dir := range append(l.exportedDirs(), l.exportedSystemDirs()...) {
+ includeDirs[dir] = true
+ }
+ })
- if inList(name, *vndkCoreLibraries) {
- if is_2nd {
- out := installVndkSnapshotLib(ctx, name, m, vndkCoreLib2ndPath)
- outputs.vndkCoreLibs2nd = append(outputs.vndkCoreLibs2nd, out)
- } else {
- out := installVndkSnapshotLib(ctx, name, m, vndkCoreLibPath)
- outputs.vndkCoreLibs = append(outputs.vndkCoreLibs, out)
+ if ctx.Config().VndkSnapshotBuildArtifacts() {
+ headers := make(map[string]bool)
+
+ for _, dir := range android.SortedStringKeys(includeDirs) {
+ // workaround to determine if dir is under output directory
+ if strings.HasPrefix(dir, android.PathForOutput(ctx).String()) {
+ continue
}
- tryBuildNotice(m)
- } else if inList(name, *vndkSpLibraries) {
- if is_2nd {
- out := installVndkSnapshotLib(ctx, name, m, vndkSpLib2ndPath)
- outputs.vndkSpLibs2nd = append(outputs.vndkSpLibs2nd, out)
- } else {
- out := installVndkSnapshotLib(ctx, name, m, vndkSpLibPath)
- outputs.vndkSpLibs = append(outputs.vndkSpLibs, out)
+ exts := headerExts
+			// libc++ headers under this directory have no file extension, so glob every file.
+ if strings.HasPrefix(dir, "external/libcxx/include") {
+ exts = []string{""}
}
- tryBuildNotice(m)
+ for _, ext := range exts {
+ glob, err := ctx.GlobWithDeps(dir+"/**/*"+ext, nil)
+ if err != nil {
+ ctx.Errorf("%#v\n", err)
+ return
+ }
+ for _, header := range glob {
+ if strings.HasSuffix(header, "/") {
+ continue
+ }
+ headers[header] = true
+ }
+ }
}
- })
- configsPath := filepath.Join(snapshotVariantPath, "configs")
- vndkCoreTxt := android.PathForOutput(ctx, configsPath, "vndkcore.libraries.txt")
- vndkPrivateTxt := android.PathForOutput(ctx, configsPath, "vndkprivate.libraries.txt")
- modulePathTxt := android.PathForOutput(ctx, configsPath, "module_paths.txt")
+ for _, header := range android.SortedStringKeys(headers) {
+ installSnapshotFileFromPath(android.PathForSource(ctx, header),
+ filepath.Join(includeDir, header))
+ }
- ctx.Build(pctx, android.BuildParams{
- Rule: android.WriteFile,
- Output: vndkCoreTxt,
- Description: "vndk snapshot vndkcore.libraries.txt",
- Args: map[string]string{
- "content": android.JoinWithSuffix(*vndkCoreLibraries, ".so", "\\n"),
- },
- })
- outputs.configs = append(outputs.configs, vndkCoreTxt)
+ isHeader := func(path string) bool {
+ for _, ext := range headerExts {
+ if strings.HasSuffix(path, ext) {
+ return true
+ }
+ }
+ return false
+ }
- ctx.Build(pctx, android.BuildParams{
- Rule: android.WriteFile,
- Output: vndkPrivateTxt,
- Description: "vndk snapshot vndkprivate.libraries.txt",
- Args: map[string]string{
- "content": android.JoinWithSuffix(*vndkPrivateLibraries, ".so", "\\n"),
- },
- })
- outputs.configs = append(outputs.configs, vndkPrivateTxt)
+ for _, path := range android.PathsToDirectorySortedPaths(android.FirstUniquePaths(generatedHeaders)) {
+ header := path.String()
+
+ if !isHeader(header) {
+ continue
+ }
+
+ installSnapshotFileFromPath(path, filepath.Join(includeDir, header))
+ }
+ }
+
+ installSnapshotFileFromContent(android.JoinWithSuffix(*vndkCoreLibraries, ".so", "\\n"),
+ filepath.Join(configsDir, "vndkcore.libraries.txt"))
+ installSnapshotFileFromContent(android.JoinWithSuffix(*vndkPrivateLibraries, ".so", "\\n"),
+ filepath.Join(configsDir, "vndkprivate.libraries.txt"))
var modulePathTxtBuilder strings.Builder
modulePaths := modulePaths(ctx.Config())
- var libs []string
- for lib := range modulePaths {
- libs = append(libs, lib)
- }
- sort.Strings(libs)
first := true
- for _, lib := range libs {
+ for _, lib := range android.SortedStringKeys(modulePaths) {
if first {
first = false
} else {
@@ -549,13 +610,6 @@
modulePathTxtBuilder.WriteString(modulePaths[lib])
}
- ctx.Build(pctx, android.BuildParams{
- Rule: android.WriteFile,
- Output: modulePathTxt,
- Description: "vndk snapshot module_paths.txt",
- Args: map[string]string{
- "content": modulePathTxtBuilder.String(),
- },
- })
- outputs.configs = append(outputs.configs, modulePathTxt)
+ installSnapshotFileFromContent(modulePathTxtBuilder.String(),
+ filepath.Join(configsDir, "module_paths.txt"))
}
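As an illustration of the per-library flags file that `installVndkSnapshotLib` now emits next to each `.so` when `VndkSnapshotBuildArtifacts` is enabled, here is a standalone sketch that marshals the same struct shape (the directory and flag values are made up):
```go
package main

import (
	"encoding/json"
	"fmt"
)

// prop has the same shape as the anonymous struct marshalled in
// installVndkSnapshotLib above.
type prop struct {
	ExportedDirs        []string `json:",omitempty"`
	ExportedSystemDirs  []string `json:",omitempty"`
	ExportedFlags       []string `json:",omitempty"`
	RelativeInstallPath string   `json:",omitempty"`
}

func main() {
	// Hypothetical values; in the build they come from the library's
	// exportedDirs(), exportedSystemDirs(), exportedFlags() and
	// RelativeInstallPath().
	p := prop{
		ExportedDirs:  []string{"frameworks/native/libs/math/include"},
		ExportedFlags: []string{"-DFOO"},
	}
	j, err := json.Marshal(p)
	if err != nil {
		panic(err)
	}
	// The WriteFile rule stores this next to the library, e.g. libfoo.so.json.
	fmt.Println(string(j))
	// {"ExportedDirs":["frameworks/native/libs/math/include"],"ExportedFlags":["-DFOO"]}
}
```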
diff --git a/cc/vndk_prebuilt.go b/cc/vndk_prebuilt.go
index 0ecf566..c8ff87f 100644
--- a/cc/vndk_prebuilt.go
+++ b/cc/vndk_prebuilt.go
@@ -61,6 +61,13 @@
// Prebuilt files for each arch.
Srcs []string `android:"arch_variant"`
+ // list of directories relative to the Blueprints file that will be added to the include
+ // path (using -isystem) for any module that links against this module.
+ Export_system_include_dirs []string `android:"arch_variant"`
+
+ // list of flags that will be used for any module that links against this module.
+ Export_flags []string `android:"arch_variant"`
+
// Check the prebuilt ELF files (e.g. DT_SONAME, DT_NEEDED, resolution of undefined symbols,
// etc).
Check_elf_files *bool
@@ -123,6 +130,9 @@
func (p *vndkPrebuiltLibraryDecorator) link(ctx ModuleContext,
flags Flags, deps PathDeps, objs Objects) android.Path {
if len(p.properties.Srcs) > 0 && p.shared() {
+ p.libraryDecorator.exportIncludes(ctx)
+ p.libraryDecorator.reexportSystemDirs(p.properties.Export_system_include_dirs...)
+ p.libraryDecorator.reexportFlags(p.properties.Export_flags...)
// current VNDK prebuilts are only shared libs.
return p.singleSourcePath(ctx)
}
diff --git a/cmd/merge_zips/Android.bp b/cmd/merge_zips/Android.bp
index ab658fd..f70c86e 100644
--- a/cmd/merge_zips/Android.bp
+++ b/cmd/merge_zips/Android.bp
@@ -18,6 +18,7 @@
"android-archive-zip",
"blueprint-pathtools",
"soong-jar",
+ "soong-zip",
],
srcs: [
"merge_zips.go",
diff --git a/cmd/merge_zips/merge_zips.go b/cmd/merge_zips/merge_zips.go
index 68fe259..27179cb 100644
--- a/cmd/merge_zips/merge_zips.go
+++ b/cmd/merge_zips/merge_zips.go
@@ -30,8 +30,566 @@
"android/soong/jar"
"android/soong/third_party/zip"
+ soongZip "android/soong/zip"
)
+// Input zip: we can open it, close it, and obtain an array of entries
+type InputZip interface {
+ Name() string
+ Open() error
+ Close() error
+ Entries() []*zip.File
+ IsOpen() bool
+}
+
+// An entry that can be written to the output zip
+type ZipEntryContents interface {
+ String() string
+ IsDir() bool
+ CRC32() uint32
+ Size() uint64
+ WriteToZip(dest string, zw *zip.Writer) error
+}
+
+// a ZipEntryFromZip is a ZipEntryContents that pulls its content from another zip
+// identified by the input zip and the index of the entry in its entries array
+type ZipEntryFromZip struct {
+ inputZip InputZip
+ index int
+ name string
+ isDir bool
+ crc32 uint32
+ size uint64
+}
+
+func NewZipEntryFromZip(inputZip InputZip, entryIndex int) *ZipEntryFromZip {
+ fi := inputZip.Entries()[entryIndex]
+ newEntry := ZipEntryFromZip{inputZip: inputZip,
+ index: entryIndex,
+ name: fi.Name,
+ isDir: fi.FileInfo().IsDir(),
+ crc32: fi.CRC32,
+ size: fi.UncompressedSize64,
+ }
+ return &newEntry
+}
+
+func (ze ZipEntryFromZip) String() string {
+ return fmt.Sprintf("%s!%s", ze.inputZip.Name(), ze.name)
+}
+
+func (ze ZipEntryFromZip) IsDir() bool {
+ return ze.isDir
+}
+
+func (ze ZipEntryFromZip) CRC32() uint32 {
+ return ze.crc32
+}
+
+func (ze ZipEntryFromZip) Size() uint64 {
+ return ze.size
+}
+
+func (ze ZipEntryFromZip) WriteToZip(dest string, zw *zip.Writer) error {
+ if err := ze.inputZip.Open(); err != nil {
+ return err
+ }
+ return zw.CopyFrom(ze.inputZip.Entries()[ze.index], dest)
+}
+
+// a ZipEntryFromBuffer is a ZipEntryContents that pulls its content from a []byte
+type ZipEntryFromBuffer struct {
+ fh *zip.FileHeader
+ content []byte
+}
+
+func (be ZipEntryFromBuffer) String() string {
+ return "internal buffer"
+}
+
+func (be ZipEntryFromBuffer) IsDir() bool {
+ return be.fh.FileInfo().IsDir()
+}
+
+func (be ZipEntryFromBuffer) CRC32() uint32 {
+ return crc32.ChecksumIEEE(be.content)
+}
+
+func (be ZipEntryFromBuffer) Size() uint64 {
+ return uint64(len(be.content))
+}
+
+func (be ZipEntryFromBuffer) WriteToZip(dest string, zw *zip.Writer) error {
+ w, err := zw.CreateHeader(be.fh)
+ if err != nil {
+ return err
+ }
+
+ if !be.IsDir() {
+ _, err = w.Write(be.content)
+ if err != nil {
+ return err
+ }
+ }
+
+ return nil
+}
+
+// Processing state.
+type OutputZip struct {
+ outputWriter *zip.Writer
+ stripDirEntries bool
+ emulateJar bool
+ sortEntries bool
+ ignoreDuplicates bool
+ excludeDirs []string
+ excludeFiles []string
+ sourceByDest map[string]ZipEntryContents
+}
+
+func NewOutputZip(outputWriter *zip.Writer, sortEntries, emulateJar, stripDirEntries, ignoreDuplicates bool) *OutputZip {
+ return &OutputZip{
+ outputWriter: outputWriter,
+ stripDirEntries: stripDirEntries,
+ emulateJar: emulateJar,
+ sortEntries: sortEntries,
+ sourceByDest: make(map[string]ZipEntryContents, 0),
+ ignoreDuplicates: ignoreDuplicates,
+ }
+}
+
+func (oz *OutputZip) setExcludeDirs(excludeDirs []string) {
+ oz.excludeDirs = make([]string, len(excludeDirs))
+ for i, dir := range excludeDirs {
+ oz.excludeDirs[i] = filepath.Clean(dir)
+ }
+}
+
+func (oz *OutputZip) setExcludeFiles(excludeFiles []string) {
+ oz.excludeFiles = excludeFiles
+}
+
+// Adds an entry with the given name whose source is the given ZipEntryContents. Returns the old
+// ZipEntryContents if an entry with that name already exists.
+func (oz *OutputZip) addZipEntry(name string, source ZipEntryContents) (ZipEntryContents, error) {
+ if existingSource, exists := oz.sourceByDest[name]; exists {
+ return existingSource, nil
+ }
+ oz.sourceByDest[name] = source
+ // Delay writing an entry if entries need to be rearranged.
+ if oz.emulateJar || oz.sortEntries {
+ return nil, nil
+ }
+ return nil, source.WriteToZip(name, oz.outputWriter)
+}
+
+// Adds an entry for the manifest (META-INF/MANIFEST.MF) read from the given file
+func (oz *OutputZip) addManifest(manifestPath string) error {
+ if !oz.stripDirEntries {
+ if _, err := oz.addZipEntry(jar.MetaDir, ZipEntryFromBuffer{jar.MetaDirFileHeader(), nil}); err != nil {
+ return err
+ }
+ }
+ contents, err := ioutil.ReadFile(manifestPath)
+ if err == nil {
+ fh, buf, err := jar.ManifestFileContents(contents)
+ if err == nil {
+ _, err = oz.addZipEntry(jar.ManifestFile, ZipEntryFromBuffer{fh, buf})
+ }
+ }
+ return err
+}
+
+// Adds an entry with the given name and contents read from the given file
+func (oz *OutputZip) addZipEntryFromFile(name string, path string) error {
+ buf, err := ioutil.ReadFile(path)
+ if err == nil {
+ fh := &zip.FileHeader{
+ Name: name,
+ Method: zip.Store,
+ UncompressedSize64: uint64(len(buf)),
+ }
+ fh.SetMode(0700)
+ fh.SetModTime(jar.DefaultTime)
+ _, err = oz.addZipEntry(name, ZipEntryFromBuffer{fh, buf})
+ }
+ return err
+}
+
+func (oz *OutputZip) addEmptyEntry(entry string) error {
+ var emptyBuf []byte
+ fh := &zip.FileHeader{
+ Name: entry,
+ Method: zip.Store,
+ UncompressedSize64: uint64(len(emptyBuf)),
+ }
+ fh.SetMode(0700)
+ fh.SetModTime(jar.DefaultTime)
+ _, err := oz.addZipEntry(entry, ZipEntryFromBuffer{fh, emptyBuf})
+ return err
+}
+
+// Returns true if given entry is to be excluded
+func (oz *OutputZip) isEntryExcluded(name string) bool {
+ for _, dir := range oz.excludeDirs {
+ dir = filepath.Clean(dir)
+ patterns := []string{
+ dir + "/", // the directory itself
+ dir + "/**/*", // files recursively in the directory
+ dir + "/**/*/", // directories recursively in the directory
+ }
+
+ for _, pattern := range patterns {
+ match, err := pathtools.Match(pattern, name)
+ if err != nil {
+ panic(fmt.Errorf("%s: %s", err.Error(), pattern))
+ }
+			if match {
+				if oz.emulateJar {
+					// When merging jar files, don't strip META-INF/MANIFEST.MF even if stripping META-INF is
+					// requested.
+					// TODO(ccross): which files does this affect?
+					if name == jar.MetaDir || name == jar.ManifestFile {
+						continue
+					}
+				}
+				return true
+			}
+ }
+ }
+
+ for _, pattern := range oz.excludeFiles {
+ match, err := pathtools.Match(pattern, name)
+ if err != nil {
+ panic(fmt.Errorf("%s: %s", err.Error(), pattern))
+ }
+ if match {
+ return true
+ }
+ }
+ return false
+}
+
+// Creates a zip entry whose content comes from an entry in the given input zip.
+func (oz *OutputZip) copyEntry(inputZip InputZip, index int) error {
+ entry := NewZipEntryFromZip(inputZip, index)
+ if oz.stripDirEntries && entry.IsDir() {
+ return nil
+ }
+ existingEntry, err := oz.addZipEntry(entry.name, entry)
+ if err != nil {
+ return err
+ }
+ if existingEntry == nil {
+ return nil
+ }
+
+ // File types should match
+ if existingEntry.IsDir() != entry.IsDir() {
+ return fmt.Errorf("Directory/file mismatch at %v from %v and %v\n",
+ entry.name, existingEntry, entry)
+ }
+
+ if oz.ignoreDuplicates ||
+ // Skip manifest and module info files that are not from the first input file
+ (oz.emulateJar && entry.name == jar.ManifestFile || entry.name == jar.ModuleInfoClass) ||
+ // Identical entries
+ (existingEntry.CRC32() == entry.CRC32() && existingEntry.Size() == entry.Size()) ||
+ // Directory entries
+ entry.IsDir() {
+ return nil
+ }
+
+ return fmt.Errorf("Duplicate path %v found in %v and %v\n", entry.name, existingEntry, inputZip.Name())
+}
+
+func (oz *OutputZip) entriesArray() []string {
+ entries := make([]string, len(oz.sourceByDest))
+ i := 0
+ for entry := range oz.sourceByDest {
+ entries[i] = entry
+ i++
+ }
+ return entries
+}
+
+func (oz *OutputZip) jarSorted() []string {
+ entries := oz.entriesArray()
+ sort.SliceStable(entries, func(i, j int) bool { return jar.EntryNamesLess(entries[i], entries[j]) })
+ return entries
+}
+
+func (oz *OutputZip) alphanumericSorted() []string {
+ entries := oz.entriesArray()
+ sort.Strings(entries)
+ return entries
+}
+
+func (oz *OutputZip) writeEntries(entries []string) error {
+ for _, entry := range entries {
+ source, _ := oz.sourceByDest[entry]
+ if err := source.WriteToZip(entry, oz.outputWriter); err != nil {
+ return err
+ }
+ }
+ return nil
+}
+
+func (oz *OutputZip) getUninitializedPythonPackages(inputZips []InputZip) ([]string, error) {
+	// The runfiles packages need to be populated with "__init__.py".
+	// The runfiles dirs are treated as packages.
+ allPackages := make(map[string]bool)
+ initedPackages := make(map[string]bool)
+ getPackage := func(path string) string {
+ ret := filepath.Dir(path)
+ // filepath.Dir("abc") -> "." and filepath.Dir("/abc") -> "/".
+ if ret == "." || ret == "/" {
+ return ""
+ }
+ return ret
+ }
+
+	// Put existing __init__.py files into a set first. This set is used to prevent
+ // generated __init__.py files from overwriting existing ones.
+ for _, inputZip := range inputZips {
+ if err := inputZip.Open(); err != nil {
+ return nil, err
+ }
+ for _, file := range inputZip.Entries() {
+ pyPkg := getPackage(file.Name)
+ if filepath.Base(file.Name) == "__init__.py" {
+ if _, found := initedPackages[pyPkg]; found {
+ panic(fmt.Errorf("found __init__.py path duplicates during pars merging: %q", file.Name))
+ }
+ initedPackages[pyPkg] = true
+ }
+ for pyPkg != "" {
+ if _, found := allPackages[pyPkg]; found {
+ break
+ }
+ allPackages[pyPkg] = true
+ pyPkg = getPackage(pyPkg)
+ }
+ }
+ }
+ noInitPackages := make([]string, 0)
+ for pyPkg := range allPackages {
+ if _, found := initedPackages[pyPkg]; !found {
+ noInitPackages = append(noInitPackages, pyPkg)
+ }
+ }
+ return noInitPackages, nil
+}
+
+// A ManagedInputZip is an InputZip owned by the InputZipsManager. Open ManagedInputZips are chained in the order they were opened.
+type ManagedInputZip struct {
+ owner *InputZipsManager
+ realInputZip InputZip
+ older *ManagedInputZip
+ newer *ManagedInputZip
+}
+
+// InputZipsManager maintains the array of ManagedInputZips and keeps track of the open ones. When an InputZip
+// is opened, it may close some other InputZip to keep the number of concurrently open archives under a limit.
+type InputZipsManager struct {
+ inputZips []*ManagedInputZip
+ nOpenZips int
+ maxOpenZips int
+ openInputZips *ManagedInputZip
+}
+
+func (miz *ManagedInputZip) unlink() {
+ olderMiz := miz.older
+ newerMiz := miz.newer
+ if newerMiz.older != miz || olderMiz.newer != miz {
+ panic(fmt.Errorf("removing %p:%#v: broken list between %p:%#v and %p:%#v",
+ miz, miz, newerMiz, newerMiz, olderMiz, olderMiz))
+ }
+ olderMiz.newer = newerMiz
+ newerMiz.older = olderMiz
+ miz.newer = nil
+ miz.older = nil
+}
+
+func (miz *ManagedInputZip) link(olderMiz *ManagedInputZip) {
+ if olderMiz.newer != nil || olderMiz.older != nil {
+ panic(fmt.Errorf("inputZip is already open"))
+ }
+ oldOlderMiz := miz.older
+ if oldOlderMiz.newer != miz {
+		panic(fmt.Errorf("broken list between %p:%#v and %p:%#v", miz, miz, oldOlderMiz, oldOlderMiz))
+ }
+ miz.older = olderMiz
+ olderMiz.older = oldOlderMiz
+ oldOlderMiz.newer = olderMiz
+ olderMiz.newer = miz
+}
+
+func NewInputZipsManager(nInputZips, maxOpenZips int) *InputZipsManager {
+ if maxOpenZips < 3 {
+		panic(fmt.Errorf("open zips limit should be at least 3"))
+ }
+ // In the dummy element .older points to the most recently opened InputZip, and .newer points to the oldest.
+ head := new(ManagedInputZip)
+ head.older = head
+ head.newer = head
+ return &InputZipsManager{
+ inputZips: make([]*ManagedInputZip, 0, nInputZips),
+ maxOpenZips: maxOpenZips,
+ openInputZips: head,
+ }
+}
+
+// InputZip factory
+func (izm *InputZipsManager) Manage(inz InputZip) InputZip {
+ iz := &ManagedInputZip{owner: izm, realInputZip: inz}
+ izm.inputZips = append(izm.inputZips, iz)
+ return iz
+}
+
+// Opens or reopens ManagedInputZip.
+func (izm *InputZipsManager) reopen(miz *ManagedInputZip) error {
+ if miz.realInputZip.IsOpen() {
+ if miz != izm.openInputZips {
+ miz.unlink()
+ izm.openInputZips.link(miz)
+ }
+ return nil
+ }
+ if izm.nOpenZips >= izm.maxOpenZips {
+ if err := izm.close(izm.openInputZips.older); err != nil {
+ return err
+ }
+ }
+ if err := miz.realInputZip.Open(); err != nil {
+ return err
+ }
+ izm.openInputZips.link(miz)
+ izm.nOpenZips++
+ return nil
+}
+
+func (izm *InputZipsManager) close(miz *ManagedInputZip) error {
+ if miz.IsOpen() {
+ err := miz.realInputZip.Close()
+ izm.nOpenZips--
+ miz.unlink()
+ return err
+ }
+ return nil
+}
+
+// Checks that openInputZips deque is valid
+func (izm *InputZipsManager) checkOpenZipsDeque() {
+ nReallyOpen := 0
+ el := izm.openInputZips
+ for {
+ elNext := el.older
+ if elNext.newer != el {
+ panic(fmt.Errorf("Element:\n %p: %v\nNext:\n %p %v", el, el, elNext, elNext))
+ }
+ if elNext == izm.openInputZips {
+ break
+ }
+ el = elNext
+ if !el.IsOpen() {
+ panic(fmt.Errorf("Found unopened element"))
+ }
+ nReallyOpen++
+ if nReallyOpen > izm.nOpenZips {
+ panic(fmt.Errorf("found %d open zips, should be %d", nReallyOpen, izm.nOpenZips))
+ }
+ }
+ if nReallyOpen > izm.nOpenZips {
+ panic(fmt.Errorf("found %d open zips, should be %d", nReallyOpen, izm.nOpenZips))
+ }
+}
+
+func (miz *ManagedInputZip) Name() string {
+ return miz.realInputZip.Name()
+}
+
+func (miz *ManagedInputZip) Open() error {
+ return miz.owner.reopen(miz)
+}
+
+func (miz *ManagedInputZip) Close() error {
+ return miz.owner.close(miz)
+}
+
+func (miz *ManagedInputZip) IsOpen() bool {
+ return miz.realInputZip.IsOpen()
+}
+
+func (miz *ManagedInputZip) Entries() []*zip.File {
+ if !miz.IsOpen() {
+ panic(fmt.Errorf("%s: is not open", miz.Name()))
+ }
+ return miz.realInputZip.Entries()
+}
+
+// Actual processing.
+func mergeZips(inputZips []InputZip, writer *zip.Writer, manifest, pyMain string,
+ sortEntries, emulateJar, emulatePar, stripDirEntries, ignoreDuplicates bool,
+ excludeFiles, excludeDirs []string, zipsToNotStrip map[string]bool) error {
+
+ out := NewOutputZip(writer, sortEntries, emulateJar, stripDirEntries, ignoreDuplicates)
+ out.setExcludeFiles(excludeFiles)
+ out.setExcludeDirs(excludeDirs)
+ if manifest != "" {
+ if err := out.addManifest(manifest); err != nil {
+ return err
+ }
+ }
+ if pyMain != "" {
+ if err := out.addZipEntryFromFile("__main__.py", pyMain); err != nil {
+ return err
+ }
+ }
+
+ if emulatePar {
+ noInitPackages, err := out.getUninitializedPythonPackages(inputZips)
+ if err != nil {
+ return err
+ }
+ for _, uninitializedPyPackage := range noInitPackages {
+ if err = out.addEmptyEntry(filepath.Join(uninitializedPyPackage, "__init__.py")); err != nil {
+ return err
+ }
+ }
+ }
+
+ // Finally, add entries from all the input zips.
+ for _, inputZip := range inputZips {
+ _, copyFully := zipsToNotStrip[inputZip.Name()]
+ if err := inputZip.Open(); err != nil {
+ return err
+ }
+
+ for i, entry := range inputZip.Entries() {
+ if copyFully || !out.isEntryExcluded(entry.Name) {
+ if err := out.copyEntry(inputZip, i); err != nil {
+ return err
+ }
+ }
+ }
+ // Unless we need to rearrange the entries, the input zip can now be closed.
+ if !(emulateJar || sortEntries) {
+ if err := inputZip.Close(); err != nil {
+ return err
+ }
+ }
+ }
+
+ if emulateJar {
+ return out.writeEntries(out.jarSorted())
+ } else if sortEntries {
+ return out.writeEntries(out.alphanumericSorted())
+ }
+ return nil
+}
+
+// Process command line
type fileList []string
func (f *fileList) String() string {
@@ -50,9 +608,8 @@
return `""`
}
-func (s zipsToNotStripSet) Set(zip_path string) error {
- s[zip_path] = true
-
+func (s zipsToNotStripSet) Set(path string) error {
+ s[path] = true
return nil
}
@@ -60,8 +617,8 @@
sortEntries = flag.Bool("s", false, "sort entries (defaults to the order from the input zip files)")
emulateJar = flag.Bool("j", false, "sort zip entries using jar ordering (META-INF first)")
emulatePar = flag.Bool("p", false, "merge zip entries based on par format")
- stripDirs fileList
- stripFiles fileList
+ excludeDirs fileList
+ excludeFiles fileList
zipsToNotStrip = make(zipsToNotStripSet)
stripDirEntries = flag.Bool("D", false, "strip directory entries from the output zip file")
manifest = flag.String("m", "", "manifest file to insert in jar")
@@ -71,14 +628,52 @@
)
func init() {
- flag.Var(&stripDirs, "stripDir", "directories to be excluded from the output zip, accepts wildcards")
- flag.Var(&stripFiles, "stripFile", "files to be excluded from the output zip, accepts wildcards")
+ flag.Var(&excludeDirs, "stripDir", "directories to be excluded from the output zip, accepts wildcards")
+ flag.Var(&excludeFiles, "stripFile", "files to be excluded from the output zip, accepts wildcards")
flag.Var(&zipsToNotStrip, "zipToNotStrip", "the input zip file which is not applicable for stripping")
}
+type FileInputZip struct {
+ name string
+ reader *zip.ReadCloser
+}
+
+func (fiz *FileInputZip) Name() string {
+ return fiz.name
+}
+
+func (fiz *FileInputZip) Close() error {
+ if fiz.IsOpen() {
+ reader := fiz.reader
+ fiz.reader = nil
+ return reader.Close()
+ }
+ return nil
+}
+
+func (fiz *FileInputZip) Entries() []*zip.File {
+ if !fiz.IsOpen() {
+ panic(fmt.Errorf("%s: is not open", fiz.Name()))
+ }
+ return fiz.reader.File
+}
+
+func (fiz *FileInputZip) IsOpen() bool {
+ return fiz.reader != nil
+}
+
+func (fiz *FileInputZip) Open() error {
+ if fiz.IsOpen() {
+ return nil
+ }
+ var err error
+ fiz.reader, err = zip.OpenReader(fiz.Name())
+ return err
+}
+
func main() {
flag.Usage = func() {
- fmt.Fprintln(os.Stderr, "usage: merge_zips [-jpsD] [-m manifest] [--prefix script] [-pm __main__.py] output [inputs...]")
+ fmt.Fprintln(os.Stderr, "usage: merge_zips [-jpsD] [-m manifest] [--prefix script] [-pm __main__.py] OutputZip [inputs...]")
flag.PrintDefaults()
}
@@ -90,16 +685,28 @@
os.Exit(1)
}
outputPath := args[0]
- inputs := args[1:]
+ inputs := make([]string, 0)
+ for _, input := range args[1:] {
+ if input[0] == '@' {
+ bytes, err := ioutil.ReadFile(input[1:])
+ if err != nil {
+ log.Fatal(err)
+ }
+ inputs = append(inputs, soongZip.ReadRespFile(bytes)...)
+ continue
+ }
+ inputs = append(inputs, input)
+ continue
+ }
log.SetFlags(log.Lshortfile)
// make writer
- output, err := os.Create(outputPath)
+ outputZip, err := os.Create(outputPath)
if err != nil {
log.Fatal(err)
}
- defer output.Close()
+ defer outputZip.Close()
var offset int64
if *prefix != "" {
@@ -107,13 +714,13 @@
if err != nil {
log.Fatal(err)
}
- offset, err = io.Copy(output, prefixFile)
+ offset, err = io.Copy(outputZip, prefixFile)
if err != nil {
log.Fatal(err)
}
}
- writer := zip.NewWriter(output)
+ writer := zip.NewWriter(outputZip)
defer func() {
err := writer.Close()
if err != nil {
@@ -122,18 +729,6 @@
}()
writer.SetOffset(offset)
- // make readers
- readers := []namedZipReader{}
- for _, input := range inputs {
- reader, err := zip.OpenReader(input)
- if err != nil {
- log.Fatal(err)
- }
- defer reader.Close()
- namedReader := namedZipReader{path: input, reader: &reader.Reader}
- readers = append(readers, namedReader)
- }
-
if *manifest != "" && !*emulateJar {
log.Fatal(errors.New("must specify -j when specifying a manifest via -m"))
}
@@ -143,344 +738,15 @@
}
// do merge
- err = mergeZips(readers, writer, *manifest, *pyMain, *sortEntries, *emulateJar, *emulatePar,
- *stripDirEntries, *ignoreDuplicates, []string(stripFiles), []string(stripDirs), map[string]bool(zipsToNotStrip))
+ inputZipsManager := NewInputZipsManager(len(inputs), 1000)
+ inputZips := make([]InputZip, len(inputs))
+ for i, input := range inputs {
+ inputZips[i] = inputZipsManager.Manage(&FileInputZip{name: input})
+ }
+ err = mergeZips(inputZips, writer, *manifest, *pyMain, *sortEntries, *emulateJar, *emulatePar,
+ *stripDirEntries, *ignoreDuplicates, []string(excludeFiles), []string(excludeDirs),
+ map[string]bool(zipsToNotStrip))
if err != nil {
log.Fatal(err)
}
}
-
-// a namedZipReader reads a .zip file and can say which file it's reading
-type namedZipReader struct {
- path string
- reader *zip.Reader
-}
-
-// a zipEntryPath refers to a file contained in a zip
-type zipEntryPath struct {
- zipName string
- entryName string
-}
-
-func (p zipEntryPath) String() string {
- return p.zipName + "/" + p.entryName
-}
-
-// a zipEntry is a zipSource that pulls its content from another zip
-type zipEntry struct {
- path zipEntryPath
- content *zip.File
-}
-
-func (ze zipEntry) String() string {
- return ze.path.String()
-}
-
-func (ze zipEntry) IsDir() bool {
- return ze.content.FileInfo().IsDir()
-}
-
-func (ze zipEntry) CRC32() uint32 {
- return ze.content.FileHeader.CRC32
-}
-
-func (ze zipEntry) Size() uint64 {
- return ze.content.FileHeader.UncompressedSize64
-}
-
-func (ze zipEntry) WriteToZip(dest string, zw *zip.Writer) error {
- return zw.CopyFrom(ze.content, dest)
-}
-
-// a bufferEntry is a zipSource that pulls its content from a []byte
-type bufferEntry struct {
- fh *zip.FileHeader
- content []byte
-}
-
-func (be bufferEntry) String() string {
- return "internal buffer"
-}
-
-func (be bufferEntry) IsDir() bool {
- return be.fh.FileInfo().IsDir()
-}
-
-func (be bufferEntry) CRC32() uint32 {
- return crc32.ChecksumIEEE(be.content)
-}
-
-func (be bufferEntry) Size() uint64 {
- return uint64(len(be.content))
-}
-
-func (be bufferEntry) WriteToZip(dest string, zw *zip.Writer) error {
- w, err := zw.CreateHeader(be.fh)
- if err != nil {
- return err
- }
-
- if !be.IsDir() {
- _, err = w.Write(be.content)
- if err != nil {
- return err
- }
- }
-
- return nil
-}
-
-type zipSource interface {
- String() string
- IsDir() bool
- CRC32() uint32
- Size() uint64
- WriteToZip(dest string, zw *zip.Writer) error
-}
-
-// a fileMapping specifies to copy a zip entry from one place to another
-type fileMapping struct {
- dest string
- source zipSource
-}
-
-func mergeZips(readers []namedZipReader, writer *zip.Writer, manifest, pyMain string,
- sortEntries, emulateJar, emulatePar, stripDirEntries, ignoreDuplicates bool,
- stripFiles, stripDirs []string, zipsToNotStrip map[string]bool) error {
-
- sourceByDest := make(map[string]zipSource, 0)
- orderedMappings := []fileMapping{}
-
- // if dest already exists returns a non-null zipSource for the existing source
- addMapping := func(dest string, source zipSource) zipSource {
- mapKey := filepath.Clean(dest)
- if existingSource, exists := sourceByDest[mapKey]; exists {
- return existingSource
- }
-
- sourceByDest[mapKey] = source
- orderedMappings = append(orderedMappings, fileMapping{source: source, dest: dest})
- return nil
- }
-
- if manifest != "" {
- if !stripDirEntries {
- dirHeader := jar.MetaDirFileHeader()
- dirSource := bufferEntry{dirHeader, nil}
- addMapping(jar.MetaDir, dirSource)
- }
-
- contents, err := ioutil.ReadFile(manifest)
- if err != nil {
- return err
- }
-
- fh, buf, err := jar.ManifestFileContents(contents)
- if err != nil {
- return err
- }
-
- fileSource := bufferEntry{fh, buf}
- addMapping(jar.ManifestFile, fileSource)
- }
-
- if pyMain != "" {
- buf, err := ioutil.ReadFile(pyMain)
- if err != nil {
- return err
- }
- fh := &zip.FileHeader{
- Name: "__main__.py",
- Method: zip.Store,
- UncompressedSize64: uint64(len(buf)),
- }
- fh.SetMode(0700)
- fh.SetModTime(jar.DefaultTime)
- fileSource := bufferEntry{fh, buf}
- addMapping("__main__.py", fileSource)
- }
-
- if emulatePar {
- // the runfiles packages needs to be populated with "__init__.py".
- newPyPkgs := []string{}
- // the runfiles dirs have been treated as packages.
- existingPyPkgSet := make(map[string]bool)
- // put existing __init__.py files to a set first. This set is used for preventing
- // generated __init__.py files from overwriting existing ones.
- for _, namedReader := range readers {
- for _, file := range namedReader.reader.File {
- if filepath.Base(file.Name) != "__init__.py" {
- continue
- }
- pyPkg := pathBeforeLastSlash(file.Name)
- if _, found := existingPyPkgSet[pyPkg]; found {
- panic(fmt.Errorf("found __init__.py path duplicates during pars merging: %q.", file.Name))
- } else {
- existingPyPkgSet[pyPkg] = true
- }
- }
- }
- for _, namedReader := range readers {
- for _, file := range namedReader.reader.File {
- var parentPath string /* the path after trimming last "/" */
- if filepath.Base(file.Name) == "__init__.py" {
- // for existing __init__.py files, we should trim last "/" for twice.
- // eg. a/b/c/__init__.py ---> a/b
- parentPath = pathBeforeLastSlash(pathBeforeLastSlash(file.Name))
- } else {
- parentPath = pathBeforeLastSlash(file.Name)
- }
- populateNewPyPkgs(parentPath, existingPyPkgSet, &newPyPkgs)
- }
- }
- for _, pkg := range newPyPkgs {
- var emptyBuf []byte
- fh := &zip.FileHeader{
- Name: filepath.Join(pkg, "__init__.py"),
- Method: zip.Store,
- UncompressedSize64: uint64(len(emptyBuf)),
- }
- fh.SetMode(0700)
- fh.SetModTime(jar.DefaultTime)
- fileSource := bufferEntry{fh, emptyBuf}
- addMapping(filepath.Join(pkg, "__init__.py"), fileSource)
- }
- }
- for _, namedReader := range readers {
- _, skipStripThisZip := zipsToNotStrip[namedReader.path]
- for _, file := range namedReader.reader.File {
- if !skipStripThisZip {
- if skip, err := shouldStripEntry(emulateJar, stripFiles, stripDirs, file.Name); err != nil {
- return err
- } else if skip {
- continue
- }
- }
-
- if stripDirEntries && file.FileInfo().IsDir() {
- continue
- }
-
- // check for other files or directories destined for the same path
- dest := file.Name
-
- // make a new entry to add
- source := zipEntry{path: zipEntryPath{zipName: namedReader.path, entryName: file.Name}, content: file}
-
- if existingSource := addMapping(dest, source); existingSource != nil {
- // handle duplicates
- if existingSource.IsDir() != source.IsDir() {
- return fmt.Errorf("Directory/file mismatch at %v from %v and %v\n",
- dest, existingSource, source)
- }
-
- if ignoreDuplicates {
- continue
- }
-
- if emulateJar &&
- file.Name == jar.ManifestFile || file.Name == jar.ModuleInfoClass {
- // Skip manifest and module info files that are not from the first input file
- continue
- }
-
- if source.IsDir() {
- continue
- }
-
- if existingSource.CRC32() == source.CRC32() && existingSource.Size() == source.Size() {
- continue
- }
-
- return fmt.Errorf("Duplicate path %v found in %v and %v\n",
- dest, existingSource, source)
- }
- }
- }
-
- if emulateJar {
- jarSort(orderedMappings)
- } else if sortEntries {
- alphanumericSort(orderedMappings)
- }
-
- for _, entry := range orderedMappings {
- if err := entry.source.WriteToZip(entry.dest, writer); err != nil {
- return err
- }
- }
-
- return nil
-}
-
-// Sets the given directory and all its ancestor directories as Python packages.
-func populateNewPyPkgs(pkgPath string, existingPyPkgSet map[string]bool, newPyPkgs *[]string) {
- for pkgPath != "" {
- if _, found := existingPyPkgSet[pkgPath]; !found {
- existingPyPkgSet[pkgPath] = true
- *newPyPkgs = append(*newPyPkgs, pkgPath)
- // Gets its ancestor directory by trimming last slash.
- pkgPath = pathBeforeLastSlash(pkgPath)
- } else {
- break
- }
- }
-}
-
-func pathBeforeLastSlash(path string) string {
- ret := filepath.Dir(path)
- // filepath.Dir("abc") -> "." and filepath.Dir("/abc") -> "/".
- if ret == "." || ret == "/" {
- return ""
- }
- return ret
-}
-
-func shouldStripEntry(emulateJar bool, stripFiles, stripDirs []string, name string) (bool, error) {
- for _, dir := range stripDirs {
- dir = filepath.Clean(dir)
- patterns := []string{
- dir + "/", // the directory itself
- dir + "/**/*", // files recursively in the directory
- dir + "/**/*/", // directories recursively in the directory
- }
-
- for _, pattern := range patterns {
- match, err := pathtools.Match(pattern, name)
- if err != nil {
- return false, fmt.Errorf("%s: %s", err.Error(), pattern)
- } else if match {
- if emulateJar {
- // When merging jar files, don't strip META-INF/MANIFEST.MF even if stripping META-INF is
- // requested.
- // TODO(ccross): which files does this affect?
- if name != jar.MetaDir && name != jar.ManifestFile {
- return true, nil
- }
- }
- return true, nil
- }
- }
- }
-
- for _, pattern := range stripFiles {
- if match, err := pathtools.Match(pattern, name); err != nil {
- return false, fmt.Errorf("%s: %s", err.Error(), pattern)
- } else if match {
- return true, nil
- }
- }
- return false, nil
-}
-
-func jarSort(files []fileMapping) {
- sort.SliceStable(files, func(i, j int) bool {
- return jar.EntryNamesLess(files[i].dest, files[j].dest)
- })
-}
-
-func alphanumericSort(files []fileMapping) {
- sort.SliceStable(files, func(i, j int) bool {
- return files[i].dest < files[j].dest
- })
-}
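
The deleted namedZipReader plumbing above is replaced by an InputZip abstraction plus an InputZipsManager that caps how many archives are open at once (see `NewInputZipsManager(len(inputs), 1000)` in main). The actual definitions live in the added portion of merge_zips.go that this excerpt does not reproduce; the sketch below is a hypothetical reconstruction inferred only from the call sites above and the test doubles in merge_zips_test.go (Name/Open/Close/Entries/IsOpen and the manager's `nOpenZips` counter), not the real implementation.

```go
// Hypothetical sketch only: inferred from the call sites and tests in this
// change, not copied from the real merge_zips.go.
package mergezipsketch

import "archive/zip"

// InputZip abstracts one input archive so mergeZips no longer needs every
// archive open at the same time.
type InputZip interface {
	Name() string
	Open() error
	Close() error
	Entries() []*zip.File
	IsOpen() bool
}

// InputZipsManager keeps at most maxOpenZips inputs open, evicting the
// oldest open one when the cap would be exceeded.
type InputZipsManager struct {
	nOpenZips   int
	maxOpenZips int
	openOrder   []*managedInputZip // currently open, oldest first
}

func NewInputZipsManager(nInputZips, maxOpenZips int) *InputZipsManager {
	return &InputZipsManager{
		maxOpenZips: maxOpenZips,
		openOrder:   make([]*managedInputZip, 0, maxOpenZips),
	}
}

// managedInputZip routes Open/Close through the manager's bookkeeping.
type managedInputZip struct {
	owner   *InputZipsManager
	wrapped InputZip
}

// Manage wraps iz so the manager can enforce the open-archive cap.
func (m *InputZipsManager) Manage(iz InputZip) InputZip {
	return &managedInputZip{owner: m, wrapped: iz}
}

func (miz *managedInputZip) Open() error {
	if miz.wrapped.IsOpen() {
		return nil
	}
	m := miz.owner
	// Evict the oldest open zip if opening another would exceed the cap.
	if m.nOpenZips >= m.maxOpenZips && len(m.openOrder) > 0 {
		oldest := m.openOrder[0]
		m.openOrder = m.openOrder[1:]
		if err := oldest.wrapped.Close(); err != nil {
			return err
		}
		m.nOpenZips--
	}
	if err := miz.wrapped.Open(); err != nil {
		return err
	}
	m.openOrder = append(m.openOrder, miz)
	m.nOpenZips++
	return nil
}

func (miz *managedInputZip) Close() error {
	if !miz.wrapped.IsOpen() {
		return nil // already evicted by the manager
	}
	m := miz.owner
	for i, open := range m.openOrder {
		if open == miz {
			m.openOrder = append(m.openOrder[:i], m.openOrder[i+1:]...)
			break
		}
	}
	m.nOpenZips--
	return miz.wrapped.Close()
}

func (miz *managedInputZip) Name() string         { return miz.wrapped.Name() }
func (miz *managedInputZip) IsOpen() bool         { return miz.wrapped.IsOpen() }
func (miz *managedInputZip) Entries() []*zip.File { return miz.wrapped.Entries() }
```

A FileInputZip (used in main as `&FileInputZip{name: input}`) would implement the same interface on top of an on-disk archive; it is likewise defined in the portion of merge_zips.go not shown here.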
diff --git a/cmd/merge_zips/merge_zips_test.go b/cmd/merge_zips/merge_zips_test.go
index dbde270..cb58436 100644
--- a/cmd/merge_zips/merge_zips_test.go
+++ b/cmd/merge_zips/merge_zips_test.go
@@ -51,6 +51,39 @@
moduleInfoFile = testZipEntry{jar.ModuleInfoClass, 0755, []byte("module-info")}
)
+type testInputZip struct {
+ name string
+ entries []testZipEntry
+ reader *zip.Reader
+}
+
+func (tiz *testInputZip) Name() string {
+ return tiz.name
+}
+
+func (tiz *testInputZip) Open() error {
+ if tiz.reader == nil {
+ tiz.reader = testZipEntriesToZipReader(tiz.entries)
+ }
+ return nil
+}
+
+func (tiz *testInputZip) Close() error {
+ tiz.reader = nil
+ return nil
+}
+
+func (tiz *testInputZip) Entries() []*zip.File {
+ if tiz.reader == nil {
+ panic(fmt.Errorf("%s: should be open to get entries", tiz.Name()))
+ }
+ return tiz.reader.File
+}
+
+func (tiz *testInputZip) IsOpen() bool {
+ return tiz.reader != nil
+}
+
func TestMergeZips(t *testing.T) {
testCases := []struct {
name string
@@ -207,13 +240,9 @@
for _, test := range testCases {
t.Run(test.name, func(t *testing.T) {
- var readers []namedZipReader
+ inputZips := make([]InputZip, len(test.in))
for i, in := range test.in {
- r := testZipEntriesToZipReader(in)
- readers = append(readers, namedZipReader{
- path: "in" + strconv.Itoa(i),
- reader: r,
- })
+ inputZips[i] = &testInputZip{name: "in" + strconv.Itoa(i), entries: in}
}
want := testZipEntriesToBuf(test.out)
@@ -221,7 +250,7 @@
out := &bytes.Buffer{}
writer := zip.NewWriter(out)
- err := mergeZips(readers, writer, "", "",
+ err := mergeZips(inputZips, writer, "", "",
test.sort, test.jar, false, test.stripDirEntries, test.ignoreDuplicates,
test.stripFiles, test.stripDirs, test.zipsToNotStrip)
@@ -304,3 +333,60 @@
return ret
}
+
+type DummyInputZip struct {
+ isOpen bool
+}
+
+func (diz *DummyInputZip) Name() string {
+ return "dummy"
+}
+
+func (diz *DummyInputZip) Open() error {
+ diz.isOpen = true
+ return nil
+}
+
+func (diz *DummyInputZip) Close() error {
+ diz.isOpen = false
+ return nil
+}
+
+func (diz *DummyInputZip) Entries() []*zip.File {
+ panic("implement me")
+}
+
+func (diz *DummyInputZip) IsOpen() bool {
+ return diz.isOpen
+}
+
+func TestInputZipsManager(t *testing.T) {
+ const nInputZips = 20
+ const nMaxOpenZips = 10
+ izm := NewInputZipsManager(nInputZips, nMaxOpenZips)
+ managedZips := make([]InputZip, nInputZips)
+ for i := 0; i < nInputZips; i++ {
+ managedZips[i] = izm.Manage(&DummyInputZip{})
+ }
+
+ t.Run("InputZipsManager", func(t *testing.T) {
+ for i, iz := range managedZips {
+ if err := iz.Open(); err != nil {
+ t.Fatalf("Step %d: open failed: %s", i, err)
+ return
+ }
+ if izm.nOpenZips > nMaxOpenZips {
+ t.Errorf("Step %d: should be <=%d open zips", i, nMaxOpenZips)
+ }
+ }
+ if !managedZips[nInputZips-1].IsOpen() {
+ t.Error("The last input should stay open")
+ }
+ for _, iz := range managedZips {
+ iz.Close()
+ }
+ if izm.nOpenZips > 0 {
+ t.Error("Some input zips are still open")
+ }
+ })
+}
diff --git a/java/java.go b/java/java.go
index fea38b5..3b789f6 100644
--- a/java/java.go
+++ b/java/java.go
@@ -370,6 +370,8 @@
return append(android.Paths{j.outputFile}, j.extraOutputFiles...), nil
case ".jar":
return android.Paths{j.implementationAndResourcesJar}, nil
+ case ".proguard_map":
+ return android.Paths{j.proguardDictionary}, nil
default:
return nil, fmt.Errorf("unsupported module reference tag %q", tag)
}
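
This hunk extends the tag-based output lookup for Java modules, presumably so that other modules and rules can reference the proguard dictionary with a module reference such as `":mymodule{.proguard_map}"`. As context only, here is a minimal, hypothetical sketch of the pattern the switch implements; the `.proguard_map` tag and error message come from the diff, while the module type and field names are invented for illustration.

```go
// Hypothetical fragment illustrating the tag-dispatch pattern; not the real
// java.Module implementation.
package example

import (
	"fmt"

	"android/soong/android"
)

type exampleModule struct {
	outputFile         android.Path // default artifact for an untagged reference
	proguardDictionary android.Path // returned for the ".proguard_map" tag
}

// OutputFiles lets other modules select a specific build artifact by tag,
// e.g. ":example{.proguard_map}" in an Android.bp property.
func (m *exampleModule) OutputFiles(tag string) (android.Paths, error) {
	switch tag {
	case "":
		return android.Paths{m.outputFile}, nil
	case ".proguard_map":
		return android.Paths{m.proguardDictionary}, nil
	default:
		return nil, fmt.Errorf("unsupported module reference tag %q", tag)
	}
}
```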