/
filegroup.go
147 lines (135 loc) · 5.12 KB
/
filegroup.go
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
// Logic relating to building filegroups.
//
// Unlike most targets, filegroups are special in that (1) they are known to the
// system and have a custom implementation and (2) multiple filegroups can output
// the same file. This does lead to a potential race condition where we have to
// be sure to build each output file only once.
// Currently this is implemented by a single thread that builds them all; there
// are other schemes we could have but this is simple enough (and since we link
// them rather than copying there should not be a lot of I/O wait).
package build
import (
"encoding/base64"
"os"
"path"
"sync"
"core"
"fs"
)
// init sets up the global filegroup builder before any build steps run.
func init() {
	theFilegroupBuilder = &filegroupBuilder{
		built: make(map[string]bool),
	}
}
// A filegroupBuilder is a singleton that we have that builds all filegroups.
// This works around the problem where multiple filegroups can output the same
// file, which means that if built simultaneously they can fight with one another.
type filegroupBuilder struct {
	mutex sync.Mutex      // guards access to built below
	built map[string]bool // output paths that have already been built, so each is only done once
}

// theFilegroupBuilder is the singleton instance; it is initialised in init().
var theFilegroupBuilder *filegroupBuilder
// Build materialises a single filegroup output file from its source.
// It is safe to call concurrently; each distinct output is only ever built once.
func (b *filegroupBuilder) Build(target *core.BuildTarget, from, to string) error {
	b.mutex.Lock()
	defer b.mutex.Unlock()
	if b.built[to] {
		// Another filegroup has already produced this output.
		return nil
	}
	if fs.IsSameFile(from, to) {
		// File exists already and is the same file. Nothing to do.
		// TODO(peterebden): This should also have a recursive case for when it's a directory...
		b.built[to] = true
		return nil
	}
	// Must actually build the file: clear any stale output, ensure its
	// directory exists, then link/copy the source into place.
	if err := os.RemoveAll(to); err != nil {
		return err
	}
	if err := fs.EnsureDir(to); err != nil {
		return err
	}
	if err := core.RecursiveCopyFile(from, to, target.OutMode(), true, false); err != nil {
		return err
	}
	b.built[to] = true
	movePathHash(from, to, true) // In case we've already hashed the source, don't do it again.
	return nil
}
// buildFilegroup runs the manual build steps for a filegroup rule.
// We don't force this to be done in bash to avoid errors with maximum command lengths,
// and it's actually quite fiddly to get just so there.
func buildFilegroup(tid int, state *core.BuildState, target *core.BuildTarget) error {
	outDir := target.OutDir()
	if err := prepareDirectory(outDir, false); err != nil {
		return err
	}
	// Remove any stale rule hash so it gets recalculated for the new outputs.
	if err := os.RemoveAll(ruleHashFileName(target)); err != nil {
		return err
	}
	localSources := target.AllLocalSourcePaths(state.Graph)
	for i, src := range target.AllFullSourcePaths(state.Graph) {
		out, _ := filegroupOutputPath(target, outDir, localSources[i], src)
		if err := theFilegroupBuilder.Build(target, src, out); err != nil {
			return err
		}
	}
	if target.HasLabel("py") && !target.IsBinary {
		// Pre-emptively create __init__.py files so the outputs can be loaded dynamically.
		// It's a bit cheeky to do non-essential language-specific logic but this enables
		// a lot of relatively normal Python workflows.
		// Errors are deliberately ignored.
		if pkg := state.Graph.Package(target.Label.PackageName); pkg == nil || !pkg.HasOutput("__init__.py") {
			// Don't create this if someone else is going to create this in the package.
			createInitPy(outDir)
		}
	}
	return nil
}
// copyFilegroupHashes copies the hashes of the inputs of this filegroup to their outputs.
// This is a small optimisation to ensure we don't need to recalculate them unnecessarily.
func copyFilegroupHashes(state *core.BuildState, target *core.BuildTarget) {
	outDir := target.OutDir()
	localSources := target.AllLocalSourcePaths(state.Graph)
	for i, src := range target.AllFullSourcePaths(state.Graph) {
		out, _ := filegroupOutputPath(target, outDir, localSources[i], src)
		if out != src {
			// Only move the hash when the output is a genuinely different path.
			movePathHash(src, out, true)
		}
	}
}
// updateHashFilegroupPaths sets the output paths on a hash_filegroup rule.
// Unlike normal filegroups, hash filegroups can't calculate these themselves very readily.
func updateHashFilegroupPaths(state *core.BuildState, target *core.BuildTarget) {
	outDir := target.OutDir()
	locals := target.AllLocalSourcePaths(state.Graph)
	for i, full := range target.AllFullSourcePaths(state.Graph) {
		// Only the relative (second) return value matters here.
		_, relOut := filegroupOutputPath(target, outDir, locals[i], full)
		target.AddOutput(relOut)
	}
}
// filegroupOutputPath returns the output path for a single filegroup source,
// as both a full path (joined with outDir) and a path relative to outDir.
func filegroupOutputPath(target *core.BuildTarget, outDir, source, full string) (string, string) {
	if !target.IsHashFilegroup {
		return path.Join(outDir, source), source
	}
	// Hash filegroups embed a hash of the file into the output name,
	// immediately before the extension.
	hash, err := pathHash(full, false)
	if err != nil {
		panic(err)
	}
	ext := path.Ext(source)
	stem := source[:len(source)-len(ext)]
	out := stem + "-" + base64.RawURLEncoding.EncodeToString(hash) + ext
	return path.Join(outDir, out), out
}
// createInitPy creates an empty __init__.py in dir (and, recursively, in its
// parent directories up to the top of the output tree) if one doesn't already
// exist. Errors are deliberately ignored; this is strictly best-effort.
func createInitPy(dir string) {
	initPy := path.Join(dir, "__init__.py")
	if core.PathExists(initPy) {
		return
	}
	// Create an empty read-only file; failure is silently tolerated.
	if f, err := os.OpenFile(initPy, os.O_RDONLY|os.O_CREATE, 0444); err == nil {
		f.Close()
	}
	// Recurse up towards the root of the output tree doing the same.
	parent := path.Dir(dir)
	if parent != core.GenDir && parent != "." && !core.PathExists(path.Join(parent, "__init__.py")) {
		createInitPy(parent)
	}
}