// Copyright 2015 Google Inc. All rights reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package zip

import (
	"bytes"
	"compress/flate"
	"crypto/sha256"
	"encoding/binary"
	"errors"
	"fmt"
	"hash"
	"hash/crc32"
	"io"
	"io/ioutil"
	"os"
	"path/filepath"
	"sort"
	"strings"
	"sync"
	"syscall"
	"time"

	"android/soong/response"

	"github.com/google/blueprint/pathtools"

	"android/soong/jar"
	"android/soong/third_party/zip"
)

// Sha256HeaderID is a custom Header ID for the `extra` field in
// the file header to store the SHA checksum.
const Sha256HeaderID = 0x4967

// Sha256HeaderSignature is the signature to verify that the extra
// data block is used to store the SHA checksum.
const Sha256HeaderSignature = 0x9514

// Block size used during parallel compression of a single file.
const parallelBlockSize = 1 * 1024 * 1024 // 1MB

// Minimum file size to use parallel compression. It requires more
// flate.Writer allocations, since we can't change the dictionary
// during Reset.
const minParallelFileSize = parallelBlockSize * 6

// Size of the ZIP compression window (32KB).
const windowSize = 32 * 1024

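// Illustrative sketch (a hypothetical helper, not part of the original file
// and not used by the writer): given an uncompressed file size, enumerate the
// [start, end) block ranges and dictionary ranges that the parallel
// compression path in writeFileContents works through. Each block after the
// first is primed with the preceding 32KB of input as a flate dictionary, so
// the independently compressed blocks can be concatenated into a single valid
// deflate stream.
func parallelBlockPlan(fileSize int64) (blocks, dicts [][2]int64) {
	for start := int64(0); start < fileSize; start += parallelBlockSize {
		end := start + parallelBlockSize
		if end > fileSize {
			end = fileSize
		}
		blocks = append(blocks, [2]int64{start, end})

		if start >= windowSize {
			// dictionary: the windowSize bytes immediately before this block
			dicts = append(dicts, [2]int64{start - windowSize, start})
		} else {
			// the first block has no dictionary
			dicts = append(dicts, [2]int64{start, start})
		}
	}
	return blocks, dicts
}
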
type nopCloser struct {
	io.Writer
}

func (nopCloser) Close() error {
	return nil
}

type byteReaderCloser struct {
	*bytes.Reader
	io.Closer
}

type pathMapping struct {
	dest, src string
	zipMethod uint16
}

type FileArg struct {
	PathPrefixInZip, SourcePrefixToStrip string
	ExplicitPathInZip                    string
	SourceFiles                          []string
	JunkPaths                            bool
	GlobDir                              string
}

type FileArgsBuilder struct {
	state FileArg
	err   error
	fs    pathtools.FileSystem

	fileArgs []FileArg
}

func NewFileArgsBuilder() *FileArgsBuilder {
	return &FileArgsBuilder{
		fs: pathtools.OsFs,
	}
}

func (b *FileArgsBuilder) JunkPaths(v bool) *FileArgsBuilder {
	b.state.JunkPaths = v
	b.state.SourcePrefixToStrip = ""
	return b
}

func (b *FileArgsBuilder) SourcePrefixToStrip(prefixToStrip string) *FileArgsBuilder {
	b.state.JunkPaths = false
	b.state.SourcePrefixToStrip = prefixToStrip
	return b
}

func (b *FileArgsBuilder) PathPrefixInZip(rootPrefix string) *FileArgsBuilder {
	b.state.PathPrefixInZip = rootPrefix
	return b
}

func (b *FileArgsBuilder) File(name string) *FileArgsBuilder {
	if b.err != nil {
		return b
	}

	arg := b.state
	arg.SourceFiles = []string{name}
	b.fileArgs = append(b.fileArgs, arg)

	if b.state.ExplicitPathInZip != "" {
		b.state.ExplicitPathInZip = ""
	}
	return b
}

func (b *FileArgsBuilder) Dir(name string) *FileArgsBuilder {
	if b.err != nil {
		return b
	}

	arg := b.state
	arg.GlobDir = name
	b.fileArgs = append(b.fileArgs, arg)
	return b
}

// List reads the file names from the given file and adds them to the source files list.
func (b *FileArgsBuilder) List(name string) *FileArgsBuilder {
	if b.err != nil {
		return b
	}

	f, err := b.fs.Open(name)
	if err != nil {
		b.err = err
		return b
	}
	defer f.Close()

	list, err := ioutil.ReadAll(f)
	if err != nil {
		b.err = err
		return b
	}

	arg := b.state
	arg.SourceFiles = strings.Fields(string(list))
	b.fileArgs = append(b.fileArgs, arg)
	return b
}

// RspFile reads the file names from the given .rsp file and adds them to the source files list.
func (b *FileArgsBuilder) RspFile(name string) *FileArgsBuilder {
	if b.err != nil {
		return b
	}

	f, err := b.fs.Open(name)
	if err != nil {
		b.err = err
		return b
	}
	defer f.Close()

	arg := b.state
	arg.SourceFiles, err = response.ReadRspFile(f)
	if err != nil {
		b.err = err
		return b
	}
	for i := range arg.SourceFiles {
		arg.SourceFiles[i] = pathtools.MatchEscape(arg.SourceFiles[i])
	}
	b.fileArgs = append(b.fileArgs, arg)
	return b
}

// ExplicitPathInZip sets the path in the zip file for the next File call.
func (b *FileArgsBuilder) ExplicitPathInZip(s string) *FileArgsBuilder {
	b.state.ExplicitPathInZip = s
	return b
}

func (b *FileArgsBuilder) Error() error {
	if b == nil {
		return nil
	}
	return b.err
}

func (b *FileArgsBuilder) FileArgs() []FileArg {
	if b == nil {
		return nil
	}
	return b.fileArgs
}

type IncorrectRelativeRootError struct {
	RelativeRoot string
	Path         string
}

func (x IncorrectRelativeRootError) Error() string {
	return fmt.Sprintf("path %q is outside relative root %q", x.Path, x.RelativeRoot)
}

type ConflictingFileError struct {
	Dest string
	Prev string
	Src  string
}

func (x ConflictingFileError) Error() string {
	return fmt.Sprintf("destination %q has two files %q and %q", x.Dest, x.Prev, x.Src)
}

type ZipWriter struct {
	time         time.Time
	createdFiles map[string]string
	createdDirs  map[string]string
	directories  bool

	errors   chan error
	writeOps chan chan *zipEntry

	cpuRateLimiter    *CPURateLimiter
	memoryRateLimiter *MemoryRateLimiter

	compressorPool sync.Pool
	compLevel      int

	followSymlinks     pathtools.ShouldFollowSymlinks
	ignoreMissingFiles bool

	stderr io.Writer
	fs     pathtools.FileSystem

	sha256Checksum bool
}

type zipEntry struct {
	fh *zip.FileHeader

	// List of delayed io.Reader
	futureReaders chan chan io.Reader

	// Only used for passing into the MemoryRateLimiter to ensure we
	// release as much memory as we request.
	allocatedSize int64
}

type ZipArgs struct {
	FileArgs                 []FileArg
	OutputFilePath           string
	EmulateJar               bool
	SrcJar                   bool
	AddDirectoryEntriesToZip bool
	CompressionLevel         int
	ManifestSourcePath       string
	NumParallelJobs          int
	NonDeflatedFiles         map[string]bool
	WriteIfChanged           bool
	StoreSymlinks            bool
	IgnoreMissingFiles       bool
	Sha256Checksum           bool
	DoNotWrite               bool
	Quiet                    bool

	Stderr     io.Writer
	Filesystem pathtools.FileSystem
}

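// Minimal usage sketch (illustrative only; the function name, paths, and
// option values below are hypothetical). A FileArgsBuilder collects the
// source-to-destination mappings, and ZipArgs carries them, together with
// output options, into Zip.
func exampleZipUsage() error {
	builder := NewFileArgsBuilder().
		SourcePrefixToStrip("out/staging").
		File("out/staging/lib/a.so").
		Dir("out/staging/assets")
	if err := builder.Error(); err != nil {
		return err
	}

	return Zip(ZipArgs{
		FileArgs:         builder.FileArgs(),
		OutputFilePath:   "out/archive.zip",
		CompressionLevel: 5,
		NumParallelJobs:  4,
		WriteIfChanged:   true,
	})
}
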
func zipTo(args ZipArgs, w io.Writer) error {
	if args.EmulateJar {
		args.AddDirectoryEntriesToZip = true
	}

	// Have Glob follow symlinks if they are not being stored as symlinks in the zip file.
	followSymlinks := pathtools.ShouldFollowSymlinks(!args.StoreSymlinks)

	z := &ZipWriter{
		time:               jar.DefaultTime,
		createdDirs:        make(map[string]string),
		createdFiles:       make(map[string]string),
		directories:        args.AddDirectoryEntriesToZip,
		compLevel:          args.CompressionLevel,
		followSymlinks:     followSymlinks,
		ignoreMissingFiles: args.IgnoreMissingFiles,
		stderr:             args.Stderr,
		fs:                 args.Filesystem,
		sha256Checksum:     args.Sha256Checksum,
	}

	if z.fs == nil {
		z.fs = pathtools.OsFs
	}

	if z.stderr == nil {
		z.stderr = os.Stderr
	}

	pathMappings := []pathMapping{}

	noCompression := args.CompressionLevel == 0

	for _, fa := range args.FileArgs {
		var srcs []string
		for _, s := range fa.SourceFiles {
			s = strings.TrimSpace(s)
			if s == "" {
				continue
			}

			result, err := z.fs.Glob(s, nil, followSymlinks)
			if err != nil {
				return err
			}
			if len(result.Matches) == 0 {
				err := &os.PathError{
					Op:   "lstat",
					Path: s,
					Err:  os.ErrNotExist,
				}
				if args.IgnoreMissingFiles {
					if !args.Quiet {
						fmt.Fprintln(z.stderr, "warning:", err)
					}
				} else {
					return err
				}
			}
			srcs = append(srcs, result.Matches...)
		}
		if fa.GlobDir != "" {
			if exists, isDir, err := z.fs.Exists(fa.GlobDir); err != nil {
				return err
			} else if !exists && !args.IgnoreMissingFiles {
				err := &os.PathError{
					Op:   "lstat",
					Path: fa.GlobDir,
					Err:  os.ErrNotExist,
				}
				if args.IgnoreMissingFiles {
					if !args.Quiet {
						fmt.Fprintln(z.stderr, "warning:", err)
					}
				} else {
					return err
				}
			} else if !isDir && !args.IgnoreMissingFiles {
				err := &os.PathError{
					Op:   "lstat",
					Path: fa.GlobDir,
					Err:  syscall.ENOTDIR,
				}
				if args.IgnoreMissingFiles {
					if !args.Quiet {
						fmt.Fprintln(z.stderr, "warning:", err)
					}
				} else {
					return err
				}
			}
			result, err := z.fs.Glob(filepath.Join(fa.GlobDir, "**/*"), nil, followSymlinks)
			if err != nil {
				return err
			}
			srcs = append(srcs, result.Matches...)
		}
		for _, src := range srcs {
			err := fillPathPairs(fa, src, &pathMappings, args.NonDeflatedFiles, noCompression)
			if err != nil {
				return err
			}
		}
	}

	return z.write(w, pathMappings, args.ManifestSourcePath, args.EmulateJar, args.SrcJar, args.NumParallelJobs)
}

// Zip creates an output zip archive from the given sources.
func Zip(args ZipArgs) error {
	if args.OutputFilePath == "" {
		return fmt.Errorf("output file path must be nonempty")
	}

	buf := &bytes.Buffer{}
	var out io.Writer = buf

	var zipErr error

	if args.DoNotWrite {
		out = io.Discard
	} else if !args.WriteIfChanged {
		f, err := os.Create(args.OutputFilePath)
		if err != nil {
			return err
		}

		defer f.Close()
		defer func() {
			if zipErr != nil {
				os.Remove(args.OutputFilePath)
			}
		}()

		out = f
	}

	zipErr = zipTo(args, out)
	if zipErr != nil {
		return zipErr
	}

	if args.WriteIfChanged && !args.DoNotWrite {
		err := pathtools.WriteFileIfChanged(args.OutputFilePath, buf.Bytes(), 0666)
		if err != nil {
			return err
		}
	}

	return nil
}

func fillPathPairs(fa FileArg, src string, pathMappings *[]pathMapping,
	nonDeflatedFiles map[string]bool, noCompression bool) error {

	var dest string

	if fa.ExplicitPathInZip != "" {
		dest = fa.ExplicitPathInZip
	} else if fa.JunkPaths {
		dest = filepath.Base(src)
	} else {
		var err error
		dest, err = filepath.Rel(fa.SourcePrefixToStrip, src)
		if err != nil {
			return err
		}
		if strings.HasPrefix(dest, "../") {
			return IncorrectRelativeRootError{
				Path:         src,
				RelativeRoot: fa.SourcePrefixToStrip,
			}
		}
	}
	dest = filepath.Join(fa.PathPrefixInZip, dest)

	zipMethod := zip.Deflate
	if _, found := nonDeflatedFiles[dest]; found || noCompression {
		zipMethod = zip.Store
	}
	*pathMappings = append(*pathMappings,
		pathMapping{dest: dest, src: src, zipMethod: zipMethod})

	return nil
}

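// Worked examples of the destination mapping performed by fillPathPairs above
// (the paths are hypothetical):
//
//	SourcePrefixToStrip "out/dir":  "out/dir/sub/a.txt" -> "sub/a.txt"
//	JunkPaths true:                 "out/dir/sub/a.txt" -> "a.txt"
//	ExplicitPathInZip "docs/b.txt": "out/dir/a.txt"     -> "docs/b.txt"
//
// PathPrefixInZip is then joined onto the front of the result, and a source
// whose relative path escapes SourcePrefixToStrip (i.e. begins with "../") is
// rejected with IncorrectRelativeRootError.
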
func jarSort(mappings []pathMapping) {
	sort.SliceStable(mappings, func(i int, j int) bool {
		return jar.EntryNamesLess(mappings[i].dest, mappings[j].dest)
	})
}

func (z *ZipWriter) write(f io.Writer, pathMappings []pathMapping, manifest string, emulateJar, srcJar bool,
	parallelJobs int) error {

	z.errors = make(chan error)
	defer close(z.errors)

	// This channel size can be essentially unlimited -- it's used as a fifo
	// queue to decouple the CPU and IO loads. Directories don't require any
	// compression time, but still cost some IO. Similarly, small files can be
	// very fast to compress. Some files that are more difficult to compress
	// won't take a correspondingly longer time writing out.
	//
	// The optimum size here depends on your CPU and IO characteristics, and
	// the layout of your zip file. 1000 was chosen mostly at random as
	// something that worked reasonably well for a test file.
	//
	// The RateLimit object will put the upper bounds on the number of
	// parallel compressions and outstanding buffers.
	z.writeOps = make(chan chan *zipEntry, 1000)
	z.cpuRateLimiter = NewCPURateLimiter(int64(parallelJobs))
	z.memoryRateLimiter = NewMemoryRateLimiter(0)
	defer func() {
		z.cpuRateLimiter.Stop()
		z.memoryRateLimiter.Stop()
	}()

	if manifest != "" && !emulateJar {
		return errors.New("must specify --jar when specifying a manifest via -m")
	}

	if emulateJar {
		// manifest may be empty, in which case addManifest will fill in a default
		pathMappings = append(pathMappings, pathMapping{jar.ManifestFile, manifest, zip.Deflate})

		jarSort(pathMappings)
	}

	go func() {
		var err error
		defer close(z.writeOps)

		for _, ele := range pathMappings {
			if emulateJar && ele.dest == jar.ManifestFile {
				err = z.addManifest(ele.dest, ele.src, ele.zipMethod)
			} else {
				err = z.addFile(ele.dest, ele.src, ele.zipMethod, emulateJar, srcJar)
			}
			if err != nil {
				z.errors <- err
				return
			}
		}
	}()

	zipw := zip.NewWriter(f)

	var currentWriteOpChan chan *zipEntry
	var currentWriter io.WriteCloser
	var currentReaders chan chan io.Reader
	var currentReader chan io.Reader
	var done bool

	for !done {
		var writeOpsChan chan chan *zipEntry
		var writeOpChan chan *zipEntry
		var readersChan chan chan io.Reader

		if currentReader != nil {
			// Only read and process errors
		} else if currentReaders != nil {
			readersChan = currentReaders
		} else if currentWriteOpChan != nil {
			writeOpChan = currentWriteOpChan
		} else {
			writeOpsChan = z.writeOps
		}

		select {
		case writeOp, ok := <-writeOpsChan:
			if !ok {
				done = true
			}

			currentWriteOpChan = writeOp

		case op := <-writeOpChan:
			currentWriteOpChan = nil

			var err error
			if op.fh.Method == zip.Deflate {
				currentWriter, err = zipw.CreateCompressedHeader(op.fh)
			} else {
				var zw io.Writer

				op.fh.CompressedSize64 = op.fh.UncompressedSize64

				zw, err = zipw.CreateHeaderAndroid(op.fh)
				currentWriter = nopCloser{zw}
			}
			if err != nil {
				return err
			}

			currentReaders = op.futureReaders
			if op.futureReaders == nil {
				currentWriter.Close()
				currentWriter = nil
			}
			z.memoryRateLimiter.Finish(op.allocatedSize)

		case futureReader, ok := <-readersChan:
			if !ok {
				// Done with reading
				currentWriter.Close()
				currentWriter = nil
				currentReaders = nil
			}

			currentReader = futureReader

		case reader := <-currentReader:
			_, err := io.Copy(currentWriter, reader)
			if err != nil {
				return err
			}

			currentReader = nil

		case err := <-z.errors:
			return err
		}
	}

	// One last chance to catch an error
	select {
	case err := <-z.errors:
		return err
	default:
		zipw.Close()
		return nil
	}
}

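// Minimal sketch of the ordering pattern used by write above (hypothetical
// names, not part of the original file): every job gets its own result
// channel, the result channels are queued in submission order, and a single
// consumer drains them in that order, so results are emitted deterministically
// even though the work runs concurrently. write layers rate limiting and
// per-block futureReaders on top of the same idea.
func orderedPipelineSketch(inputs []string, process func(string) string) []string {
	queue := make(chan chan string, len(inputs))
	for _, in := range inputs {
		c := make(chan string, 1)
		queue <- c
		go func(in string, c chan string) {
			c <- process(in) // runs concurrently; ordering is preserved by the queue
		}(in, c)
	}
	close(queue)

	var results []string
	for c := range queue {
		results = append(results, <-c)
	}
	return results
}
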
// imports (possibly with compression) <src> into the zip at sub-path <dest>
func (z *ZipWriter) addFile(dest, src string, method uint16, emulateJar, srcJar bool) error {
	var fileSize int64
	var executable bool

	var s os.FileInfo
	var err error
	if z.followSymlinks {
		s, err = z.fs.Stat(src)
	} else {
		s, err = z.fs.Lstat(src)
	}

	if err != nil {
		if os.IsNotExist(err) && z.ignoreMissingFiles {
			fmt.Fprintln(z.stderr, "warning:", err)
			return nil
		}
		return err
	}

	createParentDirs := func(dest, src string) error {
		if err := z.writeDirectory(filepath.Dir(dest), src, emulateJar); err != nil {
			return err
		}

		if prev, exists := z.createdDirs[dest]; exists {
			return fmt.Errorf("destination %q is both a directory %q and a file %q", dest, prev, src)
		}

		return nil
	}

	checkDuplicateFiles := func(dest, src string) (bool, error) {
		if prev, exists := z.createdFiles[dest]; exists {
			if prev != src {
				return true, ConflictingFileError{
					Dest: dest,
					Prev: prev,
					Src:  src,
				}
			}
			return true, nil
		}

		z.createdFiles[dest] = src
		return false, nil
	}

	if s.IsDir() {
		if z.directories {
			return z.writeDirectory(dest, src, emulateJar)
		}
		return nil
	} else if s.Mode()&os.ModeSymlink != 0 {
		err = createParentDirs(dest, src)
		if err != nil {
			return err
		}

		duplicate, err := checkDuplicateFiles(dest, src)
		if err != nil {
			return err
		}
		if duplicate {
			return nil
		}

		return z.writeSymlink(dest, src)
	} else if s.Mode().IsRegular() {
		r, err := z.fs.Open(src)
		if err != nil {
			return err
		}

		if srcJar && filepath.Ext(src) == ".java" {
			// rewrite the destination using the package path if it can be determined
			pkg, err := jar.JavaPackage(r, src)
			if err != nil {
				// ignore errors for now, leaving the file in its original location in the zip
			} else {
				dest = filepath.Join(filepath.Join(strings.Split(pkg, ".")...), filepath.Base(src))
			}

			_, err = r.Seek(0, io.SeekStart)
			if err != nil {
				return err
			}
		}

		fileSize = s.Size()
		executable = s.Mode()&0100 != 0

		header := &zip.FileHeader{
			Name:               dest,
			Method:             method,
			UncompressedSize64: uint64(fileSize),
		}

		mode := os.FileMode(0644)
		if executable {
			mode = 0755
		}
		header.SetMode(mode)

		err = createParentDirs(dest, src)
		if err != nil {
			return err
		}

		duplicate, err := checkDuplicateFiles(dest, src)
		if err != nil {
			return err
		}
		if duplicate {
			return nil
		}

		return z.writeFileContents(header, r)
	} else {
		return fmt.Errorf("%s is not a file, directory, or symlink", src)
	}
}

func (z *ZipWriter) addManifest(dest string, src string, _ uint16) error {
	if prev, exists := z.createdDirs[dest]; exists {
		return fmt.Errorf("destination %q is both a directory %q and a file %q", dest, prev, src)
	}
	if prev, exists := z.createdFiles[dest]; exists {
		if prev != src {
			return ConflictingFileError{
				Dest: dest,
				Prev: prev,
				Src:  src,
			}
		}
		return nil
	}

	if err := z.writeDirectory(filepath.Dir(dest), src, true); err != nil {
		return err
	}

	var contents []byte
	if src != "" {
		f, err := z.fs.Open(src)
		if err != nil {
			return err
		}

		contents, err = ioutil.ReadAll(f)
		f.Close()
		if err != nil {
			return err
		}
	}

	fh, buf, err := jar.ManifestFileContents(contents)
	if err != nil {
		return err
	}

	reader := &byteReaderCloser{bytes.NewReader(buf), ioutil.NopCloser(nil)}

	return z.writeFileContents(fh, reader)
}

func (z *ZipWriter) writeFileContents(header *zip.FileHeader, r pathtools.ReaderAtSeekerCloser) (err error) {

	header.SetModTime(z.time)

	compressChan := make(chan *zipEntry, 1)
	z.writeOps <- compressChan

	// Pre-fill a zipEntry; it will be sent in the compressChan once
	// we're sure about the Method and CRC.
	ze := &zipEntry{
		fh: header,
	}

	ze.allocatedSize = int64(header.UncompressedSize64)
	z.cpuRateLimiter.Request()
	z.memoryRateLimiter.Request(ze.allocatedSize)

	fileSize := int64(header.UncompressedSize64)
	if fileSize == 0 {
		fileSize = int64(header.UncompressedSize)
	}

	if header.Method == zip.Deflate && fileSize >= minParallelFileSize {
		wg := new(sync.WaitGroup)

		// Allocate enough buffer to hold all readers. We'll limit
		// this based on actual buffer sizes in RateLimit.
		ze.futureReaders = make(chan chan io.Reader, (fileSize/parallelBlockSize)+1)

		// Calculate the CRC and SHA256 in the background, since reading
		// the entire file could take a while.
		//
		// We could split this up into chunks as well, but it's faster
		// than the compression. Due to the Go Zip API, we also need to
		// know the result before we can begin writing the compressed
		// data out to the zipfile.
		//
		// We calculate SHA256 only if `-sha256` is set.
		wg.Add(1)
		go z.checksumFileAsync(r, ze, compressChan, wg)

		for start := int64(0); start < fileSize; start += parallelBlockSize {
			sr := io.NewSectionReader(r, start, parallelBlockSize)
			resultChan := make(chan io.Reader, 1)
			ze.futureReaders <- resultChan

			z.cpuRateLimiter.Request()

			last := !(start+parallelBlockSize < fileSize)
			var dict []byte
			if start >= windowSize {
				dict, err = ioutil.ReadAll(io.NewSectionReader(r, start-windowSize, windowSize))
				if err != nil {
					return err
				}
			}

			wg.Add(1)
			go z.compressPartialFile(sr, dict, last, resultChan, wg)
		}

		close(ze.futureReaders)

		// Close the file handle after all readers are done
		go func(wg *sync.WaitGroup, closer io.Closer) {
			wg.Wait()
			closer.Close()
		}(wg, r)
	} else {
		go func() {
			z.compressWholeFile(ze, r, compressChan)
			r.Close()
		}()
	}

	return nil
}

func (z *ZipWriter) checksumFileAsync(r io.ReadSeeker, ze *zipEntry, resultChan chan *zipEntry, wg *sync.WaitGroup) {
	defer wg.Done()
	defer z.cpuRateLimiter.Finish()

	z.checksumFile(r, ze)

	resultChan <- ze
	close(resultChan)
}

func (z *ZipWriter) checksumFile(r io.ReadSeeker, ze *zipEntry) {
	crc := crc32.NewIEEE()
	writers := []io.Writer{crc}

	var shaHasher hash.Hash
	if z.sha256Checksum && !ze.fh.Mode().IsDir() {
		shaHasher = sha256.New()
		writers = append(writers, shaHasher)
	}

	w := io.MultiWriter(writers...)

	_, err := io.Copy(w, r)
	if err != nil {
		z.errors <- err
		return
	}

	ze.fh.CRC32 = crc.Sum32()
	if shaHasher != nil {
		z.appendSHAToExtra(ze, shaHasher.Sum(nil))
	}
}

func (z *ZipWriter) appendSHAToExtra(ze *zipEntry, checksum []byte) {
	// The SHA256 checksum block consists of:
	// - Header ID, equal to Sha256HeaderID (2 bytes)
	// - Data size (2 bytes)
	// - Data block:
	//   - Signature, equal to Sha256HeaderSignature (2 bytes)
	//   - Data, the SHA checksum value
	var buf []byte
	buf = binary.LittleEndian.AppendUint16(buf, Sha256HeaderID)
	buf = binary.LittleEndian.AppendUint16(buf, uint16(len(checksum)+2))
	buf = binary.LittleEndian.AppendUint16(buf, Sha256HeaderSignature)
	buf = append(buf, checksum...)
	ze.fh.Extra = append(ze.fh.Extra, buf...)
}

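// Illustrative sketch (a hypothetical helper, not used by the writer): walk an
// entry's extra field and recover the SHA256 checksum written by
// appendSHAToExtra above, following the header-ID/size framing described in
// that function's comment.
func parseSHA256Extra(extra []byte) (checksum []byte, found bool) {
	for len(extra) >= 4 {
		headerID := binary.LittleEndian.Uint16(extra[0:2])
		size := int(binary.LittleEndian.Uint16(extra[2:4]))
		if len(extra) < 4+size {
			return nil, false
		}
		data := extra[4 : 4+size]
		if headerID == Sha256HeaderID && size >= 2 &&
			binary.LittleEndian.Uint16(data[0:2]) == Sha256HeaderSignature {
			// the checksum follows the 2-byte signature
			return data[2:], true
		}
		extra = extra[4+size:]
	}
	return nil, false
}
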
func (z *ZipWriter) compressPartialFile(r io.Reader, dict []byte, last bool, resultChan chan io.Reader, wg *sync.WaitGroup) {
	defer wg.Done()

	result, err := z.compressBlock(r, dict, last)
	if err != nil {
		z.errors <- err
		return
	}

	z.cpuRateLimiter.Finish()

	resultChan <- result
}

func (z *ZipWriter) compressBlock(r io.Reader, dict []byte, last bool) (*bytes.Buffer, error) {
	buf := new(bytes.Buffer)
	var fw *flate.Writer
	var err error
	if len(dict) > 0 {
		// There's no way to Reset a Writer with a new dictionary, so
		// don't use the Pool
		fw, err = flate.NewWriterDict(buf, z.compLevel, dict)
	} else {
		var ok bool
		if fw, ok = z.compressorPool.Get().(*flate.Writer); ok {
			fw.Reset(buf)
		} else {
			fw, err = flate.NewWriter(buf, z.compLevel)
		}
		defer z.compressorPool.Put(fw)
	}
	if err != nil {
		return nil, err
	}

	_, err = io.Copy(fw, r)
	if err != nil {
		return nil, err
	}
	if last {
		fw.Close()
	} else {
		fw.Flush()
	}

	return buf, nil
}

func (z *ZipWriter) compressWholeFile(ze *zipEntry, r io.ReadSeeker, compressChan chan *zipEntry) {
	z.checksumFile(r, ze)

	_, err := r.Seek(0, 0)
	if err != nil {
		z.errors <- err
		return
	}

	readFile := func(reader io.ReadSeeker) ([]byte, error) {
		_, err := reader.Seek(0, 0)
		if err != nil {
			return nil, err
		}

		buf, err := ioutil.ReadAll(reader)
		if err != nil {
			return nil, err
		}

		return buf, nil
	}

	ze.futureReaders = make(chan chan io.Reader, 1)
	futureReader := make(chan io.Reader, 1)
	ze.futureReaders <- futureReader
	close(ze.futureReaders)

	if ze.fh.Method == zip.Deflate {
		compressed, err := z.compressBlock(r, nil, true)
		if err != nil {
			z.errors <- err
			return
		}
		if uint64(compressed.Len()) < ze.fh.UncompressedSize64 {
			futureReader <- compressed
		} else {
			buf, err := readFile(r)
			if err != nil {
				z.errors <- err
				return
			}
			ze.fh.Method = zip.Store
			futureReader <- bytes.NewReader(buf)
		}
	} else {
		buf, err := readFile(r)
		if err != nil {
			z.errors <- err
			return
		}
		ze.fh.Method = zip.Store
		futureReader <- bytes.NewReader(buf)
	}

	z.cpuRateLimiter.Finish()

	close(futureReader)

	compressChan <- ze
	close(compressChan)
}

// writeDirectory annotates that dir is a directory created for the src file or directory, and adds
// the directory entry to the zip file if directories are enabled.
func (z *ZipWriter) writeDirectory(dir string, src string, emulateJar bool) error {
	// clean the input
	dir = filepath.Clean(dir)

	// discover any uncreated directories in the path
	var zipDirs []string
	for dir != "" && dir != "." {
		if _, exists := z.createdDirs[dir]; exists {
			break
		}

		if prev, exists := z.createdFiles[dir]; exists {
			return fmt.Errorf("destination %q is both a directory %q and a file %q", dir, src, prev)
		}

		z.createdDirs[dir] = src
		// parent directories precede their children
		zipDirs = append([]string{dir}, zipDirs...)

		dir = filepath.Dir(dir)
	}

	if z.directories {
		// make a directory entry for each uncreated directory
		for _, cleanDir := range zipDirs {
			var dirHeader *zip.FileHeader

			if emulateJar && cleanDir+"/" == jar.MetaDir {
				dirHeader = jar.MetaDirFileHeader()
			} else {
				dirHeader = &zip.FileHeader{
					Name: cleanDir + "/",
				}
				dirHeader.SetMode(0755 | os.ModeDir)
			}

			dirHeader.SetModTime(z.time)

			ze := make(chan *zipEntry, 1)
			ze <- &zipEntry{
				fh: dirHeader,
			}
			close(ze)
			z.writeOps <- ze
		}
	}

	return nil
}

func (z *ZipWriter) writeSymlink(rel, file string) error {
	fileHeader := &zip.FileHeader{
		Name: rel,
	}
	fileHeader.SetModTime(z.time)
	fileHeader.SetMode(0777 | os.ModeSymlink)

	dest, err := z.fs.Readlink(file)
	if err != nil {
		return err
	}

	fileHeader.UncompressedSize64 = uint64(len(dest))
	fileHeader.CRC32 = crc32.ChecksumIEEE([]byte(dest))

	ze := make(chan *zipEntry, 1)
	futureReaders := make(chan chan io.Reader, 1)
	futureReader := make(chan io.Reader, 1)
	futureReaders <- futureReader
	close(futureReaders)
	futureReader <- bytes.NewBufferString(dest)
	close(futureReader)

	ze <- &zipEntry{
		fh:            fileHeader,
		futureReaders: futureReaders,
	}
	close(ze)
	z.writeOps <- ze

	return nil
}