Mirror of https://github.com/JonasunderscoreJones/api.jonasjones.dev.git (synced 2025-10-23 12:09:19 +02:00)

Initial commit (by create-cloudflare CLI)

Commit 58a42872a0: 1745 changed files with 741893 additions and 0 deletions
6  node_modules/tar-fs/.travis.yml  generated  vendored  Normal file
@@ -0,0 +1,6 @@
language: node_js
node_js:
- 8
- 10
- 12
- 14

21  node_modules/tar-fs/LICENSE  generated  vendored  Normal file
@@ -0,0 +1,21 @@
The MIT License (MIT)

Copyright (c) 2014 Mathias Buus

Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:

The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.

THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.

165  node_modules/tar-fs/README.md  generated  vendored  Normal file
@@ -0,0 +1,165 @@
# tar-fs

filesystem bindings for [tar-stream](https://github.com/mafintosh/tar-stream).

```
npm install tar-fs
```

[build status](http://travis-ci.org/mafintosh/tar-fs)

## Usage

tar-fs allows you to pack directories into tarballs and extract tarballs into directories.

It doesn't gunzip for you, so if you want to extract a `.tar.gz` with this you'll need to use something like [gunzip-maybe](https://github.com/mafintosh/gunzip-maybe) in addition to this.

``` js
var tar = require('tar-fs')
var fs = require('fs')

// packing a directory
tar.pack('./my-directory').pipe(fs.createWriteStream('my-tarball.tar'))

// extracting a directory
fs.createReadStream('my-other-tarball.tar').pipe(tar.extract('./my-other-directory'))
```

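Since tar-fs never gunzips on its own, a compressed tarball has to go through a gunzip stream before it reaches `tar.extract`. A minimal sketch of that, assuming gunzip-maybe is installed alongside tar-fs (the file and directory names here are placeholders):

``` js
var tar = require('tar-fs')
var fs = require('fs')
var gunzip = require('gunzip-maybe')

// decompress first, then hand the plain tar stream to tar-fs
fs.createReadStream('my-tarball.tar.gz')
  .pipe(gunzip())
  .pipe(tar.extract('./my-directory'))
```
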
To ignore various files when packing or extracting add an ignore function to the options. `ignore`
is also an alias for `filter`. Additionally you get `header` if you use ignore while extracting.
That way you could also filter by metadata.

``` js
var pack = tar.pack('./my-directory', {
  ignore: function(name) {
    return path.extname(name) === '.bin' // ignore .bin files when packing
  }
})

var extract = tar.extract('./my-other-directory', {
  ignore: function(name) {
    return path.extname(name) === '.bin' // ignore .bin files inside the tarball when extracting
  }
})

var extractFilesDirs = tar.extract('./my-other-other-directory', {
  ignore: function(_, header) {
    // pass files & directories, ignore e.g. symlinks
    return header.type !== 'file' && header.type !== 'directory'
  }
})
```

You can also specify which entries to pack using the `entries` option.

```js
var pack = tar.pack('./my-directory', {
  entries: ['file1', 'subdir/file2'] // only the specific entries will be packed
})
```

If you want to modify the headers when packing/extracting, add a map function to the options.

``` js
var pack = tar.pack('./my-directory', {
  map: function(header) {
    header.name = 'prefixed/'+header.name
    return header
  }
})

var extract = tar.extract('./my-directory', {
  map: function(header) {
    header.name = 'another-prefix/'+header.name
    return header
  }
})
```

Similarly you can use `mapStream` in case you want to modify the input/output file streams.

``` js
var pack = tar.pack('./my-directory', {
  mapStream: function(fileStream, header) {
    // NOTE: the returned stream HAS to have the same length as the input stream.
    // If not make sure to update the size in the header passed in here.
    if (path.extname(header.name) === '.js') {
      return fileStream.pipe(someTransform)
    }
    return fileStream;
  }
})

var extract = tar.extract('./my-directory', {
  mapStream: function(fileStream, header) {
    if (path.extname(header.name) === '.js') {
      return fileStream.pipe(someTransform)
    }
    return fileStream;
  }
})
```

Set `options.fmode` and `options.dmode` to ensure that files/directories extracted have the corresponding modes.

``` js
var extract = tar.extract('./my-directory', {
  dmode: parseInt(555, 8), // all dirs should be readable
  fmode: parseInt(444, 8) // all files should be readable
})
```

It can be useful to use `dmode` and `fmode` if you are packing/unpacking tarballs between *nix/windows to ensure that all files/directories unpacked are readable.

Alternatively you can set `options.readable` and/or `options.writable` to set the dmode and fmode to readable/writable.

``` js
var extract = tar.extract('./my-directory', {
  readable: true, // all dirs and files should be readable
  writable: true, // all dirs and files should be writable
})
```

Set `options.strict` to `false` if you want to ignore errors due to unsupported entry types (like device files).

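A sketch of that (the directory name is a placeholder): with `strict: false` the extract stream simply skips entry types it cannot create instead of emitting an error.

``` js
var tar = require('tar-fs')

var extract = tar.extract('./my-directory', {
  strict: false // unsupported entry types (e.g. device files) are skipped rather than treated as errors
})
```
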
To dereference symlinks (pack the contents of the symlink instead of the link itself) set `options.dereference` to `true`.

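For instance (file and directory names are placeholders), packing with symlinks followed looks like this:

``` js
var tar = require('tar-fs')
var fs = require('fs')

tar.pack('./my-directory', {
  dereference: true // store the file a symlink points at instead of the symlink itself
}).pipe(fs.createWriteStream('my-tarball.tar'))
```
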
## Copy a directory

Copying a directory with permissions and mtime intact is as simple as

``` js
tar.pack('source-directory').pipe(tar.extract('dest-directory'))
```

## Interaction with [`tar-stream`](https://github.com/mafintosh/tar-stream)

Use `finalize: false` and the `finish` hook to leave the pack stream open for further entries (see [`tar-stream#pack`](https://github.com/mafintosh/tar-stream#packing)), and use `pack` to pass an existing pack stream.

``` js
var mypack = tar.pack('./my-directory', {
  finalize: false,
  finish: function(sameAsMypack) {
    mypack.entry({name: 'generated-file.txt'}, "hello")
    tar.pack('./other-directory', {
      pack: sameAsMypack
    })
  }
})
```

## Performance

Packing and extracting a 6.1 GB directory with 2496 directories and 2398 files yields the following results on my Macbook Air.
[See the benchmark here](https://gist.github.com/mafintosh/8102201)

* tar-fs: 34.261 seconds
* [node-tar](https://github.com/isaacs/node-tar): 366.123 seconds (or 10x slower)

## License

MIT

351  node_modules/tar-fs/index.js  generated  vendored  Normal file
@@ -0,0 +1,351 @@
var chownr = require('chownr')
var tar = require('tar-stream')
var pump = require('pump')
var mkdirp = require('mkdirp-classic')
var fs = require('fs')
var path = require('path')
var os = require('os')

var win32 = os.platform() === 'win32'

var noop = function () {}

var echo = function (name) {
  return name
}

var normalize = !win32 ? echo : function (name) {
  return name.replace(/\\/g, '/').replace(/[:?<>|]/g, '_')
}

var statAll = function (fs, stat, cwd, ignore, entries, sort) {
  var queue = entries || ['.']

  return function loop (callback) {
    if (!queue.length) return callback()
    var next = queue.shift()
    var nextAbs = path.join(cwd, next)

    stat.call(fs, nextAbs, function (err, stat) {
      if (err) return callback(err)

      if (!stat.isDirectory()) return callback(null, next, stat)

      fs.readdir(nextAbs, function (err, files) {
        if (err) return callback(err)

        if (sort) files.sort()
        for (var i = 0; i < files.length; i++) {
          if (!ignore(path.join(cwd, next, files[i]))) queue.push(path.join(next, files[i]))
        }

        callback(null, next, stat)
      })
    })
  }
}

var strip = function (map, level) {
  return function (header) {
    header.name = header.name.split('/').slice(level).join('/')

    var linkname = header.linkname
    if (linkname && (header.type === 'link' || path.isAbsolute(linkname))) {
      header.linkname = linkname.split('/').slice(level).join('/')
    }

    return map(header)
  }
}

exports.pack = function (cwd, opts) {
  if (!cwd) cwd = '.'
  if (!opts) opts = {}

  var xfs = opts.fs || fs
  var ignore = opts.ignore || opts.filter || noop
  var map = opts.map || noop
  var mapStream = opts.mapStream || echo
  var statNext = statAll(xfs, opts.dereference ? xfs.stat : xfs.lstat, cwd, ignore, opts.entries, opts.sort)
  var strict = opts.strict !== false
  var umask = typeof opts.umask === 'number' ? ~opts.umask : ~processUmask()
  var dmode = typeof opts.dmode === 'number' ? opts.dmode : 0
  var fmode = typeof opts.fmode === 'number' ? opts.fmode : 0
  var pack = opts.pack || tar.pack()
  var finish = opts.finish || noop

  if (opts.strip) map = strip(map, opts.strip)

  if (opts.readable) {
    dmode |= parseInt(555, 8)
    fmode |= parseInt(444, 8)
  }
  if (opts.writable) {
    dmode |= parseInt(333, 8)
    fmode |= parseInt(222, 8)
  }

  var onsymlink = function (filename, header) {
    xfs.readlink(path.join(cwd, filename), function (err, linkname) {
      if (err) return pack.destroy(err)
      header.linkname = normalize(linkname)
      pack.entry(header, onnextentry)
    })
  }

  var onstat = function (err, filename, stat) {
    if (err) return pack.destroy(err)
    if (!filename) {
      if (opts.finalize !== false) pack.finalize()
      return finish(pack)
    }

    if (stat.isSocket()) return onnextentry() // tar does not support sockets...

    var header = {
      name: normalize(filename),
      mode: (stat.mode | (stat.isDirectory() ? dmode : fmode)) & umask,
      mtime: stat.mtime,
      size: stat.size,
      type: 'file',
      uid: stat.uid,
      gid: stat.gid
    }

    if (stat.isDirectory()) {
      header.size = 0
      header.type = 'directory'
      header = map(header) || header
      return pack.entry(header, onnextentry)
    }

    if (stat.isSymbolicLink()) {
      header.size = 0
      header.type = 'symlink'
      header = map(header) || header
      return onsymlink(filename, header)
    }

    // TODO: add fifo etc...

    header = map(header) || header

    if (!stat.isFile()) {
      if (strict) return pack.destroy(new Error('unsupported type for ' + filename))
      return onnextentry()
    }

    var entry = pack.entry(header, onnextentry)
    if (!entry) return

    var rs = mapStream(xfs.createReadStream(path.join(cwd, filename), { start: 0, end: header.size > 0 ? header.size - 1 : header.size }), header)

    rs.on('error', function (err) { // always forward errors on destroy
      entry.destroy(err)
    })

    pump(rs, entry)
  }

  var onnextentry = function (err) {
    if (err) return pack.destroy(err)
    statNext(onstat)
  }

  onnextentry()

  return pack
}

var head = function (list) {
  return list.length ? list[list.length - 1] : null
}

var processGetuid = function () {
  return process.getuid ? process.getuid() : -1
}

var processUmask = function () {
  return process.umask ? process.umask() : 0
}

exports.extract = function (cwd, opts) {
  if (!cwd) cwd = '.'
  if (!opts) opts = {}

  var xfs = opts.fs || fs
  var ignore = opts.ignore || opts.filter || noop
  var map = opts.map || noop
  var mapStream = opts.mapStream || echo
  var own = opts.chown !== false && !win32 && processGetuid() === 0
  var extract = opts.extract || tar.extract()
  var stack = []
  var now = new Date()
  var umask = typeof opts.umask === 'number' ? ~opts.umask : ~processUmask()
  var dmode = typeof opts.dmode === 'number' ? opts.dmode : 0
  var fmode = typeof opts.fmode === 'number' ? opts.fmode : 0
  var strict = opts.strict !== false

  if (opts.strip) map = strip(map, opts.strip)

  if (opts.readable) {
    dmode |= parseInt(555, 8)
    fmode |= parseInt(444, 8)
  }
  if (opts.writable) {
    dmode |= parseInt(333, 8)
    fmode |= parseInt(222, 8)
  }

  var utimesParent = function (name, cb) { // we just set the mtime on the parent dir again every time we write an entry
    var top
    while ((top = head(stack)) && name.slice(0, top[0].length) !== top[0]) stack.pop()
    if (!top) return cb()
    xfs.utimes(top[0], now, top[1], cb)
  }

  var utimes = function (name, header, cb) {
    if (opts.utimes === false) return cb()

    if (header.type === 'directory') return xfs.utimes(name, now, header.mtime, cb)
    if (header.type === 'symlink') return utimesParent(name, cb) // TODO: how to set mtime on link?

    xfs.utimes(name, now, header.mtime, function (err) {
      if (err) return cb(err)
      utimesParent(name, cb)
    })
  }

  var chperm = function (name, header, cb) {
    var link = header.type === 'symlink'

    /* eslint-disable node/no-deprecated-api */
    var chmod = link ? xfs.lchmod : xfs.chmod
    var chown = link ? xfs.lchown : xfs.chown
    /* eslint-enable node/no-deprecated-api */

    if (!chmod) return cb()

    var mode = (header.mode | (header.type === 'directory' ? dmode : fmode)) & umask

    if (chown && own) chown.call(xfs, name, header.uid, header.gid, onchown)
    else onchown(null)

    function onchown (err) {
      if (err) return cb(err)
      if (!chmod) return cb()
      chmod.call(xfs, name, mode, cb)
    }
  }

  extract.on('entry', function (header, stream, next) {
    header = map(header) || header
    header.name = normalize(header.name)
    var name = path.join(cwd, path.join('/', header.name))

    if (ignore(name, header)) {
      stream.resume()
      return next()
    }

    var stat = function (err) {
      if (err) return next(err)
      utimes(name, header, function (err) {
        if (err) return next(err)
        if (win32) return next()
        chperm(name, header, next)
      })
    }

    var onsymlink = function () {
      if (win32) return next() // skip symlinks on win for now before it can be tested
      xfs.unlink(name, function () {
        xfs.symlink(header.linkname, name, stat)
      })
    }

    var onlink = function () {
      if (win32) return next() // skip links on win for now before it can be tested
      xfs.unlink(name, function () {
        var srcpath = path.join(cwd, path.join('/', header.linkname))

        xfs.link(srcpath, name, function (err) {
          if (err && err.code === 'EPERM' && opts.hardlinkAsFilesFallback) {
            stream = xfs.createReadStream(srcpath)
            return onfile()
          }

          stat(err)
        })
      })
    }

    var onfile = function () {
      var ws = xfs.createWriteStream(name)
      var rs = mapStream(stream, header)

      ws.on('error', function (err) { // always forward errors on destroy
        rs.destroy(err)
      })

      pump(rs, ws, function (err) {
        if (err) return next(err)
        ws.on('close', stat)
      })
    }

    if (header.type === 'directory') {
      stack.push([name, header.mtime])
      return mkdirfix(name, {
        fs: xfs, own: own, uid: header.uid, gid: header.gid
      }, stat)
    }

    var dir = path.dirname(name)

    validate(xfs, dir, path.join(cwd, '.'), function (err, valid) {
      if (err) return next(err)
      if (!valid) return next(new Error(dir + ' is not a valid path'))

      mkdirfix(dir, {
        fs: xfs, own: own, uid: header.uid, gid: header.gid
      }, function (err) {
        if (err) return next(err)

        switch (header.type) {
          case 'file': return onfile()
          case 'link': return onlink()
          case 'symlink': return onsymlink()
        }

        if (strict) return next(new Error('unsupported type for ' + name + ' (' + header.type + ')'))

        stream.resume()
        next()
      })
    })
  })

  if (opts.finish) extract.on('finish', opts.finish)

  return extract
}

function validate (fs, name, root, cb) {
  if (name === root) return cb(null, true)
  fs.lstat(name, function (err, st) {
    if (err && err.code !== 'ENOENT') return cb(err)
    if (err || st.isDirectory()) return validate(fs, path.join(name, '..'), root, cb)
    cb(null, false)
  })
}

function mkdirfix (name, opts, cb) {
  mkdirp(name, { fs: opts.fs }, function (err, made) {
    if (!err && made && opts.own) {
      chownr(made, opts.uid, opts.gid, cb)
    } else {
      cb(err)
    }
  })
}

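Aside (not part of the vendored file): both `pack` and `extract` above also honor a `strip` option through the `strip` helper, dropping that many leading path components from each entry name before the user-supplied `map` runs, similar in spirit to `tar --strip-components`. A minimal sketch, with placeholder file and directory names, assuming the tarball's entries share one top-level directory:

``` js
var tar = require('tar-fs')
var fs = require('fs')

// drop the single top-level directory from every entry name while extracting
fs.createReadStream('my-tarball.tar').pipe(tar.extract('./my-directory', { strip: 1 }))
```
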
41  node_modules/tar-fs/package.json  generated  vendored  Normal file
@@ -0,0 +1,41 @@
{
  "name": "tar-fs",
  "version": "2.1.1",
  "description": "filesystem bindings for tar-stream",
  "dependencies": {
    "chownr": "^1.1.1",
    "mkdirp-classic": "^0.5.2",
    "pump": "^3.0.0",
    "tar-stream": "^2.1.4"
  },
  "keywords": [
    "tar",
    "fs",
    "file",
    "tarball",
    "directory",
    "stream"
  ],
  "devDependencies": {
    "rimraf": "^2.6.3",
    "standard": "^13.0.1",
    "tape": "^4.9.2"
  },
  "scripts": {
    "test": "standard && tape test/index.js"
  },
  "bugs": {
    "url": "https://github.com/mafintosh/tar-fs/issues"
  },
  "homepage": "https://github.com/mafintosh/tar-fs",
  "main": "index.js",
  "directories": {
    "test": "test"
  },
  "author": "Mathias Buus",
  "license": "MIT",
  "repository": {
    "type": "git",
    "url": "https://github.com/mafintosh/tar-fs.git"
  }
}

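Aside (not part of the vendored file): the `main` field above is what makes a bare require resolve to the index.js shown earlier, and `scripts.test` runs the tape suite that follows. A quick sketch:

``` js
// resolves to node_modules/tar-fs/index.js through the "main" field
var tar = require('tar-fs')
console.log(typeof tar.pack, typeof tar.extract) // 'function' 'function'
```
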
1  node_modules/tar-fs/test/fixtures/a/hello.txt  generated  vendored  Normal file
@@ -0,0 +1 @@
hello world

1  node_modules/tar-fs/test/fixtures/b/a/test.txt  generated  vendored  Normal file
@@ -0,0 +1 @@
test

0  node_modules/tar-fs/test/fixtures/d/file1  generated  vendored  Normal file
0  node_modules/tar-fs/test/fixtures/d/file2  generated  vendored  Normal file
0  node_modules/tar-fs/test/fixtures/d/sub-dir/file5  generated  vendored  Normal file
0  node_modules/tar-fs/test/fixtures/d/sub-files/file3  generated  vendored  Normal file
0  node_modules/tar-fs/test/fixtures/d/sub-files/file4  generated  vendored  Normal file
0  node_modules/tar-fs/test/fixtures/e/directory/.ignore  generated  vendored  Normal file
0  node_modules/tar-fs/test/fixtures/e/file  generated  vendored  Normal file

BIN  node_modules/tar-fs/test/fixtures/invalid.tar  generated  vendored  Normal file
Binary file not shown.

346  node_modules/tar-fs/test/index.js  generated  vendored  Normal file
@@ -0,0 +1,346 @@
var test = require('tape')
var rimraf = require('rimraf')
var tar = require('../index')
var tarStream = require('tar-stream')
var path = require('path')
var fs = require('fs')
var os = require('os')

var win32 = os.platform() === 'win32'

var mtime = function (st) {
  return Math.floor(st.mtime.getTime() / 1000)
}

test('copy a -> copy/a', function (t) {
  t.plan(5)

  var a = path.join(__dirname, 'fixtures', 'a')
  var b = path.join(__dirname, 'fixtures', 'copy', 'a')

  rimraf.sync(b)
  tar.pack(a)
    .pipe(tar.extract(b))
    .on('finish', function () {
      var files = fs.readdirSync(b)
      t.same(files.length, 1)
      t.same(files[0], 'hello.txt')
      var fileB = path.join(b, files[0])
      var fileA = path.join(a, files[0])
      t.same(fs.readFileSync(fileB, 'utf-8'), fs.readFileSync(fileA, 'utf-8'))
      t.same(fs.statSync(fileB).mode, fs.statSync(fileA).mode)
      t.same(mtime(fs.statSync(fileB)), mtime(fs.statSync(fileA)))
    })
})

test('copy b -> copy/b', function (t) {
  t.plan(8)

  var a = path.join(__dirname, 'fixtures', 'b')
  var b = path.join(__dirname, 'fixtures', 'copy', 'b')

  rimraf.sync(b)
  tar.pack(a)
    .pipe(tar.extract(b))
    .on('finish', function () {
      var files = fs.readdirSync(b)
      t.same(files.length, 1)
      t.same(files[0], 'a')
      var dirB = path.join(b, files[0])
      var dirA = path.join(a, files[0])
      t.same(fs.statSync(dirB).mode, fs.statSync(dirA).mode)
      t.same(mtime(fs.statSync(dirB)), mtime(fs.statSync(dirA)))
      t.ok(fs.statSync(dirB).isDirectory())
      var fileB = path.join(dirB, 'test.txt')
      var fileA = path.join(dirA, 'test.txt')
      t.same(fs.readFileSync(fileB, 'utf-8'), fs.readFileSync(fileA, 'utf-8'))
      t.same(fs.statSync(fileB).mode, fs.statSync(fileA).mode)
      t.same(mtime(fs.statSync(fileB)), mtime(fs.statSync(fileA)))
    })
})

test('symlink', function (t) {
  if (win32) { // no symlink support on win32 currently. TODO: test if this can be enabled somehow
    t.plan(1)
    t.ok(true)
    return
  }

  t.plan(5)

  var a = path.join(__dirname, 'fixtures', 'c')

  rimraf.sync(path.join(a, 'link'))
  fs.symlinkSync('.gitignore', path.join(a, 'link'))

  var b = path.join(__dirname, 'fixtures', 'copy', 'c')

  rimraf.sync(b)
  tar.pack(a)
    .pipe(tar.extract(b))
    .on('finish', function () {
      var files = fs.readdirSync(b).sort()
      t.same(files.length, 2)
      t.same(files[0], '.gitignore')
      t.same(files[1], 'link')

      var linkA = path.join(a, 'link')
      var linkB = path.join(b, 'link')

      t.same(mtime(fs.lstatSync(linkB)), mtime(fs.lstatSync(linkA)))
      t.same(fs.readlinkSync(linkB), fs.readlinkSync(linkA))
    })
})

test('follow symlinks', function (t) {
  if (win32) { // no symlink support on win32 currently. TODO: test if this can be enabled somehow
    t.plan(1)
    t.ok(true)
    return
  }

  t.plan(5)

  var a = path.join(__dirname, 'fixtures', 'c')

  rimraf.sync(path.join(a, 'link'))
  fs.symlinkSync('.gitignore', path.join(a, 'link'))

  var b = path.join(__dirname, 'fixtures', 'copy', 'c-dereference')

  rimraf.sync(b)
  tar.pack(a, { dereference: true })
    .pipe(tar.extract(b))
    .on('finish', function () {
      var files = fs.readdirSync(b).sort()
      t.same(files.length, 2)
      t.same(files[0], '.gitignore')
      t.same(files[1], 'link')

      var file1 = path.join(b, '.gitignore')
      var file2 = path.join(b, 'link')

      t.same(mtime(fs.lstatSync(file1)), mtime(fs.lstatSync(file2)))
      t.same(fs.readFileSync(file1), fs.readFileSync(file2))
    })
})

test('strip', function (t) {
  t.plan(2)

  var a = path.join(__dirname, 'fixtures', 'b')
  var b = path.join(__dirname, 'fixtures', 'copy', 'b-strip')

  rimraf.sync(b)

  tar.pack(a)
    .pipe(tar.extract(b, { strip: 1 }))
    .on('finish', function () {
      var files = fs.readdirSync(b).sort()
      t.same(files.length, 1)
      t.same(files[0], 'test.txt')
    })
})

test('strip + map', function (t) {
  t.plan(2)

  var a = path.join(__dirname, 'fixtures', 'b')
  var b = path.join(__dirname, 'fixtures', 'copy', 'b-strip')

  rimraf.sync(b)

  var uppercase = function (header) {
    header.name = header.name.toUpperCase()
    return header
  }

  tar.pack(a)
    .pipe(tar.extract(b, { strip: 1, map: uppercase }))
    .on('finish', function () {
      var files = fs.readdirSync(b).sort()
      t.same(files.length, 1)
      t.same(files[0], 'TEST.TXT')
    })
})

test('map + dir + permissions', function (t) {
  t.plan(win32 ? 1 : 2) // skip chmod test, it's not working like unix

  var a = path.join(__dirname, 'fixtures', 'b')
  var b = path.join(__dirname, 'fixtures', 'copy', 'a-perms')

  rimraf.sync(b)

  var aWithMode = function (header) {
    if (header.name === 'a') {
      header.mode = parseInt(700, 8)
    }
    return header
  }

  tar.pack(a)
    .pipe(tar.extract(b, { map: aWithMode }))
    .on('finish', function () {
      var files = fs.readdirSync(b).sort()
      var stat = fs.statSync(path.join(b, 'a'))
      t.same(files.length, 1)
      if (!win32) {
        t.same(stat.mode & parseInt(777, 8), parseInt(700, 8))
      }
    })
})

test('specific entries', function (t) {
  t.plan(6)

  var a = path.join(__dirname, 'fixtures', 'd')
  var b = path.join(__dirname, 'fixtures', 'copy', 'd-entries')

  var entries = ['file1', 'sub-files/file3', 'sub-dir']

  rimraf.sync(b)
  tar.pack(a, { entries: entries })
    .pipe(tar.extract(b))
    .on('finish', function () {
      var files = fs.readdirSync(b)
      t.same(files.length, 3)
      t.notSame(files.indexOf('file1'), -1)
      t.notSame(files.indexOf('sub-files'), -1)
      t.notSame(files.indexOf('sub-dir'), -1)
      var subFiles = fs.readdirSync(path.join(b, 'sub-files'))
      t.same(subFiles, ['file3'])
      var subDir = fs.readdirSync(path.join(b, 'sub-dir'))
      t.same(subDir, ['file5'])
    })
})

test('check type while mapping header on packing', function (t) {
  t.plan(3)

  var e = path.join(__dirname, 'fixtures', 'e')

  var checkHeaderType = function (header) {
    if (header.name.indexOf('.') === -1) t.same(header.type, header.name)
  }

  tar.pack(e, { map: checkHeaderType })
})

test('finish callbacks', function (t) {
  t.plan(3)

  var a = path.join(__dirname, 'fixtures', 'a')
  var b = path.join(__dirname, 'fixtures', 'copy', 'a')

  rimraf.sync(b)

  var packEntries = 0
  var extractEntries = 0

  var countPackEntry = function (header) { packEntries++ }
  var countExtractEntry = function (header) { extractEntries++ }

  var pack
  var onPackFinish = function (passedPack) {
    t.equal(packEntries, 2, 'All entries have been packed') // 2 entries - the file and base directory
    t.equal(passedPack, pack, 'The finish hook passes the pack')
  }

  var onExtractFinish = function () { t.equal(extractEntries, 2) }

  pack = tar.pack(a, { map: countPackEntry, finish: onPackFinish })

  pack.pipe(tar.extract(b, { map: countExtractEntry, finish: onExtractFinish }))
    .on('finish', function () {
      t.end()
    })
})

test('not finalizing the pack', function (t) {
  t.plan(2)

  var a = path.join(__dirname, 'fixtures', 'a')
  var b = path.join(__dirname, 'fixtures', 'b')

  var out = path.join(__dirname, 'fixtures', 'copy', 'merged-packs')

  rimraf.sync(out)

  var prefixer = function (prefix) {
    return function (header) {
      header.name = path.join(prefix, header.name)
      return header
    }
  }

  tar.pack(a, {
    map: prefixer('a-files'),
    finalize: false,
    finish: packB
  })

  function packB (pack) {
    tar.pack(b, { pack: pack, map: prefixer('b-files') })
      .pipe(tar.extract(out))
      .on('finish', assertResults)
  }

  function assertResults () {
    var containers = fs.readdirSync(out)
    t.deepEqual(containers, ['a-files', 'b-files'])
    var aFiles = fs.readdirSync(path.join(out, 'a-files'))
    t.deepEqual(aFiles, ['hello.txt'])
  }
})

test('do not extract invalid tar', function (t) {
  var a = path.join(__dirname, 'fixtures', 'invalid.tar')

  var out = path.join(__dirname, 'fixtures', 'invalid')

  rimraf.sync(out)

  fs.createReadStream(a)
    .pipe(tar.extract(out))
    .on('error', function (err) {
      t.ok(/is not a valid path/i.test(err.message))
      fs.stat(path.join(out, '../bar'), function (err) {
        t.ok(err)
        t.end()
      })
    })
})

test('no abs hardlink targets', function (t) {
  var out = path.join(__dirname, 'fixtures', 'invalid')
  var outside = path.join(__dirname, 'fixtures', 'outside')

  rimraf.sync(out)

  var s = tarStream.pack()

  fs.writeFileSync(outside, 'something')

  s.entry({
    type: 'link',
    name: 'link',
    linkname: outside
  })

  s.entry({
    name: 'link'
  }, 'overwrite')

  s.finalize()

  s.pipe(tar.extract(out))
    .on('error', function (err) {
      t.ok(err, 'had error')
      fs.readFile(outside, 'utf-8', function (err, str) {
        t.error(err, 'no error')
        t.same(str, 'something')
        t.end()
      })
    })
})

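Aside (not part of the vendored file): the hardlink handling exercised by the last test pairs with an opt-in `hardlinkAsFilesFallback` option in index.js, which copies the link target as a regular file when `fs.link` fails with EPERM. A minimal sketch, with placeholder names:

``` js
var tar = require('tar-fs')
var fs = require('fs')

fs.createReadStream('my-tarball.tar').pipe(tar.extract('./my-directory', {
  hardlinkAsFilesFallback: true // on EPERM, write the hardlink target out as a plain file copy
}))
```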