Merge remote-tracking branch 'ry/v0.10' into master

Conflicts:
	AUTHORS
	ChangeLog
	deps/uv/ChangeLog
	deps/uv/config-unix.mk
	deps/uv/src/unix/stream.c
	deps/uv/src/version.c
	deps/uv/uv.gyp
	src/node.cc
	src/node_buffer.cc
	src/node_crypto.cc
	src/node_version.h
	src/stream_wrap.cc
	src/stream_wrap.h
This commit is contained in:
isaacs 2013-05-17 13:04:02 -07:00
Parent 7998843807 f59ab10a64
Commit 61c9f78c63
32 changed files with 1395 additions and 908 deletions

View File

@ -451,4 +451,5 @@ Sam Roberts <vieuxtech@gmail.com>
Kevin Locke <kevin@kevinlocke.name>
Daniel Moore <polaris@northhorizon.net>
Robert Kowalski <rok@kowalski.gd>
Benoit Vallée <github@benoitvallee.net>
Nick Sullivan <nick@sullivanflock.com>

View File

@ -91,7 +91,8 @@ nicely even when it is indented.
The header line should be meaningful; it is what other people see when they
run `git shortlog` or `git log --oneline`.
Have a look at `git log` for inspiration.
Check the output of `git log --oneline files_that_you_changed` to find out
what subsystem (or subsystems) your changes touch.
### REBASE

View File

@ -60,6 +60,19 @@
* zlib: allow passing options to convenience methods (Kyle Robinson Young)
2013.05.14, Version 0.10.6 (Stable), 5deb1672f2b5794f8be19498a425ea4dc0b0711f
* module: Deprecate require.extensions (isaacs)
* stream: make Readable.wrap support objectMode, empty streams (Daniel Moore)
* child_process: fix handle delivery (Ben Noordhuis)
* crypto: Fix performance regression (isaacs)
* src: DRY string encoding/decoding (isaacs)
2013.04.23, Version 0.10.5 (Stable), deeaf8fab978e3cadb364e46fb32dafdebe5f095
* uv: Upgrade to 0.10.5 (isaacs)

View File

@ -16,8 +16,6 @@ function main(conf) {
api = 'legacy';
}
var dur = conf.dur;
var crypto = require('crypto');
var assert = require('assert');
var alice = crypto.getDiffieHellman('modp5');
@ -73,7 +71,7 @@ function streamWrite(alice, bob, message, encoding, writes) {
bob.on('end', function() {
// Gbits
var bits = written * 8;
var gbits = written / (1024 * 1024 * 1024);
var gbits = bits / (1024 * 1024 * 1024);
bench.end(gbits);
});

View File

@ -0,0 +1,86 @@
// throughput benchmark
// creates a new hasher for every write, so per-hash setup cost is included
var common = require('../common.js');
var crypto = require('crypto');
var bench = common.createBenchmark(main, {
writes: [500],
algo: [ 'sha256', 'md5' ],
type: ['asc', 'utf', 'buf'],
out: ['hex', 'binary', 'buffer'],
len: [2, 1024, 102400, 1024 * 1024],
api: ['legacy', 'stream']
});
function main(conf) {
var api = conf.api;
if (api === 'stream' && process.version.match(/^v0\.[0-8]\./)) {
console.error('Crypto streams not available until v0.10');
// use the legacy API, just so that we can compare them.
api = 'legacy';
}
var crypto = require('crypto');
var assert = require('assert');
var message;
var encoding;
switch (conf.type) {
case 'asc':
message = new Array(conf.len + 1).join('a');
encoding = 'ascii';
break;
case 'utf':
message = new Array(conf.len / 2 + 1).join('ü');
encoding = 'utf8';
break;
case 'buf':
message = new Buffer(conf.len);
message.fill('b');
break;
default:
throw new Error('unknown message type: ' + conf.type);
}
var fn = api === 'stream' ? streamWrite : legacyWrite;
bench.start();
fn(conf.algo, message, encoding, conf.writes, conf.len, conf.out);
}
function legacyWrite(algo, message, encoding, writes, len, outEnc) {
var written = writes * len;
var bits = written * 8;
var gbits = bits / (1024 * 1024 * 1024);
while (writes-- > 0) {
var h = crypto.createHash(algo);
h.update(message, encoding);
var res = h.digest(outEnc);
// include buffer creation costs for older versions
if (outEnc === 'buffer' && typeof res === 'string')
res = new Buffer(res, 'binary');
}
bench.end(gbits);
}
function streamWrite(algo, message, encoding, writes, len, outEnc) {
var written = writes * len;
var bits = written * 8;
var gbits = bits / (1024 * 1024 * 1024);
while (writes-- > 0) {
var h = crypto.createHash(algo);
if (outEnc !== 'buffer')
h.setEncoding(outEnc);
h.write(message, encoding);
h.end();
h.read();
}
bench.end(gbits);
}

View File

@ -0,0 +1,77 @@
// throughput benchmark
// creates a single hasher, then pushes a bunch of data through it
var common = require('../common.js');
var crypto = require('crypto');
var bench = common.createBenchmark(main, {
writes: [500],
algo: [ 'sha256', 'md5' ],
type: ['asc', 'utf', 'buf'],
len: [2, 1024, 102400, 1024 * 1024],
api: ['legacy', 'stream']
});
function main(conf) {
var api = conf.api;
if (api === 'stream' && process.version.match(/^v0\.[0-8]\./)) {
console.error('Crypto streams not available until v0.10');
// use the legacy API, just so that we can compare them.
api = 'legacy';
}
var crypto = require('crypto');
var assert = require('assert');
var message;
var encoding;
switch (conf.type) {
case 'asc':
message = new Array(conf.len + 1).join('a');
encoding = 'ascii';
break;
case 'utf':
message = new Array(conf.len / 2 + 1).join('ü');
encoding = 'utf8';
break;
case 'buf':
message = new Buffer(conf.len);
message.fill('b');
break;
default:
throw new Error('unknown message type: ' + conf.type);
}
var fn = api === 'stream' ? streamWrite : legacyWrite;
bench.start();
fn(conf.algo, message, encoding, conf.writes, conf.len);
}
function legacyWrite(algo, message, encoding, writes, len) {
var written = writes * len;
var bits = written * 8;
var gbits = bits / (1024 * 1024 * 1024);
var h = crypto.createHash(algo);
while (writes-- > 0)
h.update(message, encoding);
h.digest();
bench.end(gbits);
}
function streamWrite(algo, message, encoding, writes, len) {
var written = writes * len;
var bits = written * 8;
var gbits = bits / (1024 * 1024 * 1024);
var h = crypto.createHash(algo);
while (writes-- > 0)
h.write(message, encoding);
h.end();
h.read();
bench.end(gbits);
}

View File

@ -686,7 +686,7 @@ An example to read the last 10 bytes of a file which is 100 bytes long:
## Class: fs.ReadStream
`ReadStream` is a [Readable Stream](stream.html#stream_readable_stream).
`ReadStream` is a [Readable Stream](stream.html#stream_class_stream_readable).
### Event: 'open'
@ -710,9 +710,9 @@ some position past the beginning of the file. Modifying a file rather
than replacing it may require a `flags` mode of `r+` rather than the
default mode `w`.
## fs.WriteStream
## Class: fs.WriteStream
`WriteStream` is a [Writable Stream](stream.html#stream_writable_stream).
`WriteStream` is a [Writable Stream](stream.html#stream_class_stream_writable).
### Event: 'open'

View File

@ -133,9 +133,10 @@ See the [module system documentation][] for more information.
<!-- type=var -->
An object which is shared between all instances of the current module and
made accessible through `require()`.
`exports` is the same as the `module.exports` object.
A reference to the `module.exports` object which is shared between all
instances of the current module and made accessible through `require()`.
See [module system documentation][] for details on when to use `exports` and
when to use `module.exports`.
`exports` isn't actually a global but rather local to each module.
See the [module system documentation][] for more information.
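A minimal sketch of that distinction, using a hypothetical module file that is not part of this change:

```
// ok: augment the shared object that require() returns
exports.answer = 42;

// broken: this only rebinds the local `exports` variable;
// require() still returns the original module.exports object
exports = { answer: 42 };

// to replace the exported value wholesale, assign to module.exports
module.exports = { answer: 42 };
```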

View File

@ -30,6 +30,20 @@ The module `circle.js` has exported the functions `area()` and
`circumference()`. To export an object, add to the special `exports`
object.
Note that `exports` is a reference to `module.exports`, making it suitable
for augmentation only. If you are exporting a single item such as a
constructor, you will want to use `module.exports` directly instead.
function MyConstructor (opts) {
//...
}
// BROKEN: Does not modify exports
exports = MyConstructor;
// exports the constructor properly
module.exports = MyConstructor;
Variables
local to the module will be private. In this example the variable `PI` is
private to `circle.js`.
@ -73,7 +87,7 @@ Consider this situation:
When `main.js` loads `a.js`, then `a.js` in turn loads `b.js`. At that
point, `b.js` tries to load `a.js`. In order to prevent an infinite
loop an **unfinished copy** of the `a.js` exports object is returned to the
`b.js` module. `b.js` then finishes loading, and its exports object is
`b.js` module. `b.js` then finishes loading, and its `exports` object is
provided to the `a.js` module.
By the time `main.js` has loaded both modules, they're both finished.
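A rough sketch of that cycle, with hypothetical contents for the three files (the full document's example may differ):

```
// a.js
exports.done = false;
var b = require('./b.js');          // starts loading b.js
console.log('in a, b.done = %j', b.done);
exports.done = true;

// b.js
exports.done = false;
var a = require('./a.js');          // receives a's unfinished exports
console.log('in b, a.done = %j', a.done);
exports.done = true;

// main.js
var a = require('./a.js');
var b = require('./b.js');
console.log('in main, a.done = %j, b.done = %j', a.done, b.done);
```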
@ -219,14 +233,14 @@ would resolve to different files.
In each module, the `module` free variable is a reference to the object
representing the current module. In particular
`module.exports` is the same as the `exports` object.
`module.exports` is accessible via the `exports` module-global.
`module` isn't actually a global but rather local to each module.
### module.exports
* {Object}
The `exports` object is created by the Module system. Sometimes this is not
The `module.exports` object is created by the Module system. Sometimes this is not
acceptable; many want their module to be an instance of some class. To do this,
assign the desired export object to `module.exports`. For example, suppose we
were making a module called `a.js`
@ -267,13 +281,13 @@ y.js:
### module.require(id)
* `id` {String}
* Return: {Object} `exports` from the resolved module
* Return: {Object} `module.exports` from the resolved module
The `module.require` method provides a way to load a module as if
`require()` was called from the original module.
Note that in order to do this, you must get a reference to the `module`
object. Since `require()` returns the `exports`, and the `module` is
object. Since `require()` returns the `module.exports`, and the `module` is
typically *only* available within a specific module's code, it must be
explicitly exported in order to be used.
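As an illustration, here is one hedged way a module might export its own `module` object so callers can use `module.require()`; the file names are made up:

```
// loader.js: expose this module's own `module` object
module.exports = module;

// consumer.js: resolve a require as if it were called from loader.js
var loader = require('./loader.js');
var sibling = loader.require('./sibling.js');
```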

View File

@ -131,13 +131,15 @@ TLS, may ignore this argument, and simply provide data whenever it
becomes available. There is no need, for example, to "wait" until
`size` bytes are available before calling `stream.push(chunk)`.
### readable.push(chunk)
### readable.push(chunk, [encoding])
* `chunk` {Buffer | null | String} Chunk of data to push into the read queue
* `encoding` {String} Encoding of String chunks. Must be a valid
Buffer encoding, such as `'utf8'` or `'ascii'`
* return {Boolean} Whether or not more pushes should be performed
Note: **This function should be called by Readable implementors, NOT
by consumers of Readable subclasses.** The `_read()` function will not
by consumers of Readable streams.** The `_read()` function will not
be called again until at least one `push(chunk)` call is made. If no
data is available, then you MAY call `push('')` (an empty string) to
allow a future `_read` call, without adding any data to the queue.
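A minimal sketch of an implementor using the new `encoding` argument (the class name is invented for illustration):

```
var Readable = require('stream').Readable;
var util = require('util');

function StaticSource(text) {
  Readable.call(this);
  this._text = text;
}
util.inherits(StaticSource, Readable);

StaticSource.prototype._read = function(size) {
  if (this._text !== null) {
    // passing the encoding lets the stream skip an extra decode/encode cycle
    this.push(this._text, 'utf8');
    this._text = null;
  } else {
    this.push(null);  // signal end of data
  }
};

new StaticSource('hello\n').pipe(process.stdout);
```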

View File

@ -0,0 +1,65 @@
date: Tue May 14 14:32:31 PDT 2013
version: 0.10.6
category: release
title: Node v0.10.6 (Stable)
slug: node-v0-10-6-stable
2013.05.14, Version 0.10.6 (Stable)
* module: Deprecate require.extensions (isaacs)
* stream: make Readable.wrap support objectMode, empty streams (Daniel Moore)
* child_process: fix handle delivery (Ben Noordhuis)
* crypto: Fix performance regression (isaacs)
* src: DRY string encoding/decoding (isaacs)
Source Code: http://nodejs.org/dist/v0.10.6/node-v0.10.6.tar.gz
Macintosh Installer (Universal): http://nodejs.org/dist/v0.10.6/node-v0.10.6.pkg
Windows Installer: http://nodejs.org/dist/v0.10.6/node-v0.10.6-x86.msi
Windows x64 Installer: http://nodejs.org/dist/v0.10.6/x64/node-v0.10.6-x64.msi
Windows x64 Files: http://nodejs.org/dist/v0.10.6/x64/
Linux 32-bit Binary: http://nodejs.org/dist/v0.10.6/node-v0.10.6-linux-x86.tar.gz
Linux 64-bit Binary: http://nodejs.org/dist/v0.10.6/node-v0.10.6-linux-x64.tar.gz
Solaris 32-bit Binary: http://nodejs.org/dist/v0.10.6/node-v0.10.6-sunos-x86.tar.gz
Solaris 64-bit Binary: http://nodejs.org/dist/v0.10.6/node-v0.10.6-sunos-x64.tar.gz
Other release files: http://nodejs.org/dist/v0.10.6/
Website: http://nodejs.org/docs/v0.10.6/
Documentation: http://nodejs.org/docs/v0.10.6/api/
Shasums:
```
24982edc3b6aafd019273fa5e8a2031353314b56 node-v0.10.6-darwin-x64.tar.gz
e208c5dc83864a7f35f9df60ee35642bc7dd689c node-v0.10.6-darwin-x86.tar.gz
ab2ad473f5aa0f1c5adb50b9ea47fd05010bca2c node-v0.10.6-linux-x64.tar.gz
29cdac417449f088e6e6fa67d57c9205d8bff6c5 node-v0.10.6-linux-x86.tar.gz
66e3a9e53af6d8f27c690a77c329a2bd108965ac node-v0.10.6-sunos-x64.tar.gz
05bda089f4f702deddb8e593653676ede5f0e10b node-v0.10.6-sunos-x86.tar.gz
6ceb80be28c63f2c57e8479755d206400b205c46 node-v0.10.6-x86.msi
cd0acf9b332c30aba6a72979d3373e342fad6b95 node-v0.10.6.pkg
fa06101af8890eeaf997bd2620d7742b71a7223c node-v0.10.6.tar.gz
a2a2befa62b3cd2da9c2e51204df017e0f0c0cae node.exe
8401647a2f8fb3486fa08d7e603822ae12cf6dee node.exp
8ab923eb23584310874a4f63d71244cca5bfc0f8 node.lib
b9042fec324b202853f7f1d8b1d26ea49d944913 node.pdb
647a2ea899113e14d8f0894aa969ffd3a5d407c6 x64/node-v0.10.6-x64.msi
021048aa29fce5ce200b22896fd5f1b053f0d40c x64/node.exe
adbdc6112b3172c259a3fa9e07b7d456a0c65beb x64/node.exp
226af9033a41c96c68ade96cecedcf1289414424 x64/node.lib
cca201bfe38713ffde4a0cad70cb3a5325097257 x64/node.pdb
```

View File

@ -0,0 +1,87 @@
date: Mon May 13 15:53:06 PDT 2013
version: 0.11.2
category: release
title: Node v0.11.2 (Unstable)
slug: node-v0-11-2-unstable
2013.05.13, Version 0.11.2 (Unstable)
* uv: Upgrade to 0.11.2
* V8: Upgrade to 3.19.0
* npm: Upgrade to 1.2.21
* build: Makefile should respect configure --prefix (Timothy J Fontaine)
* cluster: use round-robin load balancing (Ben Noordhuis)
* debugger, cluster: each worker has new debug port (Miroslav Bajtoš)
* debugger: `restart` with custom debug port (Miroslav Bajtoš)
* debugger: breakpoints in scripts not loaded yet (Miroslav Bajtoš)
* event: EventEmitter#setMaxListeners() returns this (Sam Roberts)
* events: add EventEmitter.defaultMaxListeners (Ben Noordhuis)
* install: Support $(PREFIX) install target directory prefix (Olof Johansson)
* os: Include netmask in os.networkInterfaces() (Ben Kelly)
* path: add path.isAbsolute(path) (Ryan Doenges)
* stream: Guarantee ordering of 'finish' event (isaacs)
* streams: introduce .cork/.uncork/._writev (Fedor Indutny)
* vm: add support for timeout argument (Andrew Paprocki)
Source Code: http://nodejs.org/dist/v0.11.2/node-v0.11.2.tar.gz
Macintosh Installer (Universal): http://nodejs.org/dist/v0.11.2/node-v0.11.2.pkg
Windows Installer: http://nodejs.org/dist/v0.11.2/node-v0.11.2-x86.msi
Windows x64 Installer: http://nodejs.org/dist/v0.11.2/x64/node-v0.11.2-x64.msi
Windows x64 Files: http://nodejs.org/dist/v0.11.2/x64/
Linux 32-bit Binary: http://nodejs.org/dist/v0.11.2/node-v0.11.2-linux-x86.tar.gz
Linux 64-bit Binary: http://nodejs.org/dist/v0.11.2/node-v0.11.2-linux-x64.tar.gz
Solaris 32-bit Binary: http://nodejs.org/dist/v0.11.2/node-v0.11.2-sunos-x86.tar.gz
Solaris 64-bit Binary: http://nodejs.org/dist/v0.11.2/node-v0.11.2-sunos-x64.tar.gz
Other release files: http://nodejs.org/dist/v0.11.2/
Website: http://nodejs.org/docs/v0.11.2/
Documentation: http://nodejs.org/docs/v0.11.2/api/
Shasums:
```
ddc85fd6ed70057c64d7c9cd64bb94f28596d163 node-v0.11.2-darwin-x64.tar.gz
9893a3a3598d2e5ed24bfee8642b72c37808dbae node-v0.11.2-darwin-x86.tar.gz
f81189c30aa268f2b43572e1795fedd50f3495c3 node-v0.11.2-linux-x64.tar.gz
6322bf3be78f907a3b5e06f38af1b33c52957612 node-v0.11.2-linux-x86.tar.gz
3becca01532e104081ca51a265f07e77b6e9e25f node-v0.11.2-sunos-x64.tar.gz
9489238384edb456d9a603e5bef1128dfafe69b1 node-v0.11.2-sunos-x86.tar.gz
76421e22cff4d4f4d1cb2ce3e3566e2c9004cdee node-v0.11.2-x86.msi
a23d607f7b433197533cd6d88c981c75463efff8 node-v0.11.2.pkg
1d1080598431062ccb4bbbf7ecbb7596fe664c67 node-v0.11.2.tar.gz
b45a04167d32887c32a2479c4567af394627c8ad node.exe
c65ce6e073e173ae5769fe4dd9ff83f2f56ce05d node.exp
31f569697cb8447492e3172e614c3c4cfff81d09 node.lib
c98f8a717ef9d660ff3d45e86e2ee396ca02e721 node.pdb
7caabd3a774c96a8126f10d2e184727bd5160526 x64/node-v0.11.2-x64.msi
3b049227e3c392fdb88de9a5da7ad1ec14c82d17 x64/node.exe
c95f9746e180c064a5225ab83cca604bf918e59a x64/node.exp
78c94386c312ded2f7cb0c84951535b67e36fecf x64/node.lib
ad774b472a3cfa03374aac2d1dac19f9599ad2f8 x64/node.pdb
```

View File

@ -72,6 +72,11 @@ function ReadableState(options, stream) {
// make all the buffer merging and length checks go away
this.objectMode = !!options.objectMode;
// Crypto is kind of old and crusty. Historically, its default string
// encoding is 'binary' so we have to make this configurable.
// Everything else in the universe uses 'utf8', though.
this.defaultEncoding = options.defaultEncoding || 'utf8';
// when piping, we only care about 'readable' events that happen
// after read()ing all the bytes and not getting any pushback.
this.ranOut = false;
@ -83,10 +88,12 @@ function ReadableState(options, stream) {
this.readingMore = false;
this.decoder = null;
this.encoding = null;
if (options.encoding) {
if (!StringDecoder)
StringDecoder = require('string_decoder').StringDecoder;
this.decoder = new StringDecoder(options.encoding);
this.encoding = options.encoding;
}
}
@ -106,19 +113,27 @@ function Readable(options) {
// This returns true if the highWaterMark has not been hit yet,
// similar to how Writable.write() returns true if you should
// write() some more.
Readable.prototype.push = function(chunk) {
Readable.prototype.push = function(chunk, encoding) {
var state = this._readableState;
if (typeof chunk === 'string' && !state.objectMode)
chunk = new Buffer(chunk, arguments[1]);
return readableAddChunk(this, state, chunk, false);
if (typeof chunk === 'string' && !state.objectMode) {
encoding = encoding || state.defaultEncoding;
if (encoding !== state.encoding) {
chunk = new Buffer(chunk, encoding);
encoding = '';
}
}
return readableAddChunk(this, state, chunk, encoding, false);
};
// Unshift should *always* be something directly out of read()
Readable.prototype.unshift = function(chunk) {
var state = this._readableState;
return readableAddChunk(this, state, chunk, true);
return readableAddChunk(this, state, chunk, '', true);
};
function readableAddChunk(stream, state, chunk, addToFront) {
function readableAddChunk(stream, state, chunk, encoding, addToFront) {
var er = chunkInvalid(state, chunk);
if (er) {
stream.emit('error', er);
@ -134,7 +149,7 @@ function readableAddChunk(stream, state, chunk, addToFront) {
var e = new Error('stream.unshift() after end event');
stream.emit('error', e);
} else {
if (state.decoder && !addToFront)
if (state.decoder && !addToFront && !encoding)
chunk = state.decoder.write(chunk);
// update the buffer info.
@ -179,6 +194,7 @@ Readable.prototype.setEncoding = function(enc) {
if (!StringDecoder)
StringDecoder = require('string_decoder').StringDecoder;
this._readableState.decoder = new StringDecoder(enc);
this._readableState.encoding = enc;
};
// Don't raise the hwm > 128MB

View File

@ -135,9 +135,9 @@ function Transform(options) {
});
}
Transform.prototype.push = function(chunk) {
Transform.prototype.push = function(chunk, encoding) {
this._transformState.needTransform = false;
return Duplex.prototype.push.call(this, chunk);
return Duplex.prototype.push.call(this, chunk, encoding);
};
// This is the part where you do stuff!

View File

@ -68,6 +68,11 @@ function WritableState(options, stream) {
var noDecode = options.decodeStrings === false;
this.decodeStrings = !noDecode;
// Crypto is kind of old and crusty. Historically, its default string
// encoding is 'binary' so we have to make this configurable.
// Everything else in the universe uses 'utf8', though.
this.defaultEncoding = options.defaultEncoding || 'utf8';
// not an actual buffer we keep track of, but a measurement
// of how much we're waiting to get pushed to some underlying
// socket or file.
@ -171,8 +176,11 @@ Writable.prototype.write = function(chunk, encoding, cb) {
cb = encoding;
encoding = null;
}
if (!encoding)
encoding = 'utf8';
if (Buffer.isBuffer(chunk))
encoding = 'buffer';
else if (!encoding)
encoding = state.defaultEncoding;
if (typeof cb !== 'function')
cb = function() {};
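A small usage sketch of the `defaultEncoding` option this hunk wires up; the stream and its `_write` body are hypothetical:

```
var Writable = require('stream').Writable;

var ws = new Writable({
  decodeStrings: false,        // hand strings through to _write untouched
  defaultEncoding: 'binary'    // the option added by this change
});

ws._write = function(chunk, encoding, cb) {
  console.log('chunk is a %s, encoding = %s', typeof chunk, encoding);
  cb();
};

ws.write('abc');           // encoding falls back to 'binary', not 'utf8'
ws.write('abc', 'utf8');   // an explicit encoding still wins
```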

View File

@ -163,6 +163,8 @@ util.inherits(LazyTransform, stream.Transform);
Object.defineProperty(LazyTransform.prototype, prop, {
get: function() {
stream.Transform.call(this, this._options);
this._writableState.decodeStrings = false;
this._writableState.defaultEncoding = 'binary';
return this[prop];
},
set: function(val) {
@ -195,24 +197,23 @@ Hash.prototype._transform = function(chunk, encoding, callback) {
};
Hash.prototype._flush = function(callback) {
this.push(this._binding.digest());
var encoding = this._readableState.encoding || 'buffer';
this.push(this._binding.digest(encoding), encoding);
callback();
};
Hash.prototype.update = function(data, encoding) {
encoding = encoding || exports.DEFAULT_ENCODING;
data = toBuf(data, encoding);
this._binding.update(data);
if (encoding === 'buffer' && typeof data === 'string')
encoding = 'binary';
this._binding.update(data, encoding);
return this;
};
Hash.prototype.digest = function(outputEncoding) {
outputEncoding = outputEncoding || exports.DEFAULT_ENCODING;
var result = this._binding.digest();
if (outputEncoding && outputEncoding !== 'buffer')
result = result.toString(outputEncoding);
return result;
return this._binding.digest(outputEncoding);
};
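From the caller's side the API is unchanged; a quick hedged example of the digest path that now runs through the binding:

```
var crypto = require('crypto');

var h = crypto.createHash('sha256');
h.update('hello', 'utf8');
// the binding now performs the hex encoding itself
console.log(h.digest('hex'));
```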
@ -268,9 +269,8 @@ Cipher.prototype._flush = function(callback) {
Cipher.prototype.update = function(data, inputEncoding, outputEncoding) {
inputEncoding = inputEncoding || exports.DEFAULT_ENCODING;
outputEncoding = outputEncoding || exports.DEFAULT_ENCODING;
data = toBuf(data, inputEncoding);
var ret = this._binding.update(data);
var ret = this._binding.update(data, inputEncoding);
if (outputEncoding && outputEncoding !== 'buffer') {
this._decoder = getDecoder(this._decoder, outputEncoding);

View File

@ -628,16 +628,10 @@ Socket.prototype._writeGeneric = function(writev, data, encoding, cb) {
var chunks = new Array(data.length << 1);
for (var i = 0; i < data.length; i++) {
var entry = data[i];
var enc = entry.encoding;
var chunk = entry.chunk;
var code = getEncodingId(enc);
// Buffer encoding, translate argument to buffer
if (code === 0 && !Buffer.isBuffer(chunk))
chunk = new Buffer(chunk, enc);
var enc = entry.encoding;
chunks[i * 2] = chunk;
chunks[i * 2 + 1] = code;
chunks[i * 2 + 1] = enc;
}
var writeReq = this._handle.writev(chunks);

View File

@ -222,7 +222,7 @@ exports.setTimeout = function(callback, after) {
exports.clearTimeout = function(timer) {
if (timer && (timer.ontimeout || timer._onTimeout)) {
timer.ontimeout = timer._onTimeout = null;
if (timer instanceof Timer || timer instanceof Timeout) {
if (timer instanceof Timeout) {
timer.close(); // for after === 0
} else {
exports.unenroll(timer);
@ -232,39 +232,52 @@ exports.clearTimeout = function(timer) {
exports.setInterval = function(callback, repeat) {
var timer = new Timer();
if (process.domain) timer.domain = process.domain;
repeat *= 1; // coalesce to number or NaN
if (!(repeat >= 1 && repeat <= TIMEOUT_MAX)) {
repeat = 1; // schedule on next tick, follows browser behaviour
}
var timer = new Timeout(repeat);
var args = Array.prototype.slice.call(arguments, 2);
timer.ontimeout = function() {
callback.apply(timer, args);
}
timer._onTimeout = wrapper;
timer._repeat = true;
if (process.domain) timer.domain = process.domain;
exports.active(timer);
timer.start(repeat, repeat);
return timer;
function wrapper() {
callback.apply(this, args);
// If callback called clearInterval().
if (timer._repeat === false) return;
// If timer is unref'd (or was - it's permanently removed from the list.)
if (this._handle) {
this._handle.start(repeat, 0);
} else {
timer._idleTimeout = repeat;
exports.active(timer);
}
}
};
exports.clearInterval = function(timer) {
if (timer instanceof Timer) {
timer.ontimeout = null;
timer.close();
if (timer && timer._repeat) {
timer._repeat = false;
clearTimeout(timer);
}
};
var Timeout = function(after) {
this._idleTimeout = after;
this._idlePrev = this;
this._idleNext = this;
this._idleStart = null;
this._onTimeout = null;
this._repeat = false;
};
Timeout.prototype.unref = function() {
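A small usage sketch of the case the `_repeat` flag handles, namely `clearInterval()` called from inside the callback (the counter is purely illustrative):

```
var n = 0;
var t = setInterval(function() {
  n++;
  if (n === 3)
    clearInterval(t);   // sets t._repeat = false so the wrapper stops rescheduling
}, 10);
```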

View File

@ -103,6 +103,7 @@
'src/node_zlib.cc',
'src/pipe_wrap.cc',
'src/signal_wrap.cc',
'src/string_bytes.cc',
'src/stream_wrap.cc',
'src/slab_allocator.cc',
'src/tcp_wrap.cc',
@ -135,6 +136,7 @@
'src/udp_wrap.h',
'src/req_wrap.h',
'src/slab_allocator.h',
'src/string_bytes.h',
'src/stream_wrap.h',
'src/tree.h',
'src/v8_typed_array.h',

View File

@ -22,6 +22,7 @@
#include "node.h"
#include "req_wrap.h"
#include "handle_wrap.h"
#include "string_bytes.h"
#include "ares.h"
#include "uv.h"
@ -1138,30 +1139,9 @@ enum encoding ParseEncoding(Handle<Value> encoding_v, enum encoding _default) {
}
Local<Value> Encode(const void *buf, size_t len, enum encoding encoding) {
HandleScope scope(node_isolate);
if (encoding == BUFFER) {
return scope.Close(
Buffer::New(static_cast<const char*>(buf), len)->handle_);
}
if (!len) return scope.Close(String::Empty(node_isolate));
if (encoding == BINARY) {
const unsigned char *cbuf = static_cast<const unsigned char*>(buf);
uint16_t * twobytebuf = new uint16_t[len];
for (size_t i = 0; i < len; i++) {
// XXX is the following line platform independent?
twobytebuf[i] = cbuf[i];
}
Local<String> chunk = String::New(twobytebuf, len);
delete [] twobytebuf; // TODO use ExternalTwoByteString?
return scope.Close(chunk);
}
// utf8 or ascii encoding
Local<String> chunk = String::New((const char*)buf, len);
return scope.Close(chunk);
return StringBytes::Encode(static_cast<const char*>(buf),
len,
encoding);
}
// Returns -1 if the handle was not valid for decoding
@ -1175,17 +1155,7 @@ ssize_t DecodeBytes(v8::Handle<v8::Value> val, enum encoding encoding) {
return -1;
}
if ((encoding == BUFFER || encoding == BINARY) && Buffer::HasInstance(val)) {
return Buffer::Length(val->ToObject());
}
Local<String> str = val->ToString();
if (encoding == UTF8) return str->Utf8Length();
else if (encoding == UCS2) return str->Length() * 2;
else if (encoding == HEX) return str->Length() / 2;
return str->Length();
return StringBytes::Size(val, encoding);
}
#ifndef MIN
@ -1197,71 +1167,7 @@ ssize_t DecodeWrite(char *buf,
size_t buflen,
v8::Handle<v8::Value> val,
enum encoding encoding) {
HandleScope scope(node_isolate);
// XXX
// A lot of improvement can be made here. See:
// http://code.google.com/p/v8/issues/detail?id=270
// http://groups.google.com/group/v8-dev/browse_thread/thread/dba28a81d9215291/ece2b50a3b4022c
// http://groups.google.com/group/v8-users/browse_thread/thread/1f83b0ba1f0a611
if (val->IsArray()) {
fprintf(stderr, "'raw' encoding (array of integers) has been removed. "
"Use 'binary'.\n");
assert(0);
return -1;
}
bool is_buffer = Buffer::HasInstance(val);
if (is_buffer && (encoding == BINARY || encoding == BUFFER)) {
// fast path, copy buffer data
const char* data = Buffer::Data(val.As<Object>());
size_t size = Buffer::Length(val.As<Object>());
size_t len = size < buflen ? size : buflen;
memcpy(buf, data, len);
return len;
}
Local<String> str;
if (is_buffer) { // slow path, convert to binary string
Local<Value> arg = String::New("binary");
str = MakeCallback(val.As<Object>(), "toString", 1, &arg)->ToString();
}
else {
str = val->ToString();
}
if (encoding == UTF8) {
str->WriteUtf8(buf, buflen, NULL, String::HINT_MANY_WRITES_EXPECTED);
return buflen;
}
if (encoding == ASCII) {
str->WriteOneByte(reinterpret_cast<uint8_t*>(buf),
0,
buflen,
String::HINT_MANY_WRITES_EXPECTED);
return buflen;
}
// THIS IS AWFUL!!! FIXME
assert(encoding == BINARY);
uint16_t * twobytebuf = new uint16_t[buflen];
str->Write(twobytebuf, 0, buflen, String::HINT_MANY_WRITES_EXPECTED);
for (size_t i = 0; i < buflen; i++) {
unsigned char *b = reinterpret_cast<unsigned char*>(&twobytebuf[i]);
buf[i] = b[0];
}
delete [] twobytebuf;
return buflen;
return StringBytes::Write(buf, buflen, val, encoding, NULL);
}
void DisplayExceptionLine (TryCatch &try_catch) {

View File

@ -20,9 +20,11 @@
// USE OR OTHER DEALINGS IN THE SOFTWARE.
#include "node.h"
#include "node_buffer.h"
#include "node.h"
#include "string_bytes.h"
#include "v8.h"
#include "v8-profiler.h"
@ -65,49 +67,6 @@ static Persistent<Function> fast_buffer_constructor;
Persistent<FunctionTemplate> Buffer::constructor_template;
static inline size_t base64_decoded_size(const char *src, size_t size) {
const char *const end = src + size;
const int remainder = size % 4;
size = (size / 4) * 3;
if (remainder) {
if (size == 0 && remainder == 1) {
// special case: 1-byte input cannot be decoded
size = 0;
} else {
// non-padded input, add 1 or 2 extra bytes
size += 1 + (remainder == 3);
}
}
// check for trailing padding (1 or 2 bytes)
if (size > 0) {
if (end[-1] == '=') size--;
if (end[-2] == '=') size--;
}
return size;
}
static size_t ByteLength (Handle<String> string, enum encoding enc) {
HandleScope scope(node_isolate);
if (enc == UTF8) {
return string->Utf8Length();
} else if (enc == BASE64) {
String::Utf8Value v(string);
return base64_decoded_size(*v, v.length());
} else if (enc == UCS2) {
return string->Length() * 2;
} else if (enc == HEX) {
return string->Length() / 2;
} else {
return string->Length();
}
}
Handle<Object> Buffer::New(Handle<String> string) {
HandleScope scope(node_isolate);
@ -162,7 +121,7 @@ Buffer* Buffer::New(char *data, size_t length,
}
Handle<Value> Buffer::New(const Arguments &args) {
Handle<Value> Buffer::New(const Arguments& args) {
if (!args.IsConstructCall()) {
return FromConstructorTemplate(constructor_template, args);
}
@ -234,269 +193,46 @@ void Buffer::Replace(char *data, size_t length,
}
Handle<Value> Buffer::BinarySlice(const Arguments &args) {
template <encoding encoding>
Handle<Value> Buffer::StringSlice(const Arguments& args) {
HandleScope scope(node_isolate);
Buffer *parent = ObjectWrap::Unwrap<Buffer>(args.This());
SLICE_ARGS(args[0], args[1])
char *data = parent->data_ + start;
//Local<String> string = String::New(data, end - start);
Local<Value> b = Encode(data, end - start, BINARY);
return scope.Close(b);
}
static bool contains_non_ascii_slow(const char* buf, size_t len) {
for (size_t i = 0; i < len; ++i) {
if (buf[i] & 0x80) return true;
}
return false;
}
static bool contains_non_ascii(const char* src, size_t len) {
if (len < 16) {
return contains_non_ascii_slow(src, len);
}
const unsigned bytes_per_word = BITS_PER_LONG / CHAR_BIT;
const unsigned align_mask = bytes_per_word - 1;
const unsigned unaligned = reinterpret_cast<uintptr_t>(src) & align_mask;
if (unaligned > 0) {
const unsigned n = bytes_per_word - unaligned;
if (contains_non_ascii_slow(src, n)) return true;
src += n;
len -= n;
}
#if BITS_PER_LONG == 64
typedef uint64_t word;
const uint64_t mask = 0x8080808080808080ll;
#else
typedef uint32_t word;
const uint32_t mask = 0x80808080l;
#endif
const word* srcw = reinterpret_cast<const word*>(src);
for (size_t i = 0, n = len / bytes_per_word; i < n; ++i) {
if (srcw[i] & mask) return true;
}
const unsigned remainder = len & align_mask;
if (remainder > 0) {
const size_t offset = len - remainder;
if (contains_non_ascii_slow(src + offset, remainder)) return true;
}
return false;
}
static void force_ascii_slow(const char* src, char* dst, size_t len) {
for (size_t i = 0; i < len; ++i) {
dst[i] = src[i] & 0x7f;
}
}
static void force_ascii(const char* src, char* dst, size_t len) {
if (len < 16) {
force_ascii_slow(src, dst, len);
return;
}
const unsigned bytes_per_word = BITS_PER_LONG / CHAR_BIT;
const unsigned align_mask = bytes_per_word - 1;
const unsigned src_unalign = reinterpret_cast<uintptr_t>(src) & align_mask;
const unsigned dst_unalign = reinterpret_cast<uintptr_t>(dst) & align_mask;
if (src_unalign > 0) {
if (src_unalign == dst_unalign) {
const unsigned unalign = bytes_per_word - src_unalign;
force_ascii_slow(src, dst, unalign);
src += unalign;
dst += unalign;
len -= src_unalign;
} else {
force_ascii_slow(src, dst, len);
return;
}
}
#if BITS_PER_LONG == 64
typedef uint64_t word;
const uint64_t mask = ~0x8080808080808080ll;
#else
typedef uint32_t word;
const uint32_t mask = ~0x80808080l;
#endif
const word* srcw = reinterpret_cast<const word*>(src);
word* dstw = reinterpret_cast<word*>(dst);
for (size_t i = 0, n = len / bytes_per_word; i < n; ++i) {
dstw[i] = srcw[i] & mask;
}
const unsigned remainder = len & align_mask;
if (remainder > 0) {
const size_t offset = len - remainder;
force_ascii_slow(src + offset, dst + offset, remainder);
}
}
Handle<Value> Buffer::AsciiSlice(const Arguments &args) {
HandleScope scope(node_isolate);
Buffer *parent = ObjectWrap::Unwrap<Buffer>(args.This());
SLICE_ARGS(args[0], args[1])
char* data = parent->data_ + start;
size_t len = end - start;
if (contains_non_ascii(data, len)) {
char* out = new char[len];
force_ascii(data, out, len);
Local<String> rc = String::New(out, len);
delete[] out;
return scope.Close(rc);
}
return scope.Close(String::New(data, len));
}
Handle<Value> Buffer::Utf8Slice(const Arguments &args) {
HandleScope scope(node_isolate);
Buffer *parent = ObjectWrap::Unwrap<Buffer>(args.This());
SLICE_ARGS(args[0], args[1])
char *data = parent->data_ + start;
Local<String> string = String::New(data, end - start);
return scope.Close(string);
}
Handle<Value> Buffer::Ucs2Slice(const Arguments &args) {
HandleScope scope(node_isolate);
Buffer *parent = ObjectWrap::Unwrap<Buffer>(args.This());
SLICE_ARGS(args[0], args[1])
uint16_t *data = (uint16_t*)(parent->data_ + start);
Local<String> string = String::New(data, (end - start) / 2);
return scope.Close(string);
}
Handle<Value> Buffer::HexSlice(const Arguments &args) {
HandleScope scope(node_isolate);
Buffer* parent = ObjectWrap::Unwrap<Buffer>(args.This());
SLICE_ARGS(args[0], args[1])
char* src = parent->data_ + start;
uint32_t dstlen = (end - start) * 2;
if (dstlen == 0) return scope.Close(String::Empty(node_isolate));
char* dst = new char[dstlen];
for (uint32_t i = 0, k = 0; k < dstlen; i += 1, k += 2) {
static const char hex[] = "0123456789abcdef";
uint8_t val = static_cast<uint8_t>(src[i]);
dst[k + 0] = hex[val >> 4];
dst[k + 1] = hex[val & 15];
}
Local<String> string = String::New(dst, dstlen);
delete[] dst;
return scope.Close(string);
}
// supports regular and URL-safe base64
static const int unbase64_table[] =
{-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-2,-1,-1,-2,-1,-1
,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1
,-2,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,62,-1,62,-1,63
,52,53,54,55,56,57,58,59,60,61,-1,-1,-1,-1,-1,-1
,-1, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9,10,11,12,13,14
,15,16,17,18,19,20,21,22,23,24,25,-1,-1,-1,-1,63
,-1,26,27,28,29,30,31,32,33,34,35,36,37,38,39,40
,41,42,43,44,45,46,47,48,49,50,51,-1,-1,-1,-1,-1
,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1
,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1
,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1
,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1
,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1
,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1
,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1
,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1
};
#define unbase64(x) unbase64_table[(uint8_t)(x)]
Handle<Value> Buffer::Base64Slice(const Arguments &args) {
HandleScope scope(node_isolate);
Buffer *parent = ObjectWrap::Unwrap<Buffer>(args.This());
SLICE_ARGS(args[0], args[1])
unsigned slen = end - start;
const char* src = parent->data_ + start;
size_t slen = (end - start);
return scope.Close(StringBytes::Encode(src, slen, encoding));
}
unsigned dlen = (slen + 2 - ((slen + 2) % 3)) / 3 * 4;
char* dst = new char[dlen];
unsigned a;
unsigned b;
unsigned c;
unsigned i;
unsigned k;
unsigned n;
Handle<Value> Buffer::BinarySlice(const Arguments& args) {
return Buffer::StringSlice<BINARY>(args);
}
static const char table[] = "ABCDEFGHIJKLMNOPQRSTUVWXYZ"
"abcdefghijklmnopqrstuvwxyz"
"0123456789+/";
i = 0;
k = 0;
n = slen / 3 * 3;
Handle<Value> Buffer::AsciiSlice(const Arguments& args) {
return Buffer::StringSlice<ASCII>(args);
}
while (i < n) {
a = src[i + 0] & 0xff;
b = src[i + 1] & 0xff;
c = src[i + 2] & 0xff;
dst[k + 0] = table[a >> 2];
dst[k + 1] = table[((a & 3) << 4) | (b >> 4)];
dst[k + 2] = table[((b & 0x0f) << 2) | (c >> 6)];
dst[k + 3] = table[c & 0x3f];
Handle<Value> Buffer::Utf8Slice(const Arguments& args) {
return Buffer::StringSlice<UTF8>(args);
}
i += 3;
k += 4;
}
if (n != slen) {
switch (slen - n) {
case 1:
a = src[i + 0] & 0xff;
dst[k + 0] = table[a >> 2];
dst[k + 1] = table[(a & 3) << 4];
dst[k + 2] = '=';
dst[k + 3] = '=';
break;
Handle<Value> Buffer::Ucs2Slice(const Arguments& args) {
return Buffer::StringSlice<UCS2>(args);
}
case 2:
a = src[i + 0] & 0xff;
b = src[i + 1] & 0xff;
dst[k + 0] = table[a >> 2];
dst[k + 1] = table[((a & 3) << 4) | (b >> 4)];
dst[k + 2] = table[(b & 0x0f) << 2];
dst[k + 3] = '=';
break;
}
}
Local<String> string = String::New(dst, dlen);
delete [] dst;
return scope.Close(string);
Handle<Value> Buffer::HexSlice(const Arguments& args) {
return Buffer::StringSlice<HEX>(args);
}
Handle<Value> Buffer::Base64Slice(const Arguments& args) {
return Buffer::StringSlice<BASE64>(args);
}
@ -573,266 +309,74 @@ Handle<Value> Buffer::Copy(const Arguments &args) {
}
// var charsWritten = buffer.utf8Write(string, offset, [maxLength]);
Handle<Value> Buffer::Utf8Write(const Arguments &args) {
Handle<Value> Buffer::Base64Write(const Arguments& args) {
return Buffer::StringWrite<BASE64>(args);
}
Handle<Value> Buffer::BinaryWrite(const Arguments& args) {
return Buffer::StringWrite<BINARY>(args);
}
Handle<Value> Buffer::Utf8Write(const Arguments& args) {
return Buffer::StringWrite<UTF8>(args);
}
Handle<Value> Buffer::Ucs2Write(const Arguments& args) {
return Buffer::StringWrite<UCS2>(args);
}
Handle<Value> Buffer::HexWrite(const Arguments& args) {
return Buffer::StringWrite<HEX>(args);
}
Handle<Value> Buffer::AsciiWrite(const Arguments& args) {
return Buffer::StringWrite<ASCII>(args);
}
template <encoding encoding>
Handle<Value> Buffer::StringWrite(const Arguments& args) {
HandleScope scope(node_isolate);
Buffer *buffer = ObjectWrap::Unwrap<Buffer>(args.This());
Buffer* buffer = ObjectWrap::Unwrap<Buffer>(args.This());
if (!args[0]->IsString()) {
return ThrowException(Exception::TypeError(String::New(
"Argument must be a string")));
return ThrowTypeError("Argument must be a string");
}
Local<String> s = args[0]->ToString();
Local<String> str = args[0].As<String>();
size_t offset = args[1]->Uint32Value();
if (encoding == HEX && str->Length() % 2 != 0)
return ThrowTypeError("Invalid hex string");
int length = s->Length();
if (length == 0) {
return scope.Close(Integer::New(0, node_isolate));
}
if (length > 0 && offset >= buffer->length_) {
return ThrowTypeError("Offset is out of bounds");
}
size_t offset = args[1]->Int32Value();
size_t max_length = args[2]->IsUndefined() ? buffer->length_ - offset
: args[2]->Uint32Value();
max_length = MIN(buffer->length_ - offset, max_length);
char* p = buffer->data_ + offset;
int char_written;
int written = s->WriteUtf8(p,
max_length,
&char_written,
(String::HINT_MANY_WRITES_EXPECTED |
String::NO_NULL_TERMINATION));
return scope.Close(Integer::New(written, node_isolate));
}
// var charsWritten = buffer.ucs2Write(string, offset, [maxLength]);
Handle<Value> Buffer::Ucs2Write(const Arguments &args) {
HandleScope scope(node_isolate);
Buffer *buffer = ObjectWrap::Unwrap<Buffer>(args.This());
if (!args[0]->IsString()) {
return ThrowTypeError("Argument must be a string");
}
Local<String> s = args[0]->ToString();
size_t offset = args[1]->Uint32Value();
if (s->Length() > 0 && offset >= buffer->length_) {
return ThrowException(Exception::TypeError(String::New(
"Offset is out of bounds")));
}
size_t max_length = args[2]->IsUndefined() ? buffer->length_ - offset
: args[2]->Uint32Value();
max_length = MIN(buffer->length_ - offset, max_length) / 2;
uint16_t* p = (uint16_t*)(buffer->data_ + offset);
int written = s->Write(p,
0,
max_length,
(String::HINT_MANY_WRITES_EXPECTED |
String::NO_NULL_TERMINATION));
return scope.Close(Integer::New(written * 2, node_isolate));
}
inline unsigned hex2bin(char c) {
if (c >= '0' && c <= '9') return c - '0';
if (c >= 'A' && c <= 'F') return 10 + (c - 'A');
if (c >= 'a' && c <= 'f') return 10 + (c - 'a');
return static_cast<unsigned>(-1);
}
Handle<Value> Buffer::HexWrite(const Arguments& args) {
HandleScope scope(node_isolate);
Buffer* parent = ObjectWrap::Unwrap<Buffer>(args.This());
if (args[0]->IsString() == false) {
return ThrowTypeError("Argument must be a string");
}
Local<String> s = args[0].As<String>();
if (s->Length() % 2 != 0) {
return ThrowTypeError("Invalid hex string");
}
uint32_t start = args[1]->Uint32Value();
uint32_t size = args[2]->Uint32Value();
uint32_t end = start + size;
if (start >= parent->length_) {
Local<Integer> val = Integer::New(0, node_isolate);
if (max_length == 0) {
// shortcut: nothing to write anyway
Local<Integer> val = Integer::New(0);
return scope.Close(val);
}
if (end < start || end > parent->length_) { // Overflow + bounds check.
end = parent->length_;
size = parent->length_ - start;
}
if (encoding == UCS2)
max_length = max_length / 2;
if (size == 0) {
Local<Integer> val = Integer::New(0, node_isolate);
return scope.Close(val);
}
char* dst = parent->data_ + start;
String::AsciiValue string(s);
const char* src = *string;
uint32_t max = string.length() / 2;
if (max > size) {
max = size;
}
for (uint32_t i = 0; i < max; ++i) {
unsigned a = hex2bin(src[i * 2 + 0]);
unsigned b = hex2bin(src[i * 2 + 1]);
if (!~a || !~b) return ThrowTypeError("Invalid hex string");
dst[i] = a * 16 + b;
}
return scope.Close(Integer::New(max, node_isolate));
}
// var charsWritten = buffer.asciiWrite(string, offset);
Handle<Value> Buffer::AsciiWrite(const Arguments &args) {
HandleScope scope(node_isolate);
Buffer *buffer = ObjectWrap::Unwrap<Buffer>(args.This());
if (!args[0]->IsString()) {
return ThrowTypeError("Argument must be a string");
}
Local<String> s = args[0]->ToString();
size_t length = s->Length();
size_t offset = args[1]->Int32Value();
if (length > 0 && offset >= buffer->length_) {
if (offset >= buffer->length_) {
return ThrowTypeError("Offset is out of bounds");
}
size_t max_length = args[2]->IsUndefined() ? buffer->length_ - offset
: args[2]->Uint32Value();
max_length = MIN(length, MIN(buffer->length_ - offset, max_length));
char *p = buffer->data_ + offset;
int written = s->WriteOneByte(reinterpret_cast<uint8_t*>(p),
0,
max_length,
(String::HINT_MANY_WRITES_EXPECTED |
String::NO_NULL_TERMINATION));
return scope.Close(Integer::New(written, node_isolate));
}
// var bytesWritten = buffer.base64Write(string, offset, [maxLength]);
Handle<Value> Buffer::Base64Write(const Arguments &args) {
HandleScope scope(node_isolate);
Buffer *buffer = ObjectWrap::Unwrap<Buffer>(args.This());
if (!args[0]->IsString()) {
return ThrowTypeError("Argument must be a string");
}
String::AsciiValue s(args[0]);
size_t length = s.length();
size_t offset = args[1]->Int32Value();
size_t max_length = args[2]->IsUndefined() ? buffer->length_ - offset
: args[2]->Uint32Value();
max_length = MIN(length, MIN(buffer->length_ - offset, max_length));
if (max_length && offset >= buffer->length_) {
return ThrowTypeError("Offset is out of bounds");
}
char a, b, c, d;
char* start = buffer->data_ + offset;
char* dst = start;
char* const dstEnd = dst + max_length;
const char* src = *s;
const char* const srcEnd = src + s.length();
while (src < srcEnd && dst < dstEnd) {
int remaining = srcEnd - src;
while (unbase64(*src) < 0 && src < srcEnd) src++, remaining--;
if (remaining == 0 || *src == '=') break;
a = unbase64(*src++);
while (unbase64(*src) < 0 && src < srcEnd) src++, remaining--;
if (remaining <= 1 || *src == '=') break;
b = unbase64(*src++);
*dst++ = (a << 2) | ((b & 0x30) >> 4);
if (dst == dstEnd) break;
while (unbase64(*src) < 0 && src < srcEnd) src++, remaining--;
if (remaining <= 2 || *src == '=') break;
c = unbase64(*src++);
*dst++ = ((b & 0x0F) << 4) | ((c & 0x3C) >> 2);
if (dst == dstEnd) break;
while (unbase64(*src) < 0 && src < srcEnd) src++, remaining--;
if (remaining <= 3 || *src == '=') break;
d = unbase64(*src++);
*dst++ = ((c & 0x03) << 6) | (d & 0x3F);
}
return scope.Close(Integer::New(dst - start, node_isolate));
}
Handle<Value> Buffer::BinaryWrite(const Arguments &args) {
HandleScope scope(node_isolate);
Buffer *buffer = ObjectWrap::Unwrap<Buffer>(args.This());
if (!args[0]->IsString()) {
return ThrowTypeError("Argument must be a string");
}
Local<String> s = args[0]->ToString();
size_t length = s->Length();
size_t offset = args[1]->Int32Value();
if (s->Length() > 0 && offset >= buffer->length_) {
return ThrowTypeError("Offset is out of bounds");
}
char *p = (char*)buffer->data_ + offset;
size_t max_length = args[2]->IsUndefined() ? buffer->length_ - offset
: args[2]->Uint32Value();
max_length = MIN(length, MIN(buffer->length_ - offset, max_length));
int written = DecodeWrite(p, max_length, s, BINARY);
size_t written = StringBytes::Write(start,
max_length,
str,
encoding,
NULL);
return scope.Close(Integer::New(written, node_isolate));
}
static bool is_big_endian() {
const union { uint8_t u8[2]; uint16_t u16; } u = {{0, 1}};
return u.u16 == 1 ? true : false;
@ -961,7 +505,7 @@ Handle<Value> Buffer::ByteLength(const Arguments &args) {
Local<String> s = args[0]->ToString();
enum encoding e = ParseEncoding(args[1], UTF8);
return scope.Close(Integer::New(node::ByteLength(s, e), node_isolate));
return scope.Close(Integer::New(StringBytes::Size(s, e), node_isolate));
}
@ -1081,17 +625,6 @@ RetainedObjectInfo* WrapperInfo(uint16_t class_id, Handle<Value> wrapper) {
void Buffer::Initialize(Handle<Object> target) {
HandleScope scope(node_isolate);
// sanity checks
assert(unbase64('/') == 63);
assert(unbase64('+') == 62);
assert(unbase64('T') == 19);
assert(unbase64('Z') == 25);
assert(unbase64('t') == 45);
assert(unbase64('z') == 51);
assert(unbase64(' ') == -2);
assert(unbase64('\n') == -2);
assert(unbase64('\r') == -2);
length_symbol = NODE_PSYMBOL("length");
Local<FunctionTemplate> t = FunctionTemplate::New(Buffer::New);
@ -1099,15 +632,13 @@ void Buffer::Initialize(Handle<Object> target) {
constructor_template->InstanceTemplate()->SetInternalFieldCount(1);
constructor_template->SetClassName(String::NewSymbol("SlowBuffer"));
// copy free
NODE_SET_PROTOTYPE_METHOD(constructor_template, "binarySlice", Buffer::BinarySlice);
NODE_SET_PROTOTYPE_METHOD(constructor_template, "asciiSlice", Buffer::AsciiSlice);
NODE_SET_PROTOTYPE_METHOD(constructor_template, "base64Slice", Buffer::Base64Slice);
NODE_SET_PROTOTYPE_METHOD(constructor_template, "ucs2Slice", Buffer::Ucs2Slice);
NODE_SET_PROTOTYPE_METHOD(constructor_template, "hexSlice", Buffer::HexSlice);
// TODO NODE_SET_PROTOTYPE_METHOD(t, "utf16Slice", Utf16Slice);
// copy
NODE_SET_PROTOTYPE_METHOD(constructor_template, "utf8Slice", Buffer::Utf8Slice);
// TODO NODE_SET_PROTOTYPE_METHOD(t, "utf16Slice", Utf16Slice);
NODE_SET_PROTOTYPE_METHOD(constructor_template, "utf8Write", Buffer::Utf8Write);
NODE_SET_PROTOTYPE_METHOD(constructor_template, "asciiWrite", Buffer::AsciiWrite);

View File

@ -113,12 +113,18 @@ class NODE_EXTERN Buffer: public ObjectWrap {
private:
static v8::Handle<v8::Value> New(const v8::Arguments &args);
template <encoding encoding>
static v8::Handle<v8::Value> StringSlice(const v8::Arguments &args);
static v8::Handle<v8::Value> BinarySlice(const v8::Arguments &args);
static v8::Handle<v8::Value> AsciiSlice(const v8::Arguments &args);
static v8::Handle<v8::Value> Base64Slice(const v8::Arguments &args);
static v8::Handle<v8::Value> Utf8Slice(const v8::Arguments &args);
static v8::Handle<v8::Value> Ucs2Slice(const v8::Arguments &args);
static v8::Handle<v8::Value> HexSlice(const v8::Arguments &args);
template <encoding encoding>
static v8::Handle<v8::Value> StringWrite(const v8::Arguments &args);
static v8::Handle<v8::Value> BinaryWrite(const v8::Arguments &args);
static v8::Handle<v8::Value> Base64Write(const v8::Arguments &args);
static v8::Handle<v8::Value> AsciiWrite(const v8::Arguments &args);

View File

@ -26,6 +26,7 @@
#include "node.h"
#include "node_buffer.h"
#include "string_bytes.h"
#include "node_root_certs.h"
#include <string.h>
@ -42,6 +43,12 @@
# define OPENSSL_CONST
#endif
#define ASSERT_IS_STRING_OR_BUFFER(val) \
if (!Buffer::HasInstance(val) && !val->IsString()) { \
return ThrowException(Exception::TypeError(String::New( \
"Not a string or buffer"))); \
}
#define ASSERT_IS_BUFFER(val) \
if (!Buffer::HasInstance(val)) { \
return ThrowException(Exception::TypeError(String::New("Not a buffer"))); \
@ -1590,7 +1597,7 @@ Handle<Value> Connection::GetPeerCertificate(const Arguments& args) {
const char hex[] = "0123456789ABCDEF";
char fingerprint[EVP_MAX_MD_SIZE * 3];
for (i=0; i<md_size; i++) {
for (i = 0; i<md_size; i++) {
fingerprint[3*i] = hex[(md[i] & 0xf0) >> 4];
fingerprint[(3*i)+1] = hex[(md[i] & 0x0f)];
fingerprint[(3*i)+2] = ':';
@ -2073,7 +2080,6 @@ Handle<Value> CipherBase::New(const Arguments& args) {
return args.This();
}
Handle<Value> CipherBase::Init(char* cipher_type,
char* key_buf,
int key_buf_len) {
@ -2229,15 +2235,25 @@ Handle<Value> CipherBase::Update(const Arguments& args) {
CipherBase* cipher = ObjectWrap::Unwrap<CipherBase>(args.This());
ASSERT_IS_BUFFER(args[0]);
ASSERT_IS_STRING_OR_BUFFER(args[0]);
unsigned char* out = NULL;
bool r;
int out_len = 0;
char* buffer_data = Buffer::Data(args[0]);
size_t buffer_length = Buffer::Length(args[0]);
r = cipher->Update(buffer_data, buffer_length, &out, &out_len);
// Only copy the data if we have to, because it's a string
if (args[0]->IsString()) {
enum encoding encoding = ParseEncoding(args[1], BINARY);
size_t buflen = StringBytes::StorageSize(args[0], encoding);
char* buf = new char[buflen];
size_t written = StringBytes::Write(buf, buflen, args[0], encoding);
r = cipher->Update(buf, written, &out, &out_len);
delete[] buf;
} else {
char* buf = Buffer::Data(args[0]);
size_t buflen = Buffer::Length(args[0]);
r = cipher->Update(buf, buflen, &out, &out_len);
}
if (!r) {
delete[] out;
@ -2246,6 +2262,8 @@ Handle<Value> CipherBase::Update(const Arguments& args) {
Buffer* buf = Buffer::New(reinterpret_cast<char*>(out), out_len);
if (out) delete[] out;
return scope.Close(buf->handle_);
}
@ -2356,6 +2374,7 @@ Handle<Value> Hmac::HmacInit(const Arguments& args) {
return ThrowError("Must give hashtype string, key as arguments");
}
ASSERT_IS_BUFFER(args[1]);
String::Utf8Value hashType(args[0]);
@ -2386,14 +2405,22 @@ Handle<Value> Hmac::HmacUpdate(const Arguments& args) {
Hmac* hmac = ObjectWrap::Unwrap<Hmac>(args.This());
ASSERT_IS_BUFFER(args[0]);
ASSERT_IS_STRING_OR_BUFFER(args[0]);
// Only copy the data if we have to, because it's a string
bool r;
char* buffer_data = Buffer::Data(args[0]);
size_t buffer_length = Buffer::Length(args[0]);
r = hmac->HmacUpdate(buffer_data, buffer_length);
if (args[0]->IsString()) {
enum encoding encoding = ParseEncoding(args[1], BINARY);
size_t buflen = StringBytes::StorageSize(args[0], encoding);
char* buf = new char[buflen];
size_t written = StringBytes::Write(buf, buflen, args[0], encoding);
r = hmac->HmacUpdate(buf, written);
delete[] buf;
} else {
char* buf = Buffer::Data(args[0]);
size_t buflen = Buffer::Length(args[0]);
r = hmac->HmacUpdate(buf, buflen);
}
if (!r) {
return ThrowTypeError("HmacUpdate fail");
@ -2418,6 +2445,11 @@ Handle<Value> Hmac::HmacDigest(const Arguments& args) {
Hmac* hmac = ObjectWrap::Unwrap<Hmac>(args.This());
enum encoding encoding = BUFFER;
if (args.Length() >= 1) {
encoding = ParseEncoding(args[0]->ToString(), BUFFER);
}
unsigned char* md_value = NULL;
unsigned int md_len = 0;
Local<Value> outString;
@ -2428,9 +2460,11 @@ Handle<Value> Hmac::HmacDigest(const Arguments& args) {
md_len = 0;
}
Buffer* buf = Buffer::New(reinterpret_cast<char*>(md_value), md_len);
outString = StringBytes::Encode(
reinterpret_cast<const char*>(md_value), md_len, encoding);
return scope.Close(buf->handle_);
delete[] md_value;
return scope.Close(outString);
}
@ -2491,13 +2525,22 @@ Handle<Value> Hash::HashUpdate(const Arguments& args) {
Hash* hash = ObjectWrap::Unwrap<Hash>(args.This());
ASSERT_IS_BUFFER(args[0]);
ASSERT_IS_STRING_OR_BUFFER(args[0]);
// Only copy the data if we have to, because it's a string
bool r;
char* buffer_data = Buffer::Data(args[0]);
size_t buffer_length = Buffer::Length(args[0]);
r = hash->HashUpdate(buffer_data, buffer_length);
if (args[0]->IsString()) {
enum encoding encoding = ParseEncoding(args[1], BINARY);
size_t buflen = StringBytes::StorageSize(args[0], encoding);
char* buf = new char[buflen];
size_t written = StringBytes::Write(buf, buflen, args[0], encoding);
r = hash->HashUpdate(buf, written);
delete[] buf;
} else {
char* buf = Buffer::Data(args[0]);
size_t buflen = Buffer::Length(args[0]);
r = hash->HashUpdate(buf, buflen);
}
if (!r) {
return ThrowTypeError("HashUpdate fail");
@ -2516,6 +2559,11 @@ Handle<Value> Hash::HashDigest(const Arguments& args) {
return ThrowError("Not initialized");
}
enum encoding encoding = BUFFER;
if (args.Length() >= 1) {
encoding = ParseEncoding(args[0]->ToString(), BUFFER);
}
unsigned char md_value[EVP_MAX_MD_SIZE];
unsigned int md_len;
@ -2523,9 +2571,8 @@ Handle<Value> Hash::HashDigest(const Arguments& args) {
EVP_MD_CTX_cleanup(&hash->mdctx_);
hash->initialised_ = false;
Buffer* buf = Buffer::New(reinterpret_cast<char*>(md_value), md_len);
return scope.Close(buf->handle_);
return scope.Close(StringBytes::Encode(
reinterpret_cast<const char*>(md_value), md_len, encoding));
}
@ -2553,7 +2600,6 @@ Handle<Value> Sign::New(const Arguments& args) {
return args.This();
}
Handle<Value> Sign::SignInit(const char* sign_type) {
HandleScope scope(node_isolate);
@ -2603,14 +2649,22 @@ Handle<Value> Sign::SignUpdate(const Arguments& args) {
Sign* sign = ObjectWrap::Unwrap<Sign>(args.This());
ASSERT_IS_BUFFER(args[0]);
ASSERT_IS_STRING_OR_BUFFER(args[0]);
bool r;
char* buffer_data = Buffer::Data(args[0]);
size_t buffer_length = Buffer::Length(args[0]);
r = sign->SignUpdate(buffer_data, buffer_length);
// Only copy the data if we have to, because it's a string
int r;
if (args[0]->IsString()) {
enum encoding encoding = ParseEncoding(args[1], BINARY);
size_t buflen = StringBytes::StorageSize(args[0], encoding);
char* buf = new char[buflen];
size_t written = StringBytes::Write(buf, buflen, args[0], encoding);
r = sign->SignUpdate(buf, written);
delete[] buf;
} else {
char* buf = Buffer::Data(args[0]);
size_t buflen = Buffer::Length(args[0]);
r = sign->SignUpdate(buf, buflen);
}
if (!r) {
return ThrowTypeError("SignUpdate fail");
@ -2652,6 +2706,11 @@ Handle<Value> Sign::SignFinal(const Arguments& args) {
unsigned int md_len;
Local<Value> outString;
enum encoding encoding = BUFFER;
if (args.Length() >= 2) {
encoding = ParseEncoding(args[1]->ToString(), BUFFER);
}
ASSERT_IS_BUFFER(args[0]);
ssize_t len = Buffer::Length(args[0]);
char* buf = Buffer::Data(args[0]);
@ -2666,10 +2725,11 @@ Handle<Value> Sign::SignFinal(const Arguments& args) {
md_len = 0;
}
Buffer* ret = Buffer::New(reinterpret_cast<char*>(md_value), md_len);
delete[] md_value;
outString = StringBytes::Encode(
reinterpret_cast<const char*>(md_value), md_len, encoding);
return scope.Close(ret->handle_);
delete[] md_value;
return scope.Close(outString);
}
@ -2749,14 +2809,22 @@ Handle<Value> Verify::VerifyUpdate(const Arguments& args) {
Verify* verify = ObjectWrap::Unwrap<Verify>(args.This());
ASSERT_IS_BUFFER(args[0]);
ASSERT_IS_STRING_OR_BUFFER(args[0]);
// Only copy the data if we have to, because it's a string
bool r;
char* buffer_data = Buffer::Data(args[0]);
size_t buffer_length = Buffer::Length(args[0]);
r = verify->VerifyUpdate(buffer_data, buffer_length);
if (args[0]->IsString()) {
enum encoding encoding = ParseEncoding(args[1], BINARY);
size_t buflen = StringBytes::StorageSize(args[0], encoding);
char* buf = new char[buflen];
size_t written = StringBytes::Write(buf, buflen, args[0], encoding);
r = verify->VerifyUpdate(buf, written);
delete[] buf;
} else {
char* buf = Buffer::Data(args[0]);
size_t buflen = Buffer::Length(args[0]);
r = verify->VerifyUpdate(buf, buflen);
}
if (!r) {
return ThrowTypeError("VerifyUpdate fail");
@ -2848,11 +2916,31 @@ Handle<Value> Verify::VerifyFinal(const Arguments& args) {
char* kbuf = Buffer::Data(args[0]);
ssize_t klen = Buffer::Length(args[0]);
ASSERT_IS_BUFFER(args[1]);
unsigned char* hbuf = reinterpret_cast<unsigned char*>(Buffer::Data(args[1]));
ssize_t hlen = Buffer::Length(args[1]);
ASSERT_IS_STRING_OR_BUFFER(args[1]);
// BINARY works for both buffers and binary strings.
enum encoding encoding = BINARY;
if (args.Length() >= 3) {
encoding = ParseEncoding(args[2]->ToString(), BINARY);
}
return scope.Close(verify->VerifyFinal(kbuf, klen, hbuf, hlen));
ssize_t hlen = StringBytes::Size(args[1], encoding);
// only copy if we need to, because it's a string.
unsigned char* hbuf;
if (args[1]->IsString()) {
hbuf = new unsigned char[hlen];
ssize_t hwritten = StringBytes::Write(
reinterpret_cast<char*>(hbuf), hlen, args[1], encoding);
assert(hwritten == hlen);
} else {
hbuf = reinterpret_cast<unsigned char*>(Buffer::Data(args[1]));
}
Local<Value> retval = Local<Value>::New(verify->VerifyFinal(kbuf, klen, hbuf, hlen));
if (args[1]->IsString()) {
delete[] hbuf;
}
return scope.Close(retval);
}


@ -295,92 +295,6 @@ size_t StreamWrap::WriteBuffer(Handle<Value> val, uv_buf_t* buf) {
}
template <WriteEncoding encoding>
size_t StreamWrap::WriteStringImpl(char* storage,
size_t storage_size,
Handle<Value> val,
uv_buf_t* buf) {
assert(val->IsString());
Handle<String> string = val.As<String>();
size_t data_size;
switch (encoding) {
case kAscii:
data_size = string->WriteOneByte(
reinterpret_cast<uint8_t*>(storage),
0,
-1,
String::NO_NULL_TERMINATION | String::HINT_MANY_WRITES_EXPECTED);
break;
case kUtf8:
data_size = string->WriteUtf8(
storage,
-1,
NULL,
String::NO_NULL_TERMINATION | String::HINT_MANY_WRITES_EXPECTED);
break;
case kUcs2: {
int chars_copied = string->Write(
reinterpret_cast<uint16_t*>(storage),
0,
-1,
String::NO_NULL_TERMINATION | String::HINT_MANY_WRITES_EXPECTED);
data_size = chars_copied * sizeof(uint16_t);
break;
}
default:
// Unreachable
assert(0);
}
assert(data_size <= storage_size);
buf->base = storage;
buf->len = data_size;
return data_size;
}
template <WriteEncoding encoding>
size_t StreamWrap::GetStringSizeImpl(Handle<Value> val) {
assert(val->IsString());
Handle<String> string = val.As<String>();
switch (encoding) {
case kAscii:
return string->Length();
break;
case kUtf8:
if (string->Length() < 65536) {
// A single UCS2 codepoint never takes up more than 3 utf8 bytes.
// Unless the string is really long we just allocate so much space that
// we're certain the string fits in there entirely.
// TODO: maybe check handle->write_queue_size instead of string length?
return 3 * string->Length();
} else {
// The string is really long. Compute the allocation size that we
// actually need.
return string->Utf8Length();
}
break;
case kUcs2:
return string->Length() * sizeof(uint16_t);
break;
default:
// Unreachable.
assert(0);
}
return 0;
}
Handle<Value> StreamWrap::WriteBuffer(const Arguments& args) {
HandleScope scope(node_isolate);
@ -426,7 +340,7 @@ Handle<Value> StreamWrap::WriteBuffer(const Arguments& args) {
}
template <WriteEncoding encoding>
template <enum encoding encoding>
Handle<Value> StreamWrap::WriteStringImpl(const Arguments& args) {
HandleScope scope(node_isolate);
int r;
@ -439,7 +353,13 @@ Handle<Value> StreamWrap::WriteStringImpl(const Arguments& args) {
Local<String> string = args[0]->ToString();
// Compute the size of the storage that the string will be flattened into.
size_t storage_size = GetStringSizeImpl<encoding>(string);
// For UTF8 strings that are very long, go ahead and take the hit for
// computing their actual size, rather than tripling the storage.
size_t storage_size;
if (encoding == UTF8 && string->Length() > 65535)
storage_size = StringBytes::Size(string, encoding);
else
storage_size = StringBytes::StorageSize(string, encoding);
if (storage_size > INT_MAX) {
uv_err_t err;
@ -454,9 +374,15 @@ Handle<Value> StreamWrap::WriteStringImpl(const Arguments& args) {
char* data = reinterpret_cast<char*>(ROUND_UP(
reinterpret_cast<uintptr_t>(storage) + sizeof(WriteWrap), 16));
size_t data_size;
data_size = StringBytes::Write(data, storage_size, string, encoding);
assert(data_size <= storage_size);
uv_buf_t buf;
size_t data_size =
WriteStringImpl<encoding>(data, storage_size, string, &buf);
buf.base = data;
buf.len = data_size;
bool ipc_pipe = wrap->stream_->type == UV_NAMED_PIPE &&
((uv_pipe_t*)wrap->stream_)->ipc;
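A standalone sketch, not part of this diff, of the arithmetic behind that cut-off: for an ASCII-only string, the 3-bytes-per-character StorageSize() bound allocates three times what Utf8Length() would report, which is harmless for small chunks but wasteful for big ones.
// Standalone sketch (hypothetical numbers, not node code).
#include <cstddef>
#include <iostream>

int main() {
  const size_t chars = 100000;            // an ASCII-only string of 100k characters
  const size_t upper_bound = 3 * chars;   // what StorageSize() would return
  const size_t exact = chars;             // what Size()/Utf8Length() would return
  std::cout << "bound " << upper_bound << " bytes vs exact " << exact << " bytes\n";
  return 0;
}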
@ -544,24 +470,15 @@ Handle<Value> StreamWrap::Writev(const Arguments& args) {
// Buffer chunk, no additional storage required
// String chunk
Handle<Value> string = chunk->ToString();
switch (static_cast<WriteEncoding>(chunks->Get(i * 2 + 1)->Int32Value())) {
case kAscii:
storage_size += GetStringSizeImpl<kAscii>(string);
break;
Handle<String> string = chunk->ToString();
enum encoding encoding = ParseEncoding(chunks->Get(i * 2 + 1));
size_t chunk_size;
if (encoding == UTF8 && string->Length() > 65535)
chunk_size = StringBytes::Size(string, encoding);
else
chunk_size = StringBytes::StorageSize(string, encoding);
case kUtf8:
storage_size += GetStringSizeImpl<kUtf8>(string);
break;
case kUcs2:
storage_size += GetStringSizeImpl<kUcs2>(string);
break;
default:
assert(0); // Unreachable
}
storage_size += 15;
storage_size += chunk_size + 15;
}
if (storage_size > INT_MAX) {
@ -585,7 +502,9 @@ Handle<Value> StreamWrap::Writev(const Arguments& args) {
// Write buffer
if (Buffer::HasInstance(chunk)) {
bytes += WriteBuffer(chunk, &bufs[i]);
bufs[i].base = Buffer::Data(chunk);
bufs[i].len = Buffer::Length(chunk);
bytes += bufs[i].len;
continue;
}
@ -596,28 +515,10 @@ Handle<Value> StreamWrap::Writev(const Arguments& args) {
size_t str_size = storage_size - offset;
Handle<String> string = chunk->ToString();
switch (static_cast<WriteEncoding>(chunks->Get(i * 2 + 1)->Int32Value())) {
case kAscii:
str_size = WriteStringImpl<kAscii>(str_storage,
str_size,
string,
&bufs[i]);
break;
case kUtf8:
str_size = WriteStringImpl<kUtf8>(str_storage,
str_size,
string,
&bufs[i]);
break;
case kUcs2:
str_size = WriteStringImpl<kUcs2>(str_storage,
str_size,
string,
&bufs[i]);
break;
default:
assert(0);
}
enum encoding encoding = ParseEncoding(chunks->Get(i * 2 + 1));
str_size = StringBytes::Write(str_storage, str_size, string, encoding);
bufs[i].base = str_storage;
bufs[i].len = str_size;
offset += str_size;
bytes += str_size;
}
@ -655,17 +556,17 @@ Handle<Value> StreamWrap::Writev(const Arguments& args) {
Handle<Value> StreamWrap::WriteAsciiString(const Arguments& args) {
return WriteStringImpl<kAscii>(args);
return WriteStringImpl<ASCII>(args);
}
Handle<Value> StreamWrap::WriteUtf8String(const Arguments& args) {
return WriteStringImpl<kUtf8>(args);
return WriteStringImpl<UTF8>(args);
}
Handle<Value> StreamWrap::WriteUcs2String(const Arguments& args) {
return WriteStringImpl<kUcs2>(args);
return WriteStringImpl<UCS2>(args);
}


@ -25,22 +25,14 @@
#include "v8.h"
#include "node.h"
#include "handle_wrap.h"
#include "string_bytes.h"
namespace node {
// Forward declaration
class WriteWrap;
// Important: this should have the same values as in lib/net.js
enum WriteEncoding {
kUtf8 = 0x1,
kAscii = 0x2,
kUcs2 = 0x3
};
class StreamWrap : public HandleWrap {
public:
uv_stream_t* GetStream() { return stream_; }
@ -63,13 +55,6 @@ class StreamWrap : public HandleWrap {
protected:
static size_t WriteBuffer(v8::Handle<v8::Value> val, uv_buf_t* buf);
template <enum WriteEncoding encoding>
static size_t WriteStringImpl(char* storage,
size_t storage_size,
v8::Handle<v8::Value> val,
uv_buf_t* buf);
template <enum WriteEncoding encoding>
static size_t GetStringSizeImpl(v8::Handle<v8::Value> val);
StreamWrap(v8::Handle<v8::Object> object, uv_stream_t* stream);
virtual void SetHandle(uv_handle_t* h);
@ -90,7 +75,7 @@ class StreamWrap : public HandleWrap {
static void OnReadCommon(uv_stream_t* handle, ssize_t nread,
uv_buf_t buf, uv_handle_type pending);
template <enum WriteEncoding encoding>
template <enum encoding encoding>
static v8::Handle<v8::Value> WriteStringImpl(const v8::Arguments& args);
size_t slab_offset_;

src/string_bytes.cc Normal file

@ -0,0 +1,622 @@
// Copyright Joyent, Inc. and other Node contributors.
//
// Permission is hereby granted, free of charge, to any person obtaining a
// copy of this software and associated documentation files (the
// "Software"), to deal in the Software without restriction, including
// without limitation the rights to use, copy, modify, merge, publish,
// distribute, sublicense, and/or sell copies of the Software, and to permit
// persons to whom the Software is furnished to do so, subject to the
// following conditions:
//
// The above copyright notice and this permission notice shall be included
// in all copies or substantial portions of the Software.
//
// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
// OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
// MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN
// NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM,
// DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
// OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
// USE OR OTHER DEALINGS IN THE SOFTWARE.
#include "string_bytes.h"
#include <assert.h>
#include <string.h> // memcpy
#include <limits.h>
#include "node.h"
#include "node_buffer.h"
#include "v8.h"
namespace node {
using v8::Local;
using v8::Handle;
using v8::HandleScope;
using v8::Object;
using v8::String;
using v8::Value;
//// Base 64 ////
#define base64_encoded_size(size) ((size + 2 - ((size + 2) % 3)) / 3 * 4)
// Doesn't check for padding at the end. Can be 1-2 bytes over.
static inline size_t base64_decoded_size_fast(size_t size) {
size_t remainder = size % 4;
size = (size / 4) * 3;
if (remainder) {
if (size == 0 && remainder == 1) {
// special case: 1-byte input cannot be decoded
size = 0;
} else {
// non-padded input, add 1 or 2 extra bytes
size += 1 + (remainder == 3);
}
}
return size;
}
static inline size_t base64_decoded_size(const char* src, size_t size) {
size = base64_decoded_size_fast(size);
const char* end = src + size;
// check for trailing padding (1 or 2 bytes)
if (size > 0) {
if (end[-1] == '=') size--;
if (size > 0 && end[-2] == '=') size--;
}
return size;
}
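A standalone spot-check of the decoded-size arithmetic above, on a few assumed input lengths (not part of this diff):
// Same formula as base64_decoded_size_fast() above, checked on sample lengths.
#include <cassert>
#include <cstddef>

static size_t decoded_size_fast(size_t size) {
  size_t remainder = size % 4;
  size = (size / 4) * 3;
  if (remainder) {
    if (size == 0 && remainder == 1)
      size = 0;                       // a single base64 char decodes to nothing
    else
      size += 1 + (remainder == 3);   // 2 chars -> 1 byte, 3 chars -> 2 bytes
  }
  return size;
}

int main() {
  assert(decoded_size_fast(4) == 3);   // "TWFu" -> "Man"
  assert(decoded_size_fast(6) == 4);   // unpadded input, rounds up
  assert(decoded_size_fast(1) == 0);   // degenerate one-char input
  // "TWE=" reports 3 here; the trailing-padding check above then trims it to 2.
  assert(decoded_size_fast(4) == 3);
  return 0;
}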
// supports regular and URL-safe base64
static const int unbase64_table[] =
{ -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -2, -1, -1, -2, -1, -1,
-1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1,
-2, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, 62, -1, 62, -1, 63,
52, 53, 54, 55, 56, 57, 58, 59, 60, 61, -1, -1, -1, -1, -1, -1,
-1, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14,
15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, -1, -1, -1, -1, 63,
-1, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40,
41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, -1, -1, -1, -1, -1,
-1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1,
-1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1,
-1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1,
-1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1,
-1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1,
-1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1,
-1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1,
-1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1
};
#define unbase64(x) unbase64_table[(uint8_t)(x)]
static inline size_t base64_decode(char *buf,
size_t len,
const char *src,
const size_t srcLen) {
char a, b, c, d;
char* dst = buf;
char* dstEnd = buf + len;
const char* srcEnd = src + srcLen;
while (src < srcEnd && dst < dstEnd) {
int remaining = srcEnd - src;
while (unbase64(*src) < 0 && src < srcEnd) src++, remaining--;
if (remaining == 0 || *src == '=') break;
a = unbase64(*src++);
while (unbase64(*src) < 0 && src < srcEnd) src++, remaining--;
if (remaining <= 1 || *src == '=') break;
b = unbase64(*src++);
*dst++ = (a << 2) | ((b & 0x30) >> 4);
if (dst == dstEnd) break;
while (unbase64(*src) < 0 && src < srcEnd) src++, remaining--;
if (remaining <= 2 || *src == '=') break;
c = unbase64(*src++);
*dst++ = ((b & 0x0F) << 4) | ((c & 0x3C) >> 2);
if (dst == dstEnd) break;
while (unbase64(*src) < 0 && src < srcEnd) src++, remaining--;
if (remaining <= 3 || *src == '=') break;
d = unbase64(*src++);
*dst++ = ((c & 0x03) << 6) | (d & 0x3F);
}
return dst - buf;
}
//// HEX ////
static inline unsigned hex2bin(char c) {
if (c >= '0' && c <= '9') return c - '0';
if (c >= 'A' && c <= 'F') return 10 + (c - 'A');
if (c >= 'a' && c <= 'f') return 10 + (c - 'a');
return static_cast<unsigned>(-1);
}
static inline size_t hex_decode(char *buf,
size_t len,
const char *src,
const size_t srcLen) {
size_t i;
for (i = 0; i < len && i * 2 + 1 < srcLen; ++i) {
unsigned a = hex2bin(src[i * 2 + 0]);
unsigned b = hex2bin(src[i * 2 + 1]);
if (!~a || !~b) return i;
buf[i] = a * 16 + b;
}
return i;
}
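For reference, a standalone sketch of the same nibble-pair decoding on a known input (hypothetical example, not from this diff):
// Decodes "6e6f6465" ("node") the same way hex_decode() above does.
#include <cassert>
#include <cstddef>
#include <cstring>

static unsigned nibble(char c) {            // same mapping as hex2bin() above
  if (c >= '0' && c <= '9') return c - '0';
  if (c >= 'A' && c <= 'F') return 10 + (c - 'A');
  if (c >= 'a' && c <= 'f') return 10 + (c - 'a');
  return static_cast<unsigned>(-1);
}

int main() {
  const char* src = "6e6f6465";
  char out[4];
  for (size_t i = 0; i < sizeof(out); ++i)
    out[i] = static_cast<char>(nibble(src[i * 2]) * 16 + nibble(src[i * 2 + 1]));
  assert(memcmp(out, "node", 4) == 0);
  return 0;
}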
size_t StringBytes::Write(char* buf,
size_t buflen,
Handle<Value> val,
enum encoding encoding,
int* chars_written) {
HandleScope scope;
size_t len = 0;
bool is_buffer = Buffer::HasInstance(val);
// sometimes we use 'binary' when we mean 'buffer'
if (is_buffer && (encoding == BINARY || encoding == BUFFER)) {
// fast path, copy buffer data
Local<Object> valObj = Local<Object>::New(val.As<Object>());
const char* data = Buffer::Data(valObj);
size_t size = Buffer::Length(valObj);
size_t len = size < buflen ? size : buflen;
memcpy(buf, data, len);
return len;
}
Local<String> str = val->ToString();
int flags = String::NO_NULL_TERMINATION |
String::HINT_MANY_WRITES_EXPECTED;
switch (encoding) {
case ASCII:
len = str->WriteOneByte(reinterpret_cast<uint8_t*>(buf),
0,
buflen,
flags);
if (chars_written != NULL) {
*chars_written = len;
}
break;
case UTF8:
len = str->WriteUtf8(buf, buflen, chars_written, flags);
break;
case UCS2:
len = str->Write(reinterpret_cast<uint16_t*>(buf), 0, buflen, flags);
if (chars_written != NULL) {
*chars_written = len;
}
len = len * sizeof(uint16_t);
break;
case BASE64: {
String::AsciiValue value(str);
len = base64_decode(buf, buflen, *value, value.length());
if (chars_written != NULL) {
*chars_written = len;
}
break;
}
case BINARY:
case BUFFER: {
// TODO(isaacs): THIS IS AWFUL!!!
uint16_t* twobytebuf = new uint16_t[buflen];
len = str->Write(twobytebuf, 0, buflen, flags);
for (size_t i = 0; i < buflen && i < len; i++) {
unsigned char *b = reinterpret_cast<unsigned char*>(&twobytebuf[i]);
buf[i] = b[0];
}
if (chars_written != NULL) {
*chars_written = len;
}
delete[] twobytebuf;
break;
}
case HEX: {
String::AsciiValue value(str);
len = hex_decode(buf, buflen, *value, value.length());
if (chars_written != NULL) {
*chars_written = len * 2;
}
break;
}
default:
assert(0 && "unknown encoding");
break;
}
return len;
}
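The UCS2 branch is the one where the units are easiest to mix up: the buffer length and the return value are in bytes, while chars_written counts UTF-16 code units. A standalone analogue of that contract (hypothetical helper, not node code):
// Mimics the unit convention of the UCS2 case in StringBytes::Write() above.
#include <cassert>
#include <cstddef>
#include <cstdint>
#include <cstring>

static size_t write_ucs2(char* buf, size_t buflen,
                         const uint16_t* src, size_t src_units,
                         int* chars_written) {
  size_t units = buflen / sizeof(uint16_t);     // whole code units that fit
  if (units > src_units) units = src_units;
  memcpy(buf, src, units * sizeof(uint16_t));
  if (chars_written != NULL)
    *chars_written = static_cast<int>(units);   // code units, like the code above
  return units * sizeof(uint16_t);              // bytes, like the code above
}

int main() {
  const uint16_t msg[] = { 'h', 'i', '!' };
  char buf[4];                                  // room for only two code units
  int chars = 0;
  size_t bytes = write_ucs2(buf, sizeof(buf), msg, 3, &chars);
  assert(chars == 2 && bytes == 4);
  return 0;
}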
// Quick and dirty size calculation
// Will always be at least big enough, but may have some extra
// UTF8 can be as much as 3x the size, Base64 can have 1-2 extra bytes
size_t StringBytes::StorageSize(Handle<Value> val, enum encoding encoding) {
HandleScope scope;
size_t data_size = 0;
bool is_buffer = Buffer::HasInstance(val);
if (is_buffer && (encoding == BUFFER || encoding == BINARY)) {
return Buffer::Length(val);
}
Local<String> str = val->ToString();
switch (encoding) {
case BINARY:
case BUFFER:
case ASCII:
data_size = str->Length();
break;
case UTF8:
// A single UCS2 codepoint never takes up more than 3 utf8 bytes.
// It is an exercise for the caller to decide when a string is
// long enough to justify calling Size() instead of StorageSize()
data_size = 3 * str->Length();
break;
case UCS2:
data_size = str->Length() * sizeof(uint16_t);
break;
case BASE64:
data_size = base64_decoded_size_fast(str->Length());
break;
case HEX:
assert(str->Length() % 2 == 0 && "invalid hex string length");
data_size = str->Length() / 2;
break;
default:
assert(0 && "unknown encoding");
break;
}
return data_size;
}
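A standalone spot-check of the per-encoding bounds in the switch above, for a hypothetical 10-character string (not part of this diff):
// Arithmetic only; mirrors the StorageSize() rules above.
#include <cassert>
#include <cstddef>
#include <cstdint>

int main() {
  const size_t chars = 10;
  assert(chars == 10);                     // ASCII / BINARY / BUFFER: one byte per char
  assert(3 * chars == 30);                 // UTF8: worst case, 3 bytes per UTF-16 unit
  assert(chars * sizeof(uint16_t) == 20);  // UCS2: two bytes per char
  assert((chars / 4) * 3 + 1 == 7);        // BASE64: base64_decoded_size_fast(10)
  assert(chars / 2 == 5);                  // HEX: two source chars per output byte
  return 0;
}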
size_t StringBytes::Size(Handle<Value> val, enum encoding encoding) {
HandleScope scope;
size_t data_size = 0;
bool is_buffer = Buffer::HasInstance(val);
if (is_buffer && (encoding == BUFFER || encoding == BINARY)) {
return Buffer::Length(val);
}
Local<String> str = val->ToString();
switch (encoding) {
case BINARY:
case BUFFER:
case ASCII:
data_size = str->Length();
break;
case UTF8:
data_size = str->Utf8Length();
break;
case UCS2:
data_size = str->Length() * sizeof(uint16_t);
break;
case BASE64: {
String::AsciiValue value(str);
data_size = base64_decoded_size(*value, value.length());
break;
}
case HEX:
data_size = str->Length() / 2;
break;
default:
assert(0 && "unknown encoding");
break;
}
return data_size;
}
static bool contains_non_ascii_slow(const char* buf, size_t len) {
for (size_t i = 0; i < len; ++i) {
if (buf[i] & 0x80) return true;
}
return false;
}
static bool contains_non_ascii(const char* src, size_t len) {
if (len < 16) {
return contains_non_ascii_slow(src, len);
}
const unsigned bytes_per_word = sizeof(void*);
const unsigned align_mask = bytes_per_word - 1;
const unsigned unaligned = reinterpret_cast<uintptr_t>(src) & align_mask;
if (unaligned > 0) {
const unsigned n = bytes_per_word - unaligned;
if (contains_non_ascii_slow(src, n)) return true;
src += n;
len -= n;
}
#if BITS_PER_LONG == 64
const uintptr_t mask = 0x8080808080808080ll;
#else
const uintptr_t mask = 0x80808080l;
#endif
const uintptr_t* srcw = reinterpret_cast<const uintptr_t*>(src);
for (size_t i = 0, n = len / bytes_per_word; i < n; ++i) {
if (srcw[i] & mask) return true;
}
const unsigned remainder = len & align_mask;
if (remainder > 0) {
const size_t offset = len - remainder;
if (contains_non_ascii_slow(src + offset, remainder)) return true;
}
return false;
}
static void force_ascii_slow(const char* src, char* dst, size_t len) {
for (size_t i = 0; i < len; ++i) {
dst[i] = src[i] & 0x7f;
}
}
static void force_ascii(const char* src, char* dst, size_t len) {
if (len < 16) {
force_ascii_slow(src, dst, len);
return;
}
const unsigned bytes_per_word = sizeof(void*);
const unsigned align_mask = bytes_per_word - 1;
const unsigned src_unalign = reinterpret_cast<uintptr_t>(src) & align_mask;
const unsigned dst_unalign = reinterpret_cast<uintptr_t>(dst) & align_mask;
if (src_unalign > 0) {
if (src_unalign == dst_unalign) {
const unsigned unalign = bytes_per_word - src_unalign;
force_ascii_slow(src, dst, unalign);
src += unalign;
dst += unalign;
len -= src_unalign;
} else {
force_ascii_slow(src, dst, len);
return;
}
}
#if BITS_PER_LONG == 64
const uintptr_t mask = ~0x8080808080808080ll;
#else
const uintptr_t mask = ~0x80808080l;
#endif
const uintptr_t* srcw = reinterpret_cast<const uintptr_t*>(src);
uintptr_t* dstw = reinterpret_cast<uintptr_t*>(dst);
for (size_t i = 0, n = len / bytes_per_word; i < n; ++i) {
dstw[i] = srcw[i] & mask;
}
const unsigned remainder = len & align_mask;
if (remainder > 0) {
const size_t offset = len - remainder;
force_ascii_slow(src + offset, dst + offset, remainder);
}
}
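Both helpers above lean on the same observation: ASCII bytes never have the high bit set, so whole machine words can be tested or stripped at once instead of byte by byte. A standalone sketch of that trick (not part of this diff):
// Word-at-a-time high-bit test and mask, as used by the two helpers above.
#include <cassert>
#include <cstdint>
#include <cstring>

int main() {
  uint64_t ascii_word, mixed_word;
  memcpy(&ascii_word, "abcdefgh", 8);
  memcpy(&mixed_word, "abc\xc3\xa9" "fgh", 8);  // bytes 4-5 are a two-byte UTF-8 sequence

  const uint64_t mask = 0x8080808080808080ull;
  assert((ascii_word & mask) == 0);       // every byte is plain ASCII
  assert((mixed_word & mask) != 0);       // at least one byte has the high bit set

  uint64_t forced = mixed_word & ~mask;   // clear the high bit of every byte
  assert((forced & mask) == 0);
  return 0;
}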
static size_t base64_encode(const char* src,
size_t slen,
char* dst,
size_t dlen) {
// We know how much we'll write, just make sure that there's space.
assert(dlen >= base64_encoded_size(slen) &&
"not enough space provided for base64 encode");
dlen = base64_encoded_size(slen);
unsigned a;
unsigned b;
unsigned c;
unsigned i;
unsigned k;
unsigned n;
static const char table[] = "ABCDEFGHIJKLMNOPQRSTUVWXYZ"
"abcdefghijklmnopqrstuvwxyz"
"0123456789+/";
i = 0;
k = 0;
n = slen / 3 * 3;
while (i < n) {
a = src[i + 0] & 0xff;
b = src[i + 1] & 0xff;
c = src[i + 2] & 0xff;
dst[k + 0] = table[a >> 2];
dst[k + 1] = table[((a & 3) << 4) | (b >> 4)];
dst[k + 2] = table[((b & 0x0f) << 2) | (c >> 6)];
dst[k + 3] = table[c & 0x3f];
i += 3;
k += 4;
}
if (n != slen) {
switch (slen - n) {
case 1:
a = src[i + 0] & 0xff;
dst[k + 0] = table[a >> 2];
dst[k + 1] = table[(a & 3) << 4];
dst[k + 2] = '=';
dst[k + 3] = '=';
break;
case 2:
a = src[i + 0] & 0xff;
b = src[i + 1] & 0xff;
dst[k + 0] = table[a >> 2];
dst[k + 1] = table[((a & 3) << 4) | (b >> 4)];
dst[k + 2] = table[(b & 0x0f) << 2];
dst[k + 3] = '=';
break;
}
}
return dlen;
}
static size_t hex_encode(const char* src, size_t slen, char* dst, size_t dlen) {
// We know how much we'll write, just make sure that there's space.
assert(dlen >= slen * 2 &&
"not enough space provided for hex encode");
dlen = slen * 2;
for (uint32_t i = 0, k = 0; k < dlen; i += 1, k += 2) {
static const char hex[] = "0123456789abcdef";
uint8_t val = static_cast<uint8_t>(src[i]);
dst[k + 0] = hex[val >> 4];
dst[k + 1] = hex[val & 15];
}
return dlen;
}
Local<Value> StringBytes::Encode(const char* buf,
size_t buflen,
enum encoding encoding) {
HandleScope scope;
assert(buflen <= Buffer::kMaxLength);
if (!buflen && encoding != BUFFER)
return scope.Close(String::Empty());
Local<String> val;
switch (encoding) {
case BUFFER:
return scope.Close(
Buffer::New(static_cast<const char*>(buf), buflen)->handle_);
case ASCII:
if (contains_non_ascii(buf, buflen)) {
char* out = new char[buflen];
force_ascii(buf, out, buflen);
val = String::New(out, buflen);
delete[] out;
} else {
val = String::New(buf, buflen);
}
break;
case UTF8:
val = String::New(buf, buflen);
break;
case BINARY: {
// TODO(isaacs) use ExternalTwoByteString?
const unsigned char *cbuf = reinterpret_cast<const unsigned char*>(buf);
uint16_t * twobytebuf = new uint16_t[buflen];
for (size_t i = 0; i < buflen; i++) {
// XXX is the following line platform independent?
twobytebuf[i] = cbuf[i];
}
val = String::New(twobytebuf, buflen);
delete[] twobytebuf;
break;
}
case BASE64: {
size_t dlen = base64_encoded_size(buflen);
char* dst = new char[dlen];
size_t written = base64_encode(buf, buflen, dst, dlen);
assert(written == dlen);
val = String::New(dst, dlen);
delete[] dst;
break;
}
case UCS2: {
const uint16_t* data = reinterpret_cast<const uint16_t*>(buf);
val = String::New(data, buflen / 2);
break;
}
case HEX: {
size_t dlen = buflen * 2;
char* dst = new char[dlen];
size_t written = hex_encode(buf, buflen, dst, dlen);
assert(written == dlen);
val = String::New(dst, dlen);
delete[] dst;
break;
}
default:
assert(0 && "unknown encoding");
break;
}
return scope.Close(val);
}
} // namespace node

src/string_bytes.h Normal file

@ -0,0 +1,65 @@
// Copyright Joyent, Inc. and other Node contributors.
//
// Permission is hereby granted, free of charge, to any person obtaining a
// copy of this software and associated documentation files (the
// "Software"), to deal in the Software without restriction, including
// without limitation the rights to use, copy, modify, merge, publish,
// distribute, sublicense, and/or sell copies of the Software, and to permit
// persons to whom the Software is furnished to do so, subject to the
// following conditions:
//
// The above copyright notice and this permission notice shall be included
// in all copies or substantial portions of the Software.
//
// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
// OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
// MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN
// NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM,
// DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
// OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
// USE OR OTHER DEALINGS IN THE SOFTWARE.
#ifndef SRC_STRING_BYTES_H_
#define SRC_STRING_BYTES_H_
// Decodes a v8::Handle<v8::String> or Buffer to a raw char*
#include "v8.h"
#include "node.h"
namespace node {
using v8::Handle;
using v8::Local;
using v8::String;
using v8::Value;
class StringBytes {
public:
// Fast, but can be 2 bytes oversized for Base64, and
// as much as triple UTF-8 strings <= 65536 chars in length
static size_t StorageSize(Handle<Value> val, enum encoding enc);
// Precise byte count, but slightly slower for Base64 and
// very much slower for UTF-8
static size_t Size(Handle<Value> val, enum encoding enc);
// Write the bytes from the string or buffer into the char*
// returns the number of bytes written, which will always be
// <= buflen. Use StorageSize/Size first to know how much
// memory to allocate.
static size_t Write(char* buf,
size_t buflen,
Handle<Value> val,
enum encoding enc,
int* chars_written = NULL);
// Take the bytes in the src, and turn it into a Buffer or String.
static Local<Value> Encode(const char* buf,
size_t buflen,
enum encoding encoding);
};
} // namespace node
#endif // SRC_STRING_BYTES_H_
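Putting the header's contract together, a hypothetical binding inside this tree would drive the helpers roughly like this (illustrative sketch only: it assumes node's internal headers, the enum encoding constants, and an active isolate; ReencodeAsHex is a made-up name, not part of this diff):
// Sketch of the StorageSize -> Write -> Encode round trip described above.
static v8::Handle<v8::Value> ReencodeAsHex(v8::Handle<v8::Value> val) {
  v8::HandleScope scope;
  size_t storage = StringBytes::StorageSize(val, UTF8);          // fast upper bound
  char* buf = new char[storage];
  size_t written = StringBytes::Write(buf, storage, val, UTF8);  // actual byte count
  v8::Local<v8::Value> out = StringBytes::Encode(buf, written, HEX);
  delete[] buf;
  return scope.Close(out);
}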


@ -970,3 +970,8 @@ assert.throws(function() {
assert.equal(buf.slice(0, -i), s.slice(0, -i));
}
})();
// Regression test for #5482: should throw but not assert in C++ land.
assert.throws(function() {
Buffer('', 'buffer');
}, TypeError);


@ -68,7 +68,7 @@ if (cluster.isWorker) {
// start two workers and execute callback when both is listening
var startCluster = function(cb) {
var workers = 2;
var workers = 8;
var online = 0;
for (var i = 0, l = workers; i < l; i++) {


@ -887,3 +887,10 @@ assert.throws(function() {
try { d.final('xxx') } catch (e) { /* Ignore. */ }
try { d.final('xxx') } catch (e) { /* Ignore. */ }
})();
// Regression test for #5482: string to Cipher#update() should not assert.
(function() {
var c = crypto.createCipher('aes192', '0123456789abcdef');
c.update('update');
c.final();
})();


@ -54,6 +54,13 @@ check_unref = setInterval(function() {
checks += 1;
}, 100);
// Should not assert on args.Holder()->InternalFieldCount() > 0. See #4261.
(function() {
var t = setInterval(function() {}, 1);
process.nextTick(t.unref.bind({}));
process.nextTick(t.unref.bind(t));
})();
process.on('exit', function() {
assert.strictEqual(interval_fired, false, 'Interval should not fire');
assert.strictEqual(timeout_fired, false, 'Timeout should not fire');

tools/cpplint.py (vendored)

@ -2280,11 +2280,6 @@ def CheckIncludeLine(filename, clean_lines, linenum, include_state, error):
line = clean_lines.lines[linenum]
# "include" should use the new style "foo/bar.h" instead of just "bar.h"
if _RE_PATTERN_INCLUDE_NEW_STYLE.search(line):
error(filename, linenum, 'build/include', 4,
'Include the directory when naming .h files')
# we shouldn't include a file more than once. actually, there are a
# handful of instances where doing so is okay, but in general it's
# not.
@ -2826,15 +2821,6 @@ def CheckForIncludeWhatYouUse(filename, clean_lines, include_state, error,
if not line or line[0] == '#':
continue
# String is special -- it is a non-templatized type in STL.
m = _RE_PATTERN_STRING.search(line)
if m:
# Don't warn about strings in non-STL namespaces:
# (We check only the first match per line; good enough.)
prefix = line[:m.start()]
if prefix.endswith('std::') or not prefix.endswith('::'):
required['<string>'] = (linenum, 'string')
for pattern, template, header in _re_pattern_algorithm_header:
if pattern.search(line):
required[header] = (linenum, template)