From d3b670d1fe837ccc1be328c0debe3e85a676e676 Mon Sep 17 00:00:00 2001 From: Daijiro Wachi Date: Sat, 16 May 2026 00:09:26 +0900 Subject: [PATCH 1/3] src: cache continuation_data pointer in MKDirpSync Signed-off-by: Daijiro Wachi --- src/node_file.cc | 18 +++++++++--------- 1 file changed, 9 insertions(+), 9 deletions(-) diff --git a/src/node_file.cc b/src/node_file.cc index d93f213202ec43..f5398565665dde 100644 --- a/src/node_file.cc +++ b/src/node_file.cc @@ -1866,16 +1866,17 @@ int MKDirpSync(uv_loop_t* loop, req_wrap->continuation_data()->PushPath(std::move(path)); } - while (req_wrap->continuation_data()->paths().size() > 0) { - std::string next_path = req_wrap->continuation_data()->PopPath(); + FSContinuationData* cont_data = req_wrap->continuation_data(); + while (cont_data->paths().size() > 0) { + std::string next_path = cont_data->PopPath(); int err = uv_fs_mkdir(loop, req, next_path.c_str(), mode, nullptr); while (true) { switch (err) { // Note: uv_fs_req_cleanup in terminal paths will be called by // ~FSReqWrapSync(): case 0: - req_wrap->continuation_data()->MaybeSetFirstPath(next_path); - if (req_wrap->continuation_data()->paths().empty()) { + cont_data->MaybeSetFirstPath(next_path); + if (cont_data->paths().empty()) { return 0; } break; @@ -1889,9 +1890,9 @@ int MKDirpSync(uv_loop_t* loop, std::string dirname = next_path.substr(0, next_path.find_last_of(kPathSeparator)); if (dirname != next_path) { - req_wrap->continuation_data()->PushPath(std::move(next_path)); - req_wrap->continuation_data()->PushPath(std::move(dirname)); - } else if (req_wrap->continuation_data()->paths().empty()) { + cont_data->PushPath(std::move(next_path)); + cont_data->PushPath(std::move(dirname)); + } else if (cont_data->paths().empty()) { err = UV_EEXIST; continue; } @@ -1903,8 +1904,7 @@ int MKDirpSync(uv_loop_t* loop, err = uv_fs_stat(loop, req, next_path.c_str(), nullptr); if (err == 0 && !S_ISDIR(req->statbuf.st_mode)) { uv_fs_req_cleanup(req); - if (orig_err == UV_EEXIST 
&&
-              req_wrap->continuation_data()->paths().size() > 0) {
+          if (orig_err == UV_EEXIST && cont_data->paths().size() > 0) {
             return UV_ENOTDIR;
           }
           return UV_EEXIST;

From 9d973c7942052eed09eae44d95b10a9243766a2a Mon Sep 17 00:00:00 2001
From: Daijiro Wachi
Date: Sat, 16 May 2026 00:09:58 +0900
Subject: [PATCH 2/3] stream: cache chunk data between 1st and 2nd pass in Writev

Signed-off-by: Daijiro Wachi
---
 src/stream_base.cc | 76 +++++++++++++++++++++++++---------------------
 1 file changed, 41 insertions(+), 35 deletions(-)

diff --git a/src/stream_base.cc b/src/stream_base.cc
index 6a631921341307..dfc05b2b6fc866 100644
--- a/src/stream_base.cc
+++ b/src/stream_base.cc
@@ -201,12 +201,24 @@ int StreamBase::Writev(const FunctionCallbackInfo<Value>& args) {
   size_t offset;
 
   if (!all_buffers) {
+    // Cache per-chunk data from the first pass so the second pass avoids
+    // redundant V8 array accesses, ToString conversions, and ParseEncoding
+    // calls. Local<> handles remain valid for the duration of this scope.
+    struct CachedChunk {
+      Local<Value> value;
+      Local<String> string;  // empty for Buffer chunks
+      enum encoding enc;
+    };
+    MaybeStackBuffer<CachedChunk, 16> chunk_cache(count);
+
     // Determine storage size first
     for (size_t i = 0; i < count; i++) {
       Local<Value> chunk;
       if (!chunks->Get(context, i * 2).ToLocal(&chunk))
         return -1;
 
+      chunk_cache[i].value = chunk;
+
       if (Buffer::HasInstance(chunk))
         continue;  // Buffer chunk, no additional storage required
@@ -219,6 +231,8 @@ int StreamBase::Writev(const FunctionCallbackInfo<Value>& args) {
       Local<Value> next_chunk;
       if (!chunks->Get(context, i * 2 + 1).ToLocal(&next_chunk))
         return -1;
       enum encoding encoding = ParseEncoding(isolate, next_chunk);
+      chunk_cache[i].string = string;
+      chunk_cache[i].enc = encoding;
 
       size_t chunk_size;
       if ((encoding == UTF8 && string->Length() > 65535 &&
@@ -232,33 +246,20 @@ int StreamBase::Writev(const FunctionCallbackInfo<Value>& args) {
 
     if (storage_size > INT_MAX)
       return UV_ENOBUFS;
-  } else {
-    for (size_t i = 0; i < count; i++) {
-      Local<Value> chunk;
-      if (!chunks->Get(context, i).ToLocal(&chunk))
-        return -1;
-      bufs[i].base = Buffer::Data(chunk);
-      bufs[i].len = Buffer::Length(chunk);
-    }
-  }
 
-  std::unique_ptr<BackingStore> bs;
-  if (storage_size > 0) {
-    bs = ArrayBuffer::NewBackingStore(
-        isolate, storage_size, BackingStoreInitializationMode::kUninitialized);
-  }
+    std::unique_ptr<BackingStore> bs;
+    if (storage_size > 0) {
+      bs = ArrayBuffer::NewBackingStore(
+          isolate, storage_size, BackingStoreInitializationMode::kUninitialized);
+    }
 
-  offset = 0;
-  if (!all_buffers) {
+    offset = 0;
     for (size_t i = 0; i < count; i++) {
-      Local<Value> chunk;
-      if (!chunks->Get(context, i * 2).ToLocal(&chunk))
-        return -1;
-
-      // Write buffer
-      if (Buffer::HasInstance(chunk)) {
-        bufs[i].base = Buffer::Data(chunk);
-        bufs[i].len = Buffer::Length(chunk);
+      // string.IsEmpty() signals a Buffer chunk; enc is uninitialised in
+      // that case so we must not read it.
+      if (chunk_cache[i].string.IsEmpty()) {
+        bufs[i].base = Buffer::Data(chunk_cache[i].value);
+        bufs[i].len = Buffer::Length(chunk_cache[i].value);
         continue;
       }
 
@@ -268,28 +269,33 @@ int StreamBase::Writev(const FunctionCallbackInfo<Value>& args) {
           static_cast<char*>(bs ? bs->Data() : nullptr) + offset;
       size_t str_size = (bs ? bs->ByteLength() : 0) - offset;
 
-      Local<String> string;
-      if (!chunk->ToString(context).ToLocal(&string))
-        return -1;
-      Local<Value> next_chunk;
-      if (!chunks->Get(context, i * 2 + 1).ToLocal(&next_chunk))
-        return -1;
-      enum encoding encoding = ParseEncoding(isolate, next_chunk);
       str_size = StringBytes::Write(isolate,
                                     str_storage,
                                     str_size,
-                                    string,
-                                    encoding);
+                                    chunk_cache[i].string,
+                                    chunk_cache[i].enc);
       bufs[i].base = str_storage;
       bufs[i].len = str_size;
       offset += str_size;
     }
+
+    StreamWriteResult res = Write(*bufs, count, nullptr, req_wrap_obj);
+    SetWriteResult(res);
+    if (res.wrap != nullptr && storage_size > 0)
+      res.wrap->SetBackingStore(std::move(bs));
+    return res.err;
+  } else {
+    for (size_t i = 0; i < count; i++) {
+      Local<Value> chunk;
+      if (!chunks->Get(context, i).ToLocal(&chunk))
+        return -1;
+      bufs[i].base = Buffer::Data(chunk);
+      bufs[i].len = Buffer::Length(chunk);
+    }
   }
 
   StreamWriteResult res = Write(*bufs, count, nullptr, req_wrap_obj);
   SetWriteResult(res);
-  if (res.wrap != nullptr && storage_size > 0)
-    res.wrap->SetBackingStore(std::move(bs));
 
   return res.err;
 }

From f78194a5337c23966459cdd309ac3552ff21ac7f Mon Sep 17 00:00:00 2001
From: Daijiro Wachi
Date: Sat, 16 May 2026 00:24:03 +0900
Subject: [PATCH 3/3] benchmark: add writev string-chunk and mkdirpSync benchmarks

---
 benchmark/fs/bench-mkdirpSync.js            | 27 +++++++++++++
 benchmark/streams/writable-writev-string.js | 42 +++++++++++++++++++++
 2 files changed, 69 insertions(+)
 create mode 100644 benchmark/fs/bench-mkdirpSync.js
 create mode 100644 benchmark/streams/writable-writev-string.js

diff --git a/benchmark/fs/bench-mkdirpSync.js b/benchmark/fs/bench-mkdirpSync.js
new file mode 100644
index
00000000000000..286ab738907379 --- /dev/null +++ b/benchmark/fs/bench-mkdirpSync.js @@ -0,0 +1,27 @@ +'use strict'; + +// Benchmarks MKDirpSync (fs.mkdirSync with recursive: true), which iterates +// over a continuation_data path queue. Varying depth exercises the inner loop +// more, making the continuation_data pointer cache more impactful. + +const common = require('../common'); +const fs = require('fs'); +const path = require('path'); +const tmpdir = require('../../test/common/tmpdir'); +tmpdir.refresh(); + +const bench = common.createBenchmark(main, { + n: [1e3], + depth: [4, 8, 16], +}); + +let dirc = 0; + +function main({ n, depth }) { + bench.start(); + for (let i = 0; i < n; i++) { + const parts = Array.from({ length: depth }, () => String(++dirc)); + fs.mkdirSync(path.join(tmpdir.path, ...parts), { recursive: true }); + } + bench.end(n); +} diff --git a/benchmark/streams/writable-writev-string.js b/benchmark/streams/writable-writev-string.js new file mode 100644 index 00000000000000..f8a131696c5af2 --- /dev/null +++ b/benchmark/streams/writable-writev-string.js @@ -0,0 +1,42 @@ +'use strict'; + +const common = require('../common.js'); +const { Writable } = require('stream'); + +// Benchmarks StreamBase::Writev with string chunks, exercising the chunk +// cache that avoids redundant V8 array accesses, ToString, and ParseEncoding +// calls between the sizing pass and the write pass. +const bench = common.createBenchmark(main, { + n: [1e4], + chunks: [4, 16, 64], + encoding: ['utf8', 'latin1'], + type: ['string', 'buffer', 'mixed'], +}); + +function main({ n, chunks, encoding, type }) { + const str = 'Hello, benchmark! 
'.repeat(4); + const buf = Buffer.from(str, encoding); + + const wr = new Writable({ + writev(chunks, cb) { cb(); }, + write(chunk, enc, cb) { cb(); }, + }); + + bench.start(); + for (let i = 0; i < n; i++) { + wr.cork(); + for (let j = 0; j < chunks; j++) { + if (type === 'buffer') { + wr.write(buf); + } else if (type === 'string') { + wr.write(str, encoding); + } else { + // Alternate buffer and string to hit the mixed (non-all_buffers) path. + if (j % 2 === 0) wr.write(buf); + else wr.write(str, encoding); + } + } + wr.uncork(); + } + bench.end(n); +}