
Commit c976cb5

addaleax authored and MylesBorins committed
http2: no stream destroy while its data is on the wire
This fixes a crash that occurred when a `Http2Stream` write is completed
after it is already destroyed. Instead, don't destroy the stream in that
case and wait for GC to take over.

Backport-PR-URL: #19185
Backport-PR-URL: #20456
PR-URL: #19002
Fixes: #18973
Reviewed-By: James M Snell <[email protected]>
Reviewed-By: Anatoli Papirovski <[email protected]>
1 parent bfd7d6d commit c976cb5
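
The commit message above describes the fix; before the full diff, here is a minimal, self-contained C++ sketch of the pattern it introduces (the Session, Stream and PendingWrite types below are illustrative stand-ins, not the Node.js sources): before freeing a stream, scan the session's buffered outgoing writes and defer the free if any of them still reference that stream.

// Sketch only: illustrates the "no delete while writes are on the wire"
// pattern from this commit, using made-up types rather than node_http2.cc.
#include <cstdio>
#include <memory>
#include <vector>

struct Stream;

struct PendingWrite {
  Stream* stream;  // which stream this buffered outgoing write belongs to
};

struct Stream {
  int id;
  explicit Stream(int id) : id(id) {}
  ~Stream() { std::printf("stream %d torn down\n", id); }
};

struct Session {
  std::vector<PendingWrite> outgoing_buffers_;

  // Analogue of Http2Session::HasWritesOnSocketForStream(): true if any
  // queued outgoing write still points at `stream`.
  bool HasWritesOnSocketForStream(const Stream* stream) const {
    for (const PendingWrite& wr : outgoing_buffers_) {
      if (wr.stream == stream)
        return true;
    }
    return false;
  }

  // Free the stream only when nothing on the wire references it anymore;
  // otherwise leave it alive (the real patch defers to the GC instead).
  void MaybeDestroy(std::unique_ptr<Stream>& stream) {
    if (!HasWritesOnSocketForStream(stream.get()))
      stream.reset();
    else
      std::printf("stream %d has writes in flight, deferring\n", stream->id);
  }
};

int main() {
  Session session;
  auto a = std::make_unique<Stream>(1);
  auto b = std::make_unique<Stream>(2);

  session.outgoing_buffers_.push_back(PendingWrite{a.get()});

  session.MaybeDestroy(a);  // deferred: a write for stream 1 is still queued
  session.MaybeDestroy(b);  // freed immediately: nothing references stream 2

  session.outgoing_buffers_.clear();
  session.MaybeDestroy(a);  // the queue is drained, so stream 1 is freed now
}

In the actual patch the deferred case simply leaves the Http2Stream object alive so the pending write can complete, and the garbage collector reclaims it later; that is why the new test below runs under --expose-gc and forces a collection on exit.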

File tree

3 files changed, +80 -4 lines changed


src/node_http2.cc

+15 -4

@@ -1712,6 +1712,14 @@ void Http2Session::OnStreamDestructImpl(void* ctx) {
   session->stream_ = nullptr;
 }
 
+bool Http2Session::HasWritesOnSocketForStream(Http2Stream* stream) {
+  for (const nghttp2_stream_write& wr : outgoing_buffers_) {
+    if (wr.req_wrap != nullptr && wr.req_wrap->stream() == stream)
+      return true;
+  }
+  return false;
+}
+
 // Every Http2Session session is tightly bound to a single i/o StreamBase
 // (typically a net.Socket or tls.TLSSocket). The lifecycle of the two is
 // tightly coupled with all data transfer between the two happening at the
@@ -1770,13 +1778,12 @@ Http2Stream::Http2Stream(
 
 
 Http2Stream::~Http2Stream() {
+  DEBUG_HTTP2STREAM(this, "tearing down stream");
   if (session_ != nullptr) {
     session_->RemoveStream(this);
     session_ = nullptr;
   }
 
-  if (!object().IsEmpty())
-    ClearWrap(object());
   persistent().Reset();
   CHECK(persistent().IsEmpty());
 }
@@ -1861,15 +1868,19 @@ inline void Http2Stream::Destroy() {
     Http2Stream* stream = static_cast<Http2Stream*>(data);
     // Free any remaining outgoing data chunks here. This should be done
     // here because it's possible for destroy to have been called while
-    // we still have qeueued outbound writes.
+    // we still have queued outbound writes.
     while (!stream->queue_.empty()) {
       nghttp2_stream_write& head = stream->queue_.front();
       if (head.req_wrap != nullptr)
         head.req_wrap->Done(UV_ECANCELED);
       stream->queue_.pop();
     }
 
-    delete stream;
+    // We can destroy the stream now if there are no writes for it
+    // already on the socket. Otherwise, we'll wait for the garbage collector
+    // to take care of cleaning up.
+    if (!stream->session()->HasWritesOnSocketForStream(stream))
+      delete stream;
   }, this, this->object());
 
   statistics_.end_time = uv_hrtime();

src/node_http2.h

+3

@@ -869,6 +869,9 @@ class Http2Session : public AsyncWrap {
   // Removes a stream instance from this session
   inline void RemoveStream(Http2Stream* stream);
 
+  // Indicates whether there currently exist outgoing buffers for this stream.
+  bool HasWritesOnSocketForStream(Http2Stream* stream);
+
   // Write data to the session
   inline ssize_t Write(const uv_buf_t* bufs, size_t nbufs);

@@ -0,0 +1,62 @@
+// Flags: --expose-gc
+'use strict';
+const common = require('../common');
+if (!common.hasCrypto)
+  common.skip('missing crypto');
+const assert = require('assert');
+const http2 = require('http2');
+const makeDuplexPair = require('../common/duplexpair');
+
+// Make sure the Http2Stream destructor works, since we don't clean the
+// stream up like we would otherwise do.
+process.on('exit', global.gc);
+
+{
+  const { clientSide, serverSide } = makeDuplexPair();
+
+  let serverSideHttp2Stream;
+  let serverSideHttp2StreamDestroyed = false;
+  const server = http2.createServer();
+  server.on('stream', common.mustCall((stream, headers) => {
+    serverSideHttp2Stream = stream;
+    stream.respond({
+      'content-type': 'text/html',
+      ':status': 200
+    });
+
+    const originalWrite = serverSide._write;
+    serverSide._write = (buf, enc, cb) => {
+      if (serverSideHttp2StreamDestroyed) {
+        serverSide.destroy();
+        serverSide.write = () => {};
+      } else {
+        setImmediate(() => {
+          originalWrite.call(serverSide, buf, enc, () => setImmediate(cb));
+        });
+      }
+    };
+
+    // Enough data to fit into a single *session* window,
+    // not enough data to fit into a single *stream* window.
+    stream.write(Buffer.alloc(40000));
+  }));
+
+  server.emit('connection', serverSide);
+
+  const client = http2.connect('http://localhost:80', {
+    createConnection: common.mustCall(() => clientSide)
+  });
+
+  const req = client.request({ ':path': '/' });
+
+  req.on('response', common.mustCall((headers) => {
+    assert.strictEqual(headers[':status'], 200);
+  }));
+
+  req.on('data', common.mustCallAtLeast(() => {
+    if (!serverSideHttp2StreamDestroyed) {
+      serverSideHttp2Stream.destroy();
+      serverSideHttp2StreamDestroyed = true;
+    }
+  }));
+}
