@@ -405,7 +405,9 @@ pub const Flags = packed struct(u16) {
     is_preconnect_only: bool = false,
     is_streaming_request_body: bool = false,
     defer_fail_until_connecting_is_complete: bool = false,
-    _padding: u5 = 0,
+    is_websockets: bool = false,
+    websocket_upgraded: bool = false,
+    _padding: u3 = 0,
 };
 
 // TODO: reduce the size of this struct
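
A note on the layout change above: Flags is a packed struct(u16), so its fields must add up to exactly 16 bits, and the two new bools are only legal because the padding shrinks from u5 to u3. A standalone sketch of that invariant (ExampleFlags is illustrative, not from the patch, and keeps only the two new fields):

    const std = @import("std");

    // Reduced stand-in for the Flags struct in the hunk above: the two new
    // 1-bit flags plus the remaining padding must still total exactly 16 bits,
    // otherwise the packed struct(u16) declaration fails to compile.
    const ExampleFlags = packed struct(u16) {
        is_websockets: bool = false,
        websocket_upgraded: bool = false,
        _padding: u14 = 0,
    };

    comptime {
        std.debug.assert(@bitSizeOf(ExampleFlags) == 16);
        std.debug.assert(@sizeOf(ExampleFlags) == 2);
    }
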
@@ -592,6 +594,11 @@ pub fn buildRequest(this: *HTTPClient, body_len: usize) picohttp.Request {
             hashHeaderConst("Accept-Encoding") => {
                 override_accept_encoding = true;
             },
+            hashHeaderConst("Upgrade") => {
+                if (std.ascii.eqlIgnoreCase(this.headerStr(header_values[i]), "websocket")) {
+                    this.flags.is_websockets = true;
+                }
+            },
             hashHeaderConst(chunked_encoded_header.name) => {
                 // We don't want to override chunked encoding header if it was set by the user
                 add_transfer_encoding = false;
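
The new Upgrade case keys off std.ascii.eqlIgnoreCase, so the flag is set for any capitalization of the header value. A minimal sketch of that matching rule (wantsWebSocketUpgrade is a hypothetical helper, not part of the patch, which performs the comparison inline):

    const std = @import("std");

    // Hypothetical helper mirroring the inline check above: the Upgrade
    // header value is compared case-insensitively against "websocket".
    fn wantsWebSocketUpgrade(upgrade_value: []const u8) bool {
        return std.ascii.eqlIgnoreCase(upgrade_value, "websocket");
    }

    test "Upgrade value matching is case-insensitive" {
        try std.testing.expect(wantsWebSocketUpgrade("websocket"));
        try std.testing.expect(wantsWebSocketUpgrade("WebSocket"));
        try std.testing.expect(!wantsWebSocketUpgrade("h2c"));
    }
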
@@ -1019,11 +1026,14 @@ fn writeToStreamUsingBuffer(this: *HTTPClient, comptime is_ssl: bool, socket: Ne
     // no data to send so we are done
     return false;
 }
-
 pub fn writeToStream(this: *HTTPClient, comptime is_ssl: bool, socket: NewHTTPContext(is_ssl).HTTPSocket, data: []const u8) void {
     log("flushStream", .{});
     var stream = &this.state.original_request_body.stream;
     const stream_buffer = stream.buffer orelse return;
+    if (this.flags.is_websockets and !this.flags.websocket_upgraded) {
+        // cannot drain yet, websocket is waiting for upgrade
+        return;
+    }
     const buffer = stream_buffer.acquire();
     const wasEmpty = buffer.isEmpty() and data.len == 0;
     if (wasEmpty and stream.ended) {
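
The guard added above defers draining a streaming request body for a websocket request until the server has accepted the upgrade: writeToStream returns early whenever is_websockets is set but websocket_upgraded is not. A sketch of that gate as a pure predicate, using a hypothetical canDrainRequestBody helper that the patch itself does not define:

    const std = @import("std");

    // Hypothetical predicate mirroring the early-return guard above: a
    // streaming body tied to a websocket request must not be flushed until
    // the server has accepted the upgrade.
    fn canDrainRequestBody(is_websockets: bool, websocket_upgraded: bool) bool {
        return !is_websockets or websocket_upgraded;
    }

    test "websocket bodies wait for the upgrade" {
        try std.testing.expect(canDrainRequestBody(false, false)); // ordinary streaming body
        try std.testing.expect(!canDrainRequestBody(true, false)); // 101 not received yet
        try std.testing.expect(canDrainRequestBody(true, true)); // upgrade completed
    }
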
@@ -1324,56 +1334,78 @@ pub fn handleOnDataHeaders(
 ) void {
     log("handleOnDataHeaders", .{});
     var to_read = incoming_data;
-    var amount_read: usize = 0;
-    var needs_move = true;
-    if (this.state.response_message_buffer.list.items.len > 0) {
-        // this one probably won't be another chunk, so we use appendSliceExact() to avoid over-allocating
-        bun.handleOom(this.state.response_message_buffer.appendSliceExact(incoming_data));
-        to_read = this.state.response_message_buffer.list.items;
-        needs_move = false;
-    }
-
-    // we reset the pending_response each time wich means that on parse error this will be always be empty
-    this.state.pending_response = picohttp.Response{};
-
-    // minimal http/1.1 request size is 16 bytes without headers and 26 with Host header
-    // if is less than 16 will always be a ShortRead
-    if (to_read.len < 16) {
-        log("handleShortRead", .{});
-        this.handleShortRead(is_ssl, incoming_data, socket, needs_move);
-        return;
-    }
 
-    var response = picohttp.Response.parseParts(
-        to_read,
-        &shared_response_headers_buf,
-        &amount_read,
-    ) catch |err| {
-        switch (err) {
-            error.ShortRead => {
-                this.handleShortRead(is_ssl, incoming_data, socket, needs_move);
-            },
-            else => {
-                this.closeAndFail(err, is_ssl, socket);
-            },
+    while (true) {
+        var amount_read: usize = 0;
+        var needs_move = true;
+        if (this.state.response_message_buffer.list.items.len > 0) {
+            // this one probably won't be another chunk, so we use appendSliceExact() to avoid over-allocating
+            bun.handleOom(this.state.response_message_buffer.appendSliceExact(incoming_data));
+            to_read = this.state.response_message_buffer.list.items;
+            needs_move = false;
         }
-        return;
-    };
 
-    // we save the successful parsed response
-    this.state.pending_response = response;
-
-    const body_buf = to_read[@min(@as(usize, @intCast(response.bytes_read)), to_read.len)..];
-    // handle the case where we have a 100 Continue
-    if (response.status_code >= 100 and response.status_code < 200) {
-        log("information headers", .{});
-        // we still can have the 200 OK in the same buffer sometimes
-        if (body_buf.len > 0) {
-            log("information headers with body", .{});
-            this.onData(is_ssl, body_buf, ctx, socket);
+        // we reset the pending_response each time wich means that on parse error this will be always be empty
+        this.state.pending_response = picohttp.Response{};
+
+        // minimal http/1.1 request size is 16 bytes without headers and 26 with Host header
+        // if is less than 16 will always be a ShortRead
+        if (to_read.len < 16) {
+            log("handleShortRead", .{});
+            this.handleShortRead(is_ssl, incoming_data, socket, needs_move);
+            return;
         }
-        return;
+
+        const response = picohttp.Response.parseParts(
+            to_read,
+            &shared_response_headers_buf,
+            &amount_read,
+        ) catch |err| {
+            switch (err) {
+                error.ShortRead => {
+                    this.handleShortRead(is_ssl, incoming_data, socket, needs_move);
+                },
+                else => {
+                    this.closeAndFail(err, is_ssl, socket);
+                },
+            }
+            return;
+        };
+
+        // we save the successful parsed response
+        this.state.pending_response = response;
+
+        to_read = to_read[@min(@as(usize, @intCast(response.bytes_read)), to_read.len)..];
+
+        if (response.status_code == 101) {
+            if (!this.flags.is_websockets) {
+                // we cannot upgrade to websocket because the client did not request it!
+                this.closeAndFail(error.UnrequestedUpgrade, is_ssl, socket);
+                return;
+            }
+            // special case for websocket upgrade
+            this.flags.is_websockets = true;
+            this.flags.websocket_upgraded = true;
+            if (this.signals.upgraded) |upgraded| {
+                upgraded.store(true, .monotonic);
+            }
+            // start draining the request body
+            this.flushStream(is_ssl, socket);
+            break;
+        }
+
+        // handle the case where we have a 100 Continue
+        if (response.status_code >= 100 and response.status_code < 200) {
+            log("information headers", .{});
+            // we still can have the 200 OK in the same buffer sometimes
+            // 1XX responses MUST NOT include a message-body, therefore we need to continue parsing
+
+            continue;
+        }
+
+        break;
     }
+    var response = this.state.pending_response.?;
     const should_continue = this.handleResponseMetadata(
         &response,
     ) catch |err| {
@@ -1409,14 +1441,14 @@ pub fn handleOnDataHeaders(
 
     if (this.flags.proxy_tunneling and this.proxy_tunnel == null) {
         // we are proxing we dont need to cloneMetadata yet
-        this.startProxyHandshake(is_ssl, socket, body_buf);
+        this.startProxyHandshake(is_ssl, socket, to_read);
         return;
     }
 
     // we have body data incoming so we clone metadata and keep going
     this.cloneMetadata();
 
-    if (body_buf.len == 0) {
+    if (to_read.len == 0) {
         // no body data yet, but we can report the headers
         if (this.signals.get(.header_progress)) {
             this.progressUpdate(is_ssl, ctx, socket);
@@ -1426,7 +1458,7 @@ pub fn handleOnDataHeaders(
 
     if (this.state.response_stage == .body) {
         {
-            const report_progress = this.handleResponseBody(body_buf, true) catch |err| {
+            const report_progress = this.handleResponseBody(to_read, true) catch |err| {
                 this.closeAndFail(err, is_ssl, socket);
                 return;
             };
@@ -1439,7 +1471,7 @@ pub fn handleOnDataHeaders(
     } else if (this.state.response_stage == .body_chunk) {
         this.setTimeout(socket, 5);
         {
-            const report_progress = this.handleResponseBodyChunkedEncoding(body_buf) catch |err| {
+            const report_progress = this.handleResponseBodyChunkedEncoding(to_read) catch |err| {
                 this.closeAndFail(err, is_ssl, socket);
                 return;
             };
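
Taken together, the restructured loop above handles interim responses in a fixed order: a 101 is only accepted when the request actually asked for a websocket upgrade (otherwise the connection fails with UnrequestedUpgrade), any other 1xx response carries no body and parsing simply continues on the remaining bytes, and anything else breaks out so the final response metadata can be processed. A sketch of that branch order, using a hypothetical classifyStatus helper that is not part of the patch:

    const std = @import("std");

    // Hypothetical classifier matching the branch order in the new while-loop:
    // 101 is an upgrade (valid only when the request asked for one), other 1xx
    // responses are interim and parsing continues, everything else is final.
    const InterimAction = enum { upgrade, reject_upgrade, keep_parsing, final_response };

    fn classifyStatus(status_code: u16, requested_websocket: bool) InterimAction {
        if (status_code == 101) {
            return if (requested_websocket) .upgrade else .reject_upgrade;
        }
        if (status_code >= 100 and status_code < 200) return .keep_parsing;
        return .final_response;
    }

    test "interim status handling" {
        try std.testing.expectEqual(InterimAction.upgrade, classifyStatus(101, true));
        try std.testing.expectEqual(InterimAction.reject_upgrade, classifyStatus(101, false));
        try std.testing.expectEqual(InterimAction.keep_parsing, classifyStatus(100, true));
        try std.testing.expectEqual(InterimAction.final_response, classifyStatus(200, true));
    }
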
@@ -2149,7 +2181,7 @@ pub fn handleResponseMetadata(
     // [...] cannot contain a message body or trailer section.
     // therefore in these cases set content-length to 0, so the response body is always ignored
     // and is not waited for (which could cause a timeout)
-    if ((response.status_code >= 100 and response.status_code < 200) or response.status_code == 204 or response.status_code == 304) {
+    if ((response.status_code >= 100 and response.status_code < 200 and response.status_code != 101) or response.status_code == 204 or response.status_code == 304) {
         this.state.content_length = 0;
     }
 
@@ -2416,7 +2448,7 @@ pub fn handleResponseMetadata(
         log("handleResponseMetadata: content_length is null and transfer_encoding {}", .{this.state.transfer_encoding});
     }
 
-    if (this.method.hasBody() and (content_length == null or content_length.? > 0 or !this.state.flags.allow_keepalive or this.state.transfer_encoding == .chunked or is_server_sent_events)) {
+    if (this.method.hasBody() and (content_length == null or content_length.? > 0 or !this.state.flags.allow_keepalive or this.state.transfer_encoding == .chunked or is_server_sent_events or this.flags.websocket_upgraded)) {
         return ShouldContinue.continue_streaming;
     } else {
        return ShouldContinue.finished;
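
The last two hunks adjust handleResponseMetadata in the same spirit: 101 is carved out of the "1xx never has a body" rule so content_length is not forced to 0, and websocket_upgraded keeps the client in streaming mode even when the body would otherwise be considered finished. A reduced sketch of those two checks, assuming hypothetical forcesEmptyBody and shouldKeepStreaming helpers and eliding the method, keep-alive, chunked, and server-sent-events conditions from the real check:

    const std = @import("std");

    // Hypothetical reduction of the two checks above: 204, 304, and 1xx except
    // 101 force a zero-length body; an upgraded websocket keeps the client
    // streaming even when content-length would otherwise end the response.
    fn forcesEmptyBody(status_code: u16) bool {
        return (status_code >= 100 and status_code < 200 and status_code != 101) or
            status_code == 204 or status_code == 304;
    }

    fn shouldKeepStreaming(content_length: ?usize, websocket_upgraded: bool) bool {
        return content_length == null or content_length.? > 0 or websocket_upgraded;
    }

    test "101 and websocket upgrades keep the stream open" {
        try std.testing.expect(forcesEmptyBody(100));
        try std.testing.expect(!forcesEmptyBody(101));
        try std.testing.expect(forcesEmptyBody(204));
        try std.testing.expect(shouldKeepStreaming(0, true)); // upgraded: ignore the empty body
        try std.testing.expect(!shouldKeepStreaming(0, false));
    }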