mirror of
https://github.com/HoneyryderChuck/httpx.git
synced 2025-08-13 00:02:57 -04:00
Compare commits
749 Commits
Author | SHA1 | Date | |
---|---|---|---|
|
0261449b39 | ||
|
84c8126cd9 | ||
|
ff3f1f726f | ||
|
b8b710470c | ||
|
0f3e3ab068 | ||
|
095fbb3463 | ||
|
7790589c1f | ||
|
dd8608ec3b | ||
|
8205b351aa | ||
|
5992628926 | ||
|
39370b5883 | ||
|
1801a7815c | ||
|
0953e4f91a | ||
|
a78a3f0b7c | ||
|
aeb8fe5382 | ||
|
03170b6c89 | ||
|
814d607a45 | ||
|
5502332e7e | ||
|
f3b68950d6 | ||
|
2c4638784f | ||
|
b0016525e3 | ||
|
49555694fe | ||
|
93e5efa32e | ||
|
8b3c1da507 | ||
|
d64f247e11 | ||
|
f64c3ab599 | ||
|
af03ddba3b | ||
|
7012ca1f27 | ||
|
d405f8905f | ||
|
3ff10f142a | ||
|
51ce9d10a4 | ||
|
6bde11b09c | ||
|
0c2808fa25 | ||
|
cb78091e03 | ||
|
6fa69ba475 | ||
|
4a78e78d32 | ||
|
0e393987d0 | ||
|
12483fa7c8 | ||
|
d955ba616a | ||
|
804d5b878b | ||
|
75702165fd | ||
|
120bbad126 | ||
|
35446e9fe1 | ||
|
3ed41ef2bf | ||
|
9ffbceff87 | ||
|
757c9ae32c | ||
|
5d88ccedf9 | ||
|
85808b6569 | ||
|
d5483a4264 | ||
|
540430c00e | ||
|
3a417a4623 | ||
|
35c18a1b9b | ||
|
cf19fe5221 | ||
|
f9c2fc469a | ||
|
9b513faab4 | ||
|
0be39faefc | ||
|
08c5f394ba | ||
|
55411178ce | ||
|
a5c83e84d3 | ||
|
d7e15c4441 | ||
|
012255e49c | ||
|
d20506acb8 | ||
|
28399f1b88 | ||
|
953101afde | ||
|
055ee47b83 | ||
|
dbad275c65 | ||
|
fe69231e6c | ||
|
4c61df768a | ||
|
aec150b030 | ||
|
29a43c4bc3 | ||
|
34c2fee60c | ||
|
c62966361e | ||
|
2b87a3d5e5 | ||
|
3dd767cdc2 | ||
|
a9255c52aa | ||
|
32031e8a03 | ||
|
f328646c08 | ||
|
0484dd76c8 | ||
|
17c1090b7a | ||
|
87f4ce4b03 | ||
|
1ec7442322 | ||
|
723959cf92 | ||
|
10b4b9c7c0 | ||
|
1b39bcd3a3 | ||
|
44a2041ea8 | ||
|
b63f9f1ae2 | ||
|
467dd5e7e5 | ||
|
c626fae3da | ||
|
7f6b78540b | ||
|
b120ce4657 | ||
|
32c36bb4ee | ||
|
cc0626429b | ||
|
a0e2c1258a | ||
|
6bd3c15384 | ||
|
0d23c464f5 | ||
|
a75b89db74 | ||
|
7173616154 | ||
|
69f9557780 | ||
|
339af65cc1 | ||
|
3df6edbcfc | ||
|
5c2f8ab0b1 | ||
|
0c335fd03d | ||
|
bf19cde364 | ||
|
7e0ddb7ab2 | ||
|
4cd3136922 | ||
|
642122a0f5 | ||
|
42d42a92b4 | ||
|
fb6a509d98 | ||
|
3c22f36a6c | ||
|
51b2693842 | ||
|
1ab5855961 | ||
|
f82816feb3 | ||
|
ee229aa74c | ||
|
793e900ce8 | ||
|
1241586eb4 | ||
|
cbf454ae13 | ||
|
180d3b0e59 | ||
|
84db0072fb | ||
|
c48f6c8e8f | ||
|
870b8aed69 | ||
|
56b8e9647a | ||
|
1f59688791 | ||
|
e63c75a86c | ||
|
3eaf58e258 | ||
|
9ff62404a6 | ||
|
4d694f9517 | ||
|
22952f6a4a | ||
|
7660e4c555 | ||
|
a9cc787210 | ||
|
970830a025 | ||
|
7a3d38aeee | ||
|
54bb617902 | ||
|
cf08ae99f5 | ||
|
c8ce4cd8c8 | ||
|
6658a2ce24 | ||
|
7169f6aaaf | ||
|
ffc4824762 | ||
|
8e050e846f | ||
|
e40d3c9552 | ||
|
ba60ef79a7 | ||
|
ca49c9ef41 | ||
|
7010484b2a | ||
|
06eba512a6 | ||
|
f9ed0ab602 | ||
|
5632e522c2 | ||
|
cfdb719a8e | ||
|
b2a1b9cded | ||
|
5917c63a70 | ||
|
6af8ad0132 | ||
|
35ac13406d | ||
|
d00c46d363 | ||
|
a437de36e8 | ||
|
797fd28142 | ||
|
6d4266d4a4 | ||
|
eb8c18ccda | ||
|
4653b48602 | ||
|
8287a55b95 | ||
|
9faed647bf | ||
|
5268f60021 | ||
|
132e4b4ebe | ||
|
b502247284 | ||
|
e5d852573a | ||
|
d17ac7c8c3 | ||
|
b1c08f16d5 | ||
|
f618c6447a | ||
|
4454b1bbcc | ||
|
88f8f5d287 | ||
|
999b6a603a | ||
|
f8d05b0e82 | ||
|
a7f2271652 | ||
|
55f1f6800b | ||
|
3e736b1f05 | ||
|
f5497eec4f | ||
|
08015e0851 | ||
|
a0f472ba02 | ||
|
8bee6956eb | ||
|
97cbdf117d | ||
|
383f2a01d8 | ||
|
8a473b4ccd | ||
|
b6c8f70aaf | ||
|
f5aa6142a0 | ||
|
56d82e6370 | ||
|
41e95d5b86 | ||
|
46a39f2b0d | ||
|
8009fc11b7 | ||
|
398c08eb4d | ||
|
723fda297f | ||
|
35ee625827 | ||
|
210abfb2f5 | ||
|
53bf6824f8 | ||
|
cb8a97c837 | ||
|
0063ab6093 | ||
|
7811cbf3a7 | ||
|
7c21c33999 | ||
|
e45edcbfce | ||
|
7e705dc57e | ||
|
dae4364664 | ||
|
8dfd1edf85 | ||
|
d2fd20b3ec | ||
|
28fdbb1a3d | ||
|
23857f196a | ||
|
bf1ef451f2 | ||
|
d68e98be5a | ||
|
fd57d72a22 | ||
|
a74bd9f397 | ||
|
f76be1983b | ||
|
86cb30926f | ||
|
ed8fafd11d | ||
|
5333def40d | ||
|
ab78e3189e | ||
|
b26313d18e | ||
|
2af9bc0626 | ||
|
f573c1c50b | ||
|
2d999063fc | ||
|
1a44b8ea48 | ||
|
8eeafaa008 | ||
|
0ec8e80f0f | ||
|
f2bca9fcbf | ||
|
6ca17c47a0 | ||
|
016ed04f61 | ||
|
5b59011a89 | ||
|
7548347421 | ||
|
43c4cf500e | ||
|
aecb6f5ddd | ||
|
6ac3d346b9 | ||
|
946f93471c | ||
|
f68ff945c1 | ||
|
9fa9dd5350 | ||
|
1c0cb0185c | ||
|
2a1338ca5b | ||
|
cb847f25ad | ||
|
44311d08a5 | ||
|
17003840d3 | ||
|
a4bebf91bc | ||
|
691215ca6f | ||
|
999d86ae3e | ||
|
a4c2fb92e7 | ||
|
66d3a9e00d | ||
|
e418783ea9 | ||
|
36ddd84c85 | ||
|
f7a5b3ae90 | ||
|
3afe853517 | ||
|
853ebd5e36 | ||
|
f820b8cfcb | ||
|
062fd5a7f4 | ||
|
70bf874f4a | ||
|
bf9d847516 | ||
|
d45cae096b | ||
|
717b932e01 | ||
|
da11cb320c | ||
|
4bf07e75ac | ||
|
3b52ef3c09 | ||
|
ac809d18cc | ||
|
85019e5493 | ||
|
95c1a264ee | ||
|
32313ef02e | ||
|
ed9df06b38 | ||
|
b9086f37cf | ||
|
d3ed551203 | ||
|
1b0e9b49ef | ||
|
8797434ae7 | ||
|
25c87f3b96 | ||
|
26c63a43e0 | ||
|
3217fc03f8 | ||
|
b7b63c4460 | ||
|
7d8388af28 | ||
|
a53d7f1e01 | ||
|
c019f1b3a7 | ||
|
594f6056da | ||
|
113e9fd4ef | ||
|
e32d226151 | ||
|
a3246e506d | ||
|
ccb22827a2 | ||
|
94e154261b | ||
|
c23561f80c | ||
|
681650e9a6 | ||
|
31f0543da2 | ||
|
5e3daadf9c | ||
|
6b9a737756 | ||
|
1f9dcfb353 | ||
|
d77e97d31d | ||
|
69e7e533de | ||
|
840bb55ab3 | ||
|
5223d51475 | ||
|
8ffa04d4a8 | ||
|
4a351bc095 | ||
|
11d197ff24 | ||
|
12fbca468b | ||
|
79d5d16c1b | ||
|
e204bc6df0 | ||
|
6783b378d3 | ||
|
9d7681cb46 | ||
|
c6139e40db | ||
|
a4b95db01c | ||
|
91b9e13cd0 | ||
|
8d5def5f02 | ||
|
3e504fb511 | ||
|
492097d551 | ||
|
02ed2ae87d | ||
|
599b6865da | ||
|
7c0e776044 | ||
|
7ea0b32161 | ||
|
72b0267598 | ||
|
4a966d4cb8 | ||
|
70f1ffc65d | ||
|
fda0ea8b0e | ||
|
2443ded12b | ||
|
1db2d00d07 | ||
|
40b4884d87 | ||
|
823e7446f4 | ||
|
83b4c73b92 | ||
|
9844a55205 | ||
|
6e1bc89256 | ||
|
8ec0765bd7 | ||
|
6b893872fb | ||
|
ca8346b193 | ||
|
7115f0cdce | ||
|
74fc7bf77d | ||
|
002459b9b6 | ||
|
1ee39870da | ||
|
b8db28abd2 | ||
|
fafe7c140c | ||
|
047dc30487 | ||
|
7278647688 | ||
|
09fbb32b9a | ||
|
4e7ad8fd23 | ||
|
9a3ddfd0e4 | ||
|
e250ea5118 | ||
|
2689adc390 | ||
|
ba31204227 | ||
|
581b749e89 | ||
|
7562346357 | ||
|
e7aa53365e | ||
|
0b671fa2f9 | ||
|
8b2ee0b466 | ||
|
b686119a6f | ||
|
dcbd2f81e3 | ||
|
0fffa98e83 | ||
|
08ba389fd6 | ||
|
587271ff77 | ||
|
7062b3c49b | ||
|
b1cec40743 | ||
|
2d6fde2e5d | ||
|
3a3188efff | ||
|
7928624639 | ||
|
d61df6d84f | ||
|
c388d8ec9a | ||
|
ad02ad5327 | ||
|
af6ce5dca4 | ||
|
68dd8e223f | ||
|
d9fbd5194e | ||
|
0ba7112a9f | ||
|
0c262bc19d | ||
|
b03a46d25e | ||
|
69f58bc358 | ||
|
41c1aace80 | ||
|
423f05173c | ||
|
d82008ddcf | ||
|
19f46574cb | ||
|
713887cf08 | ||
|
a3cfcc71ec | ||
|
0f431500c0 | ||
|
9d03dab83d | ||
|
7e7c06597a | ||
|
83157412e7 | ||
|
461dac06d5 | ||
|
d60cfb7e44 | ||
|
20c8dde9ef | ||
|
594640c10c | ||
|
1f7a251925 | ||
|
7ab251f755 | ||
|
3d9779cc63 | ||
|
b234465219 | ||
|
51a8b508ac | ||
|
b86529655f | ||
|
4434daa5ea | ||
|
dec17e8d85 | ||
|
c6a63b55a9 | ||
|
be5a91ce2e | ||
|
c4445074ad | ||
|
b1146b9f55 | ||
|
78d67cd364 | ||
|
2fbec7ab6a | ||
|
fbfd17351f | ||
|
3c914f741d | ||
|
ad14df6a7a | ||
|
cf43257006 | ||
|
06076fc908 | ||
|
d5c9a518d8 | ||
|
d5eee7f2d1 | ||
|
ab51dcbbc1 | ||
|
8982dc0fe4 | ||
|
8e3d5f4094 | ||
|
77006fd0c9 | ||
|
bab19efcfe | ||
|
f1bccaae2e | ||
|
b5b59b10d7 | ||
|
91fba0a971 | ||
|
a839c2d6f1 | ||
|
3cf07839cc | ||
|
112dc10dba | ||
|
b086c237ee | ||
|
ffd20d73c8 | ||
|
861f7a0d34 | ||
|
7a7ad75ef7 | ||
|
2f513526d3 | ||
|
566b804b65 | ||
|
5a08853e7a | ||
|
dd0473e7cf | ||
|
067e32923c | ||
|
f3a241fcc1 | ||
|
4ad2c50143 | ||
|
194b5ae3dc | ||
|
0633daaf8e | ||
|
7dd06c5e87 | ||
|
8d30ce1588 | ||
|
9187692615 | ||
|
99621de555 | ||
|
e9d5b75298 | ||
|
994049da8c | ||
|
84d01b5358 | ||
|
ff914d380d | ||
|
9d04c6747c | ||
|
8e0a5665f0 | ||
|
dc7b41e7da | ||
|
b1fc1907ab | ||
|
c1a25d34d3 | ||
|
5a9113e445 | ||
|
cc4b8d4c9e | ||
|
890d4b8d50 | ||
|
9afc138e25 | ||
|
76737b3b99 | ||
|
5b570c21fb | ||
|
31ec7a2ecf | ||
|
2e32aa6707 | ||
|
5feba82ffb | ||
|
1be8fdd1f0 | ||
|
4848e5be14 | ||
|
c4b6df2637 | ||
|
874bb6f1cf | ||
|
7842d075ad | ||
|
1bd7831c85 | ||
|
5816debef5 | ||
|
97c44a37ae | ||
|
3c060a4e8c | ||
|
fb7302c361 | ||
|
4670c94241 | ||
|
864a6cd2ae | ||
|
815f3bd638 | ||
|
c2e4e5030b | ||
|
086e6bc970 | ||
|
58fb2c2191 | ||
|
8268b12a77 | ||
|
290db4847a | ||
|
1e146e711c | ||
|
f88322cdff | ||
|
7a96cbe228 | ||
|
7143245c37 | ||
|
885bf947b5 | ||
|
e29a91e7f7 | ||
|
7878595460 | ||
|
7a1cdd2c3d | ||
|
9bab254710 | ||
|
b32f936365 | ||
|
4809e1d0d0 | ||
|
529daa3c6f | ||
|
37314ec930 | ||
|
b38d8805a6 | ||
|
b2cfe285b4 | ||
|
36cab0c1af | ||
|
793840f762 | ||
|
a784941932 | ||
|
ae14d6a9fe | ||
|
f1bd41fada | ||
|
2760e588ac | ||
|
c60ad23618 | ||
|
9b3691b2bc | ||
|
1c64a31ac8 | ||
|
290da6f1fe | ||
|
ea46cb08a4 | ||
|
8ec98064a1 | ||
|
b8f0d0fbcd | ||
|
911a27b20a | ||
|
a586dd0d44 | ||
|
79756e4ac4 | ||
|
354bba3179 | ||
|
b0dfe68ebe | ||
|
fa513a9ac9 | ||
|
716e98af5b | ||
|
6437b4b5fb | ||
|
ce5c2c2f21 | ||
|
4eb1ccb532 | ||
|
b0e1e2e837 | ||
|
ee66b7e5cc | ||
|
b82e57c281 | ||
|
aa4f267a29 | ||
|
ef3ae2a38e | ||
|
78c29804a1 | ||
|
cce68bcd98 | ||
|
a27f735eb8 | ||
|
abe4997d44 | ||
|
1c7881eda3 | ||
|
5be39fe60e | ||
|
02c1917004 | ||
|
20164c647b | ||
|
8290afc737 | ||
|
95681aa86e | ||
|
c7431f1b19 | ||
|
6106f5cd43 | ||
|
b6611ec321 | ||
|
9636e58bec | ||
|
ca602ed936 | ||
|
fb6b5d0887 | ||
|
5faf8fa050 | ||
|
ffb24f71c6 | ||
|
a9ecbec6f1 | ||
|
5f8bc74f0b | ||
|
8b80f15ee7 | ||
|
0d24204b83 | ||
|
ac21f563de | ||
|
55c71e2b80 | ||
|
c150bd1341 | ||
|
ce7eb0b91a | ||
|
b24ed83a8b | ||
|
0d9a8d76fc | ||
|
187bdbc20f | ||
|
bb3183a0b8 | ||
|
100394b29c | ||
|
7345c19d5d | ||
|
801e0aa907 | ||
|
0910c2749b | ||
|
300cb83ab8 | ||
|
ca6fa4605b | ||
|
1bebb179ce | ||
|
8632da0a22 | ||
|
a864db0182 | ||
|
fcf41b990e | ||
|
4c01dd0b9b | ||
|
bea2c4d5c6 | ||
|
f442e81414 | ||
|
18f2bea9b0 | ||
|
f6bee9e6e4 | ||
|
d9a52ec795 | ||
|
4b074a6d8a | ||
|
791a94322f | ||
|
3cd063b153 | ||
|
9a64fadb56 | ||
|
e178bc9f20 | ||
|
4ef2d9c3ce | ||
|
39d0356340 | ||
|
1e05cdbe62 | ||
|
e27301013d | ||
|
f477871bfa | ||
|
fac8a62037 | ||
|
ec7b845c67 | ||
|
ce07b2ff50 | ||
|
c2bd6c8540 | ||
|
1aa2b08db7 | ||
|
14c94e6d14 | ||
|
8f54afe7b3 | ||
|
9465a077b1 | ||
|
168e530dab | ||
|
159fa74a3f | ||
|
5bb74ec465 | ||
|
949bcdbc2a | ||
|
ceaa994eba | ||
|
489c7280ec | ||
|
c5fc8aeb19 | ||
|
d5e469d6c6 | ||
|
bc99188c80 | ||
|
6176afbf2c | ||
|
1cc9d4f04b | ||
|
62217f6a76 | ||
|
e4facd9b7a | ||
|
ba8b4a4bc9 | ||
|
82a0c8cf11 | ||
|
bdc9478aa8 | ||
|
8bd4dc1fbd | ||
|
dbc7536724 | ||
|
062109a5bc | ||
|
09a3df54c4 | ||
|
554b5a663c | ||
|
0cb169afab | ||
|
61ce888e47 | ||
|
e8f1657821 | ||
|
f089d57d7d | ||
|
2de2b026be | ||
|
9d3dd72b80 | ||
|
c1da8d29fc | ||
|
1fa9846f56 | ||
|
ba6fc820b7 | ||
|
16ecdd2b57 | ||
|
2896134f67 | ||
|
97a34cfcbc | ||
|
ca75148e86 | ||
|
834873638d | ||
|
4618845a97 | ||
|
5db6e28534 | ||
|
fb86669872 | ||
|
013f24ba80 | ||
|
96eae65da1 | ||
|
a3ac1993e9 | ||
|
5ca0dcdf8d | ||
|
8a66233148 | ||
|
377abc84c7 | ||
|
ede4ccdf30 | ||
|
7e06957cc2 | ||
|
ad7da6edfa | ||
|
62868f64b3 | ||
|
09be632cd9 | ||
|
803718108e | ||
|
f8020b9c10 | ||
|
11210e3a23 | ||
|
c48969996e | ||
|
c7ccc9eaf6 | ||
|
e4869e1a4b | ||
|
dd84195db6 | ||
|
d856ae81e0 | ||
|
1494ba872a | ||
|
685e6e4c7f | ||
|
085cec0c8e | ||
|
288ac05508 | ||
|
c777aa779e | ||
|
d55bfec80c | ||
|
e88956a16f | ||
|
aab30279ac | ||
|
2f9247abfb | ||
|
0d58408c58 | ||
|
3f73d2e3ce | ||
|
896914e189 | ||
|
4f587c5508 | ||
|
a9cb0a69a2 | ||
|
6baca35422 | ||
|
b4c5e75705 | ||
|
d859c3a1eb | ||
|
b7f5a3dfad | ||
|
8cd1aac99c | ||
|
f0f6b5f7e2 | ||
|
acbc22e79f | ||
|
134bef69e0 | ||
|
477c3601fc | ||
|
f0dabb9a83 | ||
|
7407adefb9 | ||
|
91bfa84c12 | ||
|
7473af6d9d | ||
|
4292644870 | ||
|
2e11ee5b32 | ||
|
0c8398b3db | ||
|
52e738b586 | ||
|
c0afc295a5 | ||
|
ed7c56f12c | ||
|
be7075beb8 | ||
|
f9a6aab475 | ||
|
cc441b33d8 | ||
|
b8d97cc414 | ||
|
eab39a5f99 | ||
|
5ffab53364 | ||
|
b24421e18c | ||
|
487a747544 | ||
|
ef2f0cc998 | ||
|
f03d9bb648 | ||
|
0f234c2d7b | ||
|
f4171e3cf5 | ||
|
9c831205e0 | ||
|
a429a6af22 | ||
|
73484df323 | ||
|
819e11f680 | ||
|
9b2c8e773d | ||
|
607fa42672 | ||
|
0ce42ba694 | ||
|
463bf15ba8 | ||
|
835a851dd6 | ||
|
1b9422e828 | ||
|
2ef2b5f797 | ||
|
7be554dc62 | ||
|
b7a850f6da | ||
|
b7d421fdcd | ||
|
93b4ac8542 | ||
|
892dd6d37f | ||
|
6ae05006c6 | ||
|
f0167925ec | ||
|
afead02c46 | ||
|
baab52f440 | ||
|
1c04bf7cdb | ||
|
4b058cc837 | ||
|
5bc2949a49 | ||
|
1a2db03c26 | ||
|
17a26be1a9 | ||
|
3ec44fd56a | ||
|
ee6c5b231f | ||
|
255fc98d44 | ||
|
4f0b41a791 | ||
|
e4338979a6 | ||
|
85f0ac8ed3 | ||
|
e25ac201d2 | ||
|
38b871aa8e | ||
|
0b18bb63e8 | ||
|
afbde420a7 | ||
|
244563720a | ||
|
886c091901 | ||
|
11942b2c74 | ||
|
b2848ea718 | ||
|
b9ee892b20 | ||
|
af457255ca | ||
|
0397d6d814 | ||
|
4d61ba1cc2 | ||
|
23fe515eac | ||
|
75bf8de36a | ||
|
d24cf98785 | ||
|
896253bcbc | ||
|
32188352a5 | ||
|
b9b2715b10 | ||
|
7c1d7083ab | ||
|
bed0d03b9c | ||
|
0555132740 | ||
|
9342f983d5 | ||
|
52082359f0 | ||
|
59cc0037fc | ||
|
eb0291ed87 | ||
|
03059786b6 | ||
|
1475f9a2ec | ||
|
8daf49a505 | ||
|
73468e5424 | ||
|
46ce583de3 | ||
|
f066bc534f | ||
|
709101cf0f | ||
|
0d969a7a3c | ||
|
0f988e3e9f | ||
|
9bcae578d7 | ||
|
45c8dcb36b | ||
|
5655c602c7 | ||
|
af38476a14 | ||
|
2dda42cf9f | ||
|
e4b9557c8e | ||
|
6bdf827c65 | ||
|
ddffe33bcd | ||
|
f193e164ff | ||
|
af2da64c62 | ||
|
1433f35186 | ||
|
507339907c | ||
|
1fb4046d52 | ||
|
c71d4048af | ||
|
877e561a45 | ||
|
1765ddf0f8 | ||
|
5ad314607d | ||
|
b154d97438 | ||
|
07624e529f | ||
|
a772ab42d0 | ||
|
b13b0f86eb |
4
.gitignore
vendored
4
.gitignore
vendored
@ -15,4 +15,6 @@ tmp
|
||||
public
|
||||
build
|
||||
.sass-cache
|
||||
wiki
|
||||
wiki
|
||||
.gem_rbs_collection/
|
||||
rbs_collection.lock.yaml
|
@ -8,7 +8,7 @@ image:
|
||||
name: docker/compose:latest
|
||||
|
||||
variables:
|
||||
# this variable enables caching withing docker-in-docker
|
||||
# this variable enables caching within docker-in-docker
|
||||
# https://docs.gitlab.com/ee/ci/docker/using_docker_build.html#use-docker-in-docker-workflow-with-docker-executor
|
||||
MOUNT_POINT: /builds/$CI_PROJECT_PATH/vendor
|
||||
# bundler-specific
|
||||
@ -38,33 +38,40 @@ cache:
|
||||
paths:
|
||||
- vendor
|
||||
|
||||
lint rubocop code:
|
||||
image: "ruby:3.4"
|
||||
variables:
|
||||
BUNDLE_WITHOUT: test:coverage:assorted
|
||||
before_script:
|
||||
- bundle install
|
||||
script:
|
||||
- bundle exec rake rubocop
|
||||
lint rubocop wiki:
|
||||
image: "ruby:3.4"
|
||||
rules:
|
||||
- if: $CI_PIPELINE_SOURCE == "schedule"
|
||||
variables:
|
||||
BUNDLE_ONLY: lint
|
||||
before_script:
|
||||
- git clone https://gitlab.com/os85/httpx.wiki.git
|
||||
- bundle install
|
||||
- |
|
||||
cat > .rubocop-wiki.yml << FILE
|
||||
require:
|
||||
- rubocop-md
|
||||
|
||||
AllCops:
|
||||
TargetRubyVersion: 3.4
|
||||
DisabledByDefault: true
|
||||
FILE
|
||||
script:
|
||||
- bundle exec rubocop httpx.wiki --config .rubocop-wiki.yml
|
||||
|
||||
test jruby:
|
||||
<<: *test_settings
|
||||
script:
|
||||
./spec.sh jruby 9.0.0.0
|
||||
allow_failure: true
|
||||
test ruby 2/3:
|
||||
<<: *test_settings
|
||||
script:
|
||||
./spec.sh ruby 2.3
|
||||
test ruby 2/4:
|
||||
<<: *test_settings
|
||||
only:
|
||||
- master
|
||||
script:
|
||||
./spec.sh ruby 2.4
|
||||
test ruby 2/5:
|
||||
<<: *test_settings
|
||||
only:
|
||||
- master
|
||||
script:
|
||||
./spec.sh ruby 2.5
|
||||
test ruby 2/6:
|
||||
<<: *test_settings
|
||||
only:
|
||||
- master
|
||||
script:
|
||||
./spec.sh ruby 2.6
|
||||
test ruby 2/7:
|
||||
<<: *test_settings
|
||||
script:
|
||||
@ -83,20 +90,28 @@ test ruby 3/1:
|
||||
./spec.sh ruby 3.1
|
||||
test ruby 3/2:
|
||||
<<: *test_settings
|
||||
<<: *yjit_matrix
|
||||
script:
|
||||
./spec.sh ruby 3.2
|
||||
test ruby 3/3:
|
||||
<<: *test_settings
|
||||
script:
|
||||
./spec.sh ruby 3.3
|
||||
test ruby 3/4:
|
||||
<<: *test_settings
|
||||
<<: *yjit_matrix
|
||||
script:
|
||||
./spec.sh ruby 3.4
|
||||
test truffleruby:
|
||||
<<: *test_settings
|
||||
script:
|
||||
./spec.sh truffleruby latest
|
||||
allow_failure: true
|
||||
regression tests:
|
||||
image: "ruby:3.2"
|
||||
image: "ruby:3.4"
|
||||
variables:
|
||||
BUNDLE_WITHOUT: assorted
|
||||
BUNDLE_WITHOUT: lint:assorted
|
||||
CI: 1
|
||||
COVERAGE_KEY: "$RUBY_ENGINE-$RUBY_VERSION-regression-tests"
|
||||
COVERAGE_KEY: "ruby-3.4-regression-tests"
|
||||
artifacts:
|
||||
paths:
|
||||
- coverage/
|
||||
@ -108,12 +123,12 @@ regression tests:
|
||||
- bundle exec rake regression_tests
|
||||
|
||||
coverage:
|
||||
coverage: '/\(\d+.\d+\%\) covered/'
|
||||
coverage: '/Coverage: \d+.\d+\%/'
|
||||
stage: prepare
|
||||
variables:
|
||||
BUNDLE_WITHOUT: test:assorted
|
||||
BUNDLE_WITHOUT: lint:test:assorted
|
||||
|
||||
image: "ruby:3.2"
|
||||
image: "ruby:3.4"
|
||||
script:
|
||||
- gem install simplecov --no-doc
|
||||
# this is a workaround, because simplecov doesn't support relative paths.
|
||||
@ -135,7 +150,7 @@ pages:
|
||||
stage: deploy
|
||||
needs:
|
||||
- coverage
|
||||
image: "ruby:3.2"
|
||||
image: "ruby:3.4"
|
||||
before_script:
|
||||
- gem install hanna-nouveau
|
||||
script:
|
||||
|
18
.rubocop.yml
18
.rubocop.yml
@ -1,6 +1,8 @@
|
||||
inherit_from: .rubocop_todo.yml
|
||||
|
||||
require: rubocop-performance
|
||||
require:
|
||||
- rubocop-performance
|
||||
- rubocop-md
|
||||
|
||||
AllCops:
|
||||
NewCops: enable
|
||||
@ -23,9 +25,10 @@ AllCops:
|
||||
- 'vendor/**/*'
|
||||
- 'www/**/*'
|
||||
- 'lib/httpx/extensions.rb'
|
||||
- 'lib/httpx/punycode.rb'
|
||||
# Do not lint ffi block, for openssl parity
|
||||
- 'test/extensions/response_pattern_match.rb'
|
||||
# Old release notes
|
||||
- !ruby/regexp /doc/release_notes/0_.*.md/
|
||||
|
||||
Metrics/ClassLength:
|
||||
Enabled: false
|
||||
@ -89,6 +92,10 @@ Style/GlobalVars:
|
||||
Exclude:
|
||||
- lib/httpx/plugins/internal_telemetry.rb
|
||||
|
||||
Style/CommentedKeyword:
|
||||
Exclude:
|
||||
- integration_tests/faraday_datadog_test.rb
|
||||
|
||||
Style/RedundantBegin:
|
||||
Enabled: false
|
||||
|
||||
@ -118,6 +125,9 @@ Style/HashSyntax:
|
||||
Style/AndOr:
|
||||
Enabled: False
|
||||
|
||||
Style/ArgumentsForwarding:
|
||||
Enabled: False
|
||||
|
||||
Naming/MethodParameterName:
|
||||
Enabled: false
|
||||
|
||||
@ -170,3 +180,7 @@ Performance/StringIdentifierArgument:
|
||||
|
||||
Style/Lambda:
|
||||
Enabled: false
|
||||
|
||||
Style/TrivialAccessors:
|
||||
Exclude:
|
||||
- 'test/pool_test.rb'
|
@ -11,7 +11,7 @@ Metrics/ModuleLength:
|
||||
Max: 325
|
||||
|
||||
Metrics/BlockLength:
|
||||
Max: 200
|
||||
Max: 500
|
||||
|
||||
Metrics/BlockNesting:
|
||||
Enabled: False
|
||||
@ -38,4 +38,4 @@ Naming/AccessorMethodName:
|
||||
Enabled: false
|
||||
|
||||
Performance/MethodObjectAsBlock:
|
||||
Enabled: false
|
||||
Enabled: false
|
||||
|
@ -6,5 +6,5 @@ SimpleCov.start do
|
||||
add_filter "/integration_tests/"
|
||||
add_filter "/regression_tests/"
|
||||
add_filter "/lib/httpx/plugins/internal_telemetry.rb"
|
||||
add_filter "/lib/httpx/punycode.rb"
|
||||
add_filter "/lib/httpx/base64.rb"
|
||||
end
|
||||
|
@ -14,7 +14,7 @@ require "httpx"
|
||||
|
||||
response = HTTPX.get("https://google.com/")
|
||||
# Will print response.body
|
||||
puts response.to_s
|
||||
puts response
|
||||
```
|
||||
|
||||
## Multiple HTTP Requests
|
||||
@ -24,7 +24,7 @@ require "httpx"
|
||||
|
||||
uri = "https://google.com"
|
||||
|
||||
responses = HTTPX.new(uri, uri)
|
||||
responses = HTTPX.get(uri, uri)
|
||||
|
||||
# OR
|
||||
HTTPX.wrap do |client|
|
||||
@ -37,17 +37,17 @@ end
|
||||
## Headers
|
||||
|
||||
```ruby
|
||||
HTTPX.headers("user-agent" => "My Ruby Script").get("https://google.com")
|
||||
HTTPX.with(headers: { "user-agent" => "My Ruby Script" }).get("https://google.com")
|
||||
```
|
||||
|
||||
## HTTP Methods
|
||||
|
||||
```ruby
|
||||
HTTP.get("https://myapi.com/users/1")
|
||||
HTTP.post("https://myapi.com/users")
|
||||
HTTP.patch("https://myapi.com/users/1")
|
||||
HTTP.put("https://myapi.com/users/1")
|
||||
HTTP.delete("https://myapi.com/users/1")
|
||||
HTTPX.get("https://myapi.com/users/1")
|
||||
HTTPX.post("https://myapi.com/users")
|
||||
HTTPX.patch("https://myapi.com/users/1")
|
||||
HTTPX.put("https://myapi.com/users/1")
|
||||
HTTPX.delete("https://myapi.com/users/1")
|
||||
```
|
||||
|
||||
## HTTP Authentication
|
||||
@ -56,13 +56,13 @@ HTTP.delete("https://myapi.com/users/1")
|
||||
require "httpx"
|
||||
|
||||
# Basic Auth
|
||||
response = HTTPX.plugin(:basic_authentication).basic_authentication("username", "password").get("https://google.com")
|
||||
response = HTTPX.plugin(:basic_auth).basic_auth("username", "password").get("https://google.com")
|
||||
|
||||
# Digest Auth
|
||||
response = HTTPX.plugin(:digest_authentication).digest_authentication("username", "password").get("https://google.com")
|
||||
response = HTTPX.plugin(:digest_auth).digest_auth("username", "password").get("https://google.com")
|
||||
|
||||
# Token Auth
|
||||
response = HTTPX.plugin(:authentication).authentication("eyrandomtoken").get("https://google.com")
|
||||
# Bearer Token Auth
|
||||
response = HTTPX.plugin(:auth).authorization("eyrandomtoken").get("https://google.com")
|
||||
```
|
||||
|
||||
|
||||
@ -74,31 +74,27 @@ require "httpx"
|
||||
response = HTTPX.get("https://google.com/")
|
||||
response.status # => 301
|
||||
response.headers["location"] #=> "https://www.google.com/"
|
||||
response.body # => "<HTML><HEAD><meta http-equiv=\"content-type\" ....
|
||||
response["cache-control"] # => public, max-age=2592000
|
||||
response.headers["cache-control"] #=> public, max-age=2592000
|
||||
response.body.to_s #=> "<HTML><HEAD><meta http-equiv=\"content-type\" ....
|
||||
```
|
||||
|
||||
## POST form request
|
||||
## POST `application/x-www-form-urlencoded` request
|
||||
|
||||
```ruby
|
||||
require "httpx"
|
||||
uri = URI.parse("http://example.com/search")
|
||||
|
||||
# Shortcut
|
||||
response = HTTPX.post(uri, form: {"q" => "My query", "per_page" => "50"})
|
||||
response = HTTPX.post(uri, form: { "q" => "My query", "per_page" => "50" })
|
||||
```
|
||||
|
||||
## File upload - input type="file" style
|
||||
## File `multipart/form-data` upload - input type="file" style
|
||||
|
||||
```ruby
|
||||
require "httpx"
|
||||
|
||||
# uses http_form_data API: https://github.com/httprb/form_data
|
||||
|
||||
path = "/path/to/your/testfile.txt"
|
||||
HTTPX.plugin(:multipart).post("http://something.com/uploads", form: {
|
||||
name: HTTP::FormData::File.new(path)
|
||||
})
|
||||
file_to_upload = Pathname.new("/path/to/your/testfile.txt")
|
||||
HTTPX.plugin(:multipart).post("http://something.com/uploads", form: { name: file_to_upload })
|
||||
```
|
||||
|
||||
## SSL/HTTPS request
|
||||
@ -108,8 +104,7 @@ Update: There are some good reasons why this code example is bad. It introduces
|
||||
```ruby
|
||||
require "httpx"
|
||||
|
||||
|
||||
response = HTTPX.with(ssl: { verify_mode: OpenSSL::SSL::VERIFY_NONE }).get("https://secure.com/")
|
||||
response = HTTPX.with(ssl: { verify_mode: OpenSSL::SSL::VERIFY_NONE }).get("https://secure.com/")
|
||||
```
|
||||
|
||||
## SSL/HTTPS request with PEM certificate
|
||||
@ -118,11 +113,11 @@ response = HTTPX.with(ssl: { verify_mode: OpenSSL::SSL::VERIFY_NONE }).get("htt
|
||||
require "httpx"
|
||||
|
||||
pem = File.read("/path/to/my.pem")
|
||||
HTTPX.with(ssl: {
|
||||
HTTPX.with_ssl(
|
||||
cert: OpenSSL::X509::Certificate.new(pem),
|
||||
key: OpenSSL::PKey::RSA.new(pem),
|
||||
verify_mode: OpenSSL::SSL::VERIFY_PEER
|
||||
}).get("https://secure.com/")
|
||||
verify_mode: OpenSSL::SSL::VERIFY_PEER,
|
||||
).get("https://secure.com/")
|
||||
```
|
||||
|
||||
## Cookies
|
||||
@ -132,8 +127,7 @@ require "httpx"
|
||||
|
||||
HTTPX.plugin(:cookies).wrap do |client|
|
||||
session_response = client.get("https://translate.google.com/")
|
||||
response_cookies = session_response.cookie_jar
|
||||
response = client.cookies(response_cookies).get("https://translate.google.com/#auto|en|Pardon")
|
||||
response = client.get("https://translate.google.com/#auto|en|Pardon")
|
||||
puts response
|
||||
end
|
||||
```
|
||||
@ -143,9 +137,14 @@ end
|
||||
```ruby
|
||||
require "httpx"
|
||||
|
||||
response = HTTPX.plugin(:compression).get("https://www.google.com")
|
||||
puts response.headers["content-encoding"] #=> "gzip"
|
||||
response = HTTPX.get("https://www.google.com")
|
||||
puts response.headers["content-encoding"] #=> "gzip"
|
||||
puts response #=> uncompressed payload
|
||||
|
||||
# uncompressed request payload
|
||||
HTTPX.post("https://myapi.com/users", body: super_large_text_payload)
|
||||
# gzip-compressed request payload
|
||||
HTTPX.post("https://myapi.com/users", headers: { "content-encoding" => %w[gzip] }, body: super_large_text_payload)
|
||||
```
|
||||
|
||||
## Proxy
|
||||
@ -171,11 +170,10 @@ HTTPX.get("https://google.com")
|
||||
require "httpx"
|
||||
HTTPX.with(resolver_class: :https).get("https://google.com")
|
||||
|
||||
|
||||
# by default it uses cloudflare DoH server.
|
||||
# This example switches the resolver to Quad9's DoH server
|
||||
|
||||
HTTPX.with(resolver_class: :https, resolver_options: {uri: "https://9.9.9.9/dns-query"}).get("https://google.com")
|
||||
HTTPX.with(resolver_class: :https, resolver_options: { uri: "https://9.9.9.9/dns-query" }).get("https://google.com")
|
||||
```
|
||||
|
||||
## Follow Redirects
|
||||
@ -183,7 +181,9 @@ HTTPX.with(resolver_class: :https, resolver_options: {uri: "https://9.9.9.9/dns-
|
||||
```ruby
|
||||
require "httpx"
|
||||
|
||||
HTTPX.plugin(:follow_redirects).with(follow_insecure_redirects: false, max_redirects: 4).get("https://www.google.com")
|
||||
HTTPX.plugin(:follow_redirects)
|
||||
.with(follow_insecure_redirects: false, max_redirects: 4)
|
||||
.get("https://www.google.com")
|
||||
```
|
||||
|
||||
## Timeouts
|
||||
@ -191,12 +191,12 @@ HTTPX.plugin(:follow_redirects).with(follow_insecure_redirects: false, max_redir
|
||||
```ruby
|
||||
require "httpx"
|
||||
|
||||
HTTPX.with(timeout: {connect_timeout: 10, operation_timeout: 3}).get("https://google.com")
|
||||
# full E2E request/response timeout, 10 sec to connect to peer
|
||||
HTTPX.with(timeout: { connect_timeout: 10, request_timeout: 3 }).get("https://google.com")
|
||||
```
|
||||
|
||||
## Retries
|
||||
|
||||
|
||||
```ruby
|
||||
require "httpx"
|
||||
HTTPX.plugin(:retries).max_retries(5).get("https://www.google.com")
|
||||
@ -214,4 +214,3 @@ HTTPX.get("https://google.com") #=> udp://10.0.1.2:53...
|
||||
|
||||
HTTPX.with(debug_level: 1, debug: $stderr).get("https://google.com")
|
||||
```
|
||||
|
||||
|
116
Gemfile
116
Gemfile
@ -5,56 +5,42 @@ ruby RUBY_VERSION
|
||||
source "https://rubygems.org"
|
||||
gemspec
|
||||
|
||||
if RUBY_VERSION < "2.2.0"
|
||||
gem "rake", "~> 12.3"
|
||||
else
|
||||
gem "rake", "~> 13.0"
|
||||
end
|
||||
gem "rake", "~> 13.0"
|
||||
|
||||
group :test do
|
||||
if RUBY_VERSION >= "3.2.0"
|
||||
gem "datadog", "~> 2.0"
|
||||
else
|
||||
gem "ddtrace"
|
||||
end
|
||||
gem "http-form_data", ">= 2.0.0"
|
||||
gem "minitest"
|
||||
gem "minitest-proveit"
|
||||
gem "ruby-ntlm"
|
||||
gem "sentry-ruby" if RUBY_VERSION >= "2.4.0"
|
||||
gem "spy"
|
||||
if RUBY_VERSION < "2.3.0"
|
||||
gem "webmock", "< 3.15.0"
|
||||
elsif RUBY_VERSION < "2.4.0"
|
||||
gem "webmock", "< 3.17.0"
|
||||
else
|
||||
gem "webmock"
|
||||
end
|
||||
gem "nokogiri"
|
||||
gem "ruby-ntlm"
|
||||
gem "sentry-ruby"
|
||||
gem "spy"
|
||||
gem "webmock"
|
||||
gem "websocket-driver"
|
||||
|
||||
gem "net-ssh", "~> 4.2.0" if RUBY_VERSION < "2.2.0"
|
||||
|
||||
gem "ddtrace"
|
||||
|
||||
platform :mri do
|
||||
if RUBY_VERSION >= "2.3.0"
|
||||
if RUBY_VERSION < "2.5.0"
|
||||
gem "google-protobuf", "< 3.19.2"
|
||||
elsif RUBY_VERSION < "2.7.0"
|
||||
gem "google-protobuf", "< 3.22.0"
|
||||
end
|
||||
if RUBY_VERSION <= "2.6.0"
|
||||
gem "grpc", "< 1.49.0"
|
||||
else
|
||||
gem "grpc"
|
||||
end
|
||||
gem "logging"
|
||||
gem "marcel", require: false
|
||||
gem "mimemagic", require: false
|
||||
gem "ruby-filemagic", require: false
|
||||
end
|
||||
gem "grpc"
|
||||
gem "logging"
|
||||
gem "marcel", require: false
|
||||
gem "mimemagic", require: false
|
||||
gem "ruby-filemagic", require: false
|
||||
|
||||
if RUBY_VERSION >= "3.0.0"
|
||||
gem "multi_json", require: false
|
||||
gem "oj", require: false
|
||||
gem "rbs"
|
||||
gem "yajl-ruby", require: false
|
||||
end
|
||||
|
||||
if RUBY_VERSION >= "3.4.0"
|
||||
# TODO: remove this once websocket-driver-ruby declares this as dependency
|
||||
gem "base64"
|
||||
end
|
||||
end
|
||||
|
||||
platform :mri, :truffleruby do
|
||||
@ -65,63 +51,39 @@ group :test do
|
||||
gem "net-ssh-gateway"
|
||||
end
|
||||
|
||||
platform :mri_21 do
|
||||
gem "rbnacl"
|
||||
end
|
||||
|
||||
platform :mri_23 do
|
||||
if RUBY_VERSION >= "2.3.0"
|
||||
gem "openssl", "< 2.0.6" # force usage of openssl version we patch against
|
||||
end
|
||||
gem "msgpack", "<= 1.3.3"
|
||||
end
|
||||
|
||||
platform :jruby do
|
||||
gem "jruby-openssl" # , git: "https://github.com/jruby/jruby-openssl.git", branch: "master"
|
||||
gem "ruby-debug"
|
||||
end
|
||||
|
||||
gem "aws-sdk-s3"
|
||||
gem "faraday"
|
||||
gem "idnx" if RUBY_VERSION >= "2.4.0"
|
||||
gem "multipart-post", "< 2.2.0" if RUBY_VERSION < "2.3.0"
|
||||
gem "faraday-multipart"
|
||||
gem "idnx"
|
||||
gem "oga"
|
||||
|
||||
if RUBY_VERSION >= "3.0.0"
|
||||
gem "rbs"
|
||||
gem "rubocop"
|
||||
gem "rubocop-performance"
|
||||
gem "webrick"
|
||||
end
|
||||
gem "webrick" if RUBY_VERSION >= "3.0.0"
|
||||
# https://github.com/TwP/logging/issues/247
|
||||
gem "syslog" if RUBY_VERSION >= "3.3.0"
|
||||
# https://github.com/ffi/ffi/issues/1103
|
||||
# ruby 2.7 only, it seems
|
||||
gem "ffi", "< 1.17.0" if Gem::VERSION < "3.3.22"
|
||||
end
|
||||
|
||||
group :lint do
|
||||
gem "rubocop", "~> 1.59.0"
|
||||
gem "rubocop-md"
|
||||
gem "rubocop-performance", "~> 1.19.0"
|
||||
end
|
||||
|
||||
group :coverage do
|
||||
if RUBY_VERSION < "2.2.0"
|
||||
gem "simplecov", "< 0.11.0"
|
||||
elsif RUBY_VERSION < "2.3"
|
||||
gem "simplecov", "< 0.11.0"
|
||||
elsif RUBY_VERSION < "2.4"
|
||||
gem "simplecov", "< 0.19.0"
|
||||
elsif RUBY_VERSION < "2.5"
|
||||
gem "simplecov", "< 0.21.0"
|
||||
else
|
||||
gem "simplecov"
|
||||
end
|
||||
gem "simplecov"
|
||||
end
|
||||
|
||||
group :assorted do
|
||||
if RUBY_VERSION < "2.2.0"
|
||||
gem "pry", "~> 0.12.2"
|
||||
else
|
||||
gem "pry"
|
||||
end
|
||||
gem "pry"
|
||||
|
||||
platform :mri do
|
||||
if RUBY_VERSION < "2.2.0"
|
||||
gem "pry-byebug", "~> 3.4.3"
|
||||
else
|
||||
gem "debug" if RUBY_VERSION >= "3.1.0"
|
||||
gem "pry-byebug"
|
||||
end
|
||||
gem "debug" if RUBY_VERSION >= "3.1.0"
|
||||
gem "pry-byebug"
|
||||
end
|
||||
end
|
||||
|
48
LICENSE.txt
48
LICENSE.txt
@ -189,51 +189,3 @@
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
|
||||
|
||||
* lib/httpx/domain_name.rb
|
||||
|
||||
This file is derived from the implementation of punycode available at
|
||||
here:
|
||||
|
||||
https://www.verisign.com/en_US/channel-resources/domain-registry-products/idn-sdks/index.xhtml
|
||||
|
||||
Copyright (C) 2000-2002 Verisign Inc., All rights reserved.
|
||||
|
||||
Redistribution and use in source and binary forms, with or
|
||||
without modification, are permitted provided that the following
|
||||
conditions are met:
|
||||
|
||||
1) Redistributions of source code must retain the above copyright
|
||||
notice, this list of conditions and the following disclaimer.
|
||||
|
||||
2) Redistributions in binary form must reproduce the above copyright
|
||||
notice, this list of conditions and the following disclaimer in
|
||||
the documentation and/or other materials provided with the
|
||||
distribution.
|
||||
|
||||
3) Neither the name of the VeriSign Inc. nor the names of its
|
||||
contributors may be used to endorse or promote products derived
|
||||
from this software without specific prior written permission.
|
||||
|
||||
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
|
||||
"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
|
||||
LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
|
||||
FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
|
||||
COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
|
||||
INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
|
||||
BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS
|
||||
OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
|
||||
AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
|
||||
LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
|
||||
ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
|
||||
POSSIBILITY OF SUCH DAMAGE.
|
||||
|
||||
This software is licensed under the BSD open source license. For more
|
||||
information visit www.opensource.org.
|
||||
|
||||
Authors:
|
||||
John Colosi (VeriSign)
|
||||
Srikanth Veeramachaneni (VeriSign)
|
||||
Nagesh Chigurupati (Verisign)
|
||||
Praveen Srinivasan(Verisign)
|
38
README.md
38
README.md
@ -19,7 +19,7 @@ And also:
|
||||
|
||||
* Compression (gzip, deflate, brotli)
|
||||
* Streaming Requests
|
||||
* Authentication (Basic Auth, Digest Auth, NTLM)
|
||||
* Auth (Basic Auth, Digest Auth, NTLM)
|
||||
* Expect 100-continue
|
||||
* Multipart Requests
|
||||
* Advanced Cookie handling
|
||||
@ -46,7 +46,7 @@ And that's the simplest one there is. But you can also do:
|
||||
HTTPX.post("http://example.com", form: { user: "john", password: "pass" })
|
||||
|
||||
http = HTTPX.with(headers: { "x-my-name" => "joe" })
|
||||
http.patch(("http://example.com/file", body: File.open("path/to/file")) # request body is streamed
|
||||
http.patch("http://example.com/file", body: File.open("path/to/file")) # request body is streamed
|
||||
```
|
||||
|
||||
If you want to do some more things with the response, you can get an `HTTPX::Response`:
|
||||
@ -61,7 +61,7 @@ puts body #=> #<HTTPX::Response ...
|
||||
You can also send as many requests as you want simultaneously:
|
||||
|
||||
```ruby
|
||||
page1, page2, page3 =`HTTPX.get("https://news.ycombinator.com/news", "https://news.ycombinator.com/news?p=2", "https://news.ycombinator.com/news?p=3")
|
||||
page1, page2, page3 = HTTPX.get("https://news.ycombinator.com/news", "https://news.ycombinator.com/news?p=2", "https://news.ycombinator.com/news?p=3")
|
||||
```
|
||||
|
||||
## Installation
|
||||
@ -107,26 +107,26 @@ HTTPX.get(
|
||||
|
||||
```ruby
|
||||
response = HTTPX.get("https://www.google.com", params: { q: "me" })
|
||||
response = HTTPX.post("https://www.nghttp2.org/httpbin/post", form: {name: "John", age: "22"})
|
||||
response = HTTPX.plugin(:basic_authentication)
|
||||
.basic_authentication("user", "pass")
|
||||
response = HTTPX.post("https://www.nghttp2.org/httpbin/post", form: { name: "John", age: "22" })
|
||||
response = HTTPX.plugin(:basic_auth)
|
||||
.basic_auth("user", "pass")
|
||||
.get("https://www.google.com")
|
||||
|
||||
# more complex client objects can be cached, and are thread-safe
|
||||
http = HTTPX.plugin(:compression).plugin(:expect).with(headers: { "x-pvt-token" => "TOKEN"})
|
||||
http = HTTPX.plugin(:expect).with(headers: { "x-pvt-token" => "TOKEN" })
|
||||
http.get("https://example.com") # the above options will apply
|
||||
http.post("https://example2.com", form: {name: "John", age: "22"}) # same, plus the form POST body
|
||||
http.post("https://example2.com", form: { name: "John", age: "22" }) # same, plus the form POST body
|
||||
```
|
||||
|
||||
### Lightweight
|
||||
|
||||
It ships with most features published as a plugin, making vanilla `httpx` lightweight and dependency-free, while allowing you to "pay for what you use"
|
||||
|
||||
The plugin system is similar to the ones used by [sequel](https://github.com/jeremyevans/sequel), [roda](https://github.com/jeremyevans/roda) or [shrine](https://github.com/janko-m/shrine).
|
||||
The plugin system is similar to the ones used by [sequel](https://github.com/jeremyevans/sequel), [roda](https://github.com/jeremyevans/roda) or [shrine](https://github.com/shrinerb/shrine).
|
||||
|
||||
### Advanced DNS features
|
||||
|
||||
`HTTPX` ships with custom DNS resolver implementations, including a native Happy Eyeballs resolver immplementation, and a DNS-over-HTTPS resolver.
|
||||
`HTTPX` ships with custom DNS resolver implementations, including a native Happy Eyeballs resolver implementation, and a DNS-over-HTTPS resolver.
|
||||
|
||||
## User-driven test suite
|
||||
|
||||
@ -134,9 +134,9 @@ The test suite runs against [httpbin proxied over nghttp2](https://nghttp2.org/h
|
||||
|
||||
## Supported Rubies
|
||||
|
||||
All Rubies greater or equal to 2.1, and always latest JRuby and Truffleruby.
|
||||
All Rubies greater or equal to 2.7, and always latest JRuby and Truffleruby.
|
||||
|
||||
**Note**: This gem is tested against all latest patch versions, i.e. if you're using 2.2.0 and you experience some issue, please test it against 2.2.10 (latest patch version of 2.2) before creating an issue.
|
||||
**Note**: This gem is tested against all latest patch versions, i.e. if you're using 3.3.0 and you experience some issue, please test it against 3.3.$latest before creating an issue.
|
||||
|
||||
## Resources
|
||||
| | |
|
||||
@ -149,24 +149,14 @@ All Rubies greater or equal to 2.1, and always latest JRuby and Truffleruby.
|
||||
|
||||
## Caveats
|
||||
|
||||
### ALPN support
|
||||
|
||||
ALPN negotiation is required for "auto" HTTP/2 "https" requests. This is available in ruby since version 2.3 .
|
||||
|
||||
### Known bugs
|
||||
|
||||
* Doesn't work with ruby 2.4.0 for Windows (see [#36](https://gitlab.com/os85/httpx/issues/36)).
|
||||
* Using `total_timeout` along with the `:persistent` plugin [does not work as you might expect](https://gitlab.com/os85/httpx/-/wikis/Timeouts#total_timeout).
|
||||
|
||||
## Versioning Policy
|
||||
|
||||
Although 0.x software, `httpx` is considered API-stable and production-ready, i.e. current API or options may be subject to deprecation and emit log warnings, but can only effectively be removed in a major version change.
|
||||
`httpx` follows Semantic Versioning.
|
||||
|
||||
## Contributing
|
||||
|
||||
* Discuss your contribution in an issue
|
||||
* Fork it
|
||||
* Make your changes, add some tests
|
||||
* Ensure all tests pass (`docker-compose -f docker-compose.yml -f docker-compose-ruby-{RUBY_VERSION}.yml run httpx bundle exec rake test`)
|
||||
* Make your changes, add some tests (follow the instructions from [here](test/README.md))
|
||||
* Open a Merge Request (that's Pull Request in Github-ish)
|
||||
* Wait for feedback
|
||||
|
1
Rakefile
1
Rakefile
@ -100,6 +100,7 @@ task :prepare_website => %w[rdoc prepare_jekyll_data] do
|
||||
header = "---\n" \
|
||||
"layout: #{layout}\n" \
|
||||
"title: #{title}\n" \
|
||||
"project: httpx\n" \
|
||||
"---\n\n"
|
||||
File.write(path, header + data)
|
||||
end
|
||||
|
5
doc/release_notes/0_23_1.md
Normal file
5
doc/release_notes/0_23_1.md
Normal file
@ -0,0 +1,5 @@
|
||||
# 0.23.1
|
||||
|
||||
## Bugfixes
|
||||
|
||||
* fixed regression causing dns candidate names not being tried after first one fails.
|
5
doc/release_notes/0_23_2.md
Normal file
5
doc/release_notes/0_23_2.md
Normal file
@ -0,0 +1,5 @@
|
||||
# 0.23.2
|
||||
|
||||
## Bugfixes
|
||||
|
||||
* fix missing variable on code path in the native resolver.
|
6
doc/release_notes/0_23_3.md
Normal file
6
doc/release_notes/0_23_3.md
Normal file
@ -0,0 +1,6 @@
|
||||
# 0.23.3
|
||||
|
||||
## Bugfixes
|
||||
|
||||
* native resolver: fix missing exception variable in the DNS error code path.
|
||||
* native resolver: fixed short DNS packet handling when using TCP.
|
5
doc/release_notes/0_23_4.md
Normal file
5
doc/release_notes/0_23_4.md
Normal file
@ -0,0 +1,5 @@
|
||||
# 0.23.4
|
||||
|
||||
## Bugfixes
|
||||
|
||||
* fix `Response::Body#read` which rewinds on every call.
|
48
doc/release_notes/0_24_0.md
Normal file
48
doc/release_notes/0_24_0.md
Normal file
@ -0,0 +1,48 @@
|
||||
# 0.24.0
|
||||
|
||||
## Features
|
||||
|
||||
### `:oauth` plugin
|
||||
|
||||
The `:oauth` plugin manages the handling of a given OAuth session, in that it ships with convenience methods to generate a new access token, which it then injects in all requests.
|
||||
|
||||
More info under https://honeyryderchuck.gitlab.io/httpx/wiki/OAuth
|
||||
|
||||
### session callbacks
|
||||
|
||||
HTTP request/response lifecycle events have now the ability of being intercepted via public API callback methods:
|
||||
|
||||
```ruby
|
||||
HTTPX.on_request_completed do |request|
|
||||
puts "request to #{request.uri} sent"
|
||||
end.get(...)
|
||||
```
|
||||
|
||||
More info under https://honeyryderchuck.gitlab.io/httpx/wiki/Events to know which events and callback methods are supported.
|
||||
|
||||
### `:circuit_breaker` plugin `on_circuit_open` callback
|
||||
|
||||
A callback has been introduced for the `:circuit_breaker` plugin, which is triggered when a circuit is opened.
|
||||
|
||||
```ruby
|
||||
http = HTTPX.plugin(:circuit_breaker).on_circuit_open do |req|
|
||||
puts "circuit opened for #{req.uri}"
|
||||
end
|
||||
http.get(...)
|
||||
```
|
||||
|
||||
## Improvements
|
||||
|
||||
Several `:response_cache` features have been improved:
|
||||
|
||||
* `:response_cache` plugin: response cache store has been made thread-safe.
|
||||
* cached response sharing across threads is made safer, as stringio/tempfile instances are copied instead of shared (without copying the underlying string/file).
|
||||
* stale cached responses are eliminated on cache store lookup/store operations.
|
||||
* already closed responses are evicted from the cache store.
|
||||
* fallback for lack of compatible response "date" header has been fixed to return a `Time` object.
|
||||
|
||||
## Bugfixes
|
||||
|
||||
* Ability to recover from errors happening during response chunk processing (required for overriding behaviour and response chunk callbacks); error bubbling up will result in the connection being closed.
|
||||
* Happy eyeballs support for multi-homed early-resolved domain names (such as `localhost` under `/etc/hosts`) was broken, as it would try the first given IP; so, if given `::1` and connection would fail, it wouldn't try `127.0.0.1`, which would have succeeded.
|
||||
* `:digest_authentication` plugin was removing the "algorithm" header on `-sess` declared algorithms, which is required for HTTP digest auth negotiation.
|
12
doc/release_notes/0_24_1.md
Normal file
12
doc/release_notes/0_24_1.md
Normal file
@ -0,0 +1,12 @@
|
||||
# 0.24.1
|
||||
|
||||
## Improvements
|
||||
|
||||
* datadog adapter: support `:service_name` configuration option.
|
||||
* datadog adapter: set `:distributed_tracing` to `true` by default.
|
||||
* `:proxy` plugin: when the proxy uri uses an unsupported scheme (i.e.: "scp://125.24.2.1"), a more user friendly error is raised (instead of the previous broken stacktrace).
|
||||
|
||||
## Bugfixes
|
||||
|
||||
* datadog adapter: fix tracing enable call, which was wrongly calling `super`.
|
||||
* `:proxy` plugin: fix for bug which was turning off plugins overriding `HTTPX::Connection#send` (such as the datadog adapter).
|
12
doc/release_notes/0_24_2.md
Normal file
12
doc/release_notes/0_24_2.md
Normal file
@ -0,0 +1,12 @@
|
||||
# 0.24.2
|
||||
|
||||
## Improvements
|
||||
|
||||
* besides an array, `:resolver_options` can now receive a hash for `:nameserver`, which **must** be indexed by IP family (`Socket::AF_INET6` or `Socket::AF_INET`); each group of nameservers will be used for emitting DNS queries of that IP family.
|
||||
* `:authentication` plugin: Added `#bearer_auth` helper, which receives a token, and sets it as `"Bearer $TOKEN` in the `"authorization"` header.
|
||||
* `faraday` adapter: now implements `#build_connection` and `#close`, will now interact with `faraday` native timeouts (`:read`, `:write` and `:connect`).
|
||||
|
||||
|
||||
## Bugfixes
|
||||
|
||||
* fixed native resolver bug when queries involving intermediate alias would be kept after the original query and mess with re-queries.
|
12
doc/release_notes/0_24_3.md
Normal file
12
doc/release_notes/0_24_3.md
Normal file
@ -0,0 +1,12 @@
|
||||
# 0.24.3
|
||||
|
||||
## Improvements
|
||||
|
||||
* faraday adapter: reraise httpx timeout errors as faraday errors.
|
||||
* faraday adapter: support `:bind` option, which expects a host and port to connect to.
|
||||
|
||||
## Bugfixes
|
||||
|
||||
* faraday adapter: fix `#close` implementation using the wrong ivar.
|
||||
* faraday adapter: fix usage of `request_timeout` translation of faraday timeouts into httpx timeouts.
|
||||
* faraday adapter: `ssl: { verify: false }` was being ignored, and certification verification was still proceeding.
|
18
doc/release_notes/0_24_4.md
Normal file
18
doc/release_notes/0_24_4.md
Normal file
@ -0,0 +1,18 @@
|
||||
# 0.24.4
|
||||
|
||||
## Improvements
|
||||
|
||||
* `digest_authentication` plugin now supports passing HA1hashed with password HA1s (common to store in htdigest files for example) when setting the `:hashed` kwarg to `true` in the `.digest_auth` call.
|
||||
* ex: `http.digest_auth(user, get_hashed_passwd_from_htdigest(user), hashed: true)`
|
||||
* TLS session resumption is now supported
|
||||
* whenever possible, `httpx` sessions will recycle used connections so that, in the case of TLS connections, the first session will keep being reused, thereby diminishing the overhead of subsequent TLS handshakes on the same host.
|
||||
* TLS sessions are only reused in the scope of the same `httpx` session, unless the `:persistent` plugin is used, in which case, the persisted `httpx` session will always try to resume TLS sessions.
|
||||
|
||||
## Bugfixes
|
||||
|
||||
* When explicitly using IP addresses in the URL host, TLS handshake will now verify if the IP address is included in the certificate.
|
||||
* IP address will keep not be used for SNI, as per RFC 6066, section 3.
|
||||
* ex: `http.get("https://10.12.0.12/get")`
|
||||
* if you want the prior behavior, set `HTTPX.with(ssl: {verify_hostname: false})`
|
||||
* Turn TLS hostname verification on for `jruby` (it's turned off by default).
|
||||
* if you want the prior behavior, set `HTTPX.with(ssl: {verify_hostname: false})`
|
6
doc/release_notes/0_24_5.md
Normal file
6
doc/release_notes/0_24_5.md
Normal file
@ -0,0 +1,6 @@
|
||||
# 0.24.5
|
||||
|
||||
## Bugfixes
|
||||
|
||||
* fix for SSL handshake post connection SAN check using IPv6 address.
|
||||
* fix bug in DoH impl when the request returned no answer.
|
5
doc/release_notes/0_24_6.md
Normal file
5
doc/release_notes/0_24_6.md
Normal file
@ -0,0 +1,5 @@
|
||||
# 0.24.6
|
||||
|
||||
## Bugfixes
|
||||
|
||||
* fix Session class assertions not prepared for class overrides, which could break some plugins which override the Session class on load (such as `datadog` or `webmock` adapters).
|
10
doc/release_notes/0_24_7.md
Normal file
10
doc/release_notes/0_24_7.md
Normal file
@ -0,0 +1,10 @@
|
||||
# 0.24.7
|
||||
|
||||
## dependencies
|
||||
|
||||
`http-2-next` last supported version for the 0.x series is the last version before v1. This should ensure that older versions of `httpx` won't be affected by any of the recent breaking changes.
|
||||
|
||||
## Bugfixes
|
||||
|
||||
* `grpc`: setup of rpc calls from camel-cased symbols has been fixed. As an improvement, the GRPC-enabled session will now support both snake-cased, as well as camel-cased calls.
|
||||
* `datadog` adapter has now been patched to support the most recent breaking changes of `ddtrace` configuration DSL (`env_to_bool` is no longer supported).
|
60
doc/release_notes/1_0_0.md
Normal file
60
doc/release_notes/1_0_0.md
Normal file
@ -0,0 +1,60 @@
|
||||
# 1.0.0
|
||||
|
||||
## Breaking changes
|
||||
|
||||
* the minimum supported ruby version is 2.7.0 .
|
||||
* The fallback support for IDNA 2003 has been removed. If you require this feature, install the [idnx gem](https://github.com/HoneyryderChuck/idnx), which `httpx` automatically integrates with when available (and supports IDNA 2008).
|
||||
* `:total_timeout` option has been removed (no session-wide timeout supported, use `:request_timeout`).
|
||||
* `:read_timeout` and `:write_timeout` are now set to 60 seconds by default, and preferred over `:operation_timeout`;
|
||||
* the exception being in the `:stream` plugin, as the response is theoretically endless (so `:read_timeout` is unset).
|
||||
* The `:multipart` plugin is removed, as its functionality and API are now loaded by default (no API changes).
|
||||
* The `:compression` plugin is removed, as its functionality and API are now loaded by default (no API changes).
|
||||
* `:compression_threshold_size` was removed (formats in `"content-encoding"` request header will always encode the request body).
|
||||
* the new `:compress_request_body` and `:decompress_response_body` can be set to `false` to (respectively) disable compression of passed input body, or decompression of the response body.
|
||||
* `:retries` plugin: the `:retry_on` condition will **not** replace default retriable error checks, it will now instead be triggered **only if** no retryable error has been found.
|
||||
|
||||
### plugins
|
||||
|
||||
* `:authentication` plugin becomes `:auth`.
|
||||
* `.authentication` helper becomes `.authorization`.
|
||||
* `:basic_authentication` plugin becomes `:basic_auth`.
|
||||
* `:basic_authentication` helper is removed.
|
||||
* `:digest_authentication` plugin becomes `:digest_auth`.
|
||||
* `:digest_authentication` helper is removed.
|
||||
* `:ntlm_authentication` plugin becomes `:ntlm_auth`.
|
||||
* `:ntlm_authentication` helper is removed.
|
||||
* OAuth plugin: `:oauth_authentication` helper is renamed to `:oauth_auth`.
|
||||
* `:compression/brotli` plugin becomes `:brotli`.
|
||||
|
||||
### Support removed for deprecated APIs
|
||||
|
||||
* The deprecated `HTTPX::Client` constant lookup has been removed (use `HTTPX::Session` instead).
|
||||
* The deprecated `HTTPX.timeout({...})` function has been removed (use `HTTPX.with(timeout: {...})` instead).
|
||||
* The deprecated `HTTPX.headers({...})` function has been removed (use `HTTPX.with(headers: {...})` instead).
|
||||
* The deprecated `HTTPX.plugins(...)` function has been removed (use `HTTPX.plugin(...).plugin(...)...` instead).
|
||||
* The deprecated `:transport_options` option, which was only valid for UNIX connections, has been removed (use `:addresses` instead).
|
||||
* The deprecated `def_option(...)` function, previously used to define additional options in plugins, has been removed (use `def option_$new_option` instead).
|
||||
* The deprecated `:loop_timeout` timeout option has been removed.
|
||||
* `:stream` plugin: the deprecated `HTTPX::InstanceMethods::StreamResponse` has been removed (use `HTTPX::StreamResponse` instead).
|
||||
* The deprecated usage of symbols to indicate HTTP verbs (i.e. `HTTPX.request(:get, ...)` or `HTTPX.build_request(:get, ...)`) is not supported anymore (use the upcase string always, i.e. `HTTPX.request("GET", ...)` or `HTTPX.build_request("GET", ...)`, instead).
|
||||
* The deprecated `HTTPX::ErrorResponse#status` method has been removed (use `HTTPX::ErrorResponse#error` instead).
|
||||
|
||||
### dependencies
|
||||
|
||||
* `http-2-next` minimum supported version is 1.0.0.
|
||||
* `:datadog` adapter only supports `ddtrace` gem 1.x or higher.
|
||||
* `:faraday` adapter only supports `faraday` gem 1.x or higher.
|
||||
|
||||
## Improvements
|
||||
|
||||
* `circuit_breaker`: the drip rate of real request during the "half-open" stage of a circuit will reliably distribute real requests (as per the drip rate) over the `max_attempts`, before the circuit is closed.
|
||||
|
||||
## Bugfixes
|
||||
|
||||
* Tempfiles are now correctly identified as file inputs for multipart requests.
|
||||
* fixed `proxy` plugin behaviour when loaded with the `follow_redirects` plugin and processing a 305 response (request needs to be retried on a different proxy).
|
||||
|
||||
## Chore
|
||||
|
||||
* `:grpc` plugin: connection won't buffer requests before HTTP/2 handshake is completed, i.e. works the same as plain `httpx` HTTP/2 connection establishment.
|
||||
* if you are relying on this, you can keep the old behavior this way: `HTTPX.plugin(:grpc, http2_settings: { wait_for_handshake: false })`.
|
5
doc/release_notes/1_0_1.md
Normal file
5
doc/release_notes/1_0_1.md
Normal file
@ -0,0 +1,5 @@
|
||||
# 1.0.1
|
||||
|
||||
## Bugfixes
|
||||
|
||||
* do not try to inflate empty chunks (it triggered an error during response decoding).
|
7
doc/release_notes/1_0_2.md
Normal file
7
doc/release_notes/1_0_2.md
Normal file
@ -0,0 +1,7 @@
|
||||
# 1.0.2
|
||||
|
||||
## bugfixes
|
||||
|
||||
* bump `http-2-next` to 1.0.1, which fixes a bug where http/2 connection interprets MAX_CONCURRENT_STREAMS as request cap.
|
||||
* `grpc`: setup of rpc calls from camel-cased symbols has been fixed. As an improvement, the GRPC-enabled session will now support both snake-cased, as well as camel-cased calls.
|
||||
* `datadog` adapter has now been patched to support the most recent breaking changes of `ddtrace` configuration DSL (`env_to_bool` is no longer supported).
|
32
doc/release_notes/1_1_0.md
Normal file
32
doc/release_notes/1_1_0.md
Normal file
@ -0,0 +1,32 @@
|
||||
# 1.1.0
|
||||
|
||||
## Features
|
||||
|
||||
A function, `#peer_address`, was added to the response object, which returns the IP (either a string or an `IPAddr` object) from the socket used to get the response from.
|
||||
|
||||
```ruby
|
||||
response = HTTPX.get("https://example.com")
|
||||
response.peer_address #=> #<IPAddr: IPv4:93.184.216.34/255.255.255.255>
|
||||
```
|
||||
|
||||
error responses will also expose an IP address via `#peer_address` as long as a connection happened before the error.
|
||||
|
||||
## Improvements
|
||||
|
||||
* A performance regression involving the new default timeouts has been fixed, which could cause significant overhead in "multiple requests in sequence" scenarios, and was clearly visible in benchmarks.
|
||||
* this regression will still be seen in jruby due to a bug, which fix will be released in jruby 9.4.5.0.
|
||||
* HTTP/1.1 connections are now set to handle as many requests as they can by default (instead of the past default of max 200, at which point they'd be recycled).
|
||||
* tolerate the inexistence of `openssl` in the installed ruby, like `net-http` does.
|
||||
* `on_connection_opened` and `on_connection_closed` will yield the `OpenSSL::SSL::SSLSocket` instance for `https` backed origins (instead of always the `Socket` instance).
|
||||
|
||||
## Bugfixes
|
||||
|
||||
* when using the `:native` resolver (default option), a default of 1 for ndots is set, for systems which do not set one.
|
||||
* replaced usage of `Float::INFINITY` with `nil` for timeout defaults, as the former can't be used in IO wait functions.
|
||||
* `faraday` adapter timeout setup now maps to `:read_timeout` and `:write_timeout` options from `httpx`.
|
||||
* fixed HTTP/1.1 connection recycling on number of max requests exhausted.
|
||||
* `response.json` will now work when "content-type" header is set to "application/hal+json".
|
||||
|
||||
## Chore
|
||||
|
||||
* when using the `:cookies` plugin, a warning message to install the idnx message will only be emitted if the cookie domain is an IDN (this message was being shown all the time since v1 release).
|
17
doc/release_notes/1_1_1.md
Normal file
17
doc/release_notes/1_1_1.md
Normal file
@ -0,0 +1,17 @@
|
||||
# 1.1.1
|
||||
|
||||
## improvements
|
||||
|
||||
* (Re-)enabling default retries in DNS name queries; this had been disabled as a result of revamping timeouts, and resulted in queries only being sent once, which is very little for UDP-related traffic, and breaks if using DNS rate-limiting software. Retries the query just once, for now.
|
||||
|
||||
## bugfixes
|
||||
|
||||
* reset timers when adding new intervals, as these may be added as a result on after-select connection handling, and must wait for the next tick cycle (before the patch, they were triggering too soon).
|
||||
* fixed "on close" callback leak on connection reuse, which caused linear performance regression in benchmarks performing one request per connection.
|
||||
* fixed hanging connection when an HTTP/1.1 emitted a "connection: close" header but the server would not emit one (it closes the connection now).
|
||||
* fixed recursive dns cached lookups which may have already expired, and created nil entries in the returned address list.
|
||||
* dns system resolver is now able to retry on failure.
|
||||
|
||||
## chore
|
||||
|
||||
* remove duplicated callback unregistering connections.
|
12
doc/release_notes/1_1_2.md
Normal file
12
doc/release_notes/1_1_2.md
Normal file
@ -0,0 +1,12 @@
|
||||
# 1.1.2
|
||||
|
||||
## improvements
|
||||
|
||||
* only moving eden connections to idle when they're recycled.
|
||||
|
||||
## bugfixes
|
||||
|
||||
* skip closing a connection which is already closed during reset.
|
||||
* sentry adapter: fixed `super` call which didn't have a super method (this prevented using sentry-enabled sessions with the `:retries` plugin).
|
||||
* sentry adapter: fixing registering of sentry config.
|
||||
* sentry adapter: do not propagate traces when relevant sdk options are disabled (such as `propagate_traces`).
|
18
doc/release_notes/1_1_3.md
Normal file
18
doc/release_notes/1_1_3.md
Normal file
@ -0,0 +1,18 @@
|
||||
# 1.1.3
|
||||
|
||||
## improvements
|
||||
|
||||
## security
|
||||
|
||||
* when using `:follow_redirects` plugin, the "authorization" header will be removed when following redirect responses to a different origin.
|
||||
|
||||
## bugfixes
|
||||
|
||||
* fixed `:stream` plugin not following redirect responses when used with the `:follow_redirects` plugin.
|
||||
* fixed `:stream` plugin not doing content decoding when responses were p.ex. gzip-compressed.
|
||||
* fixed bug preventing usage of IPv6 loopback or link-local addresses in the request URL in systems with no IPv6 internet connectivity (the request was left hanging).
|
||||
* protect all code which may initiate a new connection from abrupt errors (such as internet turned off), as it was done on the initial request call.
|
||||
|
||||
## chore
|
||||
|
||||
internal usage of `mutex_m` has been removed (`mutex_m` is going to be deprecated in ruby 3.3).
|
6
doc/release_notes/1_1_4.md
Normal file
6
doc/release_notes/1_1_4.md
Normal file
@ -0,0 +1,6 @@
|
||||
# 1.1.4
|
||||
|
||||
## bugfixes
|
||||
|
||||
* datadog adapter: use `Gem::Version` to invoke the correct configuration API.
|
||||
* stream plugin: do not preempt request enqueuing (this was making integration with the `:follow_redirects` plugin fail when set up with `webmock`).
|
12
doc/release_notes/1_1_5.md
Normal file
12
doc/release_notes/1_1_5.md
Normal file
@ -0,0 +1,12 @@
|
||||
# 1.1.5
|
||||
|
||||
## improvements
|
||||
|
||||
* pattern matching support for responses has been backported to ruby 2.7 as well.
|
||||
|
||||
## bugfixes
|
||||
|
||||
* `stream` plugin: fix for `HTTPX::StreamResponse#each_line` not yielding the last line of the payload when not delimiter-terminated.
|
||||
* `stream` plugin: fix `webmock` adapter integration when methods calls would happen in the `HTTPX::StreamResponse#each` block.
|
||||
* `stream` plugin: fix `:follow_redirects` plugin integration which was caching the redirect response and using it for method calls inside the `HTTPX::StreamResponse#each` block.
|
||||
* "103 early hints" responses will be ignored when processing the response (it was causing the response returned by sessions to hold its headers, instead of the following 200 response, while keeping the 200 response body).
|
49
doc/release_notes/1_2_0.md
Normal file
49
doc/release_notes/1_2_0.md
Normal file
@ -0,0 +1,49 @@
|
||||
# 1.2.0
|
||||
|
||||
## Features
|
||||
|
||||
### `:ssrf_filter` plugin
|
||||
|
||||
The `:ssrf_filter` plugin prevents server-side request forgery attacks, by blocking requests to the internal network. This is useful when the URLs used to perform requests aren’t under the developer control (such as when they are inserted via a web application form).
|
||||
|
||||
```ruby
|
||||
http = HTTPX.plugin(:ssrf_filter)
|
||||
|
||||
# this works
|
||||
response = http.get("https://example.com")
|
||||
|
||||
# this doesn't
|
||||
response = http.get("http://localhost:3002")
|
||||
response = http.get("http://[::1]:3002")
|
||||
response = http.get("http://169.254.169.254/latest/meta-data/")
|
||||
```
|
||||
|
||||
More info under https://honeyryderchuck.gitlab.io/httpx/wiki/SSRF-Filter
|
||||
|
||||
### `:callbacks` plugin
|
||||
|
||||
The session callbacks introduced in v0.24.0 are in its own plugin. Older code will still work and emit a deprecation warning.
|
||||
|
||||
More info under https://honeyryderchuck.gitlab.io/httpx/wiki/Callbacks
|
||||
|
||||
### `:redirect_on` option for `:follow_redirects` plugin
|
||||
|
||||
This option allows passing a callback which, when returning `false`, can interrupt the redirect loop.
|
||||
|
||||
```ruby
|
||||
http = HTTPX.plugin(:follow_redirects).with(redirect_on: ->(location_uri) { BLACKLIST_HOSTS.include?(location_uri.host) })
|
||||
```
|
||||
|
||||
### `:close_on_handshake_timeout` timeout
|
||||
|
||||
A new `:timeout` option, `:close_handshake_timeout`, is added, which monitors connection readiness when performing HTTP/2 connection termination handshake.
|
||||
|
||||
## Improvements
|
||||
|
||||
* Internal "eden connections" concept was removed, and connection objects are now kept-and-reused during the lifetime of a session, even when closed. This simplified connection pool implementation and improved performance.
|
||||
* request using `:proxy` and `:retries` plugin enabled sessions will now retry on proxy connection establishment related errors.
|
||||
|
||||
## Bugfixes
|
||||
|
||||
* webmock adapter: mocked responses storing decoded payloads won't try to decode them again (fixes vcr/webmock integrations).
|
||||
* webmock adapter: fix issue related with making real requests over webmock-enabled connection.
|
6
doc/release_notes/1_2_1.md
Normal file
6
doc/release_notes/1_2_1.md
Normal file
@ -0,0 +1,6 @@
|
||||
# 1.2.1
|
||||
|
||||
## Bugfixes
|
||||
|
||||
* DoH resolver: try resolving other candidates on "domain not found" error (same behaviour as with native resolver).
|
||||
* Allow HTTP/2 connections to exit cleanly when TLS session gets corrupted and termination handshake can't be performed.
|
10
doc/release_notes/1_2_2.md
Normal file
10
doc/release_notes/1_2_2.md
Normal file
@ -0,0 +1,10 @@
|
||||
# 1.2.2
|
||||
|
||||
## Bugfixes
|
||||
|
||||
* only raise "unknown option" error when option is not supported, not anymore when error happens in the setup of a support option.
|
||||
* usage of `HTTPX::Session#wrap` within a thread with other sessions using the `:persistent` plugin won't inadvertently terminate its open connections.
|
||||
* terminate connections on `IOError` (`SocketError` does not cover them).
|
||||
* terminate connections on HTTP/2 protocol and handshake errors, which happen during establishment or termination of a HTTP/2 connection (they were being previously kept around, although they'd be irrecoverable).
|
||||
* `:oauth` plugin: fixing check preventing the OAuth metadata server integration path to be exercised.
|
||||
* fix instantiation of the options headers object with the wrong headers class.
|
16
doc/release_notes/1_2_3.md
Normal file
16
doc/release_notes/1_2_3.md
Normal file
@ -0,0 +1,16 @@
|
||||
# 1.2.3
|
||||
|
||||
## Improvements
|
||||
|
||||
* `:retries` plugin: allow `:max_retries` set to 0 (allows for a soft disable of retries when using the faraday adapter).
|
||||
|
||||
## Bugfixes
|
||||
|
||||
* `:oauth` plugin: fix for default auth method being ignored when setting grant type and scope as options only.
|
||||
* ensure happy eyeballs-initiated cloned connections also set session callbacks (caused issues when server would respond with a 421 response, an event requiring a valid internal callback).
|
||||
* native resolver cleanly transitions from tcp to udp after truncated DNS query (causing issues on follow-up CNAME resolution).
|
||||
* elapsing timeouts now guard against mutation of callbacks while looping (prevents skipping callbacks in situations where a previous one would remove itself from the collection).
|
||||
|
||||
## Chore
|
||||
|
||||
* datadog adapter: do not call `.lazy` on options (avoids deprecation warning, to be removed in ddtrace 2.0)
|
8
doc/release_notes/1_2_4.md
Normal file
8
doc/release_notes/1_2_4.md
Normal file
@ -0,0 +1,8 @@
|
||||
# 1.2.4
|
||||
|
||||
## Bugfixes
|
||||
|
||||
* fixed issue related to inability to buffer payload to error responses (which may happen on certain error handling situations).
|
||||
* fixed recovery from a lost persistent connection leaving process due to ping being sent while still marked as inactive.
|
||||
* fixed datadog integration, which was not generating new spans on retried requests (when `:retries` plugin is enabled).
|
||||
* fixed splitting strings into key value pairs in cases where the value would contain a "=", such as in certain base64 payloads.
|
7
doc/release_notes/1_2_5.md
Normal file
7
doc/release_notes/1_2_5.md
Normal file
@ -0,0 +1,7 @@
|
||||
# 1.2.5
|
||||
|
||||
## Bugfixes
|
||||
|
||||
* fix for usage of correct `last-modified` header in `response_cache` plugin.
|
||||
* fix usage of decoding helper methods (i.e. `response.json`) with `response_cache` plugin.
|
||||
* `stream` plugin: reverted back to yielding buffered payloads for streamed responses (broke `down` integration)
|
13
doc/release_notes/1_2_6.md
Normal file
13
doc/release_notes/1_2_6.md
Normal file
@ -0,0 +1,13 @@
|
||||
# 1.2.6
|
||||
|
||||
## Improvements
|
||||
|
||||
* `native` resolver: when timing out on DNS query for an alias, retry the DNS query for the alias (instead of the original hostname).
|
||||
|
||||
## Bugfixes
|
||||
|
||||
* `faraday` adapter: set `env` options on the request object, so they are available in the request object when yielded.
|
||||
* `follow_redirects` plugin: remove body-related headers (`content-length`, `content-type`) on POST-to-GET redirects.
|
||||
* `follow_redirects` plugin: maintain verb (and body) of original request when the response status code is 307.
|
||||
* `native` resolver: when timing out on TCP-based name resolution, downgrade to UDP before retrying.
|
||||
* `rate_limiter` plugin: do not try fetching the retry-after of error responses.
|
18
doc/release_notes/1_3_0.md
Normal file
18
doc/release_notes/1_3_0.md
Normal file
@ -0,0 +1,18 @@
|
||||
# 1.3.0
|
||||
|
||||
## Dependencies
|
||||
|
||||
`http-2` v1.0.0 is replacing `http-2-next` as the HTTP/2 parser.
|
||||
|
||||
`http-2-next` was forked from `http-2` 5 years ago; its improvements have been merged back to `http-2` recently though, so `http-2-next` will therefore no longer be maintained.
|
||||
|
||||
## Improvements
|
||||
|
||||
Request-specific options (`:params`, `:form`, `:json` and `:xml`) are now separately kept by the request, which allows them to share `HTTPX::Options`, and reduce the number of copying / allocations.
|
||||
|
||||
This means that `HTTPX::Options` will throw an error if you initialize an object with such keys; this should not happen, as this class is considered internal and you should not be using it directly.
|
||||
|
||||
## Fixes
|
||||
|
||||
* support for the `datadog` gem v2.0.0 in its adapter has been unblocked, now that the gem has been released.
|
||||
* loading the `:cookies` plugin was making the `Session#build_request` private.
|
17
doc/release_notes/1_3_1.md
Normal file
17
doc/release_notes/1_3_1.md
Normal file
@ -0,0 +1,17 @@
|
||||
# 1.3.1
|
||||
|
||||
## Improvements
|
||||
|
||||
* `:request_timeout` will be applied to all HTTP interactions until the final responses returned to the caller. That includes:
|
||||
* all redirect requests/responses (when using the `:follow_redirects` plugin)
|
||||
* all retried requests/responses (when using the `:retries` plugin)
|
||||
* intermediate requests (such as "100-continue")
|
||||
* faraday adapter: allow further plugins of internal session (ex: `builder.adapter(:httpx) { |sess| sess.plugin(:follow_redirects) }...`)
|
||||
|
||||
## Bugfixes
|
||||
|
||||
* fix connection leak on proxy auth failed (407) handling
|
||||
* fix busy loop on deferred requests for the duration interval
|
||||
* do not further enqueue deferred requests if they have terminated meanwhile.
|
||||
* fix busy loop caused by coalescing connections when one of them is on the DNS resolution phase still.
|
||||
* faraday adapter: on parallel mode, skip calling `on_complete` when not defined.
|
6
doc/release_notes/1_3_2.md
Normal file
6
doc/release_notes/1_3_2.md
Normal file
@ -0,0 +1,6 @@
|
||||
# 1.3.2
|
||||
|
||||
## Bugfixes
|
||||
|
||||
* Prevent `NoMethodError` in an edge case when the `:proxy` plugin is autoloaded via env vars and webmock adapter are used in tandem, and a real request fails.
|
||||
* raise invalid uri error if passed request uri does not contain the host part (ex: `"https:/get"`)
|
5
doc/release_notes/1_3_3.md
Normal file
5
doc/release_notes/1_3_3.md
Normal file
@ -0,0 +1,5 @@
|
||||
# 1.3.3
|
||||
|
||||
## Bugfixes
|
||||
|
||||
* fixing a regression introduced in 1.3.2 associated with the webmock adapter, which expects matchable request bodies to be strings
|
6
doc/release_notes/1_3_4.md
Normal file
6
doc/release_notes/1_3_4.md
Normal file
@ -0,0 +1,6 @@
|
||||
# 1.3.4
|
||||
|
||||
## Bugfixes
|
||||
|
||||
* webmock adapter: fix tempfile usage in multipart requests.
|
||||
* fix: fallback to binary encoding when parsing incoming invalid charset in HTTP "content-type" header.
|
43
doc/release_notes/1_4_0.md
Normal file
43
doc/release_notes/1_4_0.md
Normal file
@ -0,0 +1,43 @@
|
||||
# 1.4.0
|
||||
|
||||
## Features
|
||||
|
||||
### `:content_digest` plugin
|
||||
|
||||
The `:content_digest` plugin can be used to calculate the digest of request payloads and set them in the `"content-digest"` header; it can also validate the integrity of responses which declare the same `"content-digest"` header.
|
||||
|
||||
More info under https://honeyryderchuck.gitlab.io/httpx/wiki/Content-Digest
|
||||
|
||||
## Per-session connection pools
|
||||
|
||||
This architectural change moves away from per-thread shared connection pools, and into per-session (also thread-safe) connection pools. Unlike before, this enables connections from a session to be reused across threads, as well as limiting the number of connections that can be open on a given origin peer. This fixes long-standing issues, such as reusing connections under a fiber scheduler loop (such as the one from the gem `async`).
|
||||
|
||||
A new `:pool_options` option is introduced, which can be passed a hash with the following sub-options:
|
||||
|
||||
* `:max_connections_per_origin`: maximum number of connections a pool allows (unbounded by default, for backwards compatibility).
|
||||
* `:pool_timeout`: the number of seconds a session will wait for a connection to be checked out (default: 5)
|
||||
|
||||
More info under https://honeyryderchuck.gitlab.io/httpx/wiki/Connection-Pools
|
||||
|
||||
|
||||
## Improvements
|
||||
|
||||
* `:aws_sigv4` plugin: improved digest calculation on compressed request bodies by buffering content to a tempfile.
|
||||
* `HTTPX::Response#json` will parse payload from extended json MIME types (like `application/ld+json`, `application/hal+json`, ...).
|
||||
|
||||
## Bugfixes
|
||||
|
||||
* `:aws_sigv4` plugin: do not try to rewind a request body which yields chunks.
|
||||
* fixed request encoding when `:json` param is passed, and the `oj` gem is used (by using the `:compat` flag).
|
||||
* native resolver: on message truncation, bubble up tcp handshake errors as resolve errors.
|
||||
* allow `HTTPX::Response#json` to accept extended JSON mime types (such as responses with `content-type: application/ld+json`)
|
||||
|
||||
## Chore
|
||||
|
||||
* default options are now fully frozen (in case anyone relies on overriding them).
|
||||
|
||||
### `:xml` plugin
|
||||
|
||||
XML encoding/decoding (via `:xml` request param, and `HTTPX::Response#xml`) is now available via the `:xml` plugin.
|
||||
|
||||
Using `HTTPX::Response#xml` without the plugin will issue a deprecation warning.
|
19
doc/release_notes/1_4_1.md
Normal file
19
doc/release_notes/1_4_1.md
Normal file
@ -0,0 +1,19 @@
|
||||
# 1.4.1
|
||||
|
||||
## Bugfixes
|
||||
|
||||
* several `datadog` integration bugfixes
|
||||
* only load the `datadog` integration when the `datadog` sdk is loaded (and not other gems that may define the `Datadog` module, like `dogstatsd`)
|
||||
* do not trace if datadog integration is loaded but disabled
|
||||
* distributed headers are now sent along (when the configuration is enabled, which it is by default)
|
||||
* fix for handling multiple `GOAWAY` frames coming from the server (node.js servers seem to send multiple frames on connection timeout)
|
||||
* fix regression for when a url is used with `httpx` which is not `http://` or `https://` (should raise `HTTPX::UnsupportedSchemaError`)
|
||||
* worked around `IO.copy_stream` which was emitting incorrect bytes for HTTP/2 requests with bodies larger than the maximum supported frame size.
|
||||
* multipart requests: make sure that a body declared as `Pathname` is opened for reading in binary mode.
|
||||
* `webmock` integration: ensure that request events are emitted (such as plugins and integrations relying on it, such as `datadog` and the OTel integration)
|
||||
* native resolver: do not propagate successful name resolutions for connections which were already closed.
|
||||
* native resolver: fixed name resolution stalling, in a multi-request to multi-origin scenario, when a resolution timeout would happen.
|
||||
|
||||
## Chore
|
||||
|
||||
* refactor of the happy eyeballs and connection coalescing logic to not rely on callbacks, and instead on instance variable management (makes code more straightforward to read).
|
20
doc/release_notes/1_4_2.md
Normal file
20
doc/release_notes/1_4_2.md
Normal file
@ -0,0 +1,20 @@
|
||||
# 1.4.2
|
||||
|
||||
## Bugfixes
|
||||
|
||||
* faraday: use default reason when none is matched by Net::HTTP::STATUS_CODES
|
||||
* native resolver: keep sending DNS queries if the socket is available, to avoid busy loops on select
|
||||
* native resolver fixes for Happy Eyeballs v2
|
||||
* do not apply resolution delay if the IPv4 IP was not resolved via DNS
|
||||
* ignore ALIAS if DNS response carries IP answers
|
||||
* do not try to query for names already awaiting answer from the resolver
|
||||
* make sure all types of errors are propagated to connections
|
||||
* make sure next candidate is picked up if receiving NX_DOMAIN_NOT_FOUND error from resolver
|
||||
* raise error happening before any request is flushed to respective connections (avoids loop on non-actionable selector termination).
|
||||
* fix "NoMethodError: undefined method `after' for nil:NilClass", happening for requests flushed into persistent connections which errored, and were retried in a different connection before triggering the timeout callbacks from the previously-closed connection.
|
||||
|
||||
|
||||
## Chore
|
||||
|
||||
* Refactor of timers to allow for explicit and more performant single timer interval cancellation.
|
||||
* default log message restructured to include info about process, thread and caller.
|
11
doc/release_notes/1_4_3.md
Normal file
11
doc/release_notes/1_4_3.md
Normal file
@ -0,0 +1,11 @@
|
||||
# 1.4.3
|
||||
|
||||
## Bugfixes
|
||||
|
||||
* `webmock` adapter: reassign headers to signature after callbacks are called (these may change the headers before virtual send).
|
||||
* do not close request (and its body) right after sending, instead only on response close
|
||||
* prevents retries from failing under the `:retries` plugin
|
||||
* fixes issue when using `faraday-multipart` request bodies
|
||||
* retry request with HTTP/1 when receiving an HTTP/2 GOAWAY frame with `HTTP_1_1_REQUIRED` error code.
|
||||
* fix wrong method call on HTTP/2 PING frame with unrecognized code.
|
||||
* fix EOFError issues on connection termination for long running connections which may have already been terminated by peer and were wrongly trying to complete the HTTP/2 termination handshake.
|
14
doc/release_notes/1_4_4.md
Normal file
14
doc/release_notes/1_4_4.md
Normal file
@ -0,0 +1,14 @@
|
||||
# 1.4.4
|
||||
|
||||
## Improvements
|
||||
|
||||
* `:stream` plugin: response will now be partially buffered in order to i.e. inspect response status or headers on the response body without buffering the full response
|
||||
* this fixes an issue in the `down` gem integration when used with the `:max_size` option.
|
||||
* do not unnecessarily probe for connection liveness if no more requests are inflight, including failed ones.
|
||||
* when using persistent connections, do not probe for liveness right after reconnecting after a keep alive timeout.
|
||||
|
||||
## Bugfixes
|
||||
|
||||
* `:persistent` plugin: do not exhaust retry attempts when probing for (and failing) connection liveness.
|
||||
* since the introduction of per-session connection pools, and consequentially due to the possibility of multiple inactive connections for the same origin being in the pool, which may have been terminated by the peer server, requests would fail before being able to establish a new connection.
|
||||
* prevent retrying to connect the TCP socket object when an SSLSocket object is already in place and connecting.
|
126
doc/release_notes/1_5_0.md
Normal file
126
doc/release_notes/1_5_0.md
Normal file
@ -0,0 +1,126 @@
|
||||
# 1.5.0
|
||||
|
||||
## Features
|
||||
|
||||
### `:stream_bidi` plugin
|
||||
|
||||
The `:stream_bidi` plugin enables bidirectional streaming support (an HTTP/2 only feature!). It builds on top of the `:stream` plugin, and uses its block-based syntax to process incoming frames, while allowing the user to pipe more data to the request (from the same, or another thread/fiber).
|
||||
|
||||
```ruby
|
||||
http = HTTPX.plugin(:stream_bidi)
|
||||
request = http.build_request(
|
||||
"POST",
|
||||
"https://your-origin.com/stream",
|
||||
headers: { "content-type" => "application/x-ndjson" },
|
||||
body: ["{\"message\":\"started\"}\n"]
|
||||
)
|
||||
|
||||
chunks = []
|
||||
|
||||
response = http.request(request, stream: true)
|
||||
|
||||
Thread.start do
|
||||
response.each do |chunk|
|
||||
handle_data(chunk)
|
||||
end
|
||||
end
|
||||
|
||||
# now send data...
|
||||
request << "{\"message\":\"foo\"}\n"
|
||||
request << "{\"message\":\"bar\"}\n"
|
||||
# ...
|
||||
```
|
||||
|
||||
You can read more about it in https://honeyryderchuck.gitlab.io/httpx/wiki/Stream-Bidi
|
||||
|
||||
### `:query` plugin
|
||||
|
||||
The `:query` plugin adds public methods supporting the `QUERY` HTTP verb:
|
||||
|
||||
```ruby
|
||||
http = HTTPX.plugin(:query)
|
||||
|
||||
http.query("https://example.com/gquery", body: "foo=bar") # QUERY /gquery ....
|
||||
```
|
||||
|
||||
You can read more about it in https://honeyryderchuck.gitlab.io/httpx/wiki/Query
|
||||
|
||||
this functionality was added as a plugin for explicit opt-in, as it's experimental (RFC for the new HTTP verb is still in draft).
|
||||
|
||||
### `:response_cache` plugin filesystem based store
|
||||
|
||||
The `:response_cache` plugin supports setting the filesystem as the response cache store (instead of just storing them in memory, which is the default `:store`).
|
||||
|
||||
```ruby
|
||||
# cache store in the filesystem, writes to the temporary directory from the OS
|
||||
http = HTTPX.plugin(:response_cache, response_cache_store: :file_store)
|
||||
# if you want a separate location
|
||||
http = HTTPX.plugin(:response_cache).with(response_cache_store: HTTPX::Plugins::ResponseCache::FileStore.new("/path/to/dir"))
|
||||
```
|
||||
|
||||
You can read more about it in https://honeyryderchuck.gitlab.io/httpx/wiki/Response-Cache#:file_store
|
||||
|
||||
### `:close_on_fork` option
|
||||
|
||||
A new option `:close_on_fork` can be used to ensure that a session object which may have open connections will not leak them in case the process is forked (this can be the case of `:persistent` plugin enabled sessions which have had usage before fork):
|
||||
|
||||
```ruby
|
||||
http = HTTPX.plugin(:persistent, close_on_fork: true)
|
||||
|
||||
# http may have open connections here
|
||||
fork do
|
||||
# http has no connections here
|
||||
end
|
||||
```
|
||||
|
||||
You can read more about it in https://honeyryderchuck.gitlab.io/httpx/wiki/Connection-Pools#Fork-Safety .
|
||||
|
||||
### `:debug_redact` option
|
||||
|
||||
The `:debug_redact` option will, when enabled, replace parts of the debug logs (enabled via `:debug` and `:debug_level` options) which may contain sensitive information, with the `"[REDACTED]"` placeholder.
|
||||
|
||||
You can read more about it in https://honeyryderchuck.gitlab.io/httpx/wiki/Debugging .
|
||||
|
||||
### `:max_connections` pool option
|
||||
|
||||
A new `:max_connections` pool option (settable under `:pool_options`) can be used to define the maximum number **overall** of connections for a pool ("in-transit" or "at-rest"); this complements, and supersedes when used, the already existing `:max_connections_per_origin`, which does the same per connection origin.
|
||||
|
||||
```ruby
|
||||
HTTPX.with(pool_options: { max_connections: 100 })
|
||||
```
|
||||
|
||||
You can read more about it in https://honeyryderchuck.gitlab.io/httpx/wiki/Connection-Pools .
|
||||
|
||||
### Subplugins
|
||||
|
||||
An enhancement to the plugins architecture, it allows plugins to define submodules ("subplugins") which are loaded if another plugin is in use, or is loaded afterwards.
|
||||
|
||||
You can read more about it in https://honeyryderchuck.gitlab.io/httpx/wiki/Custom-Plugins#Subplugins .
|
||||
|
||||
## Improvements
|
||||
|
||||
* `:persistent` plugin: several improvements around reconnections of failure:
|
||||
* reconnections will only happen for "connection broken" errors (and will discard reconnection on timeouts)
|
||||
* reconnections won't exhaust retries
|
||||
* `:response_cache` plugin: several improvements:
|
||||
* return cached response if not stale, send conditional request otherwise (it was always doing the latter).
|
||||
* consider immutable (i.e. `"Cache-Control: immutable"`) responses as never stale.
|
||||
* `:datadog` adapter: decorate spans with more tags (header, kind, component, etc...)
|
||||
* timers operations have been improved to use more efficient algorithms and reduce object creation.
|
||||
|
||||
## Bugfixes
|
||||
|
||||
* ensure that setting request timeouts happens before the request is buffered (the latter could trigger a state transition required by the former).
|
||||
* `:response_cache` plugin: fix `"Vary"` header handling by supporting a new plugin option, `:supported_vary_headers`, which defines which headers are taken into account for cache key calculation.
|
||||
* fixed query string encoded value when passed an empty hash to the `:query` param and the URL already contains query string.
|
||||
* `:callbacks` plugin: ensure the callbacks from a session are copied when a new session is derived from it (via a `.plugin` call, for example).
|
||||
* `:callbacks` plugin: errors raised from hostname resolution should bubble up to user code.
|
||||
* fixed connection coalescing selector monitoring in cases where the coalescable connection is cloned, while other branches were simplified.
|
||||
* clear the connection write buffer in corner cases where the remaining bytes may be interpreted as GOAWAY handshake frame (and may cause unintended writes to connections already identified as broken).
|
||||
* remove idle connections from the selector when an error happens before the state changes (this may happen if the thread is interrupted during name resolution).
|
||||
|
||||
## Chore
|
||||
|
||||
`httpx` makes extensive use of features introduced in ruby 3.4, such as `Module#set_temporary_name` for otherwise plugin-generated anonymous classes (improves debugging and issue reporting), or `String#append_as_bytes` for a small but non-negligible perf boost in buffer operations. It falls back to the previous behaviour when used with ruby 3.3 or lower.
|
||||
|
||||
Also, and in preparation for the incoming ruby 3.5 release, dependency of the `cgi` gem (which will be removed from stdlib) was removed.
|
6
doc/release_notes/1_5_1.md
Normal file
6
doc/release_notes/1_5_1.md
Normal file
@ -0,0 +1,6 @@
|
||||
# 1.5.1
|
||||
|
||||
## Bugfixes
|
||||
|
||||
* connection errors on persistent connections which have just been checked out from the pool no longer account for retries bookkeeping; the assumption should be that, if a connection has been checked into the pool in an open state, chances are, when it eventually gets checked out, it may be corrupt. This issue was more exacerbated in `:persistent` plugin connections, which by design have a retry of 1, thus failing often immediately after check out without a legitimate request try.
|
||||
* native resolver: fix issue with process interrupts during DNS request, which caused a busy loop when closing the selector.
|
@ -1,7 +1,7 @@
|
||||
version: '3'
|
||||
services:
|
||||
httpx:
|
||||
image: jruby:9.3
|
||||
image: jruby:9.4
|
||||
environment:
|
||||
- JRUBY_OPTS=--debug
|
||||
entrypoint:
|
||||
|
@ -1,4 +0,0 @@
|
||||
version: '3'
|
||||
services:
|
||||
httpx:
|
||||
image: ruby:2.1
|
@ -1,4 +0,0 @@
|
||||
version: '3'
|
||||
services:
|
||||
httpx:
|
||||
image: ruby:2.2
|
@ -1,8 +0,0 @@
|
||||
version: '3'
|
||||
services:
|
||||
httpx:
|
||||
image: ruby:2.3
|
||||
environment:
|
||||
- HTTPBIN_COALESCING_HOST=another
|
||||
links:
|
||||
- "nghttp2:another"
|
@ -1,8 +0,0 @@
|
||||
version: '3'
|
||||
services:
|
||||
httpx:
|
||||
image: ruby:2.4
|
||||
environment:
|
||||
- HTTPBIN_COALESCING_HOST=another
|
||||
links:
|
||||
- "nghttp2:another"
|
@ -1,8 +0,0 @@
|
||||
version: '3'
|
||||
services:
|
||||
httpx:
|
||||
image: ruby:2.5
|
||||
environment:
|
||||
- HTTPBIN_COALESCING_HOST=another
|
||||
links:
|
||||
- "nghttp2:another"
|
@ -5,13 +5,11 @@ services:
|
||||
environment:
|
||||
- HTTPBIN_COALESCING_HOST=another
|
||||
- HTTPX_RESOLVER_URI=https://doh/dns-query
|
||||
links:
|
||||
- "nghttp2:another"
|
||||
depends_on:
|
||||
- doh
|
||||
|
||||
doh:
|
||||
image: registry.gitlab.com/os85/httpx/nghttp2:1
|
||||
image: registry.gitlab.com/os85/httpx/nghttp2:3
|
||||
depends_on:
|
||||
- doh-proxy
|
||||
entrypoint:
|
||||
|
@ -5,13 +5,11 @@ services:
|
||||
environment:
|
||||
- HTTPBIN_COALESCING_HOST=another
|
||||
- HTTPX_RESOLVER_URI=https://doh/dns-query
|
||||
links:
|
||||
- "nghttp2:another"
|
||||
depends_on:
|
||||
- doh
|
||||
|
||||
doh:
|
||||
image: registry.gitlab.com/os85/httpx/nghttp2:1
|
||||
image: registry.gitlab.com/os85/httpx/nghttp2:3
|
||||
depends_on:
|
||||
- doh-proxy
|
||||
entrypoint:
|
||||
|
@ -5,13 +5,11 @@ services:
|
||||
environment:
|
||||
- HTTPBIN_COALESCING_HOST=another
|
||||
- HTTPX_RESOLVER_URI=https://doh/dns-query
|
||||
links:
|
||||
- "nghttp2:another"
|
||||
depends_on:
|
||||
- doh
|
||||
|
||||
doh:
|
||||
image: registry.gitlab.com/os85/httpx/nghttp2:1
|
||||
image: registry.gitlab.com/os85/httpx/nghttp2:3
|
||||
depends_on:
|
||||
- doh-proxy
|
||||
entrypoint:
|
||||
|
@ -5,13 +5,11 @@ services:
|
||||
environment:
|
||||
- HTTPBIN_COALESCING_HOST=another
|
||||
- HTTPX_RESOLVER_URI=https://doh/dns-query
|
||||
links:
|
||||
- "nghttp2:another"
|
||||
depends_on:
|
||||
- doh
|
||||
|
||||
doh:
|
||||
image: registry.gitlab.com/os85/httpx/nghttp2:1
|
||||
image: registry.gitlab.com/os85/httpx/nghttp2:3
|
||||
depends_on:
|
||||
- doh-proxy
|
||||
entrypoint:
|
||||
|
@ -1,17 +1,15 @@
|
||||
version: '3'
|
||||
services:
|
||||
httpx:
|
||||
image: ruby:2.6
|
||||
image: ruby:3.3
|
||||
environment:
|
||||
- HTTPBIN_COALESCING_HOST=another
|
||||
- HTTPX_RESOLVER_URI=https://doh/dns-query
|
||||
links:
|
||||
- "nghttp2:another"
|
||||
depends_on:
|
||||
- doh
|
||||
|
||||
doh:
|
||||
image: registry.gitlab.com/os85/httpx/nghttp2:1
|
||||
image: registry.gitlab.com/os85/httpx/nghttp2:3
|
||||
depends_on:
|
||||
- doh-proxy
|
||||
entrypoint:
|
23
docker-compose-ruby-3.4.yml
Normal file
23
docker-compose-ruby-3.4.yml
Normal file
@ -0,0 +1,23 @@
|
||||
version: '3'
|
||||
services:
|
||||
httpx:
|
||||
image: ruby:3.4
|
||||
environment:
|
||||
- HTTPBIN_COALESCING_HOST=another
|
||||
- HTTPX_RESOLVER_URI=https://doh/dns-query
|
||||
depends_on:
|
||||
- doh
|
||||
|
||||
doh:
|
||||
image: registry.gitlab.com/os85/httpx/nghttp2:3
|
||||
depends_on:
|
||||
- doh-proxy
|
||||
entrypoint: /usr/local/bin/nghttpx
|
||||
volumes:
|
||||
- ./test/support/ci:/home
|
||||
command: --conf /home/doh-nghttp.conf --no-ocsp --frontend '*,443'
|
||||
|
||||
doh-proxy:
|
||||
image: publicarray/doh-proxy
|
||||
environment:
|
||||
- "UNBOUND_SERVICE_HOST=127.0.0.11"
|
@ -1,7 +1,7 @@
|
||||
version: '3'
|
||||
services:
|
||||
httpx:
|
||||
image: ghcr.io/graalvm/truffleruby:latest
|
||||
image: ghcr.io/graalvm/truffleruby-community:latest
|
||||
entrypoint:
|
||||
- bash
|
||||
- /home/test/support/ci/build.sh
|
@ -26,6 +26,7 @@ services:
|
||||
- AMZ_HOST=aws:4566
|
||||
- WEBDAV_HOST=webdav
|
||||
- DD_INSTRUMENTATION_TELEMETRY_ENABLED=false
|
||||
- GRPC_VERBOSITY=ERROR
|
||||
image: ruby:alpine
|
||||
privileged: true
|
||||
depends_on:
|
||||
@ -37,13 +38,10 @@ services:
|
||||
- aws
|
||||
- ws-echo-server
|
||||
- webdav
|
||||
- altsvc-nghttp2
|
||||
volumes:
|
||||
- ./:/home
|
||||
links:
|
||||
- "altsvc-nghttp2:another2"
|
||||
- "aws:test.aws"
|
||||
entrypoint:
|
||||
/home/test/support/ci/build.sh
|
||||
entrypoint: /home/test/support/ci/build.sh
|
||||
|
||||
sshproxy:
|
||||
image: connesc/ssh-gateway
|
||||
@ -51,8 +49,6 @@ services:
|
||||
- ./test/support/ssh:/config
|
||||
depends_on:
|
||||
- nghttp2
|
||||
links:
|
||||
- "nghttp2:another"
|
||||
|
||||
socksproxy:
|
||||
image: qautomatron/docker-3proxy
|
||||
@ -61,8 +57,6 @@ services:
|
||||
- "3129:3129"
|
||||
volumes:
|
||||
- ./test/support/ci:/etc/3proxy
|
||||
links:
|
||||
- "nghttp2:another"
|
||||
|
||||
httpproxy:
|
||||
image: sameersbn/squid:3.5.27-2
|
||||
@ -72,56 +66,53 @@ services:
|
||||
- ./test/support/ci/squid/proxy.conf:/etc/squid/squid.conf
|
||||
- ./test/support/ci/squid/proxy-users-basic.txt:/etc/squid/proxy-users-basic.txt
|
||||
- ./test/support/ci/squid/proxy-users-digest.txt:/etc/squid/proxy-users-digest.txt
|
||||
links:
|
||||
- "nghttp2:another"
|
||||
command:
|
||||
-d 3
|
||||
command: -d 3
|
||||
|
||||
http2proxy:
|
||||
image: registry.gitlab.com/os85/httpx/nghttp2:1
|
||||
image: registry.gitlab.com/os85/httpx/nghttp2:3
|
||||
ports:
|
||||
- 3300:80
|
||||
depends_on:
|
||||
- httpproxy
|
||||
entrypoint:
|
||||
/usr/local/bin/nghttpx
|
||||
command:
|
||||
--no-ocsp --frontend '*,80;no-tls' --backend 'httpproxy,3128' --http2-proxy
|
||||
entrypoint: /usr/local/bin/nghttpx
|
||||
command: --no-ocsp --frontend '*,80;no-tls' --backend 'httpproxy,3128' --http2-proxy
|
||||
|
||||
nghttp2:
|
||||
image: registry.gitlab.com/os85/httpx/nghttp2:1
|
||||
image: registry.gitlab.com/os85/httpx/nghttp2:3
|
||||
ports:
|
||||
- 80:80
|
||||
- 443:443
|
||||
depends_on:
|
||||
- httpbin
|
||||
entrypoint:
|
||||
/usr/local/bin/nghttpx
|
||||
entrypoint: /usr/local/bin/nghttpx
|
||||
volumes:
|
||||
- ./test/support/ci:/home
|
||||
command:
|
||||
--conf /home/nghttp.conf --no-ocsp --frontend '*,80;no-tls' --frontend '*,443'
|
||||
command: --conf /home/nghttp.conf --no-ocsp --frontend '*,80;no-tls' --frontend '*,443'
|
||||
networks:
|
||||
default:
|
||||
aliases:
|
||||
- another
|
||||
|
||||
altsvc-nghttp2:
|
||||
image: registry.gitlab.com/os85/httpx/nghttp2:1
|
||||
image: registry.gitlab.com/os85/httpx/nghttp2:3
|
||||
ports:
|
||||
- 81:80
|
||||
- 444:443
|
||||
depends_on:
|
||||
- httpbin
|
||||
entrypoint:
|
||||
/usr/local/bin/nghttpx
|
||||
entrypoint: /usr/local/bin/nghttpx
|
||||
volumes:
|
||||
- ./test/support/ci:/home
|
||||
command:
|
||||
--conf /home/nghttp.conf --no-ocsp --frontend '*,80;no-tls' --frontend '*,443' --altsvc "h2,443,nghttp2"
|
||||
|
||||
command: --conf /home/nghttp.conf --no-ocsp --frontend '*,80;no-tls' --frontend '*,443' --altsvc "h2,443,nghttp2"
|
||||
networks:
|
||||
default:
|
||||
aliases:
|
||||
- another2
|
||||
httpbin:
|
||||
environment:
|
||||
- DEBUG=True
|
||||
image: citizenstig/httpbin
|
||||
command:
|
||||
gunicorn --bind=0.0.0.0:8000 --workers=6 --access-logfile - --error-logfile - --log-level debug --capture-output httpbin:app
|
||||
command: gunicorn --bind=0.0.0.0:8000 --workers=6 --access-logfile - --error-logfile - --log-level debug --capture-output httpbin:app
|
||||
|
||||
aws:
|
||||
image: localstack/localstack
|
||||
@ -133,6 +124,10 @@ services:
|
||||
- 4566:4566
|
||||
volumes:
|
||||
- ./test/support/ci/aws:/docker-entrypoint-initaws.d
|
||||
networks:
|
||||
default:
|
||||
aliases:
|
||||
- test.aws
|
||||
|
||||
ws-echo-server:
|
||||
environment:
|
||||
@ -146,4 +141,4 @@ services:
|
||||
environment:
|
||||
- AUTH_TYPE=Basic
|
||||
- USERNAME=user
|
||||
- PASSWORD=pass
|
||||
- PASSWORD=pass
|
||||
|
@ -1,11 +1,20 @@
|
||||
require "httpx"
|
||||
|
||||
URLS = %w[https://nghttp2.org/httpbin/get] * 1
|
||||
if ARGV.empty?
|
||||
URLS = %w[https://nghttp2.org/httpbin/get] * 1
|
||||
else
|
||||
URLS = ARGV
|
||||
end
|
||||
|
||||
responses = HTTPX.get(*URLS)
|
||||
Array(responses).each(&:raise_for_status)
|
||||
puts "Status: \n"
|
||||
puts Array(responses).map(&:status)
|
||||
puts "Payload: \n"
|
||||
puts Array(responses).map(&:to_s)
|
||||
|
||||
Array(responses).each do |res|
|
||||
puts "URI: #{res.uri}"
|
||||
case res
|
||||
when HTTPX::ErrorResponse
|
||||
puts "error: #{res.error}"
|
||||
puts res.error.backtrace
|
||||
else
|
||||
puts "STATUS: #{res.status}"
|
||||
puts res.to_s[0..2048]
|
||||
end
|
||||
end
|
||||
|
@ -17,23 +17,49 @@ end
|
||||
|
||||
Signal.trap("INFO") { print_status } unless ENV.key?("CI")
|
||||
|
||||
PAGES = (ARGV.first || 10).to_i
|
||||
|
||||
Thread.start do
|
||||
frontpage = HTTPX.get("https://news.ycombinator.com").to_s
|
||||
|
||||
html = Oga.parse_html(frontpage)
|
||||
|
||||
links = html.css('.itemlist a.storylink').map{|link| link.get('href') }
|
||||
|
||||
links = links.select {|l| l.start_with?("https") }
|
||||
|
||||
puts links
|
||||
|
||||
responses = HTTPX.get(*links)
|
||||
|
||||
links.each_with_index do |l, i|
|
||||
puts "#{responses[i].status}: #{l}"
|
||||
end
|
||||
page_links = []
|
||||
HTTPX.wrap do |http|
|
||||
PAGES.times.each do |i|
|
||||
frontpage = http.get("https://news.ycombinator.com?p=#{i+1}").to_s
|
||||
|
||||
html = Oga.parse_html(frontpage)
|
||||
|
||||
links = html.css('.athing .title a').map{|link| link.get('href') }.select { |link| URI(link).absolute? }
|
||||
|
||||
links = links.select {|l| l.start_with?("https") }
|
||||
|
||||
puts "for page #{i+1}: #{links.size} links"
|
||||
page_links.concat(links)
|
||||
end
|
||||
end
|
||||
|
||||
puts "requesting #{page_links.size} links:"
|
||||
responses = HTTPX.get(*page_links)
|
||||
|
||||
# page_links.each_with_index do |l, i|
|
||||
# puts "#{responses[i].status}: #{l}"
|
||||
# end
|
||||
|
||||
responses, error_responses = responses.partition { |r| r.is_a?(HTTPX::Response) }
|
||||
puts "#{responses.size} responses (from #{page_links.size})"
|
||||
puts "by group:"
|
||||
responses.group_by(&:status).each do |st, res|
|
||||
res.each do |r|
|
||||
puts "#{st}: #{r.uri}"
|
||||
end
|
||||
end unless responses.empty?
|
||||
|
||||
unless error_responses.empty?
|
||||
puts "error responses (#{error_responses.size})"
|
||||
error_responses.group_by{ |r| r.error.class }.each do |kl, res|
|
||||
res.each do |r|
|
||||
puts "#{r.uri}: #{r.error}"
|
||||
puts r.error.backtrace&.join("\n")
|
||||
end
|
||||
end
|
||||
end
|
||||
|
||||
end.join
|
||||
|
||||
|
||||
|
||||
|
@ -1,7 +1,7 @@
|
||||
require "httpx"
|
||||
require "oga"
|
||||
|
||||
http = HTTPX.plugin(:compression).plugin(:persistent).with(timeout: { operation_timeut: 5, connect_timeout: 5})
|
||||
http = HTTPX.plugin(:persistent).with(timeout: { request_timeout: 5 })
|
||||
|
||||
PAGES = (ARGV.first || 10).to_i
|
||||
pages = PAGES.times.map do |page|
|
||||
@ -16,10 +16,11 @@ Array(http.get(*pages)).each_with_index.map do |response, i|
|
||||
end
|
||||
html = Oga.parse_html(response.to_s)
|
||||
# binding.irb
|
||||
page_links = html.css('.itemlist a.titlelink').map{|link| link.get('href') }
|
||||
page_links = html.css('.athing .title a').map{|link| link.get('href') }.select { |link| URI(link).absolute? }
|
||||
puts "page(#{i+1}): #{page_links.size}"
|
||||
if page_links.size == 0
|
||||
puts "error(#{response.status}) on page #{i+1}"
|
||||
next
|
||||
end
|
||||
# page_links.each do |link|
|
||||
# puts "link: #{link}"
|
||||
@ -31,6 +32,11 @@ end
|
||||
links = links.each_with_index do |pages, i|
|
||||
puts "Page: #{i+1}\t Links: #{pages.size}"
|
||||
pages.each do |page|
|
||||
puts "URL: #{page.uri} (#{page.status})"
|
||||
case page
|
||||
in status:
|
||||
puts "URL: #{page.uri} (#{status})"
|
||||
in error:
|
||||
puts "URL: #{page.uri} (#{error.message})"
|
||||
end
|
||||
end
|
||||
end
|
||||
|
@ -7,8 +7,8 @@
|
||||
#
|
||||
require "httpx"
|
||||
|
||||
URLS = %w[http://badipv4.test.ipv6friday.org/] * 1
|
||||
# URLS = %w[http://badipv6.test.ipv6friday.org/] * 1
|
||||
# URLS = %w[https://ipv4.test-ipv6.com] * 1
|
||||
URLS = %w[https://ipv6.test-ipv6.com] * 1
|
||||
|
||||
responses = HTTPX.get(*URLS, ssl: { verify_mode: OpenSSL::SSL::VERIFY_NONE})
|
||||
|
||||
|
@ -6,11 +6,9 @@ include HTTPX
|
||||
URLS = %w[http://nghttp2.org https://nghttp2.org/blog/]# * 3
|
||||
|
||||
client = HTTPX.plugin(:proxy)
|
||||
client = client.with_proxy(uri: "http://61.7.174.110:54132")
|
||||
responses = client.get(URLS)
|
||||
client = client.with_proxy(uri: "http://134.209.29.120:8080")
|
||||
responses = client.get(*URLS)
|
||||
puts responses.map(&:status)
|
||||
|
||||
# response = client.get(URLS.first)
|
||||
# puts response.status
|
||||
|
||||
|
||||
|
8
examples/resolv/addrinfo.rb
Normal file
8
examples/resolv/addrinfo.rb
Normal file
@ -0,0 +1,8 @@
|
||||
require "socket"
|
||||
|
||||
puts Process.pid
|
||||
sleep 10
|
||||
puts Addrinfo.getaddrinfo("www.google.com", 80).inspect
|
||||
sleep 10
|
||||
puts Addrinfo.getaddrinfo("www.google.com", 80).inspect
|
||||
sleep 60
|
40
examples/resolv/resolv_tcp.rb
Normal file
40
examples/resolv/resolv_tcp.rb
Normal file
@ -0,0 +1,40 @@
|
||||
# frozen_string_literal: true
|
||||
|
||||
require "resolv"
|
||||
require "httpx"
|
||||
|
||||
host = "127.0.0.11"
|
||||
port = 53
|
||||
|
||||
# srv_hostname = "aerserv-bc-us-east.bidswitch.net"
|
||||
record_type = Resolv::DNS::Resource::IN::A
|
||||
|
||||
# # addresses = nil
|
||||
# # Resolv::DNS.open(nameserver: host) do |dns|
|
||||
# # require "pry-byebug"; binding.pry
|
||||
# # addresses = dns.getresources(srv_hostname, record_type)
|
||||
# # end
|
||||
|
||||
# message_id = 1
|
||||
# buffer = HTTPX::Resolver.encode_dns_query(srv_hostname, type: record_type, message_id: message_id)
|
||||
|
||||
# io = TCPSocket.new(host, port)
|
||||
# buffer[0, 2] = [buffer.size, message_id].pack("nn")
|
||||
# io.write(buffer.to_s)
|
||||
# data, _ = io.readpartial(2048)
|
||||
# size = data[0, 2].unpack1("n")
|
||||
# answer = data[2..-1]
|
||||
# answer << io.readpartial(size) if size > answer.bytesize
|
||||
|
||||
# addresses = HTTPX::Resolver.decode_dns_answer(answer)
|
||||
|
||||
# puts "(#{srv_hostname}) addresses: #{addresses}"
|
||||
|
||||
srv_hostname = "www.sfjewjfwigiewpgwwg-native-1.com"
|
||||
socket = UDPSocket.new
|
||||
buffer = HTTPX::Resolver.encode_dns_query(srv_hostname, type: record_type)
|
||||
socket.send(buffer.to_s, 0, host, port)
|
||||
recv, _ = socket.recvfrom(512)
|
||||
puts "received #{recv.bytesize} bytes..."
|
||||
addresses = HTTPX::Resolver.decode_dns_answer(recv)
|
||||
puts "(#{srv_hostname}) addresses: #{addresses}"
|
23
examples/resolv/srv_record.rb
Normal file
23
examples/resolv/srv_record.rb
Normal file
@ -0,0 +1,23 @@
|
||||
require "httpx"
|
||||
|
||||
host = "1.1.1.1"
|
||||
port = 53
|
||||
|
||||
hostname = "google.com"
|
||||
srv_hostname = "_https._tcp.#{hostname}"
|
||||
record_type = Resolv::DNS::Resource::IN::SRV
|
||||
|
||||
addresses = nil
|
||||
Resolv::DNS.open(nameserver: host) do |dns|
|
||||
addresses = dns.getresources(srv_hostname, record_type)
|
||||
end
|
||||
|
||||
# buffer = HTTPX::Resolver.encode_dns_query(hostname, type: record_type)
|
||||
|
||||
# io = UDPSocket.new(Socket::AF_INET)
|
||||
# size = io.send(buffer.to_s, 0, Socket.sockaddr_in(port, host.to_s))
|
||||
# data, _ = io.recvfrom(2048)
|
||||
|
||||
# addresses = HTTPX::Resolver.decode_dns_answer(data)
|
||||
|
||||
puts "(#{hostname}) addresses: #{addresses}"
|
@ -20,10 +20,10 @@ Gem::Specification.new do |gem|
|
||||
|
||||
gem.metadata = {
|
||||
"bug_tracker_uri" => "https://gitlab.com/os85/httpx/issues",
|
||||
"changelog_uri" => "https://os85.gitlab.io/httpx/#release-notes",
|
||||
"documentation_uri" => "https://os85.gitlab.io/httpx/rdoc/",
|
||||
"changelog_uri" => "https://honeyryderchuck.gitlab.io/httpx/#release-notes",
|
||||
"documentation_uri" => "https://honeyryderchuck.gitlab.io/httpx/rdoc/",
|
||||
"source_code_uri" => "https://gitlab.com/os85/httpx",
|
||||
"homepage_uri" => "https://os85.gitlab.io/httpx/",
|
||||
"homepage_uri" => "https://honeyryderchuck.gitlab.io/httpx/",
|
||||
"rubygems_mfa_required" => "true",
|
||||
}
|
||||
|
||||
@ -32,5 +32,7 @@ Gem::Specification.new do |gem|
|
||||
|
||||
gem.require_paths = ["lib"]
|
||||
|
||||
gem.add_runtime_dependency "http-2-next", ">= 0.4.1"
|
||||
gem.add_runtime_dependency "http-2", ">= 1.0.0"
|
||||
|
||||
gem.required_ruby_version = ">= 2.7.0"
|
||||
end
|
||||
|
@ -1,3 +1,3 @@
|
||||
# Integration
|
||||
|
||||
This section is to test certain cases where we can't reliably reproduce in our test environments, but can be ran locally.
|
||||
This section is to test certain cases where we can't reliably reproduce in our test environments, but can be ran locally.
|
133
integration_tests/datadog_helpers.rb
Normal file
133
integration_tests/datadog_helpers.rb
Normal file
@ -0,0 +1,133 @@
|
||||
# frozen_string_literal: true
|
||||
|
||||
module DatadogHelpers
|
||||
DATADOG_VERSION = defined?(DDTrace) ? DDTrace::VERSION : Datadog::VERSION
|
||||
ERROR_TAG = if Gem::Version.new(DATADOG_VERSION::STRING) >= Gem::Version.new("1.8.0")
|
||||
"error.message"
|
||||
else
|
||||
"error.msg"
|
||||
end
|
||||
|
||||
private
|
||||
|
||||
def verify_instrumented_request(status, verb:, uri:, span: fetch_spans.first, service: datadog_service_name.to_s, error: nil)
|
||||
if Gem::Version.new(DATADOG_VERSION::STRING) >= Gem::Version.new("2.0.0")
|
||||
assert span.type == "http"
|
||||
else
|
||||
assert span.span_type == "http"
|
||||
end
|
||||
assert span.name == "#{datadog_service_name}.request"
|
||||
assert span.service == service
|
||||
|
||||
assert span.get_tag("out.host") == uri.host
|
||||
assert span.get_tag("out.port") == 80
|
||||
assert span.get_tag("http.method") == verb
|
||||
assert span.get_tag("http.url") == uri.path
|
||||
|
||||
if status && status >= 400
|
||||
verify_http_error_span(span, status, error)
|
||||
elsif error
|
||||
verify_error_span(span)
|
||||
else
|
||||
assert span.status.zero?
|
||||
assert span.get_tag("http.status_code") == status.to_s
|
||||
# peer service
|
||||
# assert span.get_tag("peer.service") == span.service
|
||||
end
|
||||
end
|
||||
|
||||
def verify_http_error_span(span, status, error)
|
||||
assert span.get_tag("http.status_code") == status.to_s
|
||||
assert span.get_tag("error.type") == error
|
||||
assert !span.get_tag(ERROR_TAG).nil?
|
||||
assert span.status == 1
|
||||
end
|
||||
|
||||
def verify_error_span(span)
|
||||
assert span.get_tag("error.type") == "HTTPX::NativeResolveError"
|
||||
assert !span.get_tag(ERROR_TAG).nil?
|
||||
assert span.status == 1
|
||||
end
|
||||
|
||||
def verify_no_distributed_headers(request_headers)
|
||||
assert !request_headers.key?("x-datadog-parent-id")
|
||||
assert !request_headers.key?("x-datadog-trace-id")
|
||||
assert !request_headers.key?("x-datadog-sampling-priority")
|
||||
end
|
||||
|
||||
def verify_distributed_headers(request_headers, span: fetch_spans.first, sampling_priority: 1)
|
||||
if Gem::Version.new(DATADOG_VERSION::STRING) >= Gem::Version.new("2.0.0")
|
||||
assert request_headers["x-datadog-parent-id"] == span.id.to_s
|
||||
else
|
||||
assert request_headers["x-datadog-parent-id"] == span.span_id.to_s
|
||||
end
|
||||
assert request_headers["x-datadog-trace-id"] == trace_id(span)
|
||||
assert request_headers["x-datadog-sampling-priority"] == sampling_priority.to_s
|
||||
end
|
||||
|
||||
if Gem::Version.new(DATADOG_VERSION::STRING) >= Gem::Version.new("1.17.0")
|
||||
def trace_id(span)
|
||||
Datadog::Tracing::Utils::TraceId.to_low_order(span.trace_id).to_s
|
||||
end
|
||||
else
|
||||
def trace_id(span)
|
||||
span.trace_id.to_s
|
||||
end
|
||||
end
|
||||
|
||||
def verify_analytics_headers(span, sample_rate: nil)
|
||||
assert span.get_metric("_dd1.sr.eausr") == sample_rate
|
||||
end
|
||||
|
||||
def set_datadog(options = {}, &blk)
|
||||
Datadog.configure do |c|
|
||||
c.tracing.instrument(datadog_service_name, options, &blk)
|
||||
end
|
||||
|
||||
tracer # initialize tracer patches
|
||||
end
|
||||
|
||||
def tracer
|
||||
@tracer ||= begin
|
||||
tr = Datadog::Tracing.send(:tracer)
|
||||
def tr.write(trace)
|
||||
@traces ||= []
|
||||
@traces << trace
|
||||
end
|
||||
tr
|
||||
end
|
||||
end
|
||||
|
||||
def trace_with_sampling_priority(priority)
|
||||
tracer.trace("foo.bar") do
|
||||
tracer.active_trace.sampling_priority = priority
|
||||
yield
|
||||
end
|
||||
end
|
||||
|
||||
# Returns spans and caches it (similar to +let(:spans)+).
|
||||
def spans
|
||||
@spans ||= fetch_spans
|
||||
end
|
||||
|
||||
# Retrieves and sorts all spans in the current tracer instance.
|
||||
# This method does not cache its results.
|
||||
def fetch_spans
|
||||
spans = (tracer.instance_variable_get(:@traces) || []).map(&:spans)
|
||||
spans.flatten.sort! do |a, b|
|
||||
if a.name == b.name
|
||||
if a.resource == b.resource
|
||||
if a.start_time == b.start_time
|
||||
a.end_time <=> b.end_time
|
||||
else
|
||||
a.start_time <=> b.start_time
|
||||
end
|
||||
else
|
||||
a.resource <=> b.resource
|
||||
end
|
||||
else
|
||||
a.name <=> b.name
|
||||
end
|
||||
end
|
||||
end
|
||||
end
|
@ -1,51 +1,60 @@
|
||||
# frozen_string_literal: true
|
||||
|
||||
require "ddtrace"
|
||||
begin
|
||||
# upcoming 2.0
|
||||
require "datadog"
|
||||
rescue LoadError
|
||||
require "ddtrace"
|
||||
end
|
||||
|
||||
require "test_helper"
|
||||
require "support/http_helpers"
|
||||
require "httpx/adapters/datadog"
|
||||
require_relative "datadog_helpers"
|
||||
|
||||
class DatadogTest < Minitest::Test
|
||||
include HTTPHelpers
|
||||
include DatadogHelpers
|
||||
|
||||
def test_datadog_successful_get_request
|
||||
set_datadog
|
||||
uri = URI(build_uri("/status/200", "http://#{httpbin}"))
|
||||
uri = URI(build_uri("/get", "http://#{httpbin}"))
|
||||
|
||||
response = HTTPX.get(uri)
|
||||
verify_status(response, 200)
|
||||
|
||||
assert !fetch_spans.empty?, "expected to have spans"
|
||||
verify_instrumented_request(response, verb: "GET", uri: uri)
|
||||
verify_distributed_headers(response)
|
||||
verify_instrumented_request(response.status, verb: "GET", uri: uri)
|
||||
verify_distributed_headers(request_headers(response))
|
||||
end
|
||||
|
||||
def test_datadog_successful_post_request
|
||||
set_datadog
|
||||
uri = URI(build_uri("/status/200", "http://#{httpbin}"))
|
||||
uri = URI(build_uri("/post", "http://#{httpbin}"))
|
||||
|
||||
response = HTTPX.post(uri, body: "bla")
|
||||
verify_status(response, 200)
|
||||
|
||||
assert !fetch_spans.empty?, "expected to have spans"
|
||||
verify_instrumented_request(response, verb: "POST", uri: uri)
|
||||
verify_distributed_headers(response)
|
||||
verify_instrumented_request(response.status, verb: "POST", uri: uri)
|
||||
verify_distributed_headers(request_headers(response))
|
||||
end
|
||||
|
||||
def test_datadog_successful_multiple_requests
|
||||
set_datadog
|
||||
uri = URI(build_uri("/status/200", "http://#{httpbin}"))
|
||||
get_uri = URI(build_uri("/get", "http://#{httpbin}"))
|
||||
post_uri = URI(build_uri("/post", "http://#{httpbin}"))
|
||||
|
||||
get_response, post_response = HTTPX.request([["GET", uri], ["POST", uri]])
|
||||
get_response, post_response = HTTPX.request([["GET", get_uri], ["POST", post_uri]])
|
||||
verify_status(get_response, 200)
|
||||
verify_status(post_response, 200)
|
||||
|
||||
assert fetch_spans.size == 2, "expected to have 2 spans"
|
||||
get_span, post_span = fetch_spans
|
||||
verify_instrumented_request(get_response, span: get_span, verb: "GET", uri: uri)
|
||||
verify_instrumented_request(post_response, span: post_span, verb: "POST", uri: uri)
|
||||
verify_distributed_headers(get_response, span: get_span)
|
||||
verify_distributed_headers(post_response, span: post_span)
|
||||
verify_instrumented_request(get_response.status, span: get_span, verb: "GET", uri: get_uri)
|
||||
verify_instrumented_request(post_response.status, span: post_span, verb: "POST", uri: post_uri)
|
||||
verify_distributed_headers(request_headers(get_response), span: get_span)
|
||||
verify_distributed_headers(request_headers(post_response), span: post_span)
|
||||
verify_analytics_headers(get_span)
|
||||
verify_analytics_headers(post_span)
|
||||
end
|
||||
@ -58,8 +67,7 @@ class DatadogTest < Minitest::Test
|
||||
verify_status(response, 500)
|
||||
|
||||
assert !fetch_spans.empty?, "expected to have spans"
|
||||
verify_instrumented_request(response, verb: "GET", uri: uri)
|
||||
verify_distributed_headers(response)
|
||||
verify_instrumented_request(response.status, verb: "GET", uri: uri, error: "HTTPX::HTTPError")
|
||||
end
|
||||
|
||||
def test_datadog_client_error_request
|
||||
@ -70,8 +78,7 @@ class DatadogTest < Minitest::Test
|
||||
verify_status(response, 404)
|
||||
|
||||
assert !fetch_spans.empty?, "expected to have spans"
|
||||
verify_instrumented_request(response, verb: "GET", uri: uri)
|
||||
verify_distributed_headers(response)
|
||||
verify_instrumented_request(response.status, verb: "GET", uri: uri, error: "HTTPX::HTTPError")
|
||||
end
|
||||
|
||||
def test_datadog_some_other_error
|
||||
@ -82,12 +89,11 @@ class DatadogTest < Minitest::Test
|
||||
assert response.is_a?(HTTPX::ErrorResponse), "response should contain errors"
|
||||
|
||||
assert !fetch_spans.empty?, "expected to have spans"
|
||||
verify_instrumented_request(response, verb: "GET", uri: uri, error: "HTTPX::NativeResolveError")
|
||||
verify_distributed_headers(response)
|
||||
verify_instrumented_request(nil, verb: "GET", uri: uri, error: "HTTPX::NativeResolveError")
|
||||
end
|
||||
|
||||
def test_datadog_host_config
|
||||
uri = URI(build_uri("/status/200", "http://#{httpbin}"))
|
||||
uri = URI(build_uri("/get", "http://#{httpbin}"))
|
||||
set_datadog(describe: /#{uri.host}/) do |http|
|
||||
http.service_name = "httpbin"
|
||||
http.split_by_domain = false
|
||||
@ -97,12 +103,12 @@ class DatadogTest < Minitest::Test
|
||||
verify_status(response, 200)
|
||||
|
||||
assert !fetch_spans.empty?, "expected to have spans"
|
||||
verify_instrumented_request(response, service: "httpbin", verb: "GET", uri: uri)
|
||||
verify_distributed_headers(response)
|
||||
verify_instrumented_request(response.status, service: "httpbin", verb: "GET", uri: uri)
|
||||
verify_distributed_headers(request_headers(response))
|
||||
end
|
||||
|
||||
def test_datadog_split_by_domain
|
||||
uri = URI(build_uri("/status/200", "http://#{httpbin}"))
|
||||
uri = URI(build_uri("/get", "http://#{httpbin}"))
|
||||
set_datadog do |http|
|
||||
http.split_by_domain = true
|
||||
end
|
||||
@ -111,13 +117,13 @@ class DatadogTest < Minitest::Test
|
||||
verify_status(response, 200)
|
||||
|
||||
assert !fetch_spans.empty?, "expected to have spans"
|
||||
verify_instrumented_request(response, service: uri.host, verb: "GET", uri: uri)
|
||||
verify_distributed_headers(response)
|
||||
verify_instrumented_request(response.status, service: uri.host, verb: "GET", uri: uri)
|
||||
verify_distributed_headers(request_headers(response))
|
||||
end
|
||||
|
||||
def test_datadog_distributed_headers_disabled
|
||||
set_datadog(distributed_tracing: false)
|
||||
uri = URI(build_uri("/status/200", "http://#{httpbin}"))
|
||||
uri = URI(build_uri("/get", "http://#{httpbin}"))
|
||||
|
||||
sampling_priority = 10
|
||||
response = trace_with_sampling_priority(sampling_priority) do
|
||||
@ -127,14 +133,14 @@ class DatadogTest < Minitest::Test
|
||||
|
||||
assert !fetch_spans.empty?, "expected to have spans"
|
||||
span = fetch_spans.last
|
||||
verify_instrumented_request(response, span: span, verb: "GET", uri: uri)
|
||||
verify_no_distributed_headers(response)
|
||||
verify_instrumented_request(response.status, span: span, verb: "GET", uri: uri)
|
||||
verify_no_distributed_headers(request_headers(response))
|
||||
verify_analytics_headers(span)
|
||||
end
|
||||
|
||||
def test_datadog_distributed_headers_sampling_priority
|
||||
set_datadog
|
||||
uri = URI(build_uri("/status/200", "http://#{httpbin}"))
|
||||
uri = URI(build_uri("/get", "http://#{httpbin}"))
|
||||
|
||||
sampling_priority = 10
|
||||
response = trace_with_sampling_priority(sampling_priority) do
|
||||
@ -145,37 +151,51 @@ class DatadogTest < Minitest::Test
|
||||
|
||||
assert !fetch_spans.empty?, "expected to have spans"
|
||||
span = fetch_spans.last
|
||||
verify_instrumented_request(response, span: span, verb: "GET", uri: uri)
|
||||
verify_distributed_headers(response, span: span, sampling_priority: sampling_priority)
|
||||
verify_instrumented_request(response.status, span: span, verb: "GET", uri: uri)
|
||||
verify_distributed_headers(request_headers(response), span: span, sampling_priority: sampling_priority)
|
||||
verify_analytics_headers(span)
|
||||
end
|
||||
|
||||
def test_datadog_analytics_enabled
|
||||
set_datadog(analytics_enabled: true)
|
||||
uri = URI(build_uri("/status/200", "http://#{httpbin}"))
|
||||
uri = URI(build_uri("/get", "http://#{httpbin}"))
|
||||
|
||||
response = HTTPX.get(uri)
|
||||
verify_status(response, 200)
|
||||
|
||||
assert !fetch_spans.empty?, "expected to have spans"
|
||||
span = fetch_spans.last
|
||||
verify_instrumented_request(response, span: span, verb: "GET", uri: uri)
|
||||
verify_instrumented_request(response.status, span: span, verb: "GET", uri: uri)
|
||||
verify_analytics_headers(span, sample_rate: 1.0)
|
||||
end
|
||||
|
||||
def test_datadog_analytics_sample_rate
|
||||
set_datadog(analytics_enabled: true, analytics_sample_rate: 0.5)
|
||||
uri = URI(build_uri("/status/200", "http://#{httpbin}"))
|
||||
uri = URI(build_uri("/get", "http://#{httpbin}"))
|
||||
|
||||
response = HTTPX.get(uri)
|
||||
verify_status(response, 200)
|
||||
|
||||
assert !fetch_spans.empty?, "expected to have spans"
|
||||
span = fetch_spans.last
|
||||
verify_instrumented_request(response, span: span, verb: "GET", uri: uri)
|
||||
verify_instrumented_request(response.status, span: span, verb: "GET", uri: uri)
|
||||
verify_analytics_headers(span, sample_rate: 0.5)
|
||||
end
|
||||
|
||||
def test_datadog_per_request_span_with_retries
|
||||
set_datadog
|
||||
uri = URI(build_uri("/status/404", "http://#{httpbin}"))
|
||||
|
||||
http = HTTPX.plugin(:retries, max_retries: 2, retry_on: ->(r) { r.status == 404 })
|
||||
response = http.get(uri)
|
||||
verify_status(response, 404)
|
||||
|
||||
assert fetch_spans.size == 3, "expected to 3 spans"
|
||||
fetch_spans.each do |span|
|
||||
verify_instrumented_request(response.status, span: span, verb: "GET", uri: uri, error: "HTTPX::HTTPError")
|
||||
end
|
||||
end
|
||||
|
||||
private
|
||||
|
||||
def setup
|
||||
@ -186,144 +206,15 @@ class DatadogTest < Minitest::Test
|
||||
def teardown
|
||||
super
|
||||
Datadog.registry[:httpx].reset_configuration!
|
||||
Datadog.configuration.tracing[:httpx].enabled = false
|
||||
end
|
||||
|
||||
def verify_instrumented_request(response, verb:, uri:, span: fetch_spans.first, service: "httpx", error: nil)
|
||||
assert span.span_type == "http"
|
||||
assert span.name == "httpx.request"
|
||||
assert span.service == service
|
||||
|
||||
assert span.get_tag("out.host") == uri.host
|
||||
assert span.get_tag("out.port") == "80"
|
||||
assert span.get_tag("http.method") == verb
|
||||
assert span.get_tag("http.url") == uri.path
|
||||
|
||||
error_tag = if defined?(::DDTrace) && Gem::Version.new(::DDTrace::VERSION::STRING) >= Gem::Version.new("1.8.0")
|
||||
"error.message"
|
||||
else
|
||||
"error.msg"
|
||||
end
|
||||
|
||||
if error
|
||||
assert span.get_tag("error.type") == "HTTPX::NativeResolveError"
|
||||
assert !span.get_tag(error_tag).nil?
|
||||
assert span.status == 1
|
||||
elsif response.status >= 400
|
||||
assert span.get_tag("http.status_code") == response.status.to_s
|
||||
assert span.get_tag("error.type") == "HTTPX::HTTPError"
|
||||
assert !span.get_tag(error_tag).nil?
|
||||
assert span.status == 1
|
||||
else
|
||||
assert span.status.zero?
|
||||
assert span.get_tag("http.status_code") == response.status.to_s
|
||||
# peer service
|
||||
assert span.get_tag("peer.service") == span.service
|
||||
end
|
||||
def datadog_service_name
|
||||
:httpx
|
||||
end
|
||||
|
||||
def verify_no_distributed_headers(response)
|
||||
request = response.instance_variable_get(:@request)
|
||||
|
||||
assert !request.headers.key?("x-datadog-parent-id")
|
||||
assert !request.headers.key?("x-datadog-trace-id")
|
||||
assert !request.headers.key?("x-datadog-sampling-priority")
|
||||
end
|
||||
|
||||
def verify_distributed_headers(response, span: fetch_spans.first, sampling_priority: 1)
|
||||
request = response.instance_variable_get(:@request)
|
||||
|
||||
assert request.headers["x-datadog-parent-id"] == span.span_id.to_s
|
||||
assert request.headers["x-datadog-trace-id"] == span.trace_id.to_s
|
||||
assert request.headers["x-datadog-sampling-priority"] == sampling_priority.to_s
|
||||
end
|
||||
|
||||
def verify_analytics_headers(span, sample_rate: nil)
|
||||
assert span.get_metric("_dd1.sr.eausr") == sample_rate
|
||||
end
|
||||
|
||||
if defined?(::DDTrace) && Gem::Version.new(::DDTrace::VERSION::STRING) >= Gem::Version.new("1.0.0")
|
||||
|
||||
def set_datadog(options = {}, &blk)
|
||||
Datadog.configure do |c|
|
||||
c.tracing.instrument(:httpx, options, &blk)
|
||||
end
|
||||
|
||||
tracer # initialize tracer patches
|
||||
end
|
||||
|
||||
def tracer
|
||||
@tracer ||= begin
|
||||
tr = Datadog::Tracing.send(:tracer)
|
||||
def tr.write(trace)
|
||||
@traces ||= []
|
||||
@traces << trace
|
||||
end
|
||||
tr
|
||||
end
|
||||
end
|
||||
|
||||
def trace_with_sampling_priority(priority)
|
||||
tracer.trace("foo.bar") do
|
||||
tracer.active_trace.sampling_priority = priority
|
||||
yield
|
||||
end
|
||||
end
|
||||
else
|
||||
|
||||
def set_datadog(options = {}, &blk)
|
||||
Datadog.configure do |c|
|
||||
c.use(:httpx, options, &blk)
|
||||
end
|
||||
|
||||
tracer # initialize tracer patches
|
||||
end
|
||||
|
||||
def tracer
|
||||
@tracer ||= begin
|
||||
tr = Datadog.tracer
|
||||
def tr.write(trace)
|
||||
@spans ||= []
|
||||
@spans << trace
|
||||
end
|
||||
tr
|
||||
end
|
||||
end
|
||||
|
||||
def trace_with_sampling_priority(priority)
|
||||
tracer.trace("foo.bar") do |span|
|
||||
span.context.sampling_priority = priority
|
||||
yield
|
||||
end
|
||||
end
|
||||
end
|
||||
|
||||
# Returns spans and caches it (similar to +let(:spans)+).
|
||||
def spans
|
||||
@spans ||= fetch_spans
|
||||
end
|
||||
|
||||
# Retrieves and sorts all spans in the current tracer instance.
|
||||
# This method does not cache its results.
|
||||
def fetch_spans
|
||||
spans = if defined?(::DDTrace) && Gem::Version.new(::DDTrace::VERSION::STRING) >= Gem::Version.new("1.0.0")
|
||||
(tracer.instance_variable_get(:@traces) || []).map(&:spans)
|
||||
else
|
||||
tracer.instance_variable_get(:@spans) || []
|
||||
end
|
||||
spans.flatten.sort! do |a, b|
|
||||
if a.name == b.name
|
||||
if a.resource == b.resource
|
||||
if a.start_time == b.start_time
|
||||
a.end_time <=> b.end_time
|
||||
else
|
||||
a.start_time <=> b.start_time
|
||||
end
|
||||
else
|
||||
a.resource <=> b.resource
|
||||
end
|
||||
else
|
||||
a.name <=> b.name
|
||||
end
|
||||
end
|
||||
def request_headers(response)
|
||||
body = json_body(response)
|
||||
body["headers"].transform_keys(&:downcase)
|
||||
end
|
||||
end
|
||||
|
198
integration_tests/faraday_datadog_test.rb
Normal file
198
integration_tests/faraday_datadog_test.rb
Normal file
@ -0,0 +1,198 @@
|
||||
# frozen_string_literal: true
|
||||
|
||||
begin
|
||||
# upcoming 2.0
|
||||
require "datadog"
|
||||
rescue LoadError
|
||||
require "ddtrace"
|
||||
end
|
||||
|
||||
require "test_helper"
|
||||
require "support/http_helpers"
|
||||
require "httpx/adapters/faraday"
|
||||
require_relative "datadog_helpers"
|
||||
|
||||
DATADOG_VERSION = defined?(DDTrace) ? DDTrace::VERSION : Datadog::VERSION
|
||||
|
||||
class FaradayDatadogTest < Minitest::Test
|
||||
include HTTPHelpers
|
||||
include DatadogHelpers
|
||||
include FaradayHelpers
|
||||
|
||||
def test_faraday_datadog_successful_get_request
|
||||
set_datadog
|
||||
uri = URI(build_uri("/status/200"))
|
||||
|
||||
response = faraday_connection.get(uri)
|
||||
verify_status(response, 200)
|
||||
|
||||
assert !fetch_spans.empty?, "expected to have spans"
|
||||
verify_instrumented_request(response.status, verb: "GET", uri: uri)
|
||||
verify_distributed_headers(request_headers(response))
|
||||
end
|
||||
|
||||
def test_faraday_datadog_successful_post_request
|
||||
set_datadog
|
||||
uri = URI(build_uri("/status/200"))
|
||||
|
||||
response = faraday_connection.post(uri, "bla")
|
||||
verify_status(response, 200)
|
||||
|
||||
assert !fetch_spans.empty?, "expected to have spans"
|
||||
verify_instrumented_request(response.status, verb: "POST", uri: uri)
|
||||
verify_distributed_headers(request_headers(response))
|
||||
end
|
||||
|
||||
def test_faraday_datadog_server_error_request
|
||||
set_datadog
|
||||
uri = URI(build_uri("/status/500"))
|
||||
|
||||
ex = assert_raises(Faraday::ServerError) do
|
||||
faraday_connection.tap do |conn|
|
||||
adapter_handler = conn.builder.handlers.last
|
||||
conn.builder.insert_before adapter_handler, Faraday::Response::RaiseError
|
||||
end.get(uri)
|
||||
end
|
||||
|
||||
assert !fetch_spans.empty?, "expected to have spans"
|
||||
verify_instrumented_request(ex.response[:status], verb: "GET", uri: uri, error: "Error 500")
|
||||
|
||||
verify_distributed_headers(request_headers(ex.response))
|
||||
end
|
||||
|
||||
def test_faraday_datadog_client_error_request
|
||||
set_datadog
|
||||
uri = URI(build_uri("/status/404"))
|
||||
|
||||
ex = assert_raises(Faraday::ResourceNotFound) do
|
||||
faraday_connection.tap do |conn|
|
||||
adapter_handler = conn.builder.handlers.last
|
||||
conn.builder.insert_before adapter_handler, Faraday::Response::RaiseError
|
||||
end.get(uri)
|
||||
end
|
||||
|
||||
assert !fetch_spans.empty?, "expected to have spans"
|
||||
verify_instrumented_request(ex.response[:status], verb: "GET", uri: uri, error: "Error 404")
|
||||
verify_distributed_headers(request_headers(ex.response))
|
||||
end
|
||||
|
||||
def test_faraday_datadog_some_other_error
|
||||
set_datadog
|
||||
uri = URI("http://unexisting/")
|
||||
|
||||
assert_raises(HTTPX::NativeResolveError) { faraday_connection.get(uri) }
|
||||
|
||||
assert !fetch_spans.empty?, "expected to have spans"
|
||||
verify_instrumented_request(nil, verb: "GET", uri: uri, error: "HTTPX::NativeResolveError")
|
||||
end
|
||||
|
||||
def test_faraday_datadog_host_config
|
||||
uri = URI(build_uri("/status/200"))
|
||||
set_datadog(describe: /#{uri.host}/) do |http|
|
||||
http.service_name = "httpbin"
|
||||
http.split_by_domain = false
|
||||
end
|
||||
|
||||
response = faraday_connection.get(uri)
|
||||
verify_status(response, 200)
|
||||
|
||||
assert !fetch_spans.empty?, "expected to have spans"
|
||||
verify_instrumented_request(response.status, service: "httpbin", verb: "GET", uri: uri)
|
||||
verify_distributed_headers(request_headers(response))
|
||||
end
|
||||
|
||||
def test_faraday_datadog_split_by_domain
|
||||
uri = URI(build_uri("/status/200"))
|
||||
set_datadog do |http|
|
||||
http.split_by_domain = true
|
||||
end
|
||||
|
||||
response = faraday_connection.get(uri)
|
||||
verify_status(response, 200)
|
||||
|
||||
assert !fetch_spans.empty?, "expected to have spans"
|
||||
verify_instrumented_request(response.status, service: uri.host, verb: "GET", uri: uri)
|
||||
verify_distributed_headers(request_headers(response))
|
||||
end
|
||||
|
||||
def test_faraday_datadog_distributed_headers_disabled
|
||||
set_datadog(distributed_tracing: false)
|
||||
uri = URI(build_uri("/status/200"))
|
||||
|
||||
sampling_priority = 10
|
||||
response = trace_with_sampling_priority(sampling_priority) do
|
||||
faraday_connection.get(uri)
|
||||
end
|
||||
verify_status(response, 200)
|
||||
|
||||
assert !fetch_spans.empty?, "expected to have spans"
|
||||
span = fetch_spans.last
|
||||
verify_instrumented_request(response.status, span: span, verb: "GET", uri: uri)
|
||||
verify_no_distributed_headers(request_headers(response))
|
||||
verify_analytics_headers(span)
|
||||
end unless ENV.key?("CI") # TODO: https://github.com/DataDog/dd-trace-rb/issues/4308
|
||||
|
||||
def test_faraday_datadog_distributed_headers_sampling_priority
|
||||
set_datadog
|
||||
uri = URI(build_uri("/status/200"))
|
||||
|
||||
sampling_priority = 10
|
||||
response = trace_with_sampling_priority(sampling_priority) do
|
||||
faraday_connection.get(uri)
|
||||
end
|
||||
|
||||
verify_status(response, 200)
|
||||
|
||||
assert !fetch_spans.empty?, "expected to have spans"
|
||||
span = fetch_spans.last
|
||||
verify_instrumented_request(response.status, span: span, verb: "GET", uri: uri)
|
||||
verify_distributed_headers(request_headers(response), span: span, sampling_priority: sampling_priority)
|
||||
verify_analytics_headers(span)
|
||||
end unless ENV.key?("CI") # TODO: https://github.com/DataDog/dd-trace-rb/issues/4308
|
||||
|
||||
def test_faraday_datadog_analytics_enabled
|
||||
set_datadog(analytics_enabled: true)
|
||||
uri = URI(build_uri("/status/200"))
|
||||
|
||||
response = faraday_connection.get(uri)
|
||||
verify_status(response, 200)
|
||||
|
||||
assert !fetch_spans.empty?, "expected to have spans"
|
||||
span = fetch_spans.last
|
||||
verify_instrumented_request(response.status, span: span, verb: "GET", uri: uri)
|
||||
verify_analytics_headers(span, sample_rate: 1.0)
|
||||
end
|
||||
|
||||
def test_faraday_datadog_analytics_sample_rate
|
||||
set_datadog(analytics_enabled: true, analytics_sample_rate: 0.5)
|
||||
uri = URI(build_uri("/status/200"))
|
||||
|
||||
response = faraday_connection.get(uri)
|
||||
verify_status(response, 200)
|
||||
|
||||
assert !fetch_spans.empty?, "expected to have spans"
|
||||
span = fetch_spans.last
|
||||
verify_instrumented_request(response.status, span: span, verb: "GET", uri: uri)
|
||||
verify_analytics_headers(span, sample_rate: 0.5)
|
||||
end
|
||||
|
||||
private
|
||||
|
||||
def setup
|
||||
super
|
||||
Datadog.registry[:faraday].reset_configuration!
|
||||
end
|
||||
|
||||
def teardown
|
||||
super
|
||||
Datadog.registry[:faraday].reset_configuration!
|
||||
end
|
||||
|
||||
def datadog_service_name
|
||||
:faraday
|
||||
end
|
||||
|
||||
def origin(orig = httpbin)
|
||||
"http://#{orig}"
|
||||
end
|
||||
end
|
@ -1,150 +1,149 @@
|
||||
# frozen_string_literal: true
|
||||
|
||||
if RUBY_VERSION >= "2.4.0"
|
||||
require "logger"
|
||||
require "stringio"
|
||||
require "sentry-ruby"
|
||||
require "test_helper"
|
||||
require "support/http_helpers"
|
||||
require "httpx/adapters/sentry"
|
||||
require "logger"
|
||||
require "stringio"
|
||||
require "sentry-ruby"
|
||||
require "test_helper"
|
||||
require "support/http_helpers"
|
||||
require "httpx/adapters/sentry"
|
||||
|
||||
class SentryTest < Minitest::Test
|
||||
include HTTPHelpers
|
||||
class SentryTest < Minitest::Test
|
||||
include HTTPHelpers
|
||||
|
||||
DUMMY_DSN = "http://12345:67890@sentry.localdomain/sentry/42"
|
||||
DUMMY_DSN = "http://12345:67890@sentry.localdomain/sentry/42"
|
||||
|
||||
def test_sentry_send_yes_pii
|
||||
before_pii = Sentry.configuration.send_default_pii
|
||||
begin
|
||||
Sentry.configuration.send_default_pii = true
|
||||
def test_sentry_send_yes_pii
|
||||
before_pii = Sentry.configuration.send_default_pii
|
||||
begin
|
||||
Sentry.configuration.send_default_pii = true
|
||||
|
||||
transaction = Sentry.start_transaction
|
||||
Sentry.get_current_scope.set_span(transaction)
|
||||
|
||||
uri = build_uri("/get")
|
||||
|
||||
response = HTTPX.get(uri, params: { "foo" => "bar" })
|
||||
|
||||
verify_status(response, 200)
|
||||
verify_spans(transaction, response, description: "GET #{uri}?foo=bar")
|
||||
crumb = Sentry.get_current_scope.breadcrumbs.peek
|
||||
assert crumb.category == "httpx"
|
||||
assert crumb.data == { status: 200, method: "GET", url: "#{uri}?foo=bar" }
|
||||
ensure
|
||||
Sentry.configuration.send_default_pii = before_pii
|
||||
end
|
||||
end
|
||||
|
||||
def test_sentry_send_no_pii
|
||||
before_pii = Sentry.configuration.send_default_pii
|
||||
begin
|
||||
Sentry.configuration.send_default_pii = false
|
||||
|
||||
transaction = Sentry.start_transaction
|
||||
Sentry.get_current_scope.set_span(transaction)
|
||||
|
||||
uri = build_uri("/get")
|
||||
|
||||
response = HTTPX.get(uri, params: { "foo" => "bar" })
|
||||
|
||||
verify_status(response, 200)
|
||||
verify_spans(transaction, response, description: "GET #{uri}")
|
||||
crumb = Sentry.get_current_scope.breadcrumbs.peek
|
||||
assert crumb.category == "httpx"
|
||||
assert crumb.data == { status: 200, method: "GET", url: uri }
|
||||
ensure
|
||||
Sentry.configuration.send_default_pii = before_pii
|
||||
end
|
||||
end
|
||||
|
||||
def test_sentry_post_request
|
||||
before_pii = Sentry.configuration.send_default_pii
|
||||
begin
|
||||
Sentry.configuration.send_default_pii = true
|
||||
transaction = Sentry.start_transaction
|
||||
Sentry.get_current_scope.set_span(transaction)
|
||||
|
||||
uri = build_uri("/post")
|
||||
response = HTTPX.post(uri, form: { foo: "bar" })
|
||||
verify_status(response, 200)
|
||||
verify_spans(transaction, response, verb: "POST")
|
||||
|
||||
crumb = Sentry.get_current_scope.breadcrumbs.peek
|
||||
assert crumb.category == "httpx"
|
||||
assert crumb.data == { status: 200, method: "POST", url: uri, body: "foo=bar" }
|
||||
ensure
|
||||
Sentry.configuration.send_default_pii = before_pii
|
||||
end
|
||||
end
|
||||
|
||||
def test_sentry_multiple_requests
|
||||
transaction = Sentry.start_transaction
|
||||
Sentry.get_current_scope.set_span(transaction)
|
||||
|
||||
responses = HTTPX.get(build_uri("/status/200"), build_uri("/status/404"))
|
||||
verify_status(responses[0], 200)
|
||||
verify_status(responses[1], 404)
|
||||
verify_spans(transaction, *responses)
|
||||
end
|
||||
uri = build_uri("/get")
|
||||
|
||||
def test_sentry_server_error_request
|
||||
transaction = Sentry.start_transaction
|
||||
Sentry.get_current_scope.set_span(transaction)
|
||||
response = HTTPX.get(uri, params: { "foo" => "bar" })
|
||||
|
||||
uri = URI("http://unexisting/")
|
||||
|
||||
response = HTTPX.get(uri)
|
||||
|
||||
verify_error_response(response, /name or service not known/)
|
||||
assert response.is_a?(HTTPX::ErrorResponse), "response should contain errors"
|
||||
verify_spans(transaction, response, verb: "GET")
|
||||
verify_status(response, 200)
|
||||
verify_spans(transaction, response, description: "GET #{uri}?foo=bar")
|
||||
crumb = Sentry.get_current_scope.breadcrumbs.peek
|
||||
assert crumb.category == "httpx"
|
||||
assert crumb.data == { error: "name or service not known (unexisting)", method: "GET", url: uri.to_s }
|
||||
end
|
||||
|
||||
private
|
||||
|
||||
def verify_spans(transaction, *responses, verb: nil, description: nil)
|
||||
assert transaction.span_recorder.spans.count == responses.size + 1
|
||||
assert transaction.span_recorder.spans[0] == transaction
|
||||
|
||||
response_spans = transaction.span_recorder.spans[1..-1]
|
||||
|
||||
responses.each_with_index do |response, idx|
|
||||
request_span = response_spans[idx]
|
||||
assert request_span.op == "httpx.client"
|
||||
assert !request_span.start_timestamp.nil?
|
||||
assert !request_span.timestamp.nil?
|
||||
assert request_span.start_timestamp != request_span.timestamp
|
||||
assert request_span.description == (description || "#{verb || "GET"} #{response.uri}")
|
||||
if response.is_a?(HTTPX::ErrorResponse)
|
||||
assert request_span.data == { error: response.error.message }
|
||||
else
|
||||
assert request_span.data == { status: response.status }
|
||||
end
|
||||
end
|
||||
end
|
||||
|
||||
def setup
|
||||
super
|
||||
|
||||
mock_io = StringIO.new
|
||||
mock_logger = Logger.new(mock_io)
|
||||
|
||||
Sentry.init do |config|
|
||||
config.traces_sample_rate = 1.0
|
||||
config.logger = mock_logger
|
||||
config.dsn = DUMMY_DSN
|
||||
config.transport.transport_class = Sentry::DummyTransport
|
||||
config.breadcrumbs_logger = [:http_logger]
|
||||
# so the events will be sent synchronously for testing
|
||||
config.background_worker_threads = 0
|
||||
end
|
||||
end
|
||||
|
||||
def origin
|
||||
"https://#{httpbin}"
|
||||
assert crumb.data == { status: 200, method: "GET", url: "#{uri}?foo=bar" }
|
||||
ensure
|
||||
Sentry.configuration.send_default_pii = before_pii
|
||||
end
|
||||
end
|
||||
|
||||
def test_sentry_send_no_pii
|
||||
before_pii = Sentry.configuration.send_default_pii
|
||||
begin
|
||||
Sentry.configuration.send_default_pii = false
|
||||
|
||||
transaction = Sentry.start_transaction
|
||||
Sentry.get_current_scope.set_span(transaction)
|
||||
|
||||
uri = build_uri("/get")
|
||||
|
||||
response = HTTPX.get(uri, params: { "foo" => "bar" })
|
||||
|
||||
verify_status(response, 200)
|
||||
verify_spans(transaction, response, description: "GET #{uri}")
|
||||
crumb = Sentry.get_current_scope.breadcrumbs.peek
|
||||
assert crumb.category == "httpx"
|
||||
assert crumb.data == { status: 200, method: "GET", url: uri }
|
||||
ensure
|
||||
Sentry.configuration.send_default_pii = before_pii
|
||||
end
|
||||
end
|
||||
|
||||
def test_sentry_post_request
|
||||
before_pii = Sentry.configuration.send_default_pii
|
||||
begin
|
||||
Sentry.configuration.send_default_pii = true
|
||||
transaction = Sentry.start_transaction
|
||||
Sentry.get_current_scope.set_span(transaction)
|
||||
|
||||
uri = build_uri("/post")
|
||||
response = HTTPX.post(uri, form: { foo: "bar" })
|
||||
verify_status(response, 200)
|
||||
verify_spans(transaction, response, verb: "POST")
|
||||
|
||||
crumb = Sentry.get_current_scope.breadcrumbs.peek
|
||||
assert crumb.category == "httpx"
|
||||
assert crumb.data == { status: 200, method: "POST", url: uri, body: "foo=bar" }
|
||||
ensure
|
||||
Sentry.configuration.send_default_pii = before_pii
|
||||
end
|
||||
end
|
||||
|
||||
def test_sentry_multiple_requests
|
||||
transaction = Sentry.start_transaction
|
||||
Sentry.get_current_scope.set_span(transaction)
|
||||
|
||||
responses = HTTPX.get(build_uri("/status/200"), build_uri("/status/404"))
|
||||
verify_status(responses[0], 200)
|
||||
verify_status(responses[1], 404)
|
||||
verify_spans(transaction, *responses)
|
||||
end
|
||||
|
||||
def test_sentry_server_error_request
|
||||
transaction = Sentry.start_transaction
|
||||
Sentry.get_current_scope.set_span(transaction)
|
||||
|
||||
uri = URI("http://unexisting/")
|
||||
|
||||
response = HTTPX.get(uri)
|
||||
|
||||
verify_error_response(response, /name or service not known/)
|
||||
assert response.is_a?(HTTPX::ErrorResponse), "response should contain errors"
|
||||
verify_spans(transaction, response, verb: "GET")
|
||||
crumb = Sentry.get_current_scope.breadcrumbs.peek
|
||||
assert crumb.category == "httpx"
|
||||
assert crumb.data == { error: "name or service not known", method: "GET", url: uri.to_s }
|
||||
end
|
||||
|
||||
private
|
||||
|
||||
def verify_spans(transaction, *responses, verb: nil, description: nil)
|
||||
assert transaction.span_recorder.spans.count == responses.size + 1
|
||||
assert transaction.span_recorder.spans[0] == transaction
|
||||
|
||||
response_spans = transaction.span_recorder.spans[1..-1]
|
||||
|
||||
responses.each_with_index do |response, idx|
|
||||
request_span = response_spans[idx]
|
||||
assert request_span.op == "httpx.client"
|
||||
assert !request_span.start_timestamp.nil?
|
||||
assert !request_span.timestamp.nil?
|
||||
assert request_span.start_timestamp != request_span.timestamp
|
||||
assert request_span.description == (description || "#{verb || "GET"} #{response.uri}")
|
||||
if response.is_a?(HTTPX::ErrorResponse)
|
||||
assert request_span.data == { error: response.error.message }
|
||||
else
|
||||
assert request_span.data == { status: response.status }
|
||||
end
|
||||
end
|
||||
end
|
||||
|
||||
def setup
|
||||
super
|
||||
|
||||
mock_io = StringIO.new
|
||||
mock_logger = Logger.new(mock_io)
|
||||
|
||||
Sentry.init do |config|
|
||||
config.traces_sample_rate = 1.0
|
||||
config.sdk_logger = mock_logger
|
||||
config.dsn = DUMMY_DSN
|
||||
config.transport.transport_class = Sentry::DummyTransport
|
||||
config.background_worker_threads = 0
|
||||
config.breadcrumbs_logger = [:http_logger]
|
||||
config.enabled_patches << :httpx
|
||||
# so the events will be sent synchronously for testing
|
||||
end
|
||||
end
|
||||
|
||||
def origin
|
||||
"https://#{httpbin}"
|
||||
end
|
||||
end
|
||||
|
@ -26,6 +26,7 @@ class WebmockTest < Minitest::Test
|
||||
end
|
||||
|
||||
def teardown
|
||||
super
|
||||
WebMock.reset!
|
||||
WebMock.allow_net_connect!
|
||||
WebMock.disable!
|
||||
@ -49,6 +50,14 @@ class WebmockTest < Minitest::Test
|
||||
assert_equal(@exception_class.new("exception message"), response.error)
|
||||
end
|
||||
|
||||
def test_response_not_decoded
|
||||
request = stub_request(:get, MOCK_URL_HTTP).to_return(body: "body", headers: { content_encoding: "gzip" })
|
||||
response = HTTPX.get(MOCK_URL_HTTP)
|
||||
|
||||
assert_equal("body", response.body.to_s)
|
||||
assert_requested(request)
|
||||
end
|
||||
|
||||
def test_to_timeout
|
||||
response = http_request(:get, MOCK_URL_HTTP_TIMEOUT)
|
||||
assert_requested(@stub_timeout)
|
||||
@ -87,7 +96,7 @@ class WebmockTest < Minitest::Test
|
||||
expected_message = "The request GET #{MOCK_URL_HTTP}/ was expected to execute 1 time but it executed 0 times" \
|
||||
"\n\nThe following requests were made:\n\nNo requests were made.\n" \
|
||||
"============================================================"
|
||||
assert_raise_with_message(MiniTest::Assertion, expected_message) do
|
||||
assert_raise_with_message(Minitest::Assertion, expected_message) do
|
||||
assert_requested(:get, MOCK_URL_HTTP)
|
||||
end
|
||||
end
|
||||
@ -96,7 +105,7 @@ class WebmockTest < Minitest::Test
|
||||
expected_message = "The request ANY #{MOCK_URL_HTTP}/ was expected to execute 1 time but it executed 0 times" \
|
||||
"\n\nThe following requests were made:\n\nNo requests were made.\n" \
|
||||
"============================================================"
|
||||
assert_raise_with_message(MiniTest::Assertion, expected_message) do
|
||||
assert_raise_with_message(Minitest::Assertion, expected_message) do
|
||||
assert_requested(@stub_http)
|
||||
end
|
||||
end
|
||||
@ -146,13 +155,36 @@ class WebmockTest < Minitest::Test
|
||||
assert_requested(:get, MOCK_URL_HTTP, query: hash_excluding("a" => %w[b c]))
|
||||
end
|
||||
|
||||
def test_verification_that_expected_request_with_hash_as_body
|
||||
stub_request(:post, MOCK_URL_HTTP).with(body: { foo: "bar" })
|
||||
http_request(:post, MOCK_URL_HTTP, form: { foo: "bar" })
|
||||
assert_requested(:post, MOCK_URL_HTTP, body: { foo: "bar" })
|
||||
end
|
||||
|
||||
def test_verification_that_expected_request_occured_with_form_file
|
||||
file = File.new(fixture_file_path)
|
||||
stub_request(:post, MOCK_URL_HTTP)
|
||||
http_request(:post, MOCK_URL_HTTP, form: { file: file })
|
||||
# TODO: webmock does not support matching multipart request body
|
||||
assert_requested(:post, MOCK_URL_HTTP)
|
||||
end
|
||||
|
||||
def test_verification_that_expected_request_occured_with_form_tempfile
|
||||
stub_request(:post, MOCK_URL_HTTP)
|
||||
Tempfile.open("tmp") do |file|
|
||||
http_request(:post, MOCK_URL_HTTP, form: { file: file })
|
||||
end
|
||||
# TODO: webmock does not support matching multipart request body
|
||||
assert_requested(:post, MOCK_URL_HTTP)
|
||||
end
|
||||
|
||||
def test_verification_that_non_expected_request_didnt_occur
|
||||
expected_message = Regexp.new(
|
||||
"The request GET #{MOCK_URL_HTTP}/ was not expected to execute but it executed 1 time\n\n" \
|
||||
"The following requests were made:\n\nGET #{MOCK_URL_HTTP}/ with headers .+ was made 1 time\n\n" \
|
||||
"============================================================"
|
||||
)
|
||||
assert_raise_with_message(MiniTest::Assertion, expected_message) do
|
||||
assert_raise_with_message(Minitest::Assertion, expected_message) do
|
||||
http_request(:get, "http://www.example.com/")
|
||||
assert_not_requested(:get, "http://www.example.com")
|
||||
end
|
||||
@ -164,7 +196,7 @@ class WebmockTest < Minitest::Test
|
||||
"The following requests were made:\n\nGET #{MOCK_URL_HTTP}/ with headers .+ was made 1 time\n\n" \
|
||||
"============================================================"
|
||||
)
|
||||
assert_raise_with_message(MiniTest::Assertion, expected_message) do
|
||||
assert_raise_with_message(Minitest::Assertion, expected_message) do
|
||||
http_request(:get, "#{MOCK_URL_HTTP}/")
|
||||
refute_requested(:get, MOCK_URL_HTTP)
|
||||
end
|
||||
@ -176,12 +208,43 @@ class WebmockTest < Minitest::Test
|
||||
"The following requests were made:\n\nGET #{MOCK_URL_HTTP}/ with headers .+ was made 1 time\n\n" \
|
||||
"============================================================"
|
||||
)
|
||||
assert_raise_with_message(MiniTest::Assertion, expected_message) do
|
||||
assert_raise_with_message(Minitest::Assertion, expected_message) do
|
||||
http_request(:get, "#{MOCK_URL_HTTP}/")
|
||||
assert_not_requested(@stub_http)
|
||||
end
|
||||
end
|
||||
|
||||
def test_webmock_allows_real_request
|
||||
WebMock.allow_net_connect!
|
||||
uri = build_uri("/get?foo=bar")
|
||||
response = HTTPX.get(uri)
|
||||
verify_status(response, 200)
|
||||
verify_body_length(response)
|
||||
assert_requested(:get, uri, query: { "foo" => "bar" })
|
||||
end
|
||||
|
||||
def test_webmock_allows_real_request_with_body
|
||||
WebMock.allow_net_connect!
|
||||
uri = build_uri("/post")
|
||||
response = HTTPX.post(uri, form: { foo: "bar" })
|
||||
verify_status(response, 200)
|
||||
verify_body_length(response)
|
||||
assert_requested(:post, uri, headers: { "Content-Type" => "application/x-www-form-urlencoded" }, body: "foo=bar")
|
||||
end
|
||||
|
||||
def test_webmock_allows_real_request_with_file_body
|
||||
WebMock.allow_net_connect!
|
||||
uri = build_uri("/post")
|
||||
response = HTTPX.post(uri, form: { image: File.new(fixture_file_path) })
|
||||
verify_status(response, 200)
|
||||
verify_body_length(response)
|
||||
body = json_body(response)
|
||||
verify_header(body["headers"], "Content-Type", "multipart/form-data")
|
||||
verify_uploaded_image(body, "image", "image/jpeg")
|
||||
# TODO: webmock does not support matching multipart request body
|
||||
# assert_requested(:post, uri, headers: { "Content-Type" => "multipart/form-data" }, form: { "image" => File.new(fixture_file_path) })
|
||||
end
|
||||
|
||||
def test_webmock_mix_mock_and_real_request
|
||||
WebMock.allow_net_connect!
|
||||
|
||||
@ -214,6 +277,49 @@ class WebmockTest < Minitest::Test
|
||||
assert_not_requested(:get, "http://#{httpbin}")
|
||||
end
|
||||
|
||||
def test_webmock_follow_redirects_with_stream_plugin_each
|
||||
session = HTTPX.plugin(:follow_redirects).plugin(:stream)
|
||||
redirect_url = "#{MOCK_URL_HTTP}/redirect"
|
||||
initial_request = stub_request(:get, MOCK_URL_HTTP).to_return(status: 302, headers: { location: redirect_url }, body: "redirecting")
|
||||
redirect_request = stub_request(:get, redirect_url).to_return(status: 200, body: "body")
|
||||
|
||||
response = session.get(MOCK_URL_HTTP, stream: true)
|
||||
body = "".b
|
||||
response.each do |chunk|
|
||||
next if (300..399).cover?(response.status)
|
||||
|
||||
body << chunk
|
||||
end
|
||||
assert_equal("body", body)
|
||||
assert_requested(initial_request)
|
||||
assert_requested(redirect_request)
|
||||
end
|
||||
|
||||
def test_webmock_with_stream_plugin_each
|
||||
session = HTTPX.plugin(:stream)
|
||||
request = stub_request(:get, MOCK_URL_HTTP).to_return(body: "body")
|
||||
|
||||
body = "".b
|
||||
response = session.get(MOCK_URL_HTTP, stream: true)
|
||||
response.each do |chunk|
|
||||
next if (300..399).cover?(response.status)
|
||||
|
||||
body << chunk
|
||||
end
|
||||
|
||||
assert_equal("body", body)
|
||||
assert_requested(request)
|
||||
end
|
||||
|
||||
def test_webmock_with_stream_plugin_each_line
|
||||
session = HTTPX.plugin(:stream)
|
||||
request = stub_request(:get, MOCK_URL_HTTP).to_return(body: "First line\nSecond line")
|
||||
|
||||
response = session.get(MOCK_URL_HTTP, stream: true)
|
||||
assert_equal(["First line", "Second line"], response.each_line.to_a)
|
||||
assert_requested(request)
|
||||
end
|
||||
|
||||
private
|
||||
|
||||
def assert_raise_with_message(e, message, &block)
|
||||
@ -228,4 +334,8 @@ class WebmockTest < Minitest::Test
|
||||
def http_request(meth, *uris, **options)
|
||||
HTTPX.__send__(meth, *uris, **options)
|
||||
end
|
||||
|
||||
def scheme
|
||||
"http://"
|
||||
end
|
||||
end
|
||||
|
82
lib/httpx.rb
82
lib/httpx.rb
@ -2,6 +2,42 @@
|
||||
|
||||
require "httpx/version"
|
||||
|
||||
# Top-Level Namespace
|
||||
#
|
||||
module HTTPX
|
||||
EMPTY = [].freeze
|
||||
EMPTY_HASH = {}.freeze
|
||||
|
||||
# All plugins should be stored under this module/namespace. Can register and load
|
||||
# plugins.
|
||||
#
|
||||
module Plugins
|
||||
@plugins = {}
|
||||
@plugins_mutex = Thread::Mutex.new
|
||||
|
||||
# Loads a plugin based on a name. If the plugin hasn't been loaded, tries to load
|
||||
# it from the load path under "httpx/plugins/" directory.
|
||||
#
|
||||
def self.load_plugin(name)
|
||||
h = @plugins
|
||||
m = @plugins_mutex
|
||||
unless (plugin = m.synchronize { h[name] })
|
||||
require "httpx/plugins/#{name}"
|
||||
raise "Plugin #{name} hasn't been registered" unless (plugin = m.synchronize { h[name] })
|
||||
end
|
||||
plugin
|
||||
end
|
||||
|
||||
# Registers a plugin (+mod+) in the central store indexed by +name+.
|
||||
#
|
||||
def self.register_plugin(name, mod)
|
||||
h = @plugins
|
||||
m = @plugins_mutex
|
||||
m.synchronize { h[name] = mod }
|
||||
end
|
||||
end
|
||||
end
|
||||
|
||||
require "httpx/extensions"
|
||||
|
||||
require "httpx/errors"
|
||||
@ -20,55 +56,11 @@ require "httpx/response"
|
||||
require "httpx/options"
|
||||
require "httpx/chainable"
|
||||
|
||||
require "mutex_m"
|
||||
# Top-Level Namespace
|
||||
#
|
||||
module HTTPX
|
||||
EMPTY = [].freeze
|
||||
|
||||
# All plugins should be stored under this module/namespace. Can register and load
|
||||
# plugins.
|
||||
#
|
||||
module Plugins
|
||||
@plugins = {}
|
||||
@plugins.extend(Mutex_m)
|
||||
|
||||
# Loads a plugin based on a name. If the plugin hasn't been loaded, tries to load
|
||||
# it from the load path under "httpx/plugins/" directory.
|
||||
#
|
||||
def self.load_plugin(name)
|
||||
h = @plugins
|
||||
unless (plugin = h.synchronize { h[name] })
|
||||
require "httpx/plugins/#{name}"
|
||||
raise "Plugin #{name} hasn't been registered" unless (plugin = h.synchronize { h[name] })
|
||||
end
|
||||
plugin
|
||||
end
|
||||
|
||||
# Registers a plugin (+mod+) in the central store indexed by +name+.
|
||||
#
|
||||
def self.register_plugin(name, mod)
|
||||
h = @plugins
|
||||
h.synchronize { h[name] = mod }
|
||||
end
|
||||
end
|
||||
|
||||
# :nocov:
|
||||
def self.const_missing(const_name)
|
||||
super unless const_name == :Client
|
||||
warn "DEPRECATION WARNING: the class #{self}::Client is deprecated. Use #{self}::Session instead."
|
||||
Session
|
||||
end
|
||||
# :nocov:
|
||||
|
||||
extend Chainable
|
||||
end
|
||||
|
||||
require "httpx/session"
|
||||
require "httpx/session_extensions"
|
||||
|
||||
# load integrations when possible
|
||||
|
||||
require "httpx/adapters/datadog" if defined?(DDTrace) || defined?(Datadog)
|
||||
require "httpx/adapters/datadog" if defined?(DDTrace) || defined?(Datadog::Tracing)
|
||||
require "httpx/adapters/sentry" if defined?(Sentry)
|
||||
require "httpx/adapters/webmock" if defined?(WebMock)
|
||||
|
@ -1,177 +1,211 @@
|
||||
# frozen_string_literal: true
|
||||
|
||||
if defined?(DDTrace) && DDTrace::VERSION::STRING >= "1.0.0"
|
||||
require "datadog/tracing/contrib/integration"
|
||||
require "datadog/tracing/contrib/configuration/settings"
|
||||
require "datadog/tracing/contrib/patcher"
|
||||
require "datadog/tracing/contrib/integration"
|
||||
require "datadog/tracing/contrib/configuration/settings"
|
||||
require "datadog/tracing/contrib/patcher"
|
||||
|
||||
TRACING_MODULE = Datadog::Tracing
|
||||
else
|
||||
|
||||
require "ddtrace/contrib/integration"
|
||||
require "ddtrace/contrib/configuration/settings"
|
||||
require "ddtrace/contrib/patcher"
|
||||
|
||||
TRACING_MODULE = Datadog
|
||||
end
|
||||
|
||||
module TRACING_MODULE # rubocop:disable Naming/ClassAndModuleCamelCase
|
||||
module Datadog::Tracing
|
||||
module Contrib
|
||||
module HTTPX
|
||||
if defined?(::DDTrace) && ::DDTrace::VERSION::STRING >= "1.0.0"
|
||||
METADATA_MODULE = TRACING_MODULE::Metadata
|
||||
DATADOG_VERSION = defined?(::DDTrace) ? ::DDTrace::VERSION : ::Datadog::VERSION
|
||||
|
||||
TYPE_OUTBOUND = TRACING_MODULE::Metadata::Ext::HTTP::TYPE_OUTBOUND
|
||||
METADATA_MODULE = Datadog::Tracing::Metadata
|
||||
|
||||
TAG_PEER_SERVICE = TRACING_MODULE::Metadata::Ext::TAG_PEER_SERVICE
|
||||
|
||||
TAG_URL = TRACING_MODULE::Metadata::Ext::HTTP::TAG_URL
|
||||
TAG_METHOD = TRACING_MODULE::Metadata::Ext::HTTP::TAG_METHOD
|
||||
TAG_TARGET_HOST = TRACING_MODULE::Metadata::Ext::NET::TAG_TARGET_HOST
|
||||
TAG_TARGET_PORT = TRACING_MODULE::Metadata::Ext::NET::TAG_TARGET_PORT
|
||||
|
||||
TAG_STATUS_CODE = TRACING_MODULE::Metadata::Ext::HTTP::TAG_STATUS_CODE
|
||||
TYPE_OUTBOUND = Datadog::Tracing::Metadata::Ext::HTTP::TYPE_OUTBOUND
|
||||
|
||||
TAG_BASE_SERVICE = if Gem::Version.new(DATADOG_VERSION::STRING) < Gem::Version.new("1.15.0")
|
||||
"_dd.base_service"
|
||||
else
|
||||
|
||||
METADATA_MODULE = Datadog
|
||||
|
||||
TYPE_OUTBOUND = TRACING_MODULE::Ext::HTTP::TYPE_OUTBOUND
|
||||
TAG_PEER_SERVICE = TRACING_MODULE::Ext::Integration::TAG_PEER_SERVICE
|
||||
TAG_URL = TRACING_MODULE::Ext::HTTP::URL
|
||||
TAG_METHOD = TRACING_MODULE::Ext::HTTP::METHOD
|
||||
TAG_TARGET_HOST = TRACING_MODULE::Ext::NET::TARGET_HOST
|
||||
TAG_TARGET_PORT = TRACING_MODULE::Ext::NET::TARGET_PORT
|
||||
TAG_STATUS_CODE = Datadog::Ext::HTTP::STATUS_CODE
|
||||
PROPAGATOR = TRACING_MODULE::HTTPPropagator
|
||||
|
||||
Datadog::Tracing::Contrib::Ext::Metadata::TAG_BASE_SERVICE
|
||||
end
|
||||
TAG_PEER_HOSTNAME = Datadog::Tracing::Metadata::Ext::TAG_PEER_HOSTNAME
|
||||
|
||||
TAG_KIND = Datadog::Tracing::Metadata::Ext::TAG_KIND
|
||||
TAG_CLIENT = Datadog::Tracing::Metadata::Ext::SpanKind::TAG_CLIENT
|
||||
TAG_COMPONENT = Datadog::Tracing::Metadata::Ext::TAG_COMPONENT
|
||||
TAG_OPERATION = Datadog::Tracing::Metadata::Ext::TAG_OPERATION
|
||||
TAG_URL = Datadog::Tracing::Metadata::Ext::HTTP::TAG_URL
|
||||
TAG_METHOD = Datadog::Tracing::Metadata::Ext::HTTP::TAG_METHOD
|
||||
TAG_TARGET_HOST = Datadog::Tracing::Metadata::Ext::NET::TAG_TARGET_HOST
|
||||
TAG_TARGET_PORT = Datadog::Tracing::Metadata::Ext::NET::TAG_TARGET_PORT
|
||||
|
||||
TAG_STATUS_CODE = Datadog::Tracing::Metadata::Ext::HTTP::TAG_STATUS_CODE
|
||||
|
||||
# HTTPX Datadog Plugin
|
||||
#
|
||||
# Enables tracing for httpx requests. A span will be created for each individual requests,
|
||||
# and it'll trace since the moment it is fed to the connection, until the moment the response is
|
||||
# fed back to the session.
|
||||
# Enables tracing for httpx requests.
|
||||
#
|
||||
# A span will be created for each request transaction; the span is created lazily only when
|
||||
# buffering a request, and it is fed the start time stored inside the tracer object.
|
||||
#
|
||||
module Plugin
|
||||
class RequestTracer
|
||||
include Contrib::HttpAnnotationHelper
|
||||
module RequestTracer
|
||||
extend Contrib::HttpAnnotationHelper
|
||||
|
||||
module_function
|
||||
|
||||
SPAN_REQUEST = "httpx.request"
|
||||
|
||||
def initialize(request)
|
||||
@request = request
|
||||
# initializes tracing on the +request+.
|
||||
def call(request)
|
||||
return unless configuration(request).enabled
|
||||
|
||||
span = nil
|
||||
|
||||
# request objects are reused, when already buffered requests get rerouted to a different
|
||||
# connection due to connection issues, or when they already got a response, but need to
|
||||
# be retried. In such situations, the original span needs to be extended for the former,
|
||||
# while a new is required for the latter.
|
||||
request.on(:idle) do
|
||||
span = nil
|
||||
end
|
||||
# the span is initialized when the request is buffered in the parser, which is the closest
|
||||
# one gets to actually sending the request.
|
||||
request.on(:headers) do
|
||||
next if span
|
||||
|
||||
span = initialize_span(request, now)
|
||||
end
|
||||
|
||||
request.on(:response) do |response|
|
||||
unless span
|
||||
next unless response.is_a?(::HTTPX::ErrorResponse) && response.error.respond_to?(:connection)
|
||||
|
||||
# handles the case when the +error+ happened during name resolution, which means
|
||||
# that the tracing start point hasn't been triggered yet; in such cases, the approximate
|
||||
# initial resolving time is collected from the connection, and used as span start time,
|
||||
# and the tracing object in inserted before the on response callback is called.
|
||||
span = initialize_span(request, response.error.connection.init_time)
|
||||
|
||||
end
|
||||
|
||||
finish(response, span)
|
||||
end
|
||||
end
|
||||
|
||||
def call
|
||||
return unless tracing_enabled?
|
||||
def finish(response, span)
|
||||
if response.is_a?(::HTTPX::ErrorResponse)
|
||||
span.set_error(response.error)
|
||||
else
|
||||
span.set_tag(TAG_STATUS_CODE, response.status.to_s)
|
||||
|
||||
@request.on(:response, &method(:finish))
|
||||
span.set_error(::HTTPX::HTTPError.new(response)) if response.status >= 400 && response.status <= 599
|
||||
|
||||
verb = @request.verb
|
||||
uri = @request.uri
|
||||
span.set_tags(
|
||||
Datadog.configuration.tracing.header_tags.response_tags(response.headers.to_h)
|
||||
) if Datadog.configuration.tracing.respond_to?(:header_tags)
|
||||
end
|
||||
|
||||
@span = build_span
|
||||
span.finish
|
||||
end
|
||||
|
||||
@span.resource = verb
|
||||
# return a span initialized with the +@request+ state.
|
||||
def initialize_span(request, start_time)
|
||||
verb = request.verb
|
||||
uri = request.uri
|
||||
|
||||
# Add additional request specific tags to the span.
|
||||
config = configuration(request)
|
||||
|
||||
@span.set_tag(TAG_URL, @request.path)
|
||||
@span.set_tag(TAG_METHOD, verb)
|
||||
span = create_span(request, config, start_time)
|
||||
|
||||
@span.set_tag(TAG_TARGET_HOST, uri.host)
|
||||
@span.set_tag(TAG_TARGET_PORT, uri.port.to_s)
|
||||
span.resource = verb
|
||||
|
||||
# Tag original global service name if not used
|
||||
span.set_tag(TAG_BASE_SERVICE, Datadog.configuration.service) if span.service != Datadog.configuration.service
|
||||
|
||||
span.set_tag(TAG_KIND, TAG_CLIENT)
|
||||
|
||||
span.set_tag(TAG_COMPONENT, "httpx")
|
||||
span.set_tag(TAG_OPERATION, "request")
|
||||
|
||||
span.set_tag(TAG_URL, request.path)
|
||||
span.set_tag(TAG_METHOD, verb)
|
||||
|
||||
span.set_tag(TAG_TARGET_HOST, uri.host)
|
||||
span.set_tag(TAG_TARGET_PORT, uri.port)
|
||||
|
||||
span.set_tag(TAG_PEER_HOSTNAME, uri.host)
|
||||
|
||||
# Tag as an external peer service
|
||||
@span.set_tag(TAG_PEER_SERVICE, @span.service)
|
||||
# span.set_tag(TAG_PEER_SERVICE, span.service)
|
||||
|
||||
propagate_headers if @configuration[:distributed_tracing]
|
||||
if config[:distributed_tracing]
|
||||
propagate_trace_http(
|
||||
Datadog::Tracing.active_trace,
|
||||
request.headers
|
||||
)
|
||||
end
|
||||
|
||||
# Set analytics sample rate
|
||||
if Contrib::Analytics.enabled?(@configuration[:analytics_enabled])
|
||||
Contrib::Analytics.set_sample_rate(@span, @configuration[:analytics_sample_rate])
|
||||
if Contrib::Analytics.enabled?(config[:analytics_enabled])
|
||||
Contrib::Analytics.set_sample_rate(span, config[:analytics_sample_rate])
|
||||
end
|
||||
|
||||
span.set_tags(
|
||||
Datadog.configuration.tracing.header_tags.request_tags(request.headers.to_h)
|
||||
) if Datadog.configuration.tracing.respond_to?(:header_tags)
|
||||
|
||||
span
|
||||
rescue StandardError => e
|
||||
Datadog.logger.error("error preparing span for http request: #{e}")
|
||||
Datadog.logger.error(e.backtrace)
|
||||
end
|
||||
|
||||
def finish(response)
|
||||
return unless @span
|
||||
|
||||
if response.is_a?(::HTTPX::ErrorResponse)
|
||||
@span.set_error(response.error)
|
||||
else
|
||||
@span.set_tag(TAG_STATUS_CODE, response.status.to_s)
|
||||
|
||||
@span.set_error(::HTTPX::HTTPError.new(response)) if response.status >= 400 && response.status <= 599
|
||||
end
|
||||
|
||||
@span.finish
|
||||
def now
|
||||
::Datadog::Core::Utils::Time.now.utc
|
||||
end
|
||||
|
||||
private
|
||||
def configuration(request)
|
||||
Datadog.configuration.tracing[:httpx, request.uri.host]
|
||||
end
|
||||
|
||||
if defined?(::DDTrace) && ::DDTrace::VERSION::STRING >= "1.0.0"
|
||||
if Gem::Version.new(DATADOG_VERSION::STRING) >= Gem::Version.new("2.0.0")
|
||||
def propagate_trace_http(trace, headers)
|
||||
Datadog::Tracing::Contrib::HTTP.inject(trace, headers)
|
||||
end
|
||||
|
||||
def build_span
|
||||
TRACING_MODULE.trace(
|
||||
def create_span(request, configuration, start_time)
|
||||
Datadog::Tracing.trace(
|
||||
SPAN_REQUEST,
|
||||
service: service_name(@request.uri.host, configuration, Datadog.configuration_for(self)),
|
||||
span_type: TYPE_OUTBOUND
|
||||
service: service_name(request.uri.host, configuration),
|
||||
type: TYPE_OUTBOUND,
|
||||
start_time: start_time
|
||||
)
|
||||
end
|
||||
|
||||
def propagate_headers
|
||||
TRACING_MODULE::Propagation::HTTP.inject!(TRACING_MODULE.active_trace, @request.headers)
|
||||
end
|
||||
|
||||
def configuration
|
||||
@configuration ||= Datadog.configuration.tracing[:httpx, @request.uri.host]
|
||||
end
|
||||
|
||||
def tracing_enabled?
|
||||
TRACING_MODULE.enabled?
|
||||
end
|
||||
else
|
||||
def build_span
|
||||
service_name = configuration[:split_by_domain] ? @request.uri.host : configuration[:service_name]
|
||||
configuration[:tracer].trace(
|
||||
def propagate_trace_http(trace, headers)
|
||||
Datadog::Tracing::Propagation::HTTP.inject!(trace.to_digest, headers)
|
||||
end
|
||||
|
||||
def create_span(request, configuration, start_time)
|
||||
Datadog::Tracing.trace(
|
||||
SPAN_REQUEST,
|
||||
service: service_name,
|
||||
span_type: TYPE_OUTBOUND
|
||||
service: service_name(request.uri.host, configuration),
|
||||
span_type: TYPE_OUTBOUND,
|
||||
start_time: start_time
|
||||
)
|
||||
end
|
||||
|
||||
def propagate_headers
|
||||
Datadog::HTTPPropagator.inject!(@span.context, @request.headers)
|
||||
end
|
||||
|
||||
def configuration
|
||||
@configuration ||= Datadog.configuration[:httpx, @request.uri.host]
|
||||
end
|
||||
|
||||
def tracing_enabled?
|
||||
configuration[:tracer].enabled
|
||||
end
|
||||
end
|
||||
end
|
||||
|
||||
module RequestMethods
|
||||
def __datadog_enable_trace!
|
||||
return super if @__datadog_enable_trace
|
||||
# intercepts request initialization to inject the tracing logic.
|
||||
def initialize(*)
|
||||
super
|
||||
|
||||
RequestTracer.new(self).call
|
||||
@__datadog_enable_trace = true
|
||||
return unless Datadog::Tracing.enabled?
|
||||
|
||||
RequestTracer.call(self)
|
||||
end
|
||||
end
|
||||
|
||||
module ConnectionMethods
|
||||
def send(request)
|
||||
request.__datadog_enable_trace!
|
||||
attr_reader :init_time
|
||||
|
||||
def initialize(*)
|
||||
super
|
||||
|
||||
@init_time = ::Datadog::Core::Utils::Time.now.utc
|
||||
end
|
||||
end
|
||||
end
|
||||
@ -179,7 +213,7 @@ module TRACING_MODULE # rubocop:disable Naming/ClassAndModuleCamelCase
|
||||
module Configuration
|
||||
# Default settings for httpx
|
||||
#
|
||||
class Settings < TRACING_MODULE::Contrib::Configuration::Settings
|
||||
class Settings < Datadog::Tracing::Contrib::Configuration::Settings
|
||||
DEFAULT_ERROR_HANDLER = lambda do |response|
|
||||
Datadog::Ext::HTTP::ERROR_RANGE.cover?(response.status)
|
||||
end
|
||||
@ -188,29 +222,82 @@ module TRACING_MODULE # rubocop:disable Naming/ClassAndModuleCamelCase
|
||||
option :distributed_tracing, default: true
|
||||
option :split_by_domain, default: false
|
||||
|
||||
option :enabled do |o|
|
||||
o.default { env_to_bool("DD_TRACE_HTTPX_ENABLED", true) }
|
||||
o.lazy
|
||||
if Gem::Version.new(DATADOG_VERSION::STRING) >= Gem::Version.new("1.13.0")
|
||||
option :enabled do |o|
|
||||
o.type :bool
|
||||
o.env "DD_TRACE_HTTPX_ENABLED"
|
||||
o.default true
|
||||
end
|
||||
|
||||
option :analytics_enabled do |o|
|
||||
o.type :bool
|
||||
o.env "DD_TRACE_HTTPX_ANALYTICS_ENABLED"
|
||||
o.default false
|
||||
end
|
||||
|
||||
option :analytics_sample_rate do |o|
|
||||
o.type :float
|
||||
o.env "DD_TRACE_HTTPX_ANALYTICS_SAMPLE_RATE"
|
||||
o.default 1.0
|
||||
end
|
||||
else
|
||||
option :enabled do |o|
|
||||
o.default { env_to_bool("DD_TRACE_HTTPX_ENABLED", true) }
|
||||
o.lazy
|
||||
end
|
||||
|
||||
option :analytics_enabled do |o|
|
||||
o.default { env_to_bool(%w[DD_TRACE_HTTPX_ANALYTICS_ENABLED DD_HTTPX_ANALYTICS_ENABLED], false) }
|
||||
o.lazy
|
||||
end
|
||||
|
||||
option :analytics_sample_rate do |o|
|
||||
o.default { env_to_float(%w[DD_TRACE_HTTPX_ANALYTICS_SAMPLE_RATE DD_HTTPX_ANALYTICS_SAMPLE_RATE], 1.0) }
|
||||
o.lazy
|
||||
end
|
||||
end
|
||||
|
||||
option :analytics_enabled do |o|
|
||||
o.default { env_to_bool(%w[DD_TRACE_HTTPX_ANALYTICS_ENABLED DD_HTTPX_ANALYTICS_ENABLED], false) }
|
||||
o.lazy
|
||||
if defined?(Datadog::Tracing::Contrib::SpanAttributeSchema)
|
||||
option :service_name do |o|
|
||||
o.default do
|
||||
Datadog::Tracing::Contrib::SpanAttributeSchema.fetch_service_name(
|
||||
"DD_TRACE_HTTPX_SERVICE_NAME",
|
||||
"httpx"
|
||||
)
|
||||
end
|
||||
o.lazy unless Gem::Version.new(DATADOG_VERSION::STRING) >= Gem::Version.new("1.13.0")
|
||||
end
|
||||
else
|
||||
option :service_name do |o|
|
||||
o.default do
|
||||
ENV.fetch("DD_TRACE_HTTPX_SERVICE_NAME", "httpx")
|
||||
end
|
||||
o.lazy unless Gem::Version.new(DATADOG_VERSION::STRING) >= Gem::Version.new("1.13.0")
|
||||
end
|
||||
end
|
||||
|
||||
option :analytics_sample_rate do |o|
|
||||
o.default { env_to_float(%w[DD_TRACE_HTTPX_ANALYTICS_SAMPLE_RATE DD_HTTPX_ANALYTICS_SAMPLE_RATE], 1.0) }
|
||||
o.lazy
|
||||
end
|
||||
option :distributed_tracing, default: true
|
||||
|
||||
option :error_handler, default: DEFAULT_ERROR_HANDLER
|
||||
if Gem::Version.new(DATADOG_VERSION::STRING) >= Gem::Version.new("1.15.0")
|
||||
option :error_handler do |o|
|
||||
o.type :proc
|
||||
o.default_proc(&DEFAULT_ERROR_HANDLER)
|
||||
end
|
||||
elsif Gem::Version.new(DATADOG_VERSION::STRING) >= Gem::Version.new("1.13.0")
|
||||
option :error_handler do |o|
|
||||
o.type :proc
|
||||
o.experimental_default_proc(&DEFAULT_ERROR_HANDLER)
|
||||
end
|
||||
else
|
||||
option :error_handler, default: DEFAULT_ERROR_HANDLER
|
||||
end
|
||||
end
|
||||
end
|
||||
|
||||
# Patcher enables patching of 'httpx' with datadog components.
|
||||
#
|
||||
module Patcher
|
||||
include TRACING_MODULE::Contrib::Patcher
|
||||
include Datadog::Tracing::Contrib::Patcher
|
||||
|
||||
module_function
|
||||
|
||||
@ -233,7 +320,6 @@ module TRACING_MODULE # rubocop:disable Naming/ClassAndModuleCamelCase
|
||||
class Integration
|
||||
include Contrib::Integration
|
||||
|
||||
# MINIMUM_VERSION = Gem::Version.new('0.11.0')
|
||||
MINIMUM_VERSION = Gem::Version.new("0.10.2")
|
||||
|
||||
register_as :httpx
|
||||
@ -250,14 +336,8 @@ module TRACING_MODULE # rubocop:disable Naming/ClassAndModuleCamelCase
|
||||
super && version >= MINIMUM_VERSION
|
||||
end
|
||||
|
||||
if defined?(::DDTrace) && ::DDTrace::VERSION::STRING >= "1.0.0"
|
||||
def new_configuration
|
||||
Configuration::Settings.new
|
||||
end
|
||||
else
|
||||
def default_configuration
|
||||
Configuration::Settings.new
|
||||
end
|
||||
def new_configuration
|
||||
Configuration::Settings.new
|
||||
end
|
||||
|
||||
def patcher
|
||||
|
@ -7,69 +7,112 @@ require "faraday"
|
||||
module Faraday
|
||||
class Adapter
|
||||
class HTTPX < Faraday::Adapter
|
||||
# :nocov:
|
||||
SSL_ERROR = if defined?(Faraday::SSLError)
|
||||
Faraday::SSLError
|
||||
else
|
||||
Faraday::Error::SSLError
|
||||
end
|
||||
|
||||
CONNECTION_FAILED_ERROR = if defined?(Faraday::ConnectionFailed)
|
||||
Faraday::ConnectionFailed
|
||||
else
|
||||
Faraday::Error::ConnectionFailed
|
||||
end
|
||||
# :nocov:
|
||||
|
||||
unless Faraday::RequestOptions.method_defined?(:stream_response?)
|
||||
module RequestOptionsExtensions
|
||||
refine Faraday::RequestOptions do
|
||||
def stream_response?
|
||||
false
|
||||
end
|
||||
end
|
||||
end
|
||||
using RequestOptionsExtensions
|
||||
end
|
||||
|
||||
module RequestMixin
|
||||
using ::HTTPX::HashExtensions
|
||||
def build_connection(env)
|
||||
return @connection if defined?(@connection)
|
||||
|
||||
@connection = ::HTTPX.plugin(:persistent).plugin(ReasonPlugin)
|
||||
@connection = @connection.with(@connection_options) unless @connection_options.empty?
|
||||
connection_opts = options_from_env(env)
|
||||
|
||||
if (bind = env.request.bind)
|
||||
@bind = TCPSocket.new(bind[:host], bind[:port])
|
||||
connection_opts[:io] = @bind
|
||||
end
|
||||
@connection = @connection.with(connection_opts)
|
||||
|
||||
if (proxy = env.request.proxy)
|
||||
proxy_options = { uri: proxy.uri }
|
||||
proxy_options[:username] = proxy.user if proxy.user
|
||||
proxy_options[:password] = proxy.password if proxy.password
|
||||
|
||||
@connection = @connection.plugin(:proxy).with(proxy: proxy_options)
|
||||
end
|
||||
@connection = @connection.plugin(OnDataPlugin) if env.request.stream_response?
|
||||
|
||||
@connection = @config_block.call(@connection) || @connection if @config_block
|
||||
@connection
|
||||
end
|
||||
|
||||
def close
|
||||
@connection.close if @connection
|
||||
@bind.close if @bind
|
||||
end
|
||||
|
||||
private
|
||||
|
||||
def connect(env, &blk)
|
||||
connection(env, &blk)
|
||||
rescue ::HTTPX::TLSError => e
|
||||
raise Faraday::SSLError, e
|
||||
rescue Errno::ECONNABORTED,
|
||||
Errno::ECONNREFUSED,
|
||||
Errno::ECONNRESET,
|
||||
Errno::EHOSTUNREACH,
|
||||
Errno::EINVAL,
|
||||
Errno::ENETUNREACH,
|
||||
Errno::EPIPE,
|
||||
::HTTPX::ConnectionError => e
|
||||
raise Faraday::ConnectionFailed, e
|
||||
end
|
||||
|
||||
def build_request(env)
|
||||
meth = env[:method]
|
||||
|
||||
request_options = {
|
||||
headers: env.request_headers,
|
||||
body: env.body,
|
||||
**options_from_env(env),
|
||||
}
|
||||
[meth.to_s.upcase, env.url, request_options]
|
||||
end
|
||||
|
||||
def options_from_env(env)
|
||||
timeout_options = {
|
||||
connect_timeout: env.request.open_timeout,
|
||||
operation_timeout: env.request.timeout,
|
||||
}.compact
|
||||
timeout_options = {}
|
||||
req_opts = env.request
|
||||
if (sec = request_timeout(:read, req_opts))
|
||||
timeout_options[:read_timeout] = sec
|
||||
end
|
||||
|
||||
options = {
|
||||
ssl: {},
|
||||
if (sec = request_timeout(:write, req_opts))
|
||||
timeout_options[:write_timeout] = sec
|
||||
end
|
||||
|
||||
if (sec = request_timeout(:open, req_opts))
|
||||
timeout_options[:connect_timeout] = sec
|
||||
end
|
||||
|
||||
{
|
||||
ssl: ssl_options_from_env(env),
|
||||
timeout: timeout_options,
|
||||
}
|
||||
end
|
||||
|
||||
options[:ssl][:verify_mode] = OpenSSL::SSL::VERIFY_PEER if env.ssl.verify
|
||||
options[:ssl][:ca_file] = env.ssl.ca_file if env.ssl.ca_file
|
||||
options[:ssl][:ca_path] = env.ssl.ca_path if env.ssl.ca_path
|
||||
options[:ssl][:cert_store] = env.ssl.cert_store if env.ssl.cert_store
|
||||
options[:ssl][:cert] = env.ssl.client_cert if env.ssl.client_cert
|
||||
options[:ssl][:key] = env.ssl.client_key if env.ssl.client_key
|
||||
options[:ssl][:ssl_version] = env.ssl.version if env.ssl.version
|
||||
options[:ssl][:verify_depth] = env.ssl.verify_depth if env.ssl.verify_depth
|
||||
options[:ssl][:min_version] = env.ssl.min_version if env.ssl.min_version
|
||||
options[:ssl][:max_version] = env.ssl.max_version if env.ssl.max_version
|
||||
if defined?(::OpenSSL)
|
||||
def ssl_options_from_env(env)
|
||||
ssl_options = {}
|
||||
|
||||
options
|
||||
unless env.ssl.verify.nil?
|
||||
ssl_options[:verify_mode] = env.ssl.verify ? OpenSSL::SSL::VERIFY_PEER : OpenSSL::SSL::VERIFY_NONE
|
||||
end
|
||||
|
||||
ssl_options[:ca_file] = env.ssl.ca_file if env.ssl.ca_file
|
||||
ssl_options[:ca_path] = env.ssl.ca_path if env.ssl.ca_path
|
||||
ssl_options[:cert_store] = env.ssl.cert_store if env.ssl.cert_store
|
||||
ssl_options[:cert] = env.ssl.client_cert if env.ssl.client_cert
|
||||
ssl_options[:key] = env.ssl.client_key if env.ssl.client_key
|
||||
ssl_options[:ssl_version] = env.ssl.version if env.ssl.version
|
||||
ssl_options[:verify_depth] = env.ssl.verify_depth if env.ssl.verify_depth
|
||||
ssl_options[:min_version] = env.ssl.min_version if env.ssl.min_version
|
||||
ssl_options[:max_version] = env.ssl.max_version if env.ssl.max_version
|
||||
ssl_options
|
||||
end
|
||||
else
|
||||
# :nocov:
|
||||
def ssl_options_from_env(*)
|
||||
{}
|
||||
end
|
||||
# :nocov:
|
||||
end
|
||||
end
|
||||
|
||||
@ -100,30 +143,15 @@ module Faraday
|
||||
end
|
||||
|
||||
module ReasonPlugin
|
||||
if RUBY_VERSION < "2.5"
|
||||
def self.load_dependencies(*)
|
||||
require "webrick"
|
||||
end
|
||||
else
|
||||
def self.load_dependencies(*)
|
||||
require "net/http/status"
|
||||
end
|
||||
def self.load_dependencies(*)
|
||||
require "net/http/status"
|
||||
end
|
||||
module ResponseMethods
|
||||
if RUBY_VERSION < "2.5"
|
||||
def reason
|
||||
WEBrick::HTTPStatus::StatusMessage.fetch(@status)
|
||||
end
|
||||
else
|
||||
def reason
|
||||
Net::HTTP::STATUS_CODES.fetch(@status)
|
||||
end
|
||||
end
|
||||
end
|
||||
end
|
||||
|
||||
def self.session
|
||||
@session ||= ::HTTPX.plugin(:compression).plugin(:persistent).plugin(ReasonPlugin)
|
||||
module ResponseMethods
|
||||
def reason
|
||||
Net::HTTP::STATUS_CODES.fetch(@status, "Non-Standard status code")
|
||||
end
|
||||
end
|
||||
end
|
||||
|
||||
class ParallelManager
|
||||
@ -158,8 +186,9 @@ module Faraday
|
||||
|
||||
include RequestMixin
|
||||
|
||||
def initialize
|
||||
def initialize(options)
|
||||
@handlers = []
|
||||
@connection_options = options
|
||||
end
|
||||
|
||||
def enqueue(request)
|
||||
@ -173,40 +202,51 @@ module Faraday
|
||||
|
||||
env = @handlers.last.env
|
||||
|
||||
session = HTTPX.session.with(options_from_env(env))
|
||||
session = session.plugin(:proxy).with(proxy: { uri: env.request.proxy }) if env.request.proxy
|
||||
session = session.plugin(OnDataPlugin) if env.request.stream_response?
|
||||
connect(env) do |session|
|
||||
requests = @handlers.map { |handler| session.build_request(*build_request(handler.env)) }
|
||||
|
||||
requests = @handlers.map { |handler| session.build_request(*build_request(handler.env)) }
|
||||
if env.request.stream_response?
|
||||
requests.each do |request|
|
||||
request.response_on_data = env.request.on_data
|
||||
end
|
||||
end
|
||||
|
||||
if env.request.stream_response?
|
||||
requests.each do |request|
|
||||
request.response_on_data = env.request.on_data
|
||||
responses = session.request(*requests)
|
||||
Array(responses).each_with_index do |response, index|
|
||||
handler = @handlers[index]
|
||||
handler.on_response.call(response)
|
||||
handler.on_complete.call(handler.env) if handler.on_complete
|
||||
end
|
||||
end
|
||||
rescue ::HTTPX::TimeoutError => e
|
||||
raise Faraday::TimeoutError, e
|
||||
end
|
||||
|
||||
responses = session.request(*requests)
|
||||
Array(responses).each_with_index do |response, index|
|
||||
handler = @handlers[index]
|
||||
handler.on_response.call(response)
|
||||
handler.on_complete.call(handler.env)
|
||||
end
|
||||
# from Faraday::Adapter#connection
|
||||
def connection(env)
|
||||
conn = build_connection(env)
|
||||
return conn unless block_given?
|
||||
|
||||
yield conn
|
||||
end
|
||||
|
||||
private
|
||||
|
||||
# from Faraday::Adapter#request_timeout
|
||||
def request_timeout(type, options)
|
||||
key = Faraday::Adapter::TIMEOUT_KEYS[type]
|
||||
options[key] || options[:timeout]
|
||||
end
|
||||
end
|
||||
|
||||
self.supports_parallel = true
|
||||
|
||||
class << self
|
||||
def setup_parallel_manager
|
||||
ParallelManager.new
|
||||
def setup_parallel_manager(options = {})
|
||||
ParallelManager.new(options)
|
||||
end
|
||||
end
|
||||
|
||||
def initialize(app, options = {})
|
||||
super(app)
|
||||
@session_options = options
|
||||
end
|
||||
|
||||
def call(env)
|
||||
super
|
||||
if parallel?(env)
|
||||
@ -224,38 +264,30 @@ module Faraday
|
||||
return handler
|
||||
end
|
||||
|
||||
session = HTTPX.session
|
||||
session = session.with(@session_options) unless @session_options.empty?
|
||||
session = session.with(options_from_env(env))
|
||||
session = session.plugin(:proxy).with(proxy: { uri: env.request.proxy }) if env.request.proxy
|
||||
session = session.plugin(OnDataPlugin) if env.request.stream_response?
|
||||
|
||||
request = session.build_request(*build_request(env))
|
||||
|
||||
request.response_on_data = env.request.on_data if env.request.stream_response?
|
||||
|
||||
response = session.request(request)
|
||||
# do not call #raise_for_status for HTTP 4xx or 5xx, as faraday has a middleware for that.
|
||||
response.raise_for_status unless response.is_a?(::HTTPX::Response)
|
||||
response = connect_and_request(env)
|
||||
save_response(env, response.status, response.body.to_s, response.headers, response.reason) do |response_headers|
|
||||
response_headers.merge!(response.headers)
|
||||
end
|
||||
@app.call(env)
|
||||
rescue ::HTTPX::TLSError => e
|
||||
raise SSL_ERROR, e
|
||||
rescue Errno::ECONNABORTED,
|
||||
Errno::ECONNREFUSED,
|
||||
Errno::ECONNRESET,
|
||||
Errno::EHOSTUNREACH,
|
||||
Errno::EINVAL,
|
||||
Errno::ENETUNREACH,
|
||||
Errno::EPIPE,
|
||||
::HTTPX::ConnectionError => e
|
||||
raise CONNECTION_FAILED_ERROR, e
|
||||
end
|
||||
|
||||
private
|
||||
|
||||
def connect_and_request(env)
|
||||
connect(env) do |session|
|
||||
request = session.build_request(*build_request(env))
|
||||
|
||||
request.response_on_data = env.request.on_data if env.request.stream_response?
|
||||
|
||||
response = session.request(request)
|
||||
# do not call #raise_for_status for HTTP 4xx or 5xx, as faraday has a middleware for that.
|
||||
response.raise_for_status unless response.is_a?(::HTTPX::Response)
|
||||
response
|
||||
end
|
||||
rescue ::HTTPX::TimeoutError => e
|
||||
raise Faraday::TimeoutError, e
|
||||
end
|
||||
|
||||
def parallel?(env)
|
||||
env[:parallel_manager]
|
||||
end
|
||||
|
@ -27,6 +27,11 @@ module HTTPX::Plugins
|
||||
def set_sentry_trace_header(request, sentry_span)
|
||||
return unless sentry_span
|
||||
|
||||
config = ::Sentry.configuration
|
||||
url = request.uri.to_s
|
||||
|
||||
return unless config.propagate_traces && config.trace_propagation_targets.any? { |target| url.match?(target) }
|
||||
|
||||
trace = ::Sentry.get_current_client.generate_sentry_trace(sentry_span)
|
||||
request.headers[::Sentry::SENTRY_TRACE_HEADER_NAME] = trace if trace
|
||||
end
|
||||
@ -91,7 +96,7 @@ module HTTPX::Plugins
|
||||
|
||||
module RequestMethods
|
||||
def __sentry_enable_trace!
|
||||
return super if @__sentry_enable_trace
|
||||
return if @__sentry_enable_trace
|
||||
|
||||
Tracer.call(self)
|
||||
@__sentry_enable_trace = true
|
||||
@ -108,7 +113,7 @@ module HTTPX::Plugins
|
||||
end
|
||||
end
|
||||
|
||||
Sentry.register_patch do
|
||||
Sentry.register_patch(:httpx) do
|
||||
sentry_session = HTTPX.plugin(HTTPX::Plugins::Sentry)
|
||||
|
||||
HTTPX.send(:remove_const, :Session)
|
||||
|
@ -2,13 +2,8 @@
|
||||
|
||||
module WebMock
|
||||
module HttpLibAdapters
|
||||
if RUBY_VERSION < "2.5"
|
||||
require "webrick/httpstatus"
|
||||
HTTP_REASONS = WEBrick::HTTPStatus::StatusMessage
|
||||
else
|
||||
require "net/http/status"
|
||||
HTTP_REASONS = Net::HTTP::STATUS_CODES
|
||||
end
|
||||
require "net/http/status"
|
||||
HTTP_REASONS = Net::HTTP::STATUS_CODES
|
||||
|
||||
#
|
||||
# HTTPX plugin for webmock.
|
||||
@ -25,7 +20,7 @@ module WebMock
|
||||
WebMock::RequestSignature.new(
|
||||
request.verb.downcase.to_sym,
|
||||
uri.to_s,
|
||||
body: request.body.each.to_a.join,
|
||||
body: request.body.to_s,
|
||||
headers: request.headers.to_h
|
||||
)
|
||||
end
|
||||
@ -43,27 +38,53 @@ module WebMock
|
||||
|
||||
return build_error_response(request, webmock_response.exception) if webmock_response.exception
|
||||
|
||||
response = request.options.response_class.new(request,
|
||||
webmock_response.status[0],
|
||||
"2.0",
|
||||
webmock_response.headers)
|
||||
response << webmock_response.body.dup
|
||||
response
|
||||
request.options.response_class.new(request,
|
||||
webmock_response.status[0],
|
||||
"2.0",
|
||||
webmock_response.headers).tap do |res|
|
||||
res.mocked = true
|
||||
end
|
||||
end
|
||||
|
||||
def build_error_response(request, exception)
|
||||
HTTPX::ErrorResponse.new(request, exception, request.options)
|
||||
HTTPX::ErrorResponse.new(request, exception)
|
||||
end
|
||||
end
|
||||
|
||||
module InstanceMethods
|
||||
def build_connection(*)
|
||||
connection = super
|
||||
private
|
||||
|
||||
def do_init_connection(connection, selector)
|
||||
super
|
||||
|
||||
connection.once(:unmock_connection) do
|
||||
pool.__send__(:resolve_connection, connection)
|
||||
pool.__send__(:unregister_connection, connection) unless connection.addresses
|
||||
next unless connection.current_session == self
|
||||
|
||||
unless connection.addresses
|
||||
# reset Happy Eyeballs, fail early
|
||||
connection.sibling = nil
|
||||
|
||||
deselect_connection(connection, selector)
|
||||
end
|
||||
resolve_connection(connection, selector)
|
||||
end
|
||||
connection
|
||||
end
|
||||
end
|
||||
|
||||
module ResponseMethods
|
||||
attr_accessor :mocked
|
||||
|
||||
def initialize(*)
|
||||
super
|
||||
@mocked = false
|
||||
end
|
||||
end
|
||||
|
||||
module ResponseBodyMethods
|
||||
def decode_chunk(chunk)
|
||||
return chunk if @response.mocked
|
||||
|
||||
super
|
||||
end
|
||||
end
|
||||
|
||||
@ -85,6 +106,10 @@ module WebMock
|
||||
super
|
||||
end
|
||||
|
||||
def terminate
|
||||
force_reset
|
||||
end
|
||||
|
||||
def send(request)
|
||||
request_signature = Plugin.build_webmock_request_signature(request)
|
||||
WebMock::RequestRegistry.instance.requested_signatures.put(request_signature)
|
||||
@ -93,8 +118,16 @@ module WebMock
|
||||
response = Plugin.build_from_webmock_response(request, mock_response)
|
||||
WebMock::CallbackRegistry.invoke_callbacks({ lib: :httpx }, request_signature, mock_response)
|
||||
log { "mocking #{request.uri} with #{mock_response.inspect}" }
|
||||
request.transition(:headers)
|
||||
request.transition(:body)
|
||||
request.transition(:trailers)
|
||||
request.transition(:done)
|
||||
response.finish!
|
||||
request.response = response
|
||||
request.emit(:response, response)
|
||||
request_signature.headers = request.headers.to_h
|
||||
|
||||
response << mock_response.body.dup unless response.is_a?(HTTPX::ErrorResponse)
|
||||
elsif WebMock.net_connect_allowed?(request_signature.uri)
|
||||
if WebMock::CallbackRegistry.any_callbacks?
|
||||
request.on(:response) do |resp|
|
||||
|
@ -4,7 +4,59 @@ require "strscan"
|
||||
|
||||
module HTTPX
|
||||
module AltSvc
|
||||
@altsvc_mutex = Mutex.new
|
||||
# makes connections able to accept requests destined to primary service.
|
||||
module ConnectionMixin
|
||||
using URIExtensions
|
||||
|
||||
def send(request)
|
||||
request.headers["alt-used"] = @origin.authority if @parser && !@write_buffer.full? && match_altsvcs?(request.uri)
|
||||
|
||||
super
|
||||
end
|
||||
|
||||
def match?(uri, options)
|
||||
return false if !used? && (@state == :closing || @state == :closed)
|
||||
|
||||
match_altsvcs?(uri) && match_altsvc_options?(uri, options)
|
||||
end
|
||||
|
||||
private
|
||||
|
||||
# checks if this is connection is an alternative service of
|
||||
# +uri+
|
||||
def match_altsvcs?(uri)
|
||||
@origins.any? { |origin| altsvc_match?(uri, origin) } ||
|
||||
AltSvc.cached_altsvc(@origin).any? do |altsvc|
|
||||
origin = altsvc["origin"]
|
||||
altsvc_match?(origin, uri.origin)
|
||||
end
|
||||
end
|
||||
|
||||
def match_altsvc_options?(uri, options)
|
||||
return @options == options unless @options.ssl.all? do |k, v|
|
||||
v == (k == :hostname ? uri.host : options.ssl[k])
|
||||
end
|
||||
|
||||
@options.options_equals?(options, Options::REQUEST_BODY_IVARS + %i[@ssl])
|
||||
end
|
||||
|
||||
def altsvc_match?(uri, other_uri)
|
||||
other_uri = URI(other_uri)
|
||||
|
||||
uri.origin == other_uri.origin || begin
|
||||
case uri.scheme
|
||||
when "h2"
|
||||
(other_uri.scheme == "https" || other_uri.scheme == "h2") &&
|
||||
uri.host == other_uri.host &&
|
||||
uri.port == other_uri.port
|
||||
else
|
||||
false
|
||||
end
|
||||
end
|
||||
end
|
||||
end
|
||||
|
||||
@altsvc_mutex = Thread::Mutex.new
|
||||
@altsvcs = Hash.new { |h, k| h[k] = [] }
|
||||
|
||||
module_function
|
||||
@ -46,7 +98,7 @@ module HTTPX
|
||||
|
||||
altsvc = response.headers["alt-svc"]
|
||||
|
||||
# https://tools.ietf.org/html/rfc7838#section-3
|
||||
# https://datatracker.ietf.org/doc/html/rfc7838#section-3
|
||||
# A field value containing the special value "clear" indicates that the
|
||||
# origin requests all alternatives for that origin to be invalidated
|
||||
# (including those specified in the same response, in case of an
|
||||
@ -79,9 +131,9 @@ module HTTPX
|
||||
scanner.skip(/;/)
|
||||
break if scanner.eos? || scanner.scan(/ *, */)
|
||||
end
|
||||
alt_params = Hash[alt_params.map { |field| field.split("=") }]
|
||||
alt_params = Hash[alt_params.map { |field| field.split("=", 2) }]
|
||||
|
||||
alt_proto, alt_authority = alt_service.split("=")
|
||||
alt_proto, alt_authority = alt_service.split("=", 2)
|
||||
alt_origin = parse_altsvc_origin(alt_proto, alt_authority)
|
||||
return unless alt_origin
|
||||
|
||||
@ -98,29 +150,14 @@ module HTTPX
|
||||
end
|
||||
end
|
||||
|
||||
# :nocov:
|
||||
if RUBY_VERSION < "2.2"
|
||||
def parse_altsvc_origin(alt_proto, alt_origin)
|
||||
alt_scheme = parse_altsvc_scheme(alt_proto) or return
|
||||
def parse_altsvc_origin(alt_proto, alt_origin)
|
||||
alt_scheme = parse_altsvc_scheme(alt_proto)
|
||||
|
||||
alt_origin = alt_origin[1..-2] if alt_origin.start_with?("\"") && alt_origin.end_with?("\"")
|
||||
if alt_origin.start_with?(":")
|
||||
alt_origin = "#{alt_scheme}://dummy#{alt_origin}"
|
||||
uri = URI.parse(alt_origin)
|
||||
uri.host = nil
|
||||
uri
|
||||
else
|
||||
URI.parse("#{alt_scheme}://#{alt_origin}")
|
||||
end
|
||||
end
|
||||
else
|
||||
def parse_altsvc_origin(alt_proto, alt_origin)
|
||||
alt_scheme = parse_altsvc_scheme(alt_proto) or return
|
||||
alt_origin = alt_origin[1..-2] if alt_origin.start_with?("\"") && alt_origin.end_with?("\"")
|
||||
return unless alt_scheme
|
||||
|
||||
URI.parse("#{alt_scheme}://#{alt_origin}")
|
||||
end
|
||||
alt_origin = alt_origin[1..-2] if alt_origin.start_with?("\"") && alt_origin.end_with?("\"")
|
||||
|
||||
URI.parse("#{alt_scheme}://#{alt_origin}")
|
||||
end
|
||||
# :nocov:
|
||||
end
|
||||
end
|
||||
|
27
lib/httpx/base64.rb
Normal file
27
lib/httpx/base64.rb
Normal file
@ -0,0 +1,27 @@
|
||||
# frozen_string_literal: true
|
||||
|
||||
if RUBY_VERSION < "3.3.0"
|
||||
require "base64"
|
||||
elsif !defined?(Base64)
|
||||
module HTTPX
|
||||
# require "base64" will not be a default gem after ruby 3.4.0
|
||||
module Base64
|
||||
module_function
|
||||
|
||||
def decode64(str)
|
||||
str.unpack1("m")
|
||||
end
|
||||
|
||||
def strict_encode64(bin)
|
||||
[bin].pack("m0")
|
||||
end
|
||||
|
||||
def urlsafe_encode64(bin, padding: true)
|
||||
str = strict_encode64(bin)
|
||||
str.chomp!("==") or str.chomp!("=") unless padding
|
||||
str.tr!("+/", "-_")
|
||||
str
|
||||
end
|
||||
end
|
||||
end
|
||||
end
|
@ -3,11 +3,17 @@
|
||||
require "forwardable"
|
||||
|
||||
module HTTPX
|
||||
# Internal class to abstract a string buffer, by wrapping a string and providing the
|
||||
# minimum possible API and functionality required.
|
||||
#
|
||||
# buffer = Buffer.new(640)
|
||||
# buffer.full? #=> false
|
||||
# buffer << "aa"
|
||||
# buffer.capacity #=> 638
|
||||
#
|
||||
class Buffer
|
||||
extend Forwardable
|
||||
|
||||
def_delegator :@buffer, :<<
|
||||
|
||||
def_delegator :@buffer, :to_s
|
||||
|
||||
def_delegator :@buffer, :to_str
|
||||
@ -22,9 +28,22 @@ module HTTPX
|
||||
|
||||
attr_reader :limit
|
||||
|
||||
def initialize(limit)
|
||||
@buffer = "".b
|
||||
@limit = limit
|
||||
if RUBY_VERSION >= "3.4.0"
|
||||
def initialize(limit)
|
||||
@buffer = String.new("", encoding: Encoding::BINARY, capacity: limit)
|
||||
@limit = limit
|
||||
end
|
||||
|
||||
def <<(chunk)
|
||||
@buffer.append_as_bytes(chunk)
|
||||
end
|
||||
else
|
||||
def initialize(limit)
|
||||
@buffer = "".b
|
||||
@limit = limit
|
||||
end
|
||||
|
||||
def_delegator :@buffer, :<<
|
||||
end
|
||||
|
||||
def full?
|
||||
|
@ -4,6 +4,7 @@ module HTTPX
|
||||
module Callbacks
|
||||
def on(type, &action)
|
||||
callbacks(type) << action
|
||||
action
|
||||
end
|
||||
|
||||
def once(type, &block)
|
||||
@ -13,17 +14,13 @@ module HTTPX
|
||||
end
|
||||
end
|
||||
|
||||
def only(type, &block)
|
||||
callbacks(type).clear
|
||||
on(type, &block)
|
||||
end
|
||||
|
||||
def emit(type, *args)
|
||||
log { "emit #{type.inspect} callbacks" } if respond_to?(:log)
|
||||
callbacks(type).delete_if { |pr| :delete == pr.call(*args) } # rubocop:disable Style/YodaCondition
|
||||
end
|
||||
|
||||
def callbacks_for?(type)
|
||||
@callbacks.key?(type) && @callbacks[type].any?
|
||||
@callbacks && @callbacks.key?(type) && @callbacks[type].any?
|
||||
end
|
||||
|
||||
protected
|
||||
|
@ -1,6 +1,8 @@
|
||||
# frozen_string_literal: true
|
||||
|
||||
module HTTPX
|
||||
# Session mixin, implements most of the APIs that the users call.
|
||||
# delegates to a default session when extended.
|
||||
module Chainable
|
||||
%w[head get post put delete trace options connect patch].each do |meth|
|
||||
class_eval(<<-MOD, __FILE__, __LINE__ + 1)
|
||||
@ -10,80 +12,95 @@ module HTTPX
|
||||
MOD
|
||||
end
|
||||
|
||||
# delegates to the default session (see HTTPX::Session#request).
|
||||
def request(*args, **options)
|
||||
branch(default_options).request(*args, **options)
|
||||
end
|
||||
|
||||
# :nocov:
|
||||
def timeout(**args)
|
||||
warn ":#{__method__} is deprecated, use :with_timeout instead"
|
||||
with(timeout: args)
|
||||
end
|
||||
|
||||
def headers(headers)
|
||||
warn ":#{__method__} is deprecated, use :with_headers instead"
|
||||
with(headers: headers)
|
||||
end
|
||||
# :nocov:
|
||||
|
||||
def accept(type)
|
||||
with(headers: { "accept" => String(type) })
|
||||
end
|
||||
|
||||
# delegates to the default session (see HTTPX::Session#wrap).
|
||||
def wrap(&blk)
|
||||
branch(default_options).wrap(&blk)
|
||||
end
|
||||
|
||||
# returns a new instance loaded with the +pl+ plugin and +options+.
|
||||
def plugin(pl, options = nil, &blk)
|
||||
klass = is_a?(Session) ? self.class : Session
|
||||
klass = is_a?(S) ? self.class : Session
|
||||
klass = Class.new(klass)
|
||||
klass.instance_variable_set(:@default_options, klass.default_options.merge(default_options))
|
||||
klass.plugin(pl, options, &blk).new
|
||||
end
|
||||
|
||||
# deprecated
|
||||
# :nocov:
|
||||
def plugins(pls)
|
||||
warn ":#{__method__} is deprecated, use :plugin instead"
|
||||
klass = is_a?(Session) ? self.class : Session
|
||||
klass = Class.new(klass)
|
||||
klass.instance_variable_set(:@default_options, klass.default_options.merge(default_options))
|
||||
klass.plugins(pls).new
|
||||
end
|
||||
# :nocov:
|
||||
|
||||
# returns a new instance loaded with +options+.
|
||||
def with(options, &blk)
|
||||
branch(default_options.merge(options), &blk)
|
||||
end
|
||||
|
||||
private
|
||||
|
||||
# returns default instance of HTTPX::Options.
|
||||
def default_options
|
||||
@options || Session.default_options
|
||||
end
|
||||
|
||||
# returns a default instance of HTTPX::Session.
|
||||
def branch(options, &blk)
|
||||
return self.class.new(options, &blk) if is_a?(Session)
|
||||
return self.class.new(options, &blk) if is_a?(S)
|
||||
|
||||
Session.new(options, &blk)
|
||||
end
|
||||
|
||||
def method_missing(meth, *args, **options)
|
||||
return super unless meth =~ /\Awith_(.+)/
|
||||
def method_missing(meth, *args, **options, &blk)
|
||||
case meth
|
||||
when /\Awith_(.+)/
|
||||
|
||||
option = Regexp.last_match(1)
|
||||
option = Regexp.last_match(1)
|
||||
|
||||
return super unless option
|
||||
return super unless option
|
||||
|
||||
with(option.to_sym => (args.first || options))
|
||||
with(option.to_sym => args.first || options)
|
||||
when /\Aon_(.+)/
|
||||
callback = Regexp.last_match(1)
|
||||
|
||||
return super unless %w[
|
||||
connection_opened connection_closed
|
||||
request_error
|
||||
request_started request_body_chunk request_completed
|
||||
response_started response_body_chunk response_completed
|
||||
].include?(callback)
|
||||
|
||||
warn "DEPRECATION WARNING: calling `.#{meth}` on plain HTTPX sessions is deprecated. " \
|
||||
"Use `HTTPX.plugin(:callbacks).#{meth}` instead."
|
||||
|
||||
plugin(:callbacks).__send__(meth, *args, **options, &blk)
|
||||
else
|
||||
super
|
||||
end
|
||||
end
|
||||
|
||||
def respond_to_missing?(meth, *)
|
||||
return super unless meth =~ /\Awith_(.+)/
|
||||
case meth
|
||||
when /\Awith_(.+)/
|
||||
option = Regexp.last_match(1)
|
||||
|
||||
option = Regexp.last_match(1)
|
||||
default_options.respond_to?(option) || super
|
||||
when /\Aon_(.+)/
|
||||
callback = Regexp.last_match(1)
|
||||
|
||||
default_options.respond_to?(option) || super
|
||||
%w[
|
||||
connection_opened connection_closed
|
||||
request_error
|
||||
request_started request_body_chunk request_completed
|
||||
response_started response_body_chunk response_completed
|
||||
].include?(callback) || super
|
||||
else
|
||||
super
|
||||
end
|
||||
end
|
||||
end
|
||||
|
||||
extend Chainable
|
||||
end
|
||||
|
@ -33,7 +33,6 @@ module HTTPX
|
||||
include Callbacks
|
||||
|
||||
using URIExtensions
|
||||
using NumericExtensions
|
||||
|
||||
require "httpx/connection/http2"
|
||||
require "httpx/connection/http1"
|
||||
@ -42,21 +41,33 @@ module HTTPX
|
||||
|
||||
def_delegator :@write_buffer, :empty?
|
||||
|
||||
attr_reader :type, :io, :origin, :origins, :state, :pending, :options
|
||||
attr_reader :type, :io, :origin, :origins, :state, :pending, :options, :ssl_session, :sibling
|
||||
|
||||
attr_writer :timers
|
||||
attr_writer :current_selector
|
||||
|
||||
attr_accessor :family
|
||||
attr_accessor :current_session, :family
|
||||
|
||||
def initialize(type, uri, options)
|
||||
@type = type
|
||||
protected :sibling
|
||||
|
||||
def initialize(uri, options)
|
||||
@current_session = @current_selector =
|
||||
@parser = @sibling = @coalesced_connection =
|
||||
@io = @ssl_session = @timeout =
|
||||
@connected_at = @response_received_at = nil
|
||||
|
||||
@exhausted = @cloned = @main_sibling = false
|
||||
|
||||
@options = Options.new(options)
|
||||
@type = initialize_type(uri, @options)
|
||||
@origins = [uri.origin]
|
||||
@origin = Utils.to_uri(uri.origin)
|
||||
@options = Options.new(options)
|
||||
@window_size = @options.window_size
|
||||
@read_buffer = Buffer.new(@options.buffer_size)
|
||||
@write_buffer = Buffer.new(@options.buffer_size)
|
||||
@pending = []
|
||||
@inflight = 0
|
||||
@keep_alive_timeout = @options.timeout[:keep_alive_timeout]
|
||||
|
||||
on(:error, &method(:on_error))
|
||||
if @options.io
|
||||
# if there's an already open IO, get its
|
||||
@ -67,14 +78,39 @@ module HTTPX
|
||||
else
|
||||
transition(:idle)
|
||||
end
|
||||
on(:close) do
|
||||
next if @exhausted # it'll reset
|
||||
|
||||
@inflight = 0
|
||||
@keep_alive_timeout = @options.timeout[:keep_alive_timeout]
|
||||
@total_timeout = @options.timeout[:total_timeout]
|
||||
# may be called after ":close" above, so after the connection has been checked back in.
|
||||
# next unless @current_session
|
||||
|
||||
next unless @current_session
|
||||
|
||||
@current_session.deselect_connection(self, @current_selector, @cloned)
|
||||
end
|
||||
on(:terminate) do
|
||||
next if @exhausted # it'll reset
|
||||
|
||||
current_session = @current_session
|
||||
current_selector = @current_selector
|
||||
|
||||
# may be called after ":close" above, so after the connection has been checked back in.
|
||||
next unless current_session && current_selector
|
||||
|
||||
current_session.deselect_connection(self, current_selector)
|
||||
end
|
||||
|
||||
on(:altsvc) do |alt_origin, origin, alt_params|
|
||||
build_altsvc_connection(alt_origin, origin, alt_params)
|
||||
end
|
||||
|
||||
self.addresses = @options.addresses if @options.addresses
|
||||
end
|
||||
|
||||
def peer
|
||||
@origin
|
||||
end
|
||||
|
||||
# this is a semi-private method, to be used by the resolver
|
||||
# to initiate the io object.
|
||||
def addresses=(addrs)
|
||||
@ -90,27 +126,27 @@ module HTTPX
|
||||
end
|
||||
|
||||
def match?(uri, options)
|
||||
return false if @state == :closing || @state == :closed
|
||||
|
||||
return false if exhausted?
|
||||
return false if !used? && (@state == :closing || @state == :closed)
|
||||
|
||||
(
|
||||
(
|
||||
@origins.include?(uri.origin) &&
|
||||
# if there is more than one origin to match, it means that this connection
|
||||
# was the result of coalescing. To prevent blind trust in the case where the
|
||||
# origin came from an ORIGIN frame, we're going to verify the hostname with the
|
||||
# SSL certificate
|
||||
(@origins.size == 1 || @origin == uri.origin || (@io.is_a?(SSL) && @io.verify_hostname(uri.host)))
|
||||
) && @options == options
|
||||
) || (match_altsvcs?(uri) && match_altsvc_options?(uri, options))
|
||||
@origins.include?(uri.origin) &&
|
||||
# if there is more than one origin to match, it means that this connection
|
||||
# was the result of coalescing. To prevent blind trust in the case where the
|
||||
# origin came from an ORIGIN frame, we're going to verify the hostname with the
|
||||
# SSL certificate
|
||||
(@origins.size == 1 || @origin == uri.origin || (@io.is_a?(SSL) && @io.verify_hostname(uri.host)))
|
||||
) && @options == options
|
||||
end
|
||||
|
||||
def expired?
|
||||
return false unless @io
|
||||
|
||||
@io.expired?
|
||||
end
|
||||
|
||||
def mergeable?(connection)
|
||||
return false if @state == :closing || @state == :closed || !@io
|
||||
|
||||
return false if exhausted?
|
||||
|
||||
return false unless connection.addresses
|
||||
|
||||
(
|
||||
@ -119,6 +155,14 @@ module HTTPX
|
||||
) && @options == connection.options
|
||||
end
|
||||
|
||||
# coalesces +self+ into +connection+.
|
||||
def coalesce!(connection)
|
||||
@coalesced_connection = connection
|
||||
|
||||
close_sibling
|
||||
connection.merge(self)
|
||||
end
|
||||
|
||||
# coalescable connections need to be mergeable!
|
||||
# but internally, #mergeable? is called before #coalescable?
|
||||
def coalescable?(connection)
|
||||
@ -133,11 +177,17 @@ module HTTPX
|
||||
end
|
||||
|
||||
def create_idle(options = {})
|
||||
self.class.new(@type, @origin, @options.merge(options))
|
||||
self.class.new(@origin, @options.merge(options))
|
||||
end
|
||||
|
||||
def merge(connection)
|
||||
@origins |= connection.instance_variable_get(:@origins)
|
||||
if connection.ssl_session
|
||||
@ssl_session = connection.ssl_session
|
||||
@io.session_new_cb do |sess|
|
||||
@ssl_session = sess
|
||||
end if @io
|
||||
end
|
||||
connection.purge_pending do |req|
|
||||
send(req)
|
||||
end
|
||||
@ -155,22 +205,10 @@ module HTTPX
|
||||
end
|
||||
end
|
||||
|
||||
# checks if this is connection is an alternative service of
|
||||
# +uri+
|
||||
def match_altsvcs?(uri)
|
||||
@origins.any? { |origin| uri.altsvc_match?(origin) } ||
|
||||
AltSvc.cached_altsvc(@origin).any? do |altsvc|
|
||||
origin = altsvc["origin"]
|
||||
origin.altsvc_match?(uri.origin)
|
||||
end
|
||||
end
|
||||
def io_connected?
|
||||
return @coalesced_connection.io_connected? if @coalesced_connection
|
||||
|
||||
def match_altsvc_options?(uri, options)
|
||||
return @options == options unless @options.ssl[:hostname] == uri.host
|
||||
|
||||
dup_options = @options.merge(ssl: { hostname: nil })
|
||||
dup_options.ssl.delete(:hostname)
|
||||
dup_options == options
|
||||
@io && @io.state == :connected
|
||||
end
|
||||
|
||||
def connecting?
|
||||
@ -178,7 +216,12 @@ module HTTPX
|
||||
end
|
||||
|
||||
def inflight?
|
||||
@parser && !@parser.empty? && !@write_buffer.empty?
|
||||
@parser && (
|
||||
# parser may be dealing with other requests (possibly started from a different fiber)
|
||||
!@parser.empty? ||
|
||||
# connection may be doing connection termination handshake
|
||||
!@write_buffer.empty?
|
||||
)
|
||||
end
|
||||
|
||||
def interests
|
||||
@ -194,6 +237,9 @@ module HTTPX
|
||||
|
||||
return @parser.interests if @parser
|
||||
|
||||
nil
|
||||
rescue StandardError => e
|
||||
emit(:error, e)
|
||||
nil
|
||||
end
|
||||
|
||||
@ -203,16 +249,22 @@ module HTTPX
|
||||
|
||||
def call
|
||||
case @state
|
||||
when :idle
|
||||
connect
|
||||
consume
|
||||
when :closed
|
||||
return
|
||||
when :closing
|
||||
consume
|
||||
transition(:closed)
|
||||
emit(:close)
|
||||
when :open
|
||||
consume
|
||||
end
|
||||
nil
|
||||
rescue StandardError => e
|
||||
@write_buffer.clear
|
||||
emit(:error, e)
|
||||
raise e
|
||||
end
|
||||
|
||||
def close
|
||||
@ -221,24 +273,38 @@ module HTTPX
|
||||
@parser.close if @parser
|
||||
end
|
||||
|
||||
def terminate
|
||||
case @state
|
||||
when :idle
|
||||
purge_after_closed
|
||||
emit(:terminate)
|
||||
when :closed
|
||||
@connected_at = nil
|
||||
end
|
||||
|
||||
close
|
||||
end
|
||||
|
||||
# bypasses the state machine to force closing of connections still connecting.
|
||||
# **only** used for Happy Eyeballs v2.
|
||||
def force_reset
|
||||
def force_reset(cloned = false)
|
||||
@state = :closing
|
||||
@cloned = cloned
|
||||
transition(:closed)
|
||||
emit(:close)
|
||||
end
|
||||
|
||||
def reset
|
||||
return if @state == :closing || @state == :closed
|
||||
|
||||
transition(:closing)
|
||||
|
||||
transition(:closed)
|
||||
emit(:close)
|
||||
end
|
||||
|
||||
def send(request)
|
||||
if @parser && !@write_buffer.full?
|
||||
request.headers["alt-used"] = @origin.authority if match_altsvcs?(request.uri)
|
||||
return @coalesced_connection.send(request) if @coalesced_connection
|
||||
|
||||
if @parser && !@write_buffer.full?
|
||||
if @response_received_at && @keep_alive_timeout &&
|
||||
Utils.elapsed_time(@response_received_at) > @keep_alive_timeout
|
||||
# when pushing a request into an existing connection, we have to check whether there
|
||||
@ -246,8 +312,9 @@ module HTTPX
|
||||
# for such cases, we want to ping for availability before deciding to shovel requests.
|
||||
log(level: 3) { "keep alive timeout expired, pinging connection..." }
|
||||
@pending << request
|
||||
parser.ping
|
||||
transition(:active) if @state == :inactive
|
||||
parser.ping
|
||||
request.ping!
|
||||
return
|
||||
end
|
||||
|
||||
@ -258,28 +325,26 @@ module HTTPX
|
||||
end
|
||||
|
||||
def timeout
|
||||
if @total_timeout
|
||||
return @total_timeout unless @connected_at
|
||||
return if @state == :closed || @state == :inactive
|
||||
|
||||
elapsed_time = @total_timeout - Utils.elapsed_time(@connected_at)
|
||||
|
||||
if elapsed_time.negative?
|
||||
ex = TotalTimeoutError.new(@total_timeout, "Timed out after #{@total_timeout} seconds")
|
||||
ex.set_backtrace(caller)
|
||||
on_error(ex)
|
||||
return
|
||||
end
|
||||
|
||||
return elapsed_time
|
||||
end
|
||||
|
||||
return @timeout if defined?(@timeout)
|
||||
return @timeout if @timeout
|
||||
|
||||
return @options.timeout[:connect_timeout] if @state == :idle
|
||||
|
||||
@options.timeout[:operation_timeout]
|
||||
end
|
||||
|
||||
def idling
|
||||
purge_after_closed
|
||||
@write_buffer.clear
|
||||
transition(:idle)
|
||||
@parser = nil if @parser
|
||||
end
|
||||
|
||||
def used?
|
||||
@connected_at
|
||||
end
|
||||
|
||||
def deactivate
|
||||
transition(:inactive)
|
||||
end
|
||||
@ -288,28 +353,65 @@ module HTTPX
|
||||
@state == :open || @state == :inactive
|
||||
end
|
||||
|
||||
def raise_timeout_error(interval)
|
||||
error = HTTPX::TimeoutError.new(interval, "timed out while waiting on select")
|
||||
def handle_socket_timeout(interval)
|
||||
error = OperationTimeoutError.new(interval, "timed out while waiting on select")
|
||||
error.set_backtrace(caller)
|
||||
on_error(error)
|
||||
end
|
||||
|
||||
def sibling=(connection)
|
||||
@sibling = connection
|
||||
|
||||
return unless connection
|
||||
|
||||
@main_sibling = connection.sibling.nil?
|
||||
|
||||
return unless @main_sibling
|
||||
|
||||
connection.sibling = self
|
||||
end
|
||||
|
||||
def handle_connect_error(error)
|
||||
return handle_error(error) unless @sibling && @sibling.connecting?
|
||||
|
||||
@sibling.merge(self)
|
||||
|
||||
force_reset(true)
|
||||
end
|
||||
|
||||
def disconnect
|
||||
return unless @current_session && @current_selector
|
||||
|
||||
emit(:close)
|
||||
@current_session = nil
|
||||
@current_selector = nil
|
||||
end
|
||||
|
||||
# :nocov:
|
||||
def inspect
|
||||
"#<#{self.class}:#{object_id} " \
|
||||
"@origin=#{@origin} " \
|
||||
"@state=#{@state} " \
|
||||
"@pending=#{@pending.size} " \
|
||||
"@io=#{@io}>"
|
||||
end
|
||||
# :nocov:
|
||||
|
||||
private
|
||||
|
||||
def connect
|
||||
transition(:open)
|
||||
end
|
||||
|
||||
def exhausted?
|
||||
@parser && parser.exhausted?
|
||||
end
|
||||
|
||||
def consume
|
||||
return unless @io
|
||||
|
||||
catch(:called) do
|
||||
epiped = false
|
||||
loop do
|
||||
# connection may have
|
||||
return if @state == :idle
|
||||
|
||||
parser.consume
|
||||
|
||||
# we exit if there's no more requests to process
|
||||
@ -339,8 +441,10 @@ module HTTPX
|
||||
#
|
||||
loop do
|
||||
siz = @io.read(@window_size, @read_buffer)
|
||||
log(level: 3, color: :cyan) { "IO READ: #{siz} bytes..." }
|
||||
log(level: 3, color: :cyan) { "IO READ: #{siz} bytes... (wsize: #{@window_size}, rbuffer: #{@read_buffer.bytesize})" }
|
||||
unless siz
|
||||
@write_buffer.clear
|
||||
|
||||
ex = EOFError.new("descriptor closed")
|
||||
ex.set_backtrace(caller)
|
||||
on_error(ex)
|
||||
@ -395,6 +499,8 @@ module HTTPX
|
||||
end
|
||||
log(level: 3, color: :cyan) { "IO WRITE: #{siz} bytes..." }
|
||||
unless siz
|
||||
@write_buffer.clear
|
||||
|
||||
ex = EOFError.new("descriptor closed")
|
||||
ex.set_backtrace(caller)
|
||||
on_error(ex)
|
||||
@ -440,17 +546,22 @@ module HTTPX
|
||||
|
||||
def send_request_to_parser(request)
|
||||
@inflight += 1
|
||||
parser.send(request)
|
||||
|
||||
request.peer_address = @io.ip
|
||||
set_request_timeouts(request)
|
||||
|
||||
parser.send(request)
|
||||
|
||||
return unless @state == :inactive
|
||||
|
||||
transition(:active)
|
||||
# mark request as ping, as this inactive connection may have been
|
||||
# closed by the server, and we don't want that to influence retry
|
||||
# bookkeeping.
|
||||
request.ping!
|
||||
end
|
||||
|
||||
def build_parser(protocol = @io.protocol)
|
||||
parser = self.class.parser_type(protocol).new(@write_buffer, @options)
|
||||
parser = parser_type(protocol).new(@write_buffer, @options)
|
||||
set_parser_callbacks(parser)
|
||||
parser
|
||||
end
|
||||
@ -462,6 +573,7 @@ module HTTPX
|
||||
end
|
||||
@response_received_at = Utils.now
|
||||
@inflight -= 1
|
||||
response.finish!
|
||||
request.emit(:response, response)
|
||||
end
|
||||
parser.on(:altsvc) do |alt_origin, origin, alt_params|
|
||||
@ -474,32 +586,49 @@ module HTTPX
|
||||
request.emit(:promise, parser, stream)
|
||||
end
|
||||
parser.on(:exhausted) do
|
||||
emit(:exhausted)
|
||||
@exhausted = true
|
||||
current_session = @current_session
|
||||
current_selector = @current_selector
|
||||
begin
|
||||
parser.close
|
||||
@pending.concat(parser.pending)
|
||||
ensure
|
||||
@current_session = current_session
|
||||
@current_selector = current_selector
|
||||
end
|
||||
|
||||
case @state
|
||||
when :closed
|
||||
idling
|
||||
@exhausted = false
|
||||
when :closing
|
||||
once(:closed) do
|
||||
idling
|
||||
@exhausted = false
|
||||
end
|
||||
end
|
||||
end
|
||||
parser.on(:origin) do |origin|
|
||||
@origins |= [origin]
|
||||
end
|
||||
parser.on(:close) do |force|
|
||||
transition(:closing)
|
||||
if force || @state == :idle
|
||||
transition(:closed)
|
||||
emit(:close)
|
||||
if force
|
||||
reset
|
||||
emit(:terminate)
|
||||
end
|
||||
end
|
||||
parser.on(:close_handshake) do
|
||||
consume
|
||||
end
|
||||
parser.on(:reset) do
|
||||
if parser.empty?
|
||||
reset
|
||||
else
|
||||
transition(:closing)
|
||||
transition(:closed)
|
||||
emit(:reset)
|
||||
|
||||
@parser.reset if @parser
|
||||
transition(:idle)
|
||||
transition(:open)
|
||||
@pending.concat(parser.pending) unless parser.empty?
|
||||
current_session = @current_session
|
||||
current_selector = @current_selector
|
||||
reset
|
||||
unless @pending.empty?
|
||||
idling
|
||||
@current_session = current_session
|
||||
@current_selector = current_selector
|
||||
end
|
||||
end
|
||||
parser.on(:current_timeout) do
|
||||
@ -508,15 +637,28 @@ module HTTPX
|
||||
parser.on(:timeout) do |tout|
|
||||
@timeout = tout
|
||||
end
|
||||
parser.on(:error) do |request, ex|
|
||||
case ex
|
||||
when MisdirectedRequestError
|
||||
emit(:misdirected, request)
|
||||
else
|
||||
response = ErrorResponse.new(request, ex, @options)
|
||||
request.response = response
|
||||
request.emit(:response, response)
|
||||
parser.on(:error) do |request, error|
|
||||
case error
|
||||
when :http_1_1_required
|
||||
current_session = @current_session
|
||||
current_selector = @current_selector
|
||||
parser.close
|
||||
|
||||
other_connection = current_session.find_connection(@origin, current_selector,
|
||||
@options.merge(ssl: { alpn_protocols: %w[http/1.1] }))
|
||||
other_connection.merge(self)
|
||||
request.transition(:idle)
|
||||
other_connection.send(request)
|
||||
next
|
||||
when OperationTimeoutError
|
||||
# request level timeouts should take precedence
|
||||
next unless request.active_timeouts.empty?
|
||||
end
|
||||
|
||||
@inflight -= 1
|
||||
response = ErrorResponse.new(request, error)
|
||||
request.response = response
|
||||
request.emit(:response, response)
|
||||
end
|
||||
end
|
||||
|
||||
@ -531,19 +673,22 @@ module HTTPX
|
||||
Errno::ENETUNREACH,
|
||||
Errno::EPIPE,
|
||||
Errno::ENOENT,
|
||||
SocketError => e
|
||||
SocketError,
|
||||
IOError => e
|
||||
# connect errors, exit gracefully
|
||||
error = ConnectionError.new(e.message)
|
||||
error.set_backtrace(e.backtrace)
|
||||
connecting? && callbacks_for?(:connect_error) ? emit(:connect_error, error) : handle_error(error)
|
||||
handle_connect_error(error) if connecting?
|
||||
@state = :closed
|
||||
emit(:close)
|
||||
rescue TLSError => e
|
||||
purge_after_closed
|
||||
disconnect
|
||||
rescue TLSError, ::HTTP2::Error::ProtocolError, ::HTTP2::Error::HandshakeError => e
|
||||
# connect errors, exit gracefully
|
||||
handle_error(e)
|
||||
connecting? && callbacks_for?(:connect_error) ? emit(:connect_error, e) : handle_error(e)
|
||||
handle_connect_error(e) if connecting?
|
||||
@state = :closed
|
||||
emit(:close)
|
||||
purge_after_closed
|
||||
disconnect
|
||||
end
|
||||
|
||||
def handle_transition(nextstate)
|
||||
@ -551,11 +696,12 @@ module HTTPX
|
||||
when :idle
|
||||
@timeout = @current_timeout = @options.timeout[:connect_timeout]
|
||||
|
||||
@connected_at = @response_received_at = nil
|
||||
when :open
|
||||
return if @state == :closed
|
||||
|
||||
@io.connect
|
||||
emit(:tcp_open, self) if @io.state == :connected
|
||||
close_sibling if @io.state == :connected
|
||||
|
||||
return unless @io.connected?
|
||||
|
||||
@ -567,92 +713,203 @@ module HTTPX
|
||||
emit(:open)
|
||||
when :inactive
|
||||
return unless @state == :open
|
||||
when :closing
|
||||
return unless @state == :open
|
||||
|
||||
# do not deactivate connection in use
|
||||
return if @inflight.positive?
|
||||
when :closing
|
||||
return unless @state == :idle || @state == :open
|
||||
|
||||
unless @write_buffer.empty?
|
||||
# preset state before handshake, as error callbacks
|
||||
# may take it back here.
|
||||
@state = nextstate
|
||||
# handshakes, try sending
|
||||
consume
|
||||
@write_buffer.clear
|
||||
return
|
||||
end
|
||||
when :closed
|
||||
return unless @state == :closing
|
||||
return unless @write_buffer.empty?
|
||||
|
||||
purge_after_closed
|
||||
disconnect if @pending.empty?
|
||||
|
||||
when :already_open
|
||||
nextstate = :open
|
||||
# the first check for given io readiness must still use a timeout.
|
||||
# connect is the reasonable choice in such a case.
|
||||
@timeout = @options.timeout[:connect_timeout]
|
||||
send_pending
|
||||
when :active
|
||||
return unless @state == :inactive
|
||||
|
||||
nextstate = :open
|
||||
emit(:activate)
|
||||
|
||||
# activate
|
||||
@current_session.select_connection(self, @current_selector)
|
||||
end
|
||||
log(level: 3) { "#{@state} -> #{nextstate}" }
|
||||
@state = nextstate
|
||||
end
|
||||
|
||||
def close_sibling
|
||||
return unless @sibling
|
||||
|
||||
if @sibling.io_connected?
|
||||
reset
|
||||
# TODO: transition connection to closed
|
||||
end
|
||||
|
||||
unless @sibling.state == :closed
|
||||
merge(@sibling) unless @main_sibling
|
||||
@sibling.force_reset(true)
|
||||
end
|
||||
|
||||
@sibling = nil
|
||||
end
|
||||
|
||||
def purge_after_closed
|
||||
@io.close if @io
|
||||
@read_buffer.clear
|
||||
remove_instance_variable(:@timeout) if defined?(@timeout)
|
||||
@timeout = nil
|
||||
end
|
||||
|
||||
def initialize_type(uri, options)
|
||||
options.transport || begin
|
||||
case uri.scheme
|
||||
when "http"
|
||||
"tcp"
|
||||
when "https"
|
||||
"ssl"
|
||||
else
|
||||
raise UnsupportedSchemeError, "#{uri}: #{uri.scheme}: unsupported URI scheme"
|
||||
end
|
||||
end
|
||||
end
|
||||
|
||||
# returns an HTTPX::Connection for the negotiated Alternative Service (or none).
|
||||
def build_altsvc_connection(alt_origin, origin, alt_params)
|
||||
# do not allow security downgrades on altsvc negotiation
|
||||
return if @origin.scheme == "https" && alt_origin.scheme != "https"
|
||||
|
||||
altsvc = AltSvc.cached_altsvc_set(origin, alt_params.merge("origin" => alt_origin))
|
||||
|
||||
# altsvc already exists, somehow it wasn't advertised, probably noop
|
||||
return unless altsvc
|
||||
|
||||
alt_options = @options.merge(ssl: @options.ssl.merge(hostname: URI(origin).host))
|
||||
|
||||
connection = @current_session.find_connection(alt_origin, @current_selector, alt_options)
|
||||
|
||||
# advertised altsvc is the same origin being used, ignore
|
||||
return if connection == self
|
||||
|
||||
connection.extend(AltSvc::ConnectionMixin) unless connection.is_a?(AltSvc::ConnectionMixin)
|
||||
|
||||
log(level: 1) { "#{origin} alt-svc: #{alt_origin}" }
|
||||
|
||||
connection.merge(self)
|
||||
terminate
|
||||
rescue UnsupportedSchemeError
|
||||
altsvc["noop"] = true
|
||||
nil
|
||||
end
|
||||
|
||||
def build_socket(addrs = nil)
|
||||
transport_type = case @type
|
||||
when "tcp" then TCP
|
||||
when "ssl" then SSL
|
||||
when "unix" then UNIX
|
||||
else
|
||||
raise Error, "unsupported transport (#{@type})"
|
||||
case @type
|
||||
when "tcp"
|
||||
TCP.new(peer, addrs, @options)
|
||||
when "ssl"
|
||||
SSL.new(peer, addrs, @options) do |sock|
|
||||
sock.ssl_session = @ssl_session
|
||||
sock.session_new_cb do |sess|
|
||||
@ssl_session = sess
|
||||
|
||||
sock.ssl_session = sess
|
||||
end
|
||||
end
|
||||
when "unix"
|
||||
path = Array(addrs).first
|
||||
|
||||
path = String(path) if path
|
||||
|
||||
UNIX.new(peer, path, @options)
|
||||
else
|
||||
raise Error, "unsupported transport (#{@type})"
|
||||
end
|
||||
transport_type.new(@origin, addrs, @options)
|
||||
end
|
||||
|
||||
def on_error(error)
|
||||
if error.instance_of?(TimeoutError)
|
||||
def on_error(error, request = nil)
|
||||
if error.is_a?(OperationTimeoutError)
|
||||
|
||||
if @total_timeout && @connected_at &&
|
||||
Utils.elapsed_time(@connected_at) > @total_timeout
|
||||
ex = TotalTimeoutError.new(@total_timeout, "Timed out after #{@total_timeout} seconds")
|
||||
ex.set_backtrace(error.backtrace)
|
||||
error = ex
|
||||
else
|
||||
# inactive connections do not contribute to the select loop, therefore
|
||||
# they should not fail due to such errors.
|
||||
return if @state == :inactive
|
||||
# inactive connections do not contribute to the select loop, therefore
|
||||
# they should not fail due to such errors.
|
||||
return if @state == :inactive
|
||||
|
||||
if @timeout
|
||||
@timeout -= error.timeout
|
||||
return unless @timeout <= 0
|
||||
end
|
||||
|
||||
error = error.to_connection_error if connecting?
|
||||
if @timeout
|
||||
@timeout -= error.timeout
|
||||
return unless @timeout <= 0
|
||||
end
|
||||
|
||||
error = error.to_connection_error if connecting?
|
||||
end
|
||||
handle_error(error)
|
||||
handle_error(error, request)
|
||||
reset
|
||||
end
|
||||
|
||||
def handle_error(error)
|
||||
parser.handle_error(error) if @parser && parser.respond_to?(:handle_error)
|
||||
while (request = @pending.shift)
|
||||
response = ErrorResponse.new(request, error, request.options)
|
||||
request.response = response
|
||||
request.emit(:response, response)
|
||||
def handle_error(error, request = nil)
|
||||
parser.handle_error(error, request) if @parser && parser.respond_to?(:handle_error)
|
||||
while (req = @pending.shift)
|
||||
next if request && req == request
|
||||
|
||||
response = ErrorResponse.new(req, error)
|
||||
req.response = response
|
||||
req.emit(:response, response)
|
||||
end
|
||||
|
||||
return unless request
|
||||
|
||||
@inflight -= 1
|
||||
response = ErrorResponse.new(request, error)
|
||||
request.response = response
|
||||
request.emit(:response, response)
|
||||
end
|
||||
|
||||
def set_request_timeouts(request)
|
||||
write_timeout = request.write_timeout
|
||||
request.once(:headers) do
|
||||
@timers.after(write_timeout) { write_timeout_callback(request, write_timeout) }
|
||||
end unless write_timeout.nil? || write_timeout.infinite?
|
||||
set_request_write_timeout(request)
|
||||
set_request_read_timeout(request)
|
||||
set_request_request_timeout(request)
|
||||
end
|
||||
|
||||
def set_request_read_timeout(request)
|
||||
read_timeout = request.read_timeout
|
||||
request.once(:done) do
|
||||
@timers.after(read_timeout) { read_timeout_callback(request, read_timeout) }
|
||||
end unless read_timeout.nil? || read_timeout.infinite?
|
||||
|
||||
return if read_timeout.nil? || read_timeout.infinite?
|
||||
|
||||
set_request_timeout(:read_timeout, request, read_timeout, :done, :response) do
|
||||
read_timeout_callback(request, read_timeout)
|
||||
end
|
||||
end
|
||||
|
||||
def set_request_write_timeout(request)
|
||||
write_timeout = request.write_timeout
|
||||
|
||||
return if write_timeout.nil? || write_timeout.infinite?
|
||||
|
||||
set_request_timeout(:write_timeout, request, write_timeout, :headers, %i[done response]) do
|
||||
write_timeout_callback(request, write_timeout)
|
||||
end
|
||||
end
|
||||
|
||||
def set_request_request_timeout(request)
|
||||
request_timeout = request.request_timeout
|
||||
request.once(:headers) do
|
||||
@timers.after(request_timeout) { read_timeout_callback(request, request_timeout, RequestTimeoutError) }
|
||||
end unless request_timeout.nil? || request_timeout.infinite?
|
||||
|
||||
return if request_timeout.nil? || request_timeout.infinite?
|
||||
|
||||
set_request_timeout(:request_timeout, request, request_timeout, :headers, :complete) do
|
||||
read_timeout_callback(request, request_timeout, RequestTimeoutError)
|
||||
end
|
||||
end
|
||||
|
||||
def write_timeout_callback(request, write_timeout)
|
||||
@ -660,7 +917,8 @@ module HTTPX
|
||||
|
||||
@write_buffer.clear
|
||||
error = WriteTimeoutError.new(request, nil, write_timeout)
|
||||
on_error(error)
|
||||
|
||||
on_error(error, request)
|
||||
end
|
||||
|
||||
def read_timeout_callback(request, read_timeout, error_type = ReadTimeoutError)
|
||||
@ -670,18 +928,32 @@ module HTTPX
|
||||
|
||||
@write_buffer.clear
|
||||
error = error_type.new(request, request.response, read_timeout)
|
||||
on_error(error)
|
||||
|
||||
on_error(error, request)
|
||||
end
|
||||
|
||||
class << self
|
||||
def parser_type(protocol)
|
||||
case protocol
|
||||
when "h2" then HTTP2
|
||||
when "http/1.1" then HTTP1
|
||||
else
|
||||
raise Error, "unsupported protocol (##{protocol})"
|
||||
def set_request_timeout(label, request, timeout, start_event, finish_events, &callback)
|
||||
request.set_timeout_callback(start_event) do
|
||||
timer = @current_selector.after(timeout, callback)
|
||||
request.active_timeouts << label
|
||||
|
||||
Array(finish_events).each do |event|
|
||||
# clean up request timeouts if the connection errors out
|
||||
request.set_timeout_callback(event) do
|
||||
timer.cancel
|
||||
request.active_timeouts.delete(label)
|
||||
end
|
||||
end
|
||||
end
|
||||
end
|
||||
|
||||
def parser_type(protocol)
|
||||
case protocol
|
||||
when "h2" then HTTP2
|
||||
when "http/1.1" then HTTP1
|
||||
else
|
||||
raise Error, "unsupported protocol (##{protocol})"
|
||||
end
|
||||
end
|
||||
end
|
||||
end
|
||||
|
@ -7,15 +7,17 @@ module HTTPX
|
||||
include Callbacks
|
||||
include Loggable
|
||||
|
||||
MAX_REQUESTS = 100
|
||||
MAX_REQUESTS = 200
|
||||
CRLF = "\r\n"
|
||||
|
||||
attr_reader :pending, :requests
|
||||
|
||||
attr_accessor :max_concurrent_requests
|
||||
|
||||
def initialize(buffer, options)
|
||||
@options = Options.new(options)
|
||||
@options = options
|
||||
@max_concurrent_requests = @options.max_concurrent_requests || MAX_REQUESTS
|
||||
@max_requests = @options.max_requests || MAX_REQUESTS
|
||||
@max_requests = @options.max_requests
|
||||
@parser = Parser::HTTP1.new(self)
|
||||
@buffer = buffer
|
||||
@version = [1, 1]
|
||||
@ -47,6 +49,7 @@ module HTTPX
|
||||
@max_requests = @options.max_requests || MAX_REQUESTS
|
||||
@parser.reset!
|
||||
@handshake_completed = false
|
||||
@pending.concat(@requests) unless @requests.empty?
|
||||
end
|
||||
|
||||
def close
|
||||
@ -90,7 +93,7 @@ module HTTPX
|
||||
concurrent_requests_limit = [@max_concurrent_requests, requests_limit].min
|
||||
@requests.each_with_index do |request, idx|
|
||||
break if idx >= concurrent_requests_limit
|
||||
next if request.state == :done
|
||||
next unless request.can_buffer?
|
||||
|
||||
handle(request)
|
||||
end
|
||||
@ -116,7 +119,7 @@ module HTTPX
|
||||
@parser.http_version.join("."),
|
||||
headers)
|
||||
log(color: :yellow) { "-> HEADLINE: #{response.status} HTTP/#{@parser.http_version.join(".")}" }
|
||||
log(color: :yellow) { response.headers.each.map { |f, v| "-> HEADER: #{f}: #{v}" }.join("\n") }
|
||||
log(color: :yellow) { response.headers.each.map { |f, v| "-> HEADER: #{f}: #{log_redact(v)}" }.join("\n") }
|
||||
|
||||
@request.response = response
|
||||
on_complete if response.finished?
|
||||
@ -128,38 +131,46 @@ module HTTPX
|
||||
response = @request.response
|
||||
log(level: 2) { "trailer headers received" }
|
||||
|
||||
log(color: :yellow) { h.each.map { |f, v| "-> HEADER: #{f}: #{v.join(", ")}" }.join("\n") }
|
||||
log(color: :yellow) { h.each.map { |f, v| "-> HEADER: #{f}: #{log_redact(v.join(", "))}" }.join("\n") }
|
||||
response.merge_headers(h)
|
||||
end
|
||||
|
||||
def on_data(chunk)
|
||||
return unless @request
|
||||
request = @request
|
||||
|
||||
return unless request
|
||||
|
||||
log(color: :green) { "-> DATA: #{chunk.bytesize} bytes..." }
|
||||
log(level: 2, color: :green) { "-> #{chunk.inspect}" }
|
||||
response = @request.response
|
||||
log(level: 2, color: :green) { "-> #{log_redact(chunk.inspect)}" }
|
||||
response = request.response
|
||||
|
||||
response << chunk
|
||||
rescue StandardError => e
|
||||
error_response = ErrorResponse.new(request, e)
|
||||
request.response = error_response
|
||||
dispatch
|
||||
end
|
||||
|
||||
def on_complete
|
||||
return unless @request
|
||||
request = @request
|
||||
|
||||
return unless request
|
||||
|
||||
log(level: 2) { "parsing complete" }
|
||||
dispatch
|
||||
end
|
||||
|
||||
def dispatch
|
||||
if @request.expects?
|
||||
request = @request
|
||||
|
||||
if request.expects?
|
||||
@parser.reset!
|
||||
return handle(@request)
|
||||
return handle(request)
|
||||
end
|
||||
|
||||
request = @request
|
||||
@request = nil
|
||||
@requests.shift
|
||||
response = request.response
|
||||
response.finish!
|
||||
emit(:response, request, response)
|
||||
|
||||
if @parser.upgrade?
|
||||
@ -169,12 +180,23 @@ module HTTPX
|
||||
|
||||
@parser.reset!
|
||||
@max_requests -= 1
|
||||
manage_connection(response)
|
||||
if response.is_a?(ErrorResponse)
|
||||
disable
|
||||
else
|
||||
manage_connection(request, response)
|
||||
end
|
||||
|
||||
send(@pending.shift) unless @pending.empty?
|
||||
if exhausted?
|
||||
@pending.concat(@requests)
|
||||
@requests.clear
|
||||
|
||||
emit(:exhausted)
|
||||
else
|
||||
send(@pending.shift) unless @pending.empty?
|
||||
end
|
||||
end
|
||||
|
||||
def handle_error(ex)
|
||||
def handle_error(ex, request = nil)
|
||||
if (ex.is_a?(EOFError) || ex.is_a?(TimeoutError)) && @request && @request.response &&
|
||||
!@request.response.headers.key?("content-length") &&
|
||||
!@request.response.headers.key?("transfer-encoding")
|
||||
@ -188,23 +210,28 @@ module HTTPX
|
||||
if @pipelining
|
||||
catch(:called) { disable }
|
||||
else
|
||||
@requests.each do |request|
|
||||
emit(:error, request, ex)
|
||||
@requests.each do |req|
|
||||
next if request && request == req
|
||||
|
||||
emit(:error, req, ex)
|
||||
end
|
||||
@pending.each do |request|
|
||||
emit(:error, request, ex)
|
||||
@pending.each do |req|
|
||||
next if request && request == req
|
||||
|
||||
emit(:error, req, ex)
|
||||
end
|
||||
end
|
||||
end
|
||||
|
||||
def ping
|
||||
reset
|
||||
emit(:reset)
|
||||
emit(:exhausted)
|
||||
end
|
||||
|
||||
private
|
||||
|
||||
def manage_connection(response)
|
||||
def manage_connection(request, response)
|
||||
connection = response.headers["connection"]
|
||||
case connection
|
||||
when /keep-alive/i
|
||||
@ -221,7 +248,7 @@ module HTTPX
|
||||
return unless keep_alive
|
||||
|
||||
parameters = Hash[keep_alive.split(/ *, */).map do |pair|
|
||||
pair.split(/ *= */)
|
||||
pair.split(/ *= */, 2)
|
||||
end]
|
||||
@max_requests = parameters["max"].to_i - 1 if parameters.key?("max")
|
||||
|
||||
@ -234,7 +261,7 @@ module HTTPX
|
||||
disable
|
||||
when nil
|
||||
# In HTTP/1.1, it's keep alive by default
|
||||
return if response.version == "1.1"
|
||||
return if response.version == "1.1" && request.headers["connection"] != "close"
|
||||
|
||||
disable
|
||||
end
|
||||
@ -242,6 +269,7 @@ module HTTPX
|
||||
|
||||
def disable
|
||||
disable_pipelining
|
||||
reset
|
||||
emit(:reset)
|
||||
throw(:called)
|
||||
end
|
||||
@ -272,29 +300,31 @@ module HTTPX
|
||||
request.body.chunk!
|
||||
end
|
||||
|
||||
connection = request.headers["connection"]
|
||||
extra_headers = {}
|
||||
|
||||
connection ||= if request.options.persistent
|
||||
# when in a persistent connection, the request can't be at
|
||||
# the edge of a renegotiation
|
||||
if @requests.index(request) + 1 < @max_requests
|
||||
"keep-alive"
|
||||
unless request.headers.key?("connection")
|
||||
connection_value = if request.persistent?
|
||||
# when in a persistent connection, the request can't be at
|
||||
# the edge of a renegotiation
|
||||
if @requests.index(request) + 1 < @max_requests
|
||||
"keep-alive"
|
||||
else
|
||||
"close"
|
||||
end
|
||||
else
|
||||
"close"
|
||||
end
|
||||
else
|
||||
# when it's not a persistent connection, it sets "Connection: close" always
|
||||
# on the last request of the possible batch (either allowed max requests,
|
||||
# or if smaller, the size of the batch itself)
|
||||
requests_limit = [@max_requests, @requests.size].min
|
||||
if request == @requests[requests_limit - 1]
|
||||
"close"
|
||||
else
|
||||
"keep-alive"
|
||||
# when it's not a persistent connection, it sets "Connection: close" always
|
||||
# on the last request of the possible batch (either allowed max requests,
|
||||
# or if smaller, the size of the batch itself)
|
||||
requests_limit = [@max_requests, @requests.size].min
|
||||
if request == @requests[requests_limit - 1]
|
||||
"close"
|
||||
else
|
||||
"keep-alive"
|
||||
end
|
||||
end
|
||||
|
||||
extra_headers["connection"] = connection_value
|
||||
end
|
||||
|
||||
extra_headers = { "connection" => connection }
|
||||
extra_headers["host"] = request.authority unless request.headers.key?("host")
|
||||
extra_headers
|
||||
end
|
||||
@ -331,7 +361,7 @@ module HTTPX
|
||||
|
||||
while (chunk = request.drain_body)
|
||||
log(color: :green) { "<- DATA: #{chunk.bytesize} bytes..." }
|
||||
log(level: 2, color: :green) { "<- #{chunk.inspect}" }
|
||||
log(level: 2, color: :green) { "<- #{log_redact(chunk.inspect)}" }
|
||||
@buffer << chunk
|
||||
throw(:buffer_full, request) if @buffer.full?
|
||||
end
|
||||
@ -350,18 +380,17 @@ module HTTPX
|
||||
end
|
||||
|
||||
def join_headers2(headers)
|
||||
buffer = "".b
|
||||
headers.each do |field, value|
|
||||
buffer << "#{capitalized(field)}: #{value}" << CRLF
|
||||
log(color: :yellow) { "<- HEADER: #{buffer.chomp}" }
|
||||
@buffer << buffer
|
||||
buffer.clear
|
||||
field = capitalized(field)
|
||||
log(color: :yellow) { "<- HEADER: #{[field, log_redact(value)].join(": ")}" }
|
||||
@buffer << "#{field}: #{value}#{CRLF}"
|
||||
end
|
||||
end
|
||||
|
||||
UPCASED = {
|
||||
"www-authenticate" => "WWW-Authenticate",
|
||||
"http2-settings" => "HTTP2-Settings",
|
||||
"content-md5" => "Content-MD5",
|
||||
}.freeze
|
||||
|
||||
def capitalized(field)
|
||||
|
@ -1,18 +1,24 @@
|
||||
# frozen_string_literal: true
|
||||
|
||||
require "securerandom"
|
||||
require "http/2/next"
|
||||
require "http/2"
|
||||
|
||||
module HTTPX
|
||||
class Connection::HTTP2
|
||||
include Callbacks
|
||||
include Loggable
|
||||
|
||||
MAX_CONCURRENT_REQUESTS = HTTP2Next::DEFAULT_MAX_CONCURRENT_STREAMS
|
||||
MAX_CONCURRENT_REQUESTS = ::HTTP2::DEFAULT_MAX_CONCURRENT_STREAMS
|
||||
|
||||
class Error < Error
|
||||
def initialize(id, code)
|
||||
super("stream #{id} closed with error: #{code}")
|
||||
def initialize(id, error)
|
||||
super("stream #{id} closed with error: #{error}")
|
||||
end
|
||||
end
|
||||
|
||||
class PingError < Error
|
||||
def initialize
|
||||
super(0, :ping_error)
|
||||
end
|
||||
end
|
||||
|
||||
@ -25,7 +31,7 @@ module HTTPX
|
||||
attr_reader :streams, :pending
|
||||
|
||||
def initialize(buffer, options)
|
||||
@options = Options.new(options)
|
||||
@options = options
|
||||
@settings = @options.http2_settings
|
||||
@pending = []
|
||||
@streams = {}
|
||||
@ -35,7 +41,7 @@ module HTTPX
|
||||
@handshake_completed = false
|
||||
@wait_for_handshake = @settings.key?(:wait_for_handshake) ? @settings.delete(:wait_for_handshake) : true
|
||||
@max_concurrent_requests = @options.max_concurrent_requests || MAX_CONCURRENT_REQUESTS
|
||||
@max_requests = @options.max_requests || 0
|
||||
@max_requests = @options.max_requests
|
||||
init_connection
|
||||
end
|
||||
|
||||
@ -52,10 +58,12 @@ module HTTPX
|
||||
if @connection.state == :closed
|
||||
return unless @handshake_completed
|
||||
|
||||
return if @buffer.empty?
|
||||
|
||||
return :w
|
||||
end
|
||||
|
||||
unless (@connection.state == :connected && @handshake_completed)
|
||||
unless @connection.state == :connected && @handshake_completed
|
||||
return @buffer.empty? ? :r : :rw
|
||||
end
|
||||
|
||||
@ -73,8 +81,11 @@ module HTTPX
|
||||
end
|
||||
|
||||
def close
|
||||
@connection.goaway unless @connection.state == :closed
|
||||
emit(:close)
|
||||
unless @connection.state == :closed
|
||||
@connection.goaway
|
||||
emit(:timeout, @options.timeout[:close_handshake_timeout])
|
||||
end
|
||||
emit(:close, true)
|
||||
end
|
||||
|
||||
def empty?
|
||||
@ -82,29 +93,17 @@ module HTTPX
|
||||
end
|
||||
|
||||
def exhausted?
|
||||
return false if @max_requests.zero? && @connection.active_stream_count.zero?
|
||||
|
||||
@connection.active_stream_count >= @max_requests
|
||||
!@max_requests.positive?
|
||||
end
|
||||
|
||||
def <<(data)
|
||||
@connection << data
|
||||
end
|
||||
|
||||
def can_buffer_more_requests?
|
||||
if @handshake_completed
|
||||
@streams.size < @max_concurrent_requests &&
|
||||
@streams.size < @max_requests
|
||||
else
|
||||
!@wait_for_handshake &&
|
||||
@streams.size < @max_concurrent_requests
|
||||
end
|
||||
end
|
||||
|
||||
def send(request)
|
||||
def send(request, head = false)
|
||||
unless can_buffer_more_requests?
|
||||
@pending << request
|
||||
return
|
||||
head ? @pending.unshift(request) : @pending << request
|
||||
return false
|
||||
end
|
||||
unless (stream = @streams[request])
|
||||
stream = @connection.new_stream
|
||||
@ -114,47 +113,57 @@ module HTTPX
|
||||
end
|
||||
handle(request, stream)
|
||||
true
|
||||
rescue HTTP2Next::Error::StreamLimitExceeded
|
||||
rescue ::HTTP2::Error::StreamLimitExceeded
|
||||
@pending.unshift(request)
|
||||
emit(:exhausted)
|
||||
false
|
||||
end
|
||||
|
||||
def consume
|
||||
@streams.each do |request, stream|
|
||||
next if request.state == :done
|
||||
next unless request.can_buffer?
|
||||
|
||||
handle(request, stream)
|
||||
end
|
||||
end
|
||||
|
||||
def handle_error(ex)
|
||||
if ex.instance_of?(TimeoutError) && !@handshake_completed && @connection.state != :closed
|
||||
def handle_error(ex, request = nil)
|
||||
if ex.is_a?(OperationTimeoutError) && !@handshake_completed && @connection.state != :closed
|
||||
@connection.goaway(:settings_timeout, "closing due to settings timeout")
|
||||
emit(:close_handshake)
|
||||
settings_ex = SettingsTimeoutError.new(ex.timeout, ex.message)
|
||||
settings_ex.set_backtrace(ex.backtrace)
|
||||
ex = settings_ex
|
||||
end
|
||||
@streams.each_key do |request|
|
||||
emit(:error, request, ex)
|
||||
@streams.each_key do |req|
|
||||
next if request && request == req
|
||||
|
||||
emit(:error, req, ex)
|
||||
end
|
||||
@pending.each do |request|
|
||||
emit(:error, request, ex)
|
||||
while (req = @pending.shift)
|
||||
next if request && request == req
|
||||
|
||||
emit(:error, req, ex)
|
||||
end
|
||||
end
|
||||
|
||||
def ping
|
||||
ping = SecureRandom.gen_random(8)
|
||||
@connection.ping(ping)
|
||||
@connection.ping(ping.dup)
|
||||
ensure
|
||||
@pings << ping
|
||||
end
|
||||
|
||||
private
|
||||
|
||||
def can_buffer_more_requests?
|
||||
(@handshake_completed || !@wait_for_handshake) &&
|
||||
@streams.size < @max_concurrent_requests &&
|
||||
@streams.size < @max_requests
|
||||
end
|
||||
|
||||
def send_pending
|
||||
while (request = @pending.shift)
|
||||
break unless send(request)
|
||||
break unless send(request, true)
|
||||
end
|
||||
end
|
||||
|
||||
@ -171,8 +180,7 @@ module HTTPX
|
||||
end
|
||||
|
||||
def init_connection
|
||||
@connection = HTTP2Next::Client.new(@settings)
|
||||
@connection.max_streams = @max_requests if @connection.respond_to?(:max_streams=) && @max_requests.positive?
|
||||
@connection = ::HTTP2::Client.new(@settings)
|
||||
@connection.on(:frame, &method(:on_frame))
|
||||
@connection.on(:frame_sent, &method(:on_frame_sent))
|
||||
@connection.on(:frame_received, &method(:on_frame_received))
|
||||
@ -218,12 +226,12 @@ module HTTPX
|
||||
extra_headers = set_protocol_headers(request)
|
||||
|
||||
if request.headers.key?("host")
|
||||
log { "forbidden \"host\" header found (#{request.headers["host"]}), will use it as authority..." }
|
||||
log { "forbidden \"host\" header found (#{log_redact(request.headers["host"])}), will use it as authority..." }
|
||||
extra_headers[":authority"] = request.headers["host"]
|
||||
end
|
||||
|
||||
log(level: 1, color: :yellow) do
|
||||
request.headers.merge(extra_headers).each.map { |k, v| "#{stream.id}: -> HEADER: #{k}: #{v}" }.join("\n")
|
||||
"\n#{request.headers.merge(extra_headers).each.map { |k, v| "#{stream.id}: -> HEADER: #{k}: #{log_redact(v)}" }.join("\n")}"
|
||||
end
|
||||
stream.headers(request.headers.each(extra_headers), end_stream: request.body.empty?)
|
||||
end
|
||||
@ -235,7 +243,7 @@ module HTTPX
|
||||
end
|
||||
|
||||
log(level: 1, color: :yellow) do
|
||||
request.trailers.each.map { |k, v| "#{stream.id}: -> HEADER: #{k}: #{v}" }.join("\n")
|
||||
request.trailers.each.map { |k, v| "#{stream.id}: -> HEADER: #{k}: #{log_redact(v)}" }.join("\n")
|
||||
end
|
||||
stream.headers(request.trailers.each, end_stream: true)
|
||||
end
|
||||
@ -246,13 +254,13 @@ module HTTPX
|
||||
chunk = @drains.delete(request) || request.drain_body
|
||||
while chunk
|
||||
next_chunk = request.drain_body
|
||||
log(level: 1, color: :green) { "#{stream.id}: -> DATA: #{chunk.bytesize} bytes..." }
|
||||
log(level: 2, color: :green) { "#{stream.id}: -> #{chunk.inspect}" }
|
||||
stream.data(chunk, end_stream: !(next_chunk || request.trailers? || request.callbacks_for?(:trailers)))
|
||||
send_chunk(request, stream, chunk, next_chunk)
|
||||
|
||||
if next_chunk && (@buffer.full? || request.body.unbounded_body?)
|
||||
@drains[request] = next_chunk
|
||||
throw(:buffer_full)
|
||||
end
|
||||
|
||||
chunk = next_chunk
|
||||
end
|
||||
|
||||
@ -261,6 +269,16 @@ module HTTPX
|
||||
on_stream_refuse(stream, request, error)
|
||||
end
|
||||
|
||||
def send_chunk(request, stream, chunk, next_chunk)
|
||||
log(level: 1, color: :green) { "#{stream.id}: -> DATA: #{chunk.bytesize} bytes..." }
|
||||
log(level: 2, color: :green) { "#{stream.id}: -> #{log_redact(chunk.inspect)}" }
|
||||
stream.data(chunk, end_stream: end_stream?(request, next_chunk))
|
||||
end
|
||||
|
||||
def end_stream?(request, next_chunk)
|
||||
!(next_chunk || request.trailers? || request.callbacks_for?(:trailers))
|
||||
end
|
||||
|
||||
######
|
||||
# HTTP/2 Callbacks
|
||||
######
|
||||
@ -274,7 +292,7 @@ module HTTPX
|
||||
end
|
||||
|
||||
log(color: :yellow) do
|
||||
h.map { |k, v| "#{stream.id}: <- HEADER: #{k}: #{v}" }.join("\n")
|
||||
h.map { |k, v| "#{stream.id}: <- HEADER: #{k}: #{log_redact(v)}" }.join("\n")
|
||||
end
|
||||
_, status = h.shift
|
||||
headers = request.options.headers_class.new(h)
|
||||
@ -287,14 +305,14 @@ module HTTPX
|
||||
|
||||
def on_stream_trailers(stream, response, h)
|
||||
log(color: :yellow) do
|
||||
h.map { |k, v| "#{stream.id}: <- HEADER: #{k}: #{v}" }.join("\n")
|
||||
h.map { |k, v| "#{stream.id}: <- HEADER: #{k}: #{log_redact(v)}" }.join("\n")
|
||||
end
|
||||
response.merge_headers(h)
|
||||
end
|
||||
|
||||
def on_stream_data(stream, request, data)
|
||||
log(level: 1, color: :green) { "#{stream.id}: <- DATA: #{data.bytesize} bytes..." }
|
||||
log(level: 2, color: :green) { "#{stream.id}: <- #{data.inspect}" }
|
||||
log(level: 2, color: :green) { "#{stream.id}: <- #{log_redact(data.inspect)}" }
|
||||
request.response << data
|
||||
end
|
||||
|
||||
@ -311,25 +329,33 @@ module HTTPX
|
||||
@streams.delete(request)
|
||||
|
||||
if error
|
||||
ex = Error.new(stream.id, error)
|
||||
ex.set_backtrace(caller)
|
||||
response = ErrorResponse.new(request, ex, request.options)
|
||||
emit(:response, request, response)
|
||||
case error
|
||||
when :http_1_1_required
|
||||
emit(:error, request, error)
|
||||
else
|
||||
ex = Error.new(stream.id, error)
|
||||
ex.set_backtrace(caller)
|
||||
response = ErrorResponse.new(request, ex)
|
||||
request.response = response
|
||||
emit(:response, request, response)
|
||||
end
|
||||
else
|
||||
response = request.response
|
||||
if response && response.is_a?(Response) && response.status == 421
|
||||
ex = MisdirectedRequestError.new(response)
|
||||
ex.set_backtrace(caller)
|
||||
emit(:error, request, ex)
|
||||
emit(:error, request, :http_1_1_required)
|
||||
else
|
||||
emit(:response, request, response)
|
||||
end
|
||||
end
|
||||
send(@pending.shift) unless @pending.empty?
|
||||
|
||||
return unless @streams.empty? && exhausted?
|
||||
|
||||
close
|
||||
emit(:exhausted) unless @pending.empty?
|
||||
if @pending.empty?
|
||||
close
|
||||
else
|
||||
emit(:exhausted)
|
||||
end
|
||||
end
|
||||
|
||||
def on_frame(bytes)
|
||||
@ -339,14 +365,7 @@ module HTTPX
|
||||
def on_settings(*)
|
||||
@handshake_completed = true
|
||||
emit(:current_timeout)
|
||||
|
||||
if @max_requests.zero?
|
||||
@max_requests = @connection.remote_settings[:settings_max_concurrent_streams]
|
||||
|
||||
@connection.max_streams = @max_requests if @connection.respond_to?(:max_streams=) && @max_requests.positive?
|
||||
end
|
||||
|
||||
@max_concurrent_requests = [@max_concurrent_requests, @max_requests].min
|
||||
@max_concurrent_requests = [@max_concurrent_requests, @connection.remote_settings[:settings_max_concurrent_streams]].min
|
||||
send_pending
|
||||
end
|
||||
|
||||
@ -354,7 +373,12 @@ module HTTPX
|
||||
is_connection_closed = @connection.state == :closed
|
||||
if error
|
||||
@buffer.clear if is_connection_closed
|
||||
if error == :no_error
|
||||
case error
|
||||
when :http_1_1_required
|
||||
while (request = @pending.shift)
|
||||
emit(:error, request, error)
|
||||
end
|
||||
when :no_error
|
||||
ex = GoawayError.new
|
||||
@pending.unshift(*@streams.keys)
|
||||
@drains.clear
|
||||
@ -362,8 +386,11 @@ module HTTPX
|
||||
else
|
||||
ex = Error.new(0, error)
|
||||
end
|
||||
ex.set_backtrace(caller)
|
||||
handle_error(ex)
|
||||
|
||||
if ex
|
||||
ex.set_backtrace(caller)
|
||||
handle_error(ex)
|
||||
end
|
||||
end
|
||||
return unless is_connection_closed && @streams.empty?
|
||||
|
||||
@ -373,8 +400,15 @@ module HTTPX
|
||||
def on_frame_sent(frame)
|
||||
log(level: 2) { "#{frame[:stream]}: frame was sent!" }
|
||||
log(level: 2, color: :blue) do
|
||||
payload = frame
|
||||
payload = payload.merge(payload: frame[:payload].bytesize) if frame[:type] == :data
|
||||
payload =
|
||||
case frame[:type]
|
||||
when :data
|
||||
frame.merge(payload: frame[:payload].bytesize)
|
||||
when :headers, :ping
|
||||
frame.merge(payload: log_redact(frame[:payload]))
|
||||
else
|
||||
frame
|
||||
end
|
||||
"#{frame[:stream]}: #{payload}"
|
||||
end
|
||||
end
|
||||
@ -382,15 +416,22 @@ module HTTPX
|
||||
def on_frame_received(frame)
|
||||
log(level: 2) { "#{frame[:stream]}: frame was received!" }
|
||||
log(level: 2, color: :magenta) do
|
||||
payload = frame
|
||||
payload = payload.merge(payload: frame[:payload].bytesize) if frame[:type] == :data
|
||||
payload =
|
||||
case frame[:type]
|
||||
when :data
|
||||
frame.merge(payload: frame[:payload].bytesize)
|
||||
when :headers, :ping
|
||||
frame.merge(payload: log_redact(frame[:payload]))
|
||||
else
|
||||
frame
|
||||
end
|
||||
"#{frame[:stream]}: #{payload}"
|
||||
end
|
||||
end
|
||||
|
||||
def on_altsvc(origin, frame)
|
||||
log(level: 2) { "#{frame[:stream]}: altsvc frame was received" }
|
||||
log(level: 2) { "#{frame[:stream]}: #{frame.inspect}" }
|
||||
log(level: 2) { "#{frame[:stream]}: #{log_redact(frame.inspect)}" }
|
||||
alt_origin = URI.parse("#{frame[:proto]}://#{frame[:host]}:#{frame[:port]}")
|
||||
params = { "ma" => frame[:max_age] }
|
||||
emit(:altsvc, origin, alt_origin, origin, params)
|
||||
@ -405,11 +446,9 @@ module HTTPX
|
||||
end
|
||||
|
||||
def on_pong(ping)
|
||||
if @pings.delete(ping.to_s)
|
||||
emit(:pong)
|
||||
else
|
||||
close(:protocol_error, "ping payload did not match")
|
||||
end
|
||||
raise PingError unless @pings.delete(ping.to_s)
|
||||
|
||||
emit(:pong)
|
||||
end
|
||||
end
|
||||
end
|
||||
|
@ -51,8 +51,6 @@ module HTTPX
|
||||
# non-canonical domain.
|
||||
attr_reader :domain
|
||||
|
||||
DOT = "." # :nodoc:
|
||||
|
||||
class << self
|
||||
def new(domain)
|
||||
return domain if domain.is_a?(self)
|
||||
@ -63,8 +61,12 @@ module HTTPX
|
||||
# Normalizes a _domain_ using the Punycode algorithm as necessary.
|
||||
# The result will be a downcased, ASCII-only string.
|
||||
def normalize(domain)
|
||||
domain = domain.ascii_only? ? domain : domain.chomp(DOT).unicode_normalize(:nfc)
|
||||
Punycode.encode_hostname(domain).downcase
|
||||
unless domain.ascii_only?
|
||||
domain = domain.chomp(".").unicode_normalize(:nfc)
|
||||
domain = Punycode.encode_hostname(domain)
|
||||
end
|
||||
|
||||
domain.downcase
|
||||
end
|
||||
end
|
||||
|
||||
@ -73,7 +75,7 @@ module HTTPX
|
||||
def initialize(hostname)
|
||||
hostname = String(hostname)
|
||||
|
||||
raise ArgumentError, "domain name must not start with a dot: #{hostname}" if hostname.start_with?(DOT)
|
||||
raise ArgumentError, "domain name must not start with a dot: #{hostname}" if hostname.start_with?(".")
|
||||
|
||||
begin
|
||||
@ipaddr = IPAddr.new(hostname)
|
||||
@ -84,7 +86,7 @@ module HTTPX
|
||||
end
|
||||
|
||||
@hostname = DomainName.normalize(hostname)
|
||||
tld = if (last_dot = @hostname.rindex(DOT))
|
||||
tld = if (last_dot = @hostname.rindex("."))
|
||||
@hostname[(last_dot + 1)..-1]
|
||||
else
|
||||
@hostname
|
||||
@ -94,7 +96,7 @@ module HTTPX
|
||||
@domain = if last_dot
|
||||
# fallback - accept cookies down to second level
|
||||
# cf. http://www.dkim-reputation.org/regdom-libs/
|
||||
if (penultimate_dot = @hostname.rindex(DOT, last_dot - 1))
|
||||
if (penultimate_dot = @hostname.rindex(".", last_dot - 1))
|
||||
@hostname[(penultimate_dot + 1)..-1]
|
||||
else
|
||||
@hostname
|
||||
@ -126,17 +128,12 @@ module HTTPX
|
||||
@domain && self <= domain && domain <= @domain
|
||||
end
|
||||
|
||||
# def ==(other)
|
||||
# other = DomainName.new(other)
|
||||
# other.hostname == @hostname
|
||||
# end
|
||||
|
||||
def <=>(other)
|
||||
other = DomainName.new(other)
|
||||
othername = other.hostname
|
||||
if othername == @hostname
|
||||
0
|
||||
elsif @hostname.end_with?(othername) && @hostname[-othername.size - 1, 1] == DOT
|
||||
elsif @hostname.end_with?(othername) && @hostname[-othername.size - 1, 1] == "."
|
||||
# The other is higher
|
||||
-1
|
||||
else
|
||||
|
@ -1,20 +1,27 @@
|
||||
# frozen_string_literal: true
|
||||
|
||||
module HTTPX
|
||||
# the default exception class for exceptions raised by HTTPX.
|
||||
class Error < StandardError; end
|
||||
|
||||
class UnsupportedSchemeError < Error; end
|
||||
|
||||
class ConnectionError < Error; end
|
||||
|
||||
# Error raised when there was a timeout. Its subclasses allow for finer-grained
|
||||
# control of which timeout happened.
|
||||
class TimeoutError < Error
|
||||
# The timeout value which caused this error to be raised.
|
||||
attr_reader :timeout
|
||||
|
||||
# initializes the timeout exception with the +timeout+ causing the error, and the
|
||||
# error +message+ for it.
|
||||
def initialize(timeout, message)
|
||||
@timeout = timeout
|
||||
super(message)
|
||||
end
|
||||
|
||||
# clones this error into a HTTPX::ConnectionTimeoutError.
|
||||
def to_connection_error
|
||||
ex = ConnectTimeoutError.new(@timeout, message)
|
||||
ex.set_backtrace(backtrace)
|
||||
@ -22,13 +29,22 @@ module HTTPX
|
||||
end
|
||||
end
|
||||
|
||||
class TotalTimeoutError < TimeoutError; end
|
||||
# Raise when it can't acquire a connection from the pool.
|
||||
class PoolTimeoutError < TimeoutError; end
|
||||
|
||||
# Error raised when there was a timeout establishing the connection to a server.
|
||||
# This may be raised due to timeouts during TCP and TLS (when applicable) connection
|
||||
# establishment.
|
||||
class ConnectTimeoutError < TimeoutError; end
|
||||
|
||||
# Error raised when there was a timeout while sending a request, or receiving a response
|
||||
# from the server.
|
||||
class RequestTimeoutError < TimeoutError
|
||||
# The HTTPX::Request request object this exception refers to.
|
||||
attr_reader :request
|
||||
|
||||
# initializes the exception with the +request+ and +response+ it refers to, and the
|
||||
# +timeout+ causing the error, and the
|
||||
def initialize(request, response, timeout)
|
||||
@request = request
|
||||
@response = response
|
||||
@ -40,19 +56,31 @@ module HTTPX
|
||||
end
|
||||
end
|
||||
|
||||
# Error raised when there was a timeout while receiving a response from the server.
|
||||
class ReadTimeoutError < RequestTimeoutError; end
|
||||
|
||||
# Error raised when there was a timeout while sending a request from the server.
|
||||
class WriteTimeoutError < RequestTimeoutError; end
|
||||
|
||||
# Error raised when there was a timeout while waiting for the HTTP/2 settings frame from the server.
|
||||
class SettingsTimeoutError < TimeoutError; end
|
||||
|
||||
# Error raised when there was a timeout while resolving a domain to an IP.
|
||||
class ResolveTimeoutError < TimeoutError; end
|
||||
|
||||
# Error raise when there was a timeout waiting for readiness of the socket the request is related to.
|
||||
class OperationTimeoutError < TimeoutError; end
|
||||
|
||||
# Error raised when there was an error while resolving a domain to an IP.
|
||||
class ResolveError < Error; end
|
||||
|
||||
# Error raised when there was an error while resolving a domain to an IP
|
||||
# using a HTTPX::Resolver::Native resolver.
|
||||
class NativeResolveError < ResolveError
|
||||
attr_reader :connection, :host
|
||||
|
||||
# initializes the exception with the +connection+ it refers to, the +host+ domain
|
||||
# which failed to resolve, and the error +message+.
|
||||
def initialize(connection, host, message = "Can't resolve #{host}")
|
||||
@connection = connection
|
||||
@host = host
|
||||
@ -60,18 +88,22 @@ module HTTPX
|
||||
end
|
||||
end
|
||||
|
||||
# The exception class for HTTP responses with 4xx or 5xx status.
|
||||
class HTTPError < Error
|
||||
# The HTTPX::Response response object this exception refers to.
|
||||
attr_reader :response
|
||||
|
||||
# Creates the instance and assigns the HTTPX::Response +response+.
|
||||
def initialize(response)
|
||||
@response = response
|
||||
super("HTTP Error: #{@response.status} #{@response.headers}\n#{@response.body}")
|
||||
end
|
||||
|
||||
# The HTTP response status.
|
||||
#
|
||||
# error.status #=> 404
|
||||
def status
|
||||
@response.status
|
||||
end
|
||||
end
|
||||
|
||||
class MisdirectedRequestError < HTTPError; end
|
||||
end
|
||||
|
@ -3,96 +3,6 @@
|
||||
require "uri"
|
||||
|
||||
module HTTPX
|
||||
unless Method.method_defined?(:curry)
|
||||
|
||||
# Backport
|
||||
#
|
||||
# Ruby 2.1 and lower implement curry only for Procs.
|
||||
#
|
||||
# Why not using Refinements? Because they don't work for Method (tested with ruby 2.1.9).
|
||||
#
|
||||
module CurryMethods
|
||||
# Backport for the Method#curry method, which is part of ruby core since 2.2 .
|
||||
#
|
||||
def curry(*args)
|
||||
to_proc.curry(*args)
|
||||
end
|
||||
end
|
||||
Method.__send__(:include, CurryMethods)
|
||||
end
|
||||
|
||||
unless String.method_defined?(:+@)
|
||||
# Backport for +"", to initialize unfrozen strings from the string literal.
|
||||
#
|
||||
module LiteralStringExtensions
|
||||
def +@
|
||||
frozen? ? dup : self
|
||||
end
|
||||
end
|
||||
String.__send__(:include, LiteralStringExtensions)
|
||||
end
|
||||
|
||||
unless Numeric.method_defined?(:positive?)
|
||||
# Ruby 2.3 Backport (Numeric#positive?)
|
||||
#
|
||||
module PosMethods
|
||||
def positive?
|
||||
self > 0
|
||||
end
|
||||
end
|
||||
Numeric.__send__(:include, PosMethods)
|
||||
end
|
||||
|
||||
unless Numeric.method_defined?(:negative?)
|
||||
# Ruby 2.3 Backport (Numeric#negative?)
|
||||
#
|
||||
module NegMethods
|
||||
def negative?
|
||||
self < 0
|
||||
end
|
||||
end
|
||||
Numeric.__send__(:include, NegMethods)
|
||||
end
|
||||
|
||||
module NumericExtensions
|
||||
# Ruby 2.4 backport
|
||||
refine Numeric do
|
||||
def infinite?
|
||||
self == Float::INFINITY
|
||||
end unless Numeric.method_defined?(:infinite?)
|
||||
end
|
||||
end
|
||||
|
||||
module StringExtensions
|
||||
refine String do
|
||||
# Ruby 2.5 backport
|
||||
def delete_suffix!(suffix)
|
||||
suffix = Backports.coerce_to_str(suffix)
|
||||
chomp! if frozen?
|
||||
len = suffix.length
|
||||
if len > 0 && index(suffix, -len)
|
||||
self[-len..-1] = ''
|
||||
self
|
||||
else
|
||||
nil
|
||||
end
|
||||
end unless String.method_defined?(:delete_suffix!)
|
||||
end
|
||||
end
|
||||
|
||||
module HashExtensions
|
||||
refine Hash do
|
||||
# Ruby 2.4 backport
|
||||
def compact
|
||||
h = {}
|
||||
each do |key, value|
|
||||
h[key] = value unless value == nil
|
||||
end
|
||||
h
|
||||
end unless Hash.method_defined?(:compact)
|
||||
end
|
||||
end
|
||||
|
||||
module ArrayExtensions
|
||||
module FilterMap
|
||||
refine Array do
|
||||
@ -108,16 +18,6 @@ module HTTPX
|
||||
end unless Array.method_defined?(:filter_map)
|
||||
end
|
||||
|
||||
module Sum
|
||||
refine Array do
|
||||
# Ruby 2.6 backport
|
||||
def sum(accumulator = 0, &block)
|
||||
values = block_given? ? map(&block) : self
|
||||
values.inject(accumulator, :+)
|
||||
end
|
||||
end unless Array.method_defined?(:sum)
|
||||
end
|
||||
|
||||
module Intersect
|
||||
refine Array do
|
||||
# Ruby 3.1 backport
|
||||
@ -133,30 +33,6 @@ module HTTPX
|
||||
end
|
||||
end
|
||||
|
||||
module IOExtensions
|
||||
refine IO do
|
||||
# Ruby 2.3 backport
|
||||
# provides a fallback for rubies where IO#wait isn't implemented,
|
||||
# but IO#wait_readable and IO#wait_writable are.
|
||||
def wait(timeout = nil, _mode = :read_write)
|
||||
r, w = IO.select([self], [self], nil, timeout)
|
||||
|
||||
return unless r || w
|
||||
|
||||
self
|
||||
end unless IO.method_defined?(:wait) && IO.instance_method(:wait).arity == 2
|
||||
end
|
||||
end
|
||||
|
||||
module RegexpExtensions
|
||||
refine(Regexp) do
|
||||
# Ruby 2.4 backport
|
||||
def match?(*args)
|
||||
!match(*args).nil?
|
||||
end
|
||||
end
|
||||
end
|
||||
|
||||
module URIExtensions
|
||||
# uri 0.11 backport, ships with ruby 3.1
|
||||
refine URI::Generic do
|
||||
@ -178,21 +54,6 @@ module HTTPX
|
||||
def origin
|
||||
"#{scheme}://#{authority}"
|
||||
end unless URI::HTTP.method_defined?(:origin)
|
||||
|
||||
def altsvc_match?(uri)
|
||||
uri = URI.parse(uri)
|
||||
|
||||
origin == uri.origin || begin
|
||||
case scheme
|
||||
when "h2"
|
||||
(uri.scheme == "https" || uri.scheme == "h2") &&
|
||||
host == uri.host &&
|
||||
(port || default_port) == (uri.port || uri.default_port)
|
||||
else
|
||||
false
|
||||
end
|
||||
end
|
||||
end
|
||||
end
|
||||
end
|
||||
end
|
||||
|
@ -11,20 +11,32 @@ module HTTPX
|
||||
end
|
||||
|
||||
def initialize(headers = nil)
|
||||
if headers.nil? || headers.empty?
|
||||
@headers = headers.to_h
|
||||
return
|
||||
end
|
||||
|
||||
@headers = {}
|
||||
return unless headers
|
||||
|
||||
headers.each do |field, value|
|
||||
array_value(value).each do |v|
|
||||
add(downcased(field), v)
|
||||
field = downcased(field)
|
||||
|
||||
value = array_value(value)
|
||||
|
||||
current = @headers[field]
|
||||
|
||||
if current.nil?
|
||||
@headers[field] = value
|
||||
else
|
||||
current.concat(value)
|
||||
end
|
||||
end
|
||||
end
|
||||
|
||||
# cloned initialization
|
||||
def initialize_clone(orig)
|
||||
def initialize_clone(orig, **kwargs)
|
||||
super
|
||||
@headers = orig.instance_variable_get(:@headers).clone
|
||||
@headers = orig.instance_variable_get(:@headers).clone(**kwargs)
|
||||
end
|
||||
|
||||
# dupped initialization
|
||||
@ -39,17 +51,6 @@ module HTTPX
|
||||
super
|
||||
end
|
||||
|
||||
def same_headers?(headers)
|
||||
@headers.empty? || begin
|
||||
headers.each do |k, v|
|
||||
next unless key?(k)
|
||||
|
||||
return false unless v == self[k]
|
||||
end
|
||||
true
|
||||
end
|
||||
end
|
||||
|
||||
# merges headers with another header-quack.
|
||||
# the merge rule is, if the header already exists,
|
||||
# ignore what the +other+ headers has. Otherwise, set
|
||||
@ -119,6 +120,10 @@ module HTTPX
|
||||
other == to_hash
|
||||
end
|
||||
|
||||
def empty?
|
||||
@headers.empty?
|
||||
end
|
||||
|
||||
# the headers store in Hash format
|
||||
def to_hash
|
||||
Hash[to_a]
|
||||
@ -137,7 +142,8 @@ module HTTPX
|
||||
|
||||
# :nocov:
|
||||
def inspect
|
||||
to_hash.inspect
|
||||
"#<#{self.class}:#{object_id} " \
|
||||
"#{to_hash.inspect}>"
|
||||
end
|
||||
# :nocov:
|
||||
|
||||
@ -160,12 +166,7 @@ module HTTPX
|
||||
private
|
||||
|
||||
def array_value(value)
|
||||
case value
|
||||
when Array
|
||||
value.map { |val| String(val).strip }
|
||||
else
|
||||
[String(value).strip]
|
||||
end
|
||||
Array(value)
|
||||
end
|
||||
|
||||
def downcased(field)
|
||||
|
@ -4,4 +4,8 @@ require "socket"
|
||||
require "httpx/io/udp"
|
||||
require "httpx/io/tcp"
|
||||
require "httpx/io/unix"
|
||||
require "httpx/io/ssl"
|
||||
|
||||
begin
|
||||
require "httpx/io/ssl"
|
||||
rescue LoadError
|
||||
end
|
||||
|
@ -4,26 +4,49 @@ require "openssl"
|
||||
|
||||
module HTTPX
|
||||
TLSError = OpenSSL::SSL::SSLError
|
||||
IPRegex = Regexp.union(Resolv::IPv4::Regex, Resolv::IPv6::Regex)
|
||||
|
||||
class SSL < TCP
|
||||
using RegexpExtensions unless Regexp.method_defined?(:match?)
|
||||
# rubocop:disable Style/MutableConstant
|
||||
TLS_OPTIONS = { alpn_protocols: %w[h2 http/1.1].freeze }
|
||||
# https://github.com/jruby/jruby-openssl/issues/284
|
||||
# TODO: remove when dropping support for jruby-openssl < 0.15.4
|
||||
TLS_OPTIONS[:verify_hostname] = true if RUBY_ENGINE == "jruby" && JOpenSSL::VERSION < "0.15.4"
|
||||
# rubocop:enable Style/MutableConstant
|
||||
TLS_OPTIONS.freeze
|
||||
|
||||
TLS_OPTIONS = if OpenSSL::SSL::SSLContext.instance_methods.include?(:alpn_protocols)
|
||||
{ alpn_protocols: %w[h2 http/1.1].freeze }.freeze
|
||||
else
|
||||
{}.freeze
|
||||
end
|
||||
attr_writer :ssl_session
|
||||
|
||||
def initialize(_, _, options)
|
||||
super
|
||||
@ctx = OpenSSL::SSL::SSLContext.new
|
||||
|
||||
ctx_options = TLS_OPTIONS.merge(options.ssl)
|
||||
@sni_hostname = ctx_options.delete(:hostname) || @hostname
|
||||
@ctx.set_params(ctx_options) unless ctx_options.empty?
|
||||
@state = :negotiated if @keep_open
|
||||
|
||||
@hostname_is_ip = IPRegex.match?(@sni_hostname)
|
||||
if @keep_open && @io.is_a?(OpenSSL::SSL::SSLSocket)
|
||||
# externally initiated ssl socket
|
||||
@ctx = @io.context
|
||||
@state = :negotiated
|
||||
else
|
||||
@ctx = OpenSSL::SSL::SSLContext.new
|
||||
@ctx.set_params(ctx_options) unless ctx_options.empty?
|
||||
unless @ctx.session_cache_mode.nil? # a dummy method on JRuby
|
||||
@ctx.session_cache_mode =
|
||||
OpenSSL::SSL::SSLContext::SESSION_CACHE_CLIENT | OpenSSL::SSL::SSLContext::SESSION_CACHE_NO_INTERNAL_STORE
|
||||
end
|
||||
|
||||
yield(self) if block_given?
|
||||
end
|
||||
|
||||
@verify_hostname = @ctx.verify_hostname
|
||||
end
|
||||
|
||||
if OpenSSL::SSL::SSLContext.method_defined?(:session_new_cb=)
|
||||
def session_new_cb(&pr)
|
||||
@ctx.session_new_cb = proc { |_, sess| pr.call(sess) }
|
||||
end
|
||||
else
|
||||
# session_new_cb not implemented under JRuby
|
||||
def session_new_cb; end
|
||||
end
|
||||
|
||||
def protocol
|
||||
@ -32,6 +55,20 @@ module HTTPX
|
||||
super
|
||||
end
|
||||
|
||||
if RUBY_ENGINE == "jruby"
|
||||
# in jruby, alpn_protocol may return ""
|
||||
# https://github.com/jruby/jruby-openssl/issues/287
|
||||
def protocol
|
||||
proto = @io.alpn_protocol
|
||||
|
||||
return super if proto.nil? || proto.empty?
|
||||
|
||||
proto
|
||||
rescue StandardError
|
||||
super
|
||||
end
|
||||
end
|
||||
|
||||
def can_verify_peer?
|
||||
@ctx.verify_mode == OpenSSL::SSL::VERIFY_PEER
|
||||
end
|
||||
@ -43,85 +80,57 @@ module HTTPX
|
||||
OpenSSL::SSL.verify_certificate_identity(@io.peer_cert, host)
|
||||
end
|
||||
|
||||
def close
|
||||
super
|
||||
# allow reconnections
|
||||
# connect only works if initial @io is a socket
|
||||
@io = @io.io if @io.respond_to?(:io)
|
||||
end
|
||||
|
||||
def connected?
|
||||
@state == :negotiated
|
||||
end
|
||||
|
||||
def expired?
|
||||
super || ssl_session_expired?
|
||||
end
|
||||
|
||||
def ssl_session_expired?
|
||||
@ssl_session.nil? || Process.clock_gettime(Process::CLOCK_REALTIME) >= (@ssl_session.time.to_f + @ssl_session.timeout)
|
||||
end
|
||||
|
||||
def connect
|
||||
super
|
||||
return if @state == :negotiated ||
|
||||
@state != :connected
|
||||
return if @state == :negotiated
|
||||
|
||||
unless @state == :connected
|
||||
super
|
||||
return unless @state == :connected
|
||||
end
|
||||
|
||||
unless @io.is_a?(OpenSSL::SSL::SSLSocket)
|
||||
if (hostname_is_ip = (@ip == @sni_hostname))
|
||||
# IPv6 address would be "[::1]", must turn to "0000:0000:0000:0000:0000:0000:0000:0001" for cert SAN check
|
||||
@sni_hostname = @ip.to_string
|
||||
# IP addresses in SNI is not valid per RFC 6066, section 3.
|
||||
@ctx.verify_hostname = false
|
||||
end
|
||||
|
||||
@io = OpenSSL::SSL::SSLSocket.new(@io, @ctx)
|
||||
@io.hostname = @sni_hostname unless @hostname_is_ip
|
||||
|
||||
@io.hostname = @sni_hostname unless hostname_is_ip
|
||||
@io.session = @ssl_session unless ssl_session_expired?
|
||||
@io.sync_close = true
|
||||
end
|
||||
try_ssl_connect
|
||||
end
|
||||
|
||||
if RUBY_VERSION < "2.3"
|
||||
# :nocov:
|
||||
def try_ssl_connect
|
||||
@io.connect_nonblock
|
||||
@io.post_connection_check(@sni_hostname) if @ctx.verify_mode != OpenSSL::SSL::VERIFY_NONE && !@hostname_is_ip
|
||||
transition(:negotiated)
|
||||
@interests = :w
|
||||
rescue ::IO::WaitReadable
|
||||
def try_ssl_connect
|
||||
ret = @io.connect_nonblock(exception: false)
|
||||
log(level: 3, color: :cyan) { "TLS CONNECT: #{ret}..." }
|
||||
case ret
|
||||
when :wait_readable
|
||||
@interests = :r
|
||||
rescue ::IO::WaitWritable
|
||||
return
|
||||
when :wait_writable
|
||||
@interests = :w
|
||||
return
|
||||
end
|
||||
|
||||
def read(_, buffer)
|
||||
super
|
||||
rescue ::IO::WaitWritable
|
||||
buffer.clear
|
||||
0
|
||||
end
|
||||
|
||||
def write(*)
|
||||
super
|
||||
rescue ::IO::WaitReadable
|
||||
0
|
||||
end
|
||||
# :nocov:
|
||||
else
|
||||
def try_ssl_connect
|
||||
case @io.connect_nonblock(exception: false)
|
||||
when :wait_readable
|
||||
@interests = :r
|
||||
return
|
||||
when :wait_writable
|
||||
@interests = :w
|
||||
return
|
||||
end
|
||||
@io.post_connection_check(@sni_hostname) if @ctx.verify_mode != OpenSSL::SSL::VERIFY_NONE && !@hostname_is_ip
|
||||
transition(:negotiated)
|
||||
@interests = :w
|
||||
end
|
||||
|
||||
# :nocov:
|
||||
if OpenSSL::VERSION < "2.0.6"
|
||||
def read(size, buffer)
|
||||
@io.read_nonblock(size, buffer)
|
||||
buffer.bytesize
|
||||
rescue ::IO::WaitReadable,
|
||||
::IO::WaitWritable
|
||||
buffer.clear
|
||||
0
|
||||
rescue EOFError
|
||||
nil
|
||||
end
|
||||
end
|
||||
# :nocov:
|
||||
@io.post_connection_check(@sni_hostname) if @ctx.verify_mode != OpenSSL::SSL::VERIFY_NONE && @verify_hostname
|
||||
transition(:negotiated)
|
||||
@interests = :w
|
||||
end
|
||||
|
||||
private
|
||||
@ -130,6 +139,7 @@ module HTTPX
|
||||
case nextstate
|
||||
when :negotiated
|
||||
return unless @state == :connected
|
||||
|
||||
when :closed
|
||||
return unless @state == :negotiated ||
|
||||
@state == :connected
|
||||
|
@ -17,7 +17,7 @@ module HTTPX
|
||||
@state = :idle
|
||||
@addresses = []
|
||||
@hostname = origin.host
|
||||
@options = Options.new(options)
|
||||
@options = options
|
||||
@fallback_protocol = @options.fallback_protocol
|
||||
@port = origin.port
|
||||
@interests = :w
|
||||
@ -38,7 +38,10 @@ module HTTPX
|
||||
add_addresses(addresses)
|
||||
end
|
||||
@ip_index = @addresses.size - 1
|
||||
# @io ||= build_socket
|
||||
end
|
||||
|
||||
def socket
|
||||
@io
|
||||
end
|
||||
|
||||
def add_addresses(addrs)
|
||||
@ -72,10 +75,20 @@ module HTTPX
|
||||
@io = build_socket
|
||||
end
|
||||
try_connect
|
||||
rescue Errno::EHOSTUNREACH,
|
||||
Errno::ENETUNREACH => e
|
||||
raise e if @ip_index <= 0
|
||||
|
||||
log { "failed connecting to #{@ip} (#{e.message}), evict from cache and trying next..." }
|
||||
Resolver.cached_lookup_evict(@hostname, @ip)
|
||||
|
||||
@ip_index -= 1
|
||||
@io = build_socket
|
||||
retry
|
||||
rescue Errno::ECONNREFUSED,
|
||||
Errno::EADDRNOTAVAIL,
|
||||
Errno::EHOSTUNREACH,
|
||||
SocketError => e
|
||||
SocketError,
|
||||
IOError => e
|
||||
raise e if @ip_index <= 0
|
||||
|
||||
log { "failed connecting to #{@ip} (#{e.message}), trying next..." }
|
||||
@ -91,84 +104,45 @@ module HTTPX
|
||||
retry
|
||||
end
|
||||
|
||||
if RUBY_VERSION < "2.3"
|
||||
# :nocov:
|
||||
def try_connect
|
||||
@io.connect_nonblock(Socket.sockaddr_in(@port, @ip.to_s))
|
||||
rescue ::IO::WaitWritable, Errno::EALREADY
|
||||
@interests = :w
|
||||
rescue ::IO::WaitReadable
|
||||
def try_connect
|
||||
ret = @io.connect_nonblock(Socket.sockaddr_in(@port, @ip.to_s), exception: false)
|
||||
log(level: 3, color: :cyan) { "TCP CONNECT: #{ret}..." }
|
||||
case ret
|
||||
when :wait_readable
|
||||
@interests = :r
|
||||
rescue Errno::EISCONN
|
||||
transition(:connected)
|
||||
@interests = :w
|
||||
else
|
||||
transition(:connected)
|
||||
return
|
||||
when :wait_writable
|
||||
@interests = :w
|
||||
return
|
||||
end
|
||||
private :try_connect
|
||||
transition(:connected)
|
||||
@interests = :w
|
||||
rescue Errno::EALREADY
|
||||
@interests = :w
|
||||
end
|
||||
private :try_connect
|
||||
|
||||
def read(size, buffer)
|
||||
@io.read_nonblock(size, buffer)
|
||||
log { "READ: #{buffer.bytesize} bytes..." }
|
||||
buffer.bytesize
|
||||
rescue ::IO::WaitReadable
|
||||
def read(size, buffer)
|
||||
ret = @io.read_nonblock(size, buffer, exception: false)
|
||||
if ret == :wait_readable
|
||||
buffer.clear
|
||||
0
|
||||
rescue EOFError
|
||||
nil
|
||||
return 0
|
||||
end
|
||||
return if ret.nil?
|
||||
|
||||
def write(buffer)
|
||||
siz = @io.write_nonblock(buffer)
|
||||
log { "WRITE: #{siz} bytes..." }
|
||||
buffer.shift!(siz)
|
||||
siz
|
||||
rescue ::IO::WaitWritable
|
||||
0
|
||||
rescue EOFError
|
||||
nil
|
||||
end
|
||||
# :nocov:
|
||||
else
|
||||
def try_connect
|
||||
case @io.connect_nonblock(Socket.sockaddr_in(@port, @ip.to_s), exception: false)
|
||||
when :wait_readable
|
||||
@interests = :r
|
||||
return
|
||||
when :wait_writable
|
||||
@interests = :w
|
||||
return
|
||||
end
|
||||
transition(:connected)
|
||||
@interests = :w
|
||||
rescue Errno::EALREADY
|
||||
@interests = :w
|
||||
end
|
||||
private :try_connect
|
||||
log { "READ: #{buffer.bytesize} bytes..." }
|
||||
buffer.bytesize
|
||||
end
|
||||
|
||||
def read(size, buffer)
|
||||
ret = @io.read_nonblock(size, buffer, exception: false)
|
||||
if ret == :wait_readable
|
||||
buffer.clear
|
||||
return 0
|
||||
end
|
||||
return if ret.nil?
|
||||
def write(buffer)
|
||||
siz = @io.write_nonblock(buffer, exception: false)
|
||||
return 0 if siz == :wait_writable
|
||||
return if siz.nil?
|
||||
|
||||
log { "READ: #{buffer.bytesize} bytes..." }
|
||||
buffer.bytesize
|
||||
end
|
||||
log { "WRITE: #{siz} bytes..." }
|
||||
|
||||
def write(buffer)
|
||||
siz = @io.write_nonblock(buffer, exception: false)
|
||||
return 0 if siz == :wait_writable
|
||||
return if siz.nil?
|
||||
|
||||
log { "WRITE: #{siz} bytes..." }
|
||||
|
||||
buffer.shift!(siz)
|
||||
siz
|
||||
end
|
||||
buffer.shift!(siz)
|
||||
siz
|
||||
end
|
||||
|
||||
def close
|
||||
@ -189,9 +163,25 @@ module HTTPX
|
||||
@state == :idle || @state == :closed
|
||||
end
|
||||
|
||||
def expired?
|
||||
# do not mess with external sockets
|
||||
return false if @options.io
|
||||
|
||||
return true unless @addresses
|
||||
|
||||
resolver_addresses = Resolver.nolookup_resolve(@hostname)
|
||||
|
||||
(Array(resolver_addresses) & @addresses).empty?
|
||||
end
|
||||
|
||||
# :nocov:
|
||||
def inspect
|
||||
"#<#{self.class}: #{@ip}:#{@port} (state: #{@state})>"
|
||||
"#<#{self.class}:#{object_id} " \
|
||||
"#{@ip}:#{@port} " \
|
||||
"@state=#{@state} " \
|
||||
"@hostname=#{@hostname} " \
|
||||
"@addresses=#{@addresses} " \
|
||||
"@state=#{@state}>"
|
||||
end
|
||||
# :nocov:
|
||||
|
||||
@ -219,12 +209,9 @@ module HTTPX
|
||||
end
|
||||
|
||||
def log_transition_state(nextstate)
|
||||
case nextstate
|
||||
when :connected
|
||||
"Connected to #{host} (##{@io.fileno})"
|
||||
else
|
||||
"#{host} #{@state} -> #{nextstate}"
|
||||
end
|
||||
label = host
|
||||
label = "#{label}(##{@io.fileno})" if nextstate == :connected
|
||||
"#{label} #{@state} -> #{nextstate}"
|
||||
end
|
||||
end
|
||||
end
|
||||
|
@ -23,45 +23,19 @@ module HTTPX
|
||||
true
|
||||
end
|
||||
|
||||
if RUBY_VERSION < "2.3"
|
||||
# :nocov:
|
||||
def close
|
||||
@io.close
|
||||
rescue StandardError
|
||||
nil
|
||||
end
|
||||
# :nocov:
|
||||
else
|
||||
def close
|
||||
@io.close
|
||||
end
|
||||
def close
|
||||
@io.close
|
||||
end
|
||||
|
||||
# :nocov:
|
||||
if (RUBY_ENGINE == "truffleruby" && RUBY_ENGINE_VERSION < "21.1.0") ||
|
||||
RUBY_VERSION < "2.3"
|
||||
if RUBY_ENGINE == "jruby"
|
||||
# In JRuby, sendmsg_nonblock is not implemented
|
||||
def write(buffer)
|
||||
siz = @io.sendmsg_nonblock(buffer.to_s, 0, Socket.sockaddr_in(@port, @host.to_s))
|
||||
siz = @io.send(buffer.to_s, 0, @host, @port)
|
||||
log { "WRITE: #{siz} bytes..." }
|
||||
buffer.shift!(siz)
|
||||
siz
|
||||
rescue ::IO::WaitWritable
|
||||
0
|
||||
rescue EOFError
|
||||
nil
|
||||
end
|
||||
|
||||
def read(size, buffer)
|
||||
data, _ = @io.recvfrom_nonblock(size)
|
||||
buffer.replace(data)
|
||||
log { "READ: #{buffer.bytesize} bytes..." }
|
||||
buffer.bytesize
|
||||
rescue ::IO::WaitReadable
|
||||
0
|
||||
rescue IOError
|
||||
end
|
||||
else
|
||||
|
||||
def write(buffer)
|
||||
siz = @io.sendmsg_nonblock(buffer.to_s, 0, Socket.sockaddr_in(@port, @host.to_s), exception: false)
|
||||
return 0 if siz == :wait_writable
|
||||
@ -72,26 +46,17 @@ module HTTPX
|
||||
buffer.shift!(siz)
|
||||
siz
|
||||
end
|
||||
|
||||
def read(size, buffer)
|
||||
ret = @io.recvfrom_nonblock(size, 0, buffer, exception: false)
|
||||
return 0 if ret == :wait_readable
|
||||
return if ret.nil?
|
||||
|
||||
log { "READ: #{buffer.bytesize} bytes..." }
|
||||
|
||||
buffer.bytesize
|
||||
rescue IOError
|
||||
end
|
||||
end
|
||||
|
||||
# In JRuby, sendmsg_nonblock is not implemented
|
||||
def write(buffer)
|
||||
siz = @io.send(buffer.to_s, 0, @host, @port)
|
||||
log { "WRITE: #{siz} bytes..." }
|
||||
buffer.shift!(siz)
|
||||
siz
|
||||
end if RUBY_ENGINE == "jruby"
|
||||
# :nocov:
|
||||
def read(size, buffer)
|
||||
ret = @io.recvfrom_nonblock(size, 0, buffer, exception: false)
|
||||
return 0 if ret == :wait_readable
|
||||
return if ret.nil?
|
||||
|
||||
log { "READ: #{buffer.bytesize} bytes..." }
|
||||
|
||||
buffer.bytesize
|
||||
rescue IOError
|
||||
end
|
||||
end
|
||||
end
|
||||
|
Some files were not shown because too many files have changed in this diff Show More
Loading…
x
Reference in New Issue
Block a user