mirror of
https://github.com/HoneyryderChuck/httpx.git
synced 2025-08-17 00:02:29 -04:00
Compare commits
749 Commits
Author | SHA1 | Date | |
---|---|---|---|
|
0261449b39 | ||
|
84c8126cd9 | ||
|
ff3f1f726f | ||
|
b8b710470c | ||
|
0f3e3ab068 | ||
|
095fbb3463 | ||
|
7790589c1f | ||
|
dd8608ec3b | ||
|
8205b351aa | ||
|
5992628926 | ||
|
39370b5883 | ||
|
1801a7815c | ||
|
0953e4f91a | ||
|
a78a3f0b7c | ||
|
aeb8fe5382 | ||
|
03170b6c89 | ||
|
814d607a45 | ||
|
5502332e7e | ||
|
f3b68950d6 | ||
|
2c4638784f | ||
|
b0016525e3 | ||
|
49555694fe | ||
|
93e5efa32e | ||
|
8b3c1da507 | ||
|
d64f247e11 | ||
|
f64c3ab599 | ||
|
af03ddba3b | ||
|
7012ca1f27 | ||
|
d405f8905f | ||
|
3ff10f142a | ||
|
51ce9d10a4 | ||
|
6bde11b09c | ||
|
0c2808fa25 | ||
|
cb78091e03 | ||
|
6fa69ba475 | ||
|
4a78e78d32 | ||
|
0e393987d0 | ||
|
12483fa7c8 | ||
|
d955ba616a | ||
|
804d5b878b | ||
|
75702165fd | ||
|
120bbad126 | ||
|
35446e9fe1 | ||
|
3ed41ef2bf | ||
|
9ffbceff87 | ||
|
757c9ae32c | ||
|
5d88ccedf9 | ||
|
85808b6569 | ||
|
d5483a4264 | ||
|
540430c00e | ||
|
3a417a4623 | ||
|
35c18a1b9b | ||
|
cf19fe5221 | ||
|
f9c2fc469a | ||
|
9b513faab4 | ||
|
0be39faefc | ||
|
08c5f394ba | ||
|
55411178ce | ||
|
a5c83e84d3 | ||
|
d7e15c4441 | ||
|
012255e49c | ||
|
d20506acb8 | ||
|
28399f1b88 | ||
|
953101afde | ||
|
055ee47b83 | ||
|
dbad275c65 | ||
|
fe69231e6c | ||
|
4c61df768a | ||
|
aec150b030 | ||
|
29a43c4bc3 | ||
|
34c2fee60c | ||
|
c62966361e | ||
|
2b87a3d5e5 | ||
|
3dd767cdc2 | ||
|
a9255c52aa | ||
|
32031e8a03 | ||
|
f328646c08 | ||
|
0484dd76c8 | ||
|
17c1090b7a | ||
|
87f4ce4b03 | ||
|
1ec7442322 | ||
|
723959cf92 | ||
|
10b4b9c7c0 | ||
|
1b39bcd3a3 | ||
|
44a2041ea8 | ||
|
b63f9f1ae2 | ||
|
467dd5e7e5 | ||
|
c626fae3da | ||
|
7f6b78540b | ||
|
b120ce4657 | ||
|
32c36bb4ee | ||
|
cc0626429b | ||
|
a0e2c1258a | ||
|
6bd3c15384 | ||
|
0d23c464f5 | ||
|
a75b89db74 | ||
|
7173616154 | ||
|
69f9557780 | ||
|
339af65cc1 | ||
|
3df6edbcfc | ||
|
5c2f8ab0b1 | ||
|
0c335fd03d | ||
|
bf19cde364 | ||
|
7e0ddb7ab2 | ||
|
4cd3136922 | ||
|
642122a0f5 | ||
|
42d42a92b4 | ||
|
fb6a509d98 | ||
|
3c22f36a6c | ||
|
51b2693842 | ||
|
1ab5855961 | ||
|
f82816feb3 | ||
|
ee229aa74c | ||
|
793e900ce8 | ||
|
1241586eb4 | ||
|
cbf454ae13 | ||
|
180d3b0e59 | ||
|
84db0072fb | ||
|
c48f6c8e8f | ||
|
870b8aed69 | ||
|
56b8e9647a | ||
|
1f59688791 | ||
|
e63c75a86c | ||
|
3eaf58e258 | ||
|
9ff62404a6 | ||
|
4d694f9517 | ||
|
22952f6a4a | ||
|
7660e4c555 | ||
|
a9cc787210 | ||
|
970830a025 | ||
|
7a3d38aeee | ||
|
54bb617902 | ||
|
cf08ae99f5 | ||
|
c8ce4cd8c8 | ||
|
6658a2ce24 | ||
|
7169f6aaaf | ||
|
ffc4824762 | ||
|
8e050e846f | ||
|
e40d3c9552 | ||
|
ba60ef79a7 | ||
|
ca49c9ef41 | ||
|
7010484b2a | ||
|
06eba512a6 | ||
|
f9ed0ab602 | ||
|
5632e522c2 | ||
|
cfdb719a8e | ||
|
b2a1b9cded | ||
|
5917c63a70 | ||
|
6af8ad0132 | ||
|
35ac13406d | ||
|
d00c46d363 | ||
|
a437de36e8 | ||
|
797fd28142 | ||
|
6d4266d4a4 | ||
|
eb8c18ccda | ||
|
4653b48602 | ||
|
8287a55b95 | ||
|
9faed647bf | ||
|
5268f60021 | ||
|
132e4b4ebe | ||
|
b502247284 | ||
|
e5d852573a | ||
|
d17ac7c8c3 | ||
|
b1c08f16d5 | ||
|
f618c6447a | ||
|
4454b1bbcc | ||
|
88f8f5d287 | ||
|
999b6a603a | ||
|
f8d05b0e82 | ||
|
a7f2271652 | ||
|
55f1f6800b | ||
|
3e736b1f05 | ||
|
f5497eec4f | ||
|
08015e0851 | ||
|
a0f472ba02 | ||
|
8bee6956eb | ||
|
97cbdf117d | ||
|
383f2a01d8 | ||
|
8a473b4ccd | ||
|
b6c8f70aaf | ||
|
f5aa6142a0 | ||
|
56d82e6370 | ||
|
41e95d5b86 | ||
|
46a39f2b0d | ||
|
8009fc11b7 | ||
|
398c08eb4d | ||
|
723fda297f | ||
|
35ee625827 | ||
|
210abfb2f5 | ||
|
53bf6824f8 | ||
|
cb8a97c837 | ||
|
0063ab6093 | ||
|
7811cbf3a7 | ||
|
7c21c33999 | ||
|
e45edcbfce | ||
|
7e705dc57e | ||
|
dae4364664 | ||
|
8dfd1edf85 | ||
|
d2fd20b3ec | ||
|
28fdbb1a3d | ||
|
23857f196a | ||
|
bf1ef451f2 | ||
|
d68e98be5a | ||
|
fd57d72a22 | ||
|
a74bd9f397 | ||
|
f76be1983b | ||
|
86cb30926f | ||
|
ed8fafd11d | ||
|
5333def40d | ||
|
ab78e3189e | ||
|
b26313d18e | ||
|
2af9bc0626 | ||
|
f573c1c50b | ||
|
2d999063fc | ||
|
1a44b8ea48 | ||
|
8eeafaa008 | ||
|
0ec8e80f0f | ||
|
f2bca9fcbf | ||
|
6ca17c47a0 | ||
|
016ed04f61 | ||
|
5b59011a89 | ||
|
7548347421 | ||
|
43c4cf500e | ||
|
aecb6f5ddd | ||
|
6ac3d346b9 | ||
|
946f93471c | ||
|
f68ff945c1 | ||
|
9fa9dd5350 | ||
|
1c0cb0185c | ||
|
2a1338ca5b | ||
|
cb847f25ad | ||
|
44311d08a5 | ||
|
17003840d3 | ||
|
a4bebf91bc | ||
|
691215ca6f | ||
|
999d86ae3e | ||
|
a4c2fb92e7 | ||
|
66d3a9e00d | ||
|
e418783ea9 | ||
|
36ddd84c85 | ||
|
f7a5b3ae90 | ||
|
3afe853517 | ||
|
853ebd5e36 | ||
|
f820b8cfcb | ||
|
062fd5a7f4 | ||
|
70bf874f4a | ||
|
bf9d847516 | ||
|
d45cae096b | ||
|
717b932e01 | ||
|
da11cb320c | ||
|
4bf07e75ac | ||
|
3b52ef3c09 | ||
|
ac809d18cc | ||
|
85019e5493 | ||
|
95c1a264ee | ||
|
32313ef02e | ||
|
ed9df06b38 | ||
|
b9086f37cf | ||
|
d3ed551203 | ||
|
1b0e9b49ef | ||
|
8797434ae7 | ||
|
25c87f3b96 | ||
|
26c63a43e0 | ||
|
3217fc03f8 | ||
|
b7b63c4460 | ||
|
7d8388af28 | ||
|
a53d7f1e01 | ||
|
c019f1b3a7 | ||
|
594f6056da | ||
|
113e9fd4ef | ||
|
e32d226151 | ||
|
a3246e506d | ||
|
ccb22827a2 | ||
|
94e154261b | ||
|
c23561f80c | ||
|
681650e9a6 | ||
|
31f0543da2 | ||
|
5e3daadf9c | ||
|
6b9a737756 | ||
|
1f9dcfb353 | ||
|
d77e97d31d | ||
|
69e7e533de | ||
|
840bb55ab3 | ||
|
5223d51475 | ||
|
8ffa04d4a8 | ||
|
4a351bc095 | ||
|
11d197ff24 | ||
|
12fbca468b | ||
|
79d5d16c1b | ||
|
e204bc6df0 | ||
|
6783b378d3 | ||
|
9d7681cb46 | ||
|
c6139e40db | ||
|
a4b95db01c | ||
|
91b9e13cd0 | ||
|
8d5def5f02 | ||
|
3e504fb511 | ||
|
492097d551 | ||
|
02ed2ae87d | ||
|
599b6865da | ||
|
7c0e776044 | ||
|
7ea0b32161 | ||
|
72b0267598 | ||
|
4a966d4cb8 | ||
|
70f1ffc65d | ||
|
fda0ea8b0e | ||
|
2443ded12b | ||
|
1db2d00d07 | ||
|
40b4884d87 | ||
|
823e7446f4 | ||
|
83b4c73b92 | ||
|
9844a55205 | ||
|
6e1bc89256 | ||
|
8ec0765bd7 | ||
|
6b893872fb | ||
|
ca8346b193 | ||
|
7115f0cdce | ||
|
74fc7bf77d | ||
|
002459b9b6 | ||
|
1ee39870da | ||
|
b8db28abd2 | ||
|
fafe7c140c | ||
|
047dc30487 | ||
|
7278647688 | ||
|
09fbb32b9a | ||
|
4e7ad8fd23 | ||
|
9a3ddfd0e4 | ||
|
e250ea5118 | ||
|
2689adc390 | ||
|
ba31204227 | ||
|
581b749e89 | ||
|
7562346357 | ||
|
e7aa53365e | ||
|
0b671fa2f9 | ||
|
8b2ee0b466 | ||
|
b686119a6f | ||
|
dcbd2f81e3 | ||
|
0fffa98e83 | ||
|
08ba389fd6 | ||
|
587271ff77 | ||
|
7062b3c49b | ||
|
b1cec40743 | ||
|
2d6fde2e5d | ||
|
3a3188efff | ||
|
7928624639 | ||
|
d61df6d84f | ||
|
c388d8ec9a | ||
|
ad02ad5327 | ||
|
af6ce5dca4 | ||
|
68dd8e223f | ||
|
d9fbd5194e | ||
|
0ba7112a9f | ||
|
0c262bc19d | ||
|
b03a46d25e | ||
|
69f58bc358 | ||
|
41c1aace80 | ||
|
423f05173c | ||
|
d82008ddcf | ||
|
19f46574cb | ||
|
713887cf08 | ||
|
a3cfcc71ec | ||
|
0f431500c0 | ||
|
9d03dab83d | ||
|
7e7c06597a | ||
|
83157412e7 | ||
|
461dac06d5 | ||
|
d60cfb7e44 | ||
|
20c8dde9ef | ||
|
594640c10c | ||
|
1f7a251925 | ||
|
7ab251f755 | ||
|
3d9779cc63 | ||
|
b234465219 | ||
|
51a8b508ac | ||
|
b86529655f | ||
|
4434daa5ea | ||
|
dec17e8d85 | ||
|
c6a63b55a9 | ||
|
be5a91ce2e | ||
|
c4445074ad | ||
|
b1146b9f55 | ||
|
78d67cd364 | ||
|
2fbec7ab6a | ||
|
fbfd17351f | ||
|
3c914f741d | ||
|
ad14df6a7a | ||
|
cf43257006 | ||
|
06076fc908 | ||
|
d5c9a518d8 | ||
|
d5eee7f2d1 | ||
|
ab51dcbbc1 | ||
|
8982dc0fe4 | ||
|
8e3d5f4094 | ||
|
77006fd0c9 | ||
|
bab19efcfe | ||
|
f1bccaae2e | ||
|
b5b59b10d7 | ||
|
91fba0a971 | ||
|
a839c2d6f1 | ||
|
3cf07839cc | ||
|
112dc10dba | ||
|
b086c237ee | ||
|
ffd20d73c8 | ||
|
861f7a0d34 | ||
|
7a7ad75ef7 | ||
|
2f513526d3 | ||
|
566b804b65 | ||
|
5a08853e7a | ||
|
dd0473e7cf | ||
|
067e32923c | ||
|
f3a241fcc1 | ||
|
4ad2c50143 | ||
|
194b5ae3dc | ||
|
0633daaf8e | ||
|
7dd06c5e87 | ||
|
8d30ce1588 | ||
|
9187692615 | ||
|
99621de555 | ||
|
e9d5b75298 | ||
|
994049da8c | ||
|
84d01b5358 | ||
|
ff914d380d | ||
|
9d04c6747c | ||
|
8e0a5665f0 | ||
|
dc7b41e7da | ||
|
b1fc1907ab | ||
|
c1a25d34d3 | ||
|
5a9113e445 | ||
|
cc4b8d4c9e | ||
|
890d4b8d50 | ||
|
9afc138e25 | ||
|
76737b3b99 | ||
|
5b570c21fb | ||
|
31ec7a2ecf | ||
|
2e32aa6707 | ||
|
5feba82ffb | ||
|
1be8fdd1f0 | ||
|
4848e5be14 | ||
|
c4b6df2637 | ||
|
874bb6f1cf | ||
|
7842d075ad | ||
|
1bd7831c85 | ||
|
5816debef5 | ||
|
97c44a37ae | ||
|
3c060a4e8c | ||
|
fb7302c361 | ||
|
4670c94241 | ||
|
864a6cd2ae | ||
|
815f3bd638 | ||
|
c2e4e5030b | ||
|
086e6bc970 | ||
|
58fb2c2191 | ||
|
8268b12a77 | ||
|
290db4847a | ||
|
1e146e711c | ||
|
f88322cdff | ||
|
7a96cbe228 | ||
|
7143245c37 | ||
|
885bf947b5 | ||
|
e29a91e7f7 | ||
|
7878595460 | ||
|
7a1cdd2c3d | ||
|
9bab254710 | ||
|
b32f936365 | ||
|
4809e1d0d0 | ||
|
529daa3c6f | ||
|
37314ec930 | ||
|
b38d8805a6 | ||
|
b2cfe285b4 | ||
|
36cab0c1af | ||
|
793840f762 | ||
|
a784941932 | ||
|
ae14d6a9fe | ||
|
f1bd41fada | ||
|
2760e588ac | ||
|
c60ad23618 | ||
|
9b3691b2bc | ||
|
1c64a31ac8 | ||
|
290da6f1fe | ||
|
ea46cb08a4 | ||
|
8ec98064a1 | ||
|
b8f0d0fbcd | ||
|
911a27b20a | ||
|
a586dd0d44 | ||
|
79756e4ac4 | ||
|
354bba3179 | ||
|
b0dfe68ebe | ||
|
fa513a9ac9 | ||
|
716e98af5b | ||
|
6437b4b5fb | ||
|
ce5c2c2f21 | ||
|
4eb1ccb532 | ||
|
b0e1e2e837 | ||
|
ee66b7e5cc | ||
|
b82e57c281 | ||
|
aa4f267a29 | ||
|
ef3ae2a38e | ||
|
78c29804a1 | ||
|
cce68bcd98 | ||
|
a27f735eb8 | ||
|
abe4997d44 | ||
|
1c7881eda3 | ||
|
5be39fe60e | ||
|
02c1917004 | ||
|
20164c647b | ||
|
8290afc737 | ||
|
95681aa86e | ||
|
c7431f1b19 | ||
|
6106f5cd43 | ||
|
b6611ec321 | ||
|
9636e58bec | ||
|
ca602ed936 | ||
|
fb6b5d0887 | ||
|
5faf8fa050 | ||
|
ffb24f71c6 | ||
|
a9ecbec6f1 | ||
|
5f8bc74f0b | ||
|
8b80f15ee7 | ||
|
0d24204b83 | ||
|
ac21f563de | ||
|
55c71e2b80 | ||
|
c150bd1341 | ||
|
ce7eb0b91a | ||
|
b24ed83a8b | ||
|
0d9a8d76fc | ||
|
187bdbc20f | ||
|
bb3183a0b8 | ||
|
100394b29c | ||
|
7345c19d5d | ||
|
801e0aa907 | ||
|
0910c2749b | ||
|
300cb83ab8 | ||
|
ca6fa4605b | ||
|
1bebb179ce | ||
|
8632da0a22 | ||
|
a864db0182 | ||
|
fcf41b990e | ||
|
4c01dd0b9b | ||
|
bea2c4d5c6 | ||
|
f442e81414 | ||
|
18f2bea9b0 | ||
|
f6bee9e6e4 | ||
|
d9a52ec795 | ||
|
4b074a6d8a | ||
|
791a94322f | ||
|
3cd063b153 | ||
|
9a64fadb56 | ||
|
e178bc9f20 | ||
|
4ef2d9c3ce | ||
|
39d0356340 | ||
|
1e05cdbe62 | ||
|
e27301013d | ||
|
f477871bfa | ||
|
fac8a62037 | ||
|
ec7b845c67 | ||
|
ce07b2ff50 | ||
|
c2bd6c8540 | ||
|
1aa2b08db7 | ||
|
14c94e6d14 | ||
|
8f54afe7b3 | ||
|
9465a077b1 | ||
|
168e530dab | ||
|
159fa74a3f | ||
|
5bb74ec465 | ||
|
949bcdbc2a | ||
|
ceaa994eba | ||
|
489c7280ec | ||
|
c5fc8aeb19 | ||
|
d5e469d6c6 | ||
|
bc99188c80 | ||
|
6176afbf2c | ||
|
1cc9d4f04b | ||
|
62217f6a76 | ||
|
e4facd9b7a | ||
|
ba8b4a4bc9 | ||
|
82a0c8cf11 | ||
|
bdc9478aa8 | ||
|
8bd4dc1fbd | ||
|
dbc7536724 | ||
|
062109a5bc | ||
|
09a3df54c4 | ||
|
554b5a663c | ||
|
0cb169afab | ||
|
61ce888e47 | ||
|
e8f1657821 | ||
|
f089d57d7d | ||
|
2de2b026be | ||
|
9d3dd72b80 | ||
|
c1da8d29fc | ||
|
1fa9846f56 | ||
|
ba6fc820b7 | ||
|
16ecdd2b57 | ||
|
2896134f67 | ||
|
97a34cfcbc | ||
|
ca75148e86 | ||
|
834873638d | ||
|
4618845a97 | ||
|
5db6e28534 | ||
|
fb86669872 | ||
|
013f24ba80 | ||
|
96eae65da1 | ||
|
a3ac1993e9 | ||
|
5ca0dcdf8d | ||
|
8a66233148 | ||
|
377abc84c7 | ||
|
ede4ccdf30 | ||
|
7e06957cc2 | ||
|
ad7da6edfa | ||
|
62868f64b3 | ||
|
09be632cd9 | ||
|
803718108e | ||
|
f8020b9c10 | ||
|
11210e3a23 | ||
|
c48969996e | ||
|
c7ccc9eaf6 | ||
|
e4869e1a4b | ||
|
dd84195db6 | ||
|
d856ae81e0 | ||
|
1494ba872a | ||
|
685e6e4c7f | ||
|
085cec0c8e | ||
|
288ac05508 | ||
|
c777aa779e | ||
|
d55bfec80c | ||
|
e88956a16f | ||
|
aab30279ac | ||
|
2f9247abfb | ||
|
0d58408c58 | ||
|
3f73d2e3ce | ||
|
896914e189 | ||
|
4f587c5508 | ||
|
a9cb0a69a2 | ||
|
6baca35422 | ||
|
b4c5e75705 | ||
|
d859c3a1eb | ||
|
b7f5a3dfad | ||
|
8cd1aac99c | ||
|
f0f6b5f7e2 | ||
|
acbc22e79f | ||
|
134bef69e0 | ||
|
477c3601fc | ||
|
f0dabb9a83 | ||
|
7407adefb9 | ||
|
91bfa84c12 | ||
|
7473af6d9d | ||
|
4292644870 | ||
|
2e11ee5b32 | ||
|
0c8398b3db | ||
|
52e738b586 | ||
|
c0afc295a5 | ||
|
ed7c56f12c | ||
|
be7075beb8 | ||
|
f9a6aab475 | ||
|
cc441b33d8 | ||
|
b8d97cc414 | ||
|
eab39a5f99 | ||
|
5ffab53364 | ||
|
b24421e18c | ||
|
487a747544 | ||
|
ef2f0cc998 | ||
|
f03d9bb648 | ||
|
0f234c2d7b | ||
|
f4171e3cf5 | ||
|
9c831205e0 | ||
|
a429a6af22 | ||
|
73484df323 | ||
|
819e11f680 | ||
|
9b2c8e773d | ||
|
607fa42672 | ||
|
0ce42ba694 | ||
|
463bf15ba8 | ||
|
835a851dd6 | ||
|
1b9422e828 | ||
|
2ef2b5f797 | ||
|
7be554dc62 | ||
|
b7a850f6da | ||
|
b7d421fdcd | ||
|
93b4ac8542 | ||
|
892dd6d37f | ||
|
6ae05006c6 | ||
|
f0167925ec | ||
|
afead02c46 | ||
|
baab52f440 | ||
|
1c04bf7cdb | ||
|
4b058cc837 | ||
|
5bc2949a49 | ||
|
1a2db03c26 | ||
|
17a26be1a9 | ||
|
3ec44fd56a | ||
|
ee6c5b231f | ||
|
255fc98d44 | ||
|
4f0b41a791 | ||
|
e4338979a6 | ||
|
85f0ac8ed3 | ||
|
e25ac201d2 | ||
|
38b871aa8e | ||
|
0b18bb63e8 | ||
|
afbde420a7 | ||
|
244563720a | ||
|
886c091901 | ||
|
11942b2c74 | ||
|
b2848ea718 | ||
|
b9ee892b20 | ||
|
af457255ca | ||
|
0397d6d814 | ||
|
4d61ba1cc2 | ||
|
23fe515eac | ||
|
75bf8de36a | ||
|
d24cf98785 | ||
|
896253bcbc | ||
|
32188352a5 | ||
|
b9b2715b10 | ||
|
7c1d7083ab | ||
|
bed0d03b9c | ||
|
0555132740 | ||
|
9342f983d5 | ||
|
52082359f0 | ||
|
59cc0037fc | ||
|
eb0291ed87 | ||
|
03059786b6 | ||
|
1475f9a2ec | ||
|
8daf49a505 | ||
|
73468e5424 | ||
|
46ce583de3 | ||
|
f066bc534f | ||
|
709101cf0f | ||
|
0d969a7a3c | ||
|
0f988e3e9f | ||
|
9bcae578d7 | ||
|
45c8dcb36b | ||
|
5655c602c7 | ||
|
af38476a14 | ||
|
2dda42cf9f | ||
|
e4b9557c8e | ||
|
6bdf827c65 | ||
|
ddffe33bcd | ||
|
f193e164ff | ||
|
af2da64c62 | ||
|
1433f35186 | ||
|
507339907c | ||
|
1fb4046d52 | ||
|
c71d4048af | ||
|
877e561a45 | ||
|
1765ddf0f8 | ||
|
5ad314607d | ||
|
b154d97438 | ||
|
07624e529f | ||
|
a772ab42d0 | ||
|
b13b0f86eb |
2
.gitignore
vendored
2
.gitignore
vendored
@ -16,3 +16,5 @@ public
|
|||||||
build
|
build
|
||||||
.sass-cache
|
.sass-cache
|
||||||
wiki
|
wiki
|
||||||
|
.gem_rbs_collection/
|
||||||
|
rbs_collection.lock.yaml
|
@ -8,7 +8,7 @@ image:
|
|||||||
name: docker/compose:latest
|
name: docker/compose:latest
|
||||||
|
|
||||||
variables:
|
variables:
|
||||||
# this variable enables caching withing docker-in-docker
|
# this variable enables caching within docker-in-docker
|
||||||
# https://docs.gitlab.com/ee/ci/docker/using_docker_build.html#use-docker-in-docker-workflow-with-docker-executor
|
# https://docs.gitlab.com/ee/ci/docker/using_docker_build.html#use-docker-in-docker-workflow-with-docker-executor
|
||||||
MOUNT_POINT: /builds/$CI_PROJECT_PATH/vendor
|
MOUNT_POINT: /builds/$CI_PROJECT_PATH/vendor
|
||||||
# bundler-specific
|
# bundler-specific
|
||||||
@ -38,33 +38,40 @@ cache:
|
|||||||
paths:
|
paths:
|
||||||
- vendor
|
- vendor
|
||||||
|
|
||||||
|
lint rubocop code:
|
||||||
|
image: "ruby:3.4"
|
||||||
|
variables:
|
||||||
|
BUNDLE_WITHOUT: test:coverage:assorted
|
||||||
|
before_script:
|
||||||
|
- bundle install
|
||||||
|
script:
|
||||||
|
- bundle exec rake rubocop
|
||||||
|
lint rubocop wiki:
|
||||||
|
image: "ruby:3.4"
|
||||||
|
rules:
|
||||||
|
- if: $CI_PIPELINE_SOURCE == "schedule"
|
||||||
|
variables:
|
||||||
|
BUNDLE_ONLY: lint
|
||||||
|
before_script:
|
||||||
|
- git clone https://gitlab.com/os85/httpx.wiki.git
|
||||||
|
- bundle install
|
||||||
|
- |
|
||||||
|
cat > .rubocop-wiki.yml << FILE
|
||||||
|
require:
|
||||||
|
- rubocop-md
|
||||||
|
|
||||||
|
AllCops:
|
||||||
|
TargetRubyVersion: 3.4
|
||||||
|
DisabledByDefault: true
|
||||||
|
FILE
|
||||||
|
script:
|
||||||
|
- bundle exec rubocop httpx.wiki --config .rubocop-wiki.yml
|
||||||
|
|
||||||
test jruby:
|
test jruby:
|
||||||
<<: *test_settings
|
<<: *test_settings
|
||||||
script:
|
script:
|
||||||
./spec.sh jruby 9.0.0.0
|
./spec.sh jruby 9.0.0.0
|
||||||
allow_failure: true
|
allow_failure: true
|
||||||
test ruby 2/3:
|
|
||||||
<<: *test_settings
|
|
||||||
script:
|
|
||||||
./spec.sh ruby 2.3
|
|
||||||
test ruby 2/4:
|
|
||||||
<<: *test_settings
|
|
||||||
only:
|
|
||||||
- master
|
|
||||||
script:
|
|
||||||
./spec.sh ruby 2.4
|
|
||||||
test ruby 2/5:
|
|
||||||
<<: *test_settings
|
|
||||||
only:
|
|
||||||
- master
|
|
||||||
script:
|
|
||||||
./spec.sh ruby 2.5
|
|
||||||
test ruby 2/6:
|
|
||||||
<<: *test_settings
|
|
||||||
only:
|
|
||||||
- master
|
|
||||||
script:
|
|
||||||
./spec.sh ruby 2.6
|
|
||||||
test ruby 2/7:
|
test ruby 2/7:
|
||||||
<<: *test_settings
|
<<: *test_settings
|
||||||
script:
|
script:
|
||||||
@ -83,20 +90,28 @@ test ruby 3/1:
|
|||||||
./spec.sh ruby 3.1
|
./spec.sh ruby 3.1
|
||||||
test ruby 3/2:
|
test ruby 3/2:
|
||||||
<<: *test_settings
|
<<: *test_settings
|
||||||
<<: *yjit_matrix
|
|
||||||
script:
|
script:
|
||||||
./spec.sh ruby 3.2
|
./spec.sh ruby 3.2
|
||||||
|
test ruby 3/3:
|
||||||
|
<<: *test_settings
|
||||||
|
script:
|
||||||
|
./spec.sh ruby 3.3
|
||||||
|
test ruby 3/4:
|
||||||
|
<<: *test_settings
|
||||||
|
<<: *yjit_matrix
|
||||||
|
script:
|
||||||
|
./spec.sh ruby 3.4
|
||||||
test truffleruby:
|
test truffleruby:
|
||||||
<<: *test_settings
|
<<: *test_settings
|
||||||
script:
|
script:
|
||||||
./spec.sh truffleruby latest
|
./spec.sh truffleruby latest
|
||||||
allow_failure: true
|
allow_failure: true
|
||||||
regression tests:
|
regression tests:
|
||||||
image: "ruby:3.2"
|
image: "ruby:3.4"
|
||||||
variables:
|
variables:
|
||||||
BUNDLE_WITHOUT: assorted
|
BUNDLE_WITHOUT: lint:assorted
|
||||||
CI: 1
|
CI: 1
|
||||||
COVERAGE_KEY: "$RUBY_ENGINE-$RUBY_VERSION-regression-tests"
|
COVERAGE_KEY: "ruby-3.4-regression-tests"
|
||||||
artifacts:
|
artifacts:
|
||||||
paths:
|
paths:
|
||||||
- coverage/
|
- coverage/
|
||||||
@ -108,12 +123,12 @@ regression tests:
|
|||||||
- bundle exec rake regression_tests
|
- bundle exec rake regression_tests
|
||||||
|
|
||||||
coverage:
|
coverage:
|
||||||
coverage: '/\(\d+.\d+\%\) covered/'
|
coverage: '/Coverage: \d+.\d+\%/'
|
||||||
stage: prepare
|
stage: prepare
|
||||||
variables:
|
variables:
|
||||||
BUNDLE_WITHOUT: test:assorted
|
BUNDLE_WITHOUT: lint:test:assorted
|
||||||
|
|
||||||
image: "ruby:3.2"
|
image: "ruby:3.4"
|
||||||
script:
|
script:
|
||||||
- gem install simplecov --no-doc
|
- gem install simplecov --no-doc
|
||||||
# this is a workaround, because simplecov doesn't support relative paths.
|
# this is a workaround, because simplecov doesn't support relative paths.
|
||||||
@ -135,7 +150,7 @@ pages:
|
|||||||
stage: deploy
|
stage: deploy
|
||||||
needs:
|
needs:
|
||||||
- coverage
|
- coverage
|
||||||
image: "ruby:3.2"
|
image: "ruby:3.4"
|
||||||
before_script:
|
before_script:
|
||||||
- gem install hanna-nouveau
|
- gem install hanna-nouveau
|
||||||
script:
|
script:
|
||||||
|
18
.rubocop.yml
18
.rubocop.yml
@ -1,6 +1,8 @@
|
|||||||
inherit_from: .rubocop_todo.yml
|
inherit_from: .rubocop_todo.yml
|
||||||
|
|
||||||
require: rubocop-performance
|
require:
|
||||||
|
- rubocop-performance
|
||||||
|
- rubocop-md
|
||||||
|
|
||||||
AllCops:
|
AllCops:
|
||||||
NewCops: enable
|
NewCops: enable
|
||||||
@ -23,9 +25,10 @@ AllCops:
|
|||||||
- 'vendor/**/*'
|
- 'vendor/**/*'
|
||||||
- 'www/**/*'
|
- 'www/**/*'
|
||||||
- 'lib/httpx/extensions.rb'
|
- 'lib/httpx/extensions.rb'
|
||||||
- 'lib/httpx/punycode.rb'
|
|
||||||
# Do not lint ffi block, for openssl parity
|
# Do not lint ffi block, for openssl parity
|
||||||
- 'test/extensions/response_pattern_match.rb'
|
- 'test/extensions/response_pattern_match.rb'
|
||||||
|
# Old release notes
|
||||||
|
- !ruby/regexp /doc/release_notes/0_.*.md/
|
||||||
|
|
||||||
Metrics/ClassLength:
|
Metrics/ClassLength:
|
||||||
Enabled: false
|
Enabled: false
|
||||||
@ -89,6 +92,10 @@ Style/GlobalVars:
|
|||||||
Exclude:
|
Exclude:
|
||||||
- lib/httpx/plugins/internal_telemetry.rb
|
- lib/httpx/plugins/internal_telemetry.rb
|
||||||
|
|
||||||
|
Style/CommentedKeyword:
|
||||||
|
Exclude:
|
||||||
|
- integration_tests/faraday_datadog_test.rb
|
||||||
|
|
||||||
Style/RedundantBegin:
|
Style/RedundantBegin:
|
||||||
Enabled: false
|
Enabled: false
|
||||||
|
|
||||||
@ -118,6 +125,9 @@ Style/HashSyntax:
|
|||||||
Style/AndOr:
|
Style/AndOr:
|
||||||
Enabled: False
|
Enabled: False
|
||||||
|
|
||||||
|
Style/ArgumentsForwarding:
|
||||||
|
Enabled: False
|
||||||
|
|
||||||
Naming/MethodParameterName:
|
Naming/MethodParameterName:
|
||||||
Enabled: false
|
Enabled: false
|
||||||
|
|
||||||
@ -170,3 +180,7 @@ Performance/StringIdentifierArgument:
|
|||||||
|
|
||||||
Style/Lambda:
|
Style/Lambda:
|
||||||
Enabled: false
|
Enabled: false
|
||||||
|
|
||||||
|
Style/TrivialAccessors:
|
||||||
|
Exclude:
|
||||||
|
- 'test/pool_test.rb'
|
@ -11,7 +11,7 @@ Metrics/ModuleLength:
|
|||||||
Max: 325
|
Max: 325
|
||||||
|
|
||||||
Metrics/BlockLength:
|
Metrics/BlockLength:
|
||||||
Max: 200
|
Max: 500
|
||||||
|
|
||||||
Metrics/BlockNesting:
|
Metrics/BlockNesting:
|
||||||
Enabled: False
|
Enabled: False
|
||||||
|
@ -6,5 +6,5 @@ SimpleCov.start do
|
|||||||
add_filter "/integration_tests/"
|
add_filter "/integration_tests/"
|
||||||
add_filter "/regression_tests/"
|
add_filter "/regression_tests/"
|
||||||
add_filter "/lib/httpx/plugins/internal_telemetry.rb"
|
add_filter "/lib/httpx/plugins/internal_telemetry.rb"
|
||||||
add_filter "/lib/httpx/punycode.rb"
|
add_filter "/lib/httpx/base64.rb"
|
||||||
end
|
end
|
||||||
|
@ -14,7 +14,7 @@ require "httpx"
|
|||||||
|
|
||||||
response = HTTPX.get("https://google.com/")
|
response = HTTPX.get("https://google.com/")
|
||||||
# Will print response.body
|
# Will print response.body
|
||||||
puts response.to_s
|
puts response
|
||||||
```
|
```
|
||||||
|
|
||||||
## Multiple HTTP Requests
|
## Multiple HTTP Requests
|
||||||
@ -24,7 +24,7 @@ require "httpx"
|
|||||||
|
|
||||||
uri = "https://google.com"
|
uri = "https://google.com"
|
||||||
|
|
||||||
responses = HTTPX.new(uri, uri)
|
responses = HTTPX.get(uri, uri)
|
||||||
|
|
||||||
# OR
|
# OR
|
||||||
HTTPX.wrap do |client|
|
HTTPX.wrap do |client|
|
||||||
@ -37,17 +37,17 @@ end
|
|||||||
## Headers
|
## Headers
|
||||||
|
|
||||||
```ruby
|
```ruby
|
||||||
HTTPX.headers("user-agent" => "My Ruby Script").get("https://google.com")
|
HTTPX.with(headers: { "user-agent" => "My Ruby Script" }).get("https://google.com")
|
||||||
```
|
```
|
||||||
|
|
||||||
## HTTP Methods
|
## HTTP Methods
|
||||||
|
|
||||||
```ruby
|
```ruby
|
||||||
HTTP.get("https://myapi.com/users/1")
|
HTTPX.get("https://myapi.com/users/1")
|
||||||
HTTP.post("https://myapi.com/users")
|
HTTPX.post("https://myapi.com/users")
|
||||||
HTTP.patch("https://myapi.com/users/1")
|
HTTPX.patch("https://myapi.com/users/1")
|
||||||
HTTP.put("https://myapi.com/users/1")
|
HTTPX.put("https://myapi.com/users/1")
|
||||||
HTTP.delete("https://myapi.com/users/1")
|
HTTPX.delete("https://myapi.com/users/1")
|
||||||
```
|
```
|
||||||
|
|
||||||
## HTTP Authentication
|
## HTTP Authentication
|
||||||
@ -56,13 +56,13 @@ HTTP.delete("https://myapi.com/users/1")
|
|||||||
require "httpx"
|
require "httpx"
|
||||||
|
|
||||||
# Basic Auth
|
# Basic Auth
|
||||||
response = HTTPX.plugin(:basic_authentication).basic_authentication("username", "password").get("https://google.com")
|
response = HTTPX.plugin(:basic_auth).basic_auth("username", "password").get("https://google.com")
|
||||||
|
|
||||||
# Digest Auth
|
# Digest Auth
|
||||||
response = HTTPX.plugin(:digest_authentication).digest_authentication("username", "password").get("https://google.com")
|
response = HTTPX.plugin(:digest_auth).digest_auth("username", "password").get("https://google.com")
|
||||||
|
|
||||||
# Token Auth
|
# Bearer Token Auth
|
||||||
response = HTTPX.plugin(:authentication).authentication("eyrandomtoken").get("https://google.com")
|
response = HTTPX.plugin(:auth).authorization("eyrandomtoken").get("https://google.com")
|
||||||
```
|
```
|
||||||
|
|
||||||
|
|
||||||
@ -74,11 +74,11 @@ require "httpx"
|
|||||||
response = HTTPX.get("https://google.com/")
|
response = HTTPX.get("https://google.com/")
|
||||||
response.status # => 301
|
response.status # => 301
|
||||||
response.headers["location"] #=> "https://www.google.com/"
|
response.headers["location"] #=> "https://www.google.com/"
|
||||||
response.body # => "<HTML><HEAD><meta http-equiv=\"content-type\" ....
|
response.headers["cache-control"] #=> public, max-age=2592000
|
||||||
response["cache-control"] # => public, max-age=2592000
|
response.body.to_s #=> "<HTML><HEAD><meta http-equiv=\"content-type\" ....
|
||||||
```
|
```
|
||||||
|
|
||||||
## POST form request
|
## POST `application/x-www-form-urlencoded` request
|
||||||
|
|
||||||
```ruby
|
```ruby
|
||||||
require "httpx"
|
require "httpx"
|
||||||
@ -88,17 +88,13 @@ uri = URI.parse("http://example.com/search")
|
|||||||
response = HTTPX.post(uri, form: { "q" => "My query", "per_page" => "50" })
|
response = HTTPX.post(uri, form: { "q" => "My query", "per_page" => "50" })
|
||||||
```
|
```
|
||||||
|
|
||||||
## File upload - input type="file" style
|
## File `multipart/form-data` upload - input type="file" style
|
||||||
|
|
||||||
```ruby
|
```ruby
|
||||||
require "httpx"
|
require "httpx"
|
||||||
|
|
||||||
# uses http_form_data API: https://github.com/httprb/form_data
|
file_to_upload = Pathname.new("/path/to/your/testfile.txt")
|
||||||
|
HTTPX.plugin(:multipart).post("http://something.com/uploads", form: { name: file_to_upload })
|
||||||
path = "/path/to/your/testfile.txt"
|
|
||||||
HTTPX.plugin(:multipart).post("http://something.com/uploads", form: {
|
|
||||||
name: HTTP::FormData::File.new(path)
|
|
||||||
})
|
|
||||||
```
|
```
|
||||||
|
|
||||||
## SSL/HTTPS request
|
## SSL/HTTPS request
|
||||||
@ -108,8 +104,7 @@ Update: There are some good reasons why this code example is bad. It introduces
|
|||||||
```ruby
|
```ruby
|
||||||
require "httpx"
|
require "httpx"
|
||||||
|
|
||||||
|
response = HTTPX.with(ssl: { verify_mode: OpenSSL::SSL::VERIFY_NONE }).get("https://secure.com/")
|
||||||
response = HTTPX.with(ssl: { verify_mode: OpenSSL::SSL::VERIFY_NONE }).get("https://secure.com/")
|
|
||||||
```
|
```
|
||||||
|
|
||||||
## SSL/HTTPS request with PEM certificate
|
## SSL/HTTPS request with PEM certificate
|
||||||
@ -118,11 +113,11 @@ response = HTTPX.with(ssl: { verify_mode: OpenSSL::SSL::VERIFY_NONE }).get("htt
|
|||||||
require "httpx"
|
require "httpx"
|
||||||
|
|
||||||
pem = File.read("/path/to/my.pem")
|
pem = File.read("/path/to/my.pem")
|
||||||
HTTPX.with(ssl: {
|
HTTPX.with_ssl(
|
||||||
cert: OpenSSL::X509::Certificate.new(pem),
|
cert: OpenSSL::X509::Certificate.new(pem),
|
||||||
key: OpenSSL::PKey::RSA.new(pem),
|
key: OpenSSL::PKey::RSA.new(pem),
|
||||||
verify_mode: OpenSSL::SSL::VERIFY_PEER
|
verify_mode: OpenSSL::SSL::VERIFY_PEER,
|
||||||
}).get("https://secure.com/")
|
).get("https://secure.com/")
|
||||||
```
|
```
|
||||||
|
|
||||||
## Cookies
|
## Cookies
|
||||||
@ -132,8 +127,7 @@ require "httpx"
|
|||||||
|
|
||||||
HTTPX.plugin(:cookies).wrap do |client|
|
HTTPX.plugin(:cookies).wrap do |client|
|
||||||
session_response = client.get("https://translate.google.com/")
|
session_response = client.get("https://translate.google.com/")
|
||||||
response_cookies = session_response.cookie_jar
|
response = client.get("https://translate.google.com/#auto|en|Pardon")
|
||||||
response = client.cookies(response_cookies).get("https://translate.google.com/#auto|en|Pardon")
|
|
||||||
puts response
|
puts response
|
||||||
end
|
end
|
||||||
```
|
```
|
||||||
@ -143,9 +137,14 @@ end
|
|||||||
```ruby
|
```ruby
|
||||||
require "httpx"
|
require "httpx"
|
||||||
|
|
||||||
response = HTTPX.plugin(:compression).get("https://www.google.com")
|
response = HTTPX.get("https://www.google.com")
|
||||||
puts response.headers["content-encoding"] #=> "gzip"
|
puts response.headers["content-encoding"] #=> "gzip"
|
||||||
|
puts response #=> uncompressed payload
|
||||||
|
|
||||||
|
# uncompressed request payload
|
||||||
|
HTTPX.post("https://myapi.com/users", body: super_large_text_payload)
|
||||||
|
# gzip-compressed request payload
|
||||||
|
HTTPX.post("https://myapi.com/users", headers: { "content-encoding" => %w[gzip] }, body: super_large_text_payload)
|
||||||
```
|
```
|
||||||
|
|
||||||
## Proxy
|
## Proxy
|
||||||
@ -171,7 +170,6 @@ HTTPX.get("https://google.com")
|
|||||||
require "httpx"
|
require "httpx"
|
||||||
HTTPX.with(resolver_class: :https).get("https://google.com")
|
HTTPX.with(resolver_class: :https).get("https://google.com")
|
||||||
|
|
||||||
|
|
||||||
# by default it uses cloudflare DoH server.
|
# by default it uses cloudflare DoH server.
|
||||||
# This example switches the resolver to Quad9's DoH server
|
# This example switches the resolver to Quad9's DoH server
|
||||||
|
|
||||||
@ -183,7 +181,9 @@ HTTPX.with(resolver_class: :https, resolver_options: {uri: "https://9.9.9.9/dns-
|
|||||||
```ruby
|
```ruby
|
||||||
require "httpx"
|
require "httpx"
|
||||||
|
|
||||||
HTTPX.plugin(:follow_redirects).with(follow_insecure_redirects: false, max_redirects: 4).get("https://www.google.com")
|
HTTPX.plugin(:follow_redirects)
|
||||||
|
.with(follow_insecure_redirects: false, max_redirects: 4)
|
||||||
|
.get("https://www.google.com")
|
||||||
```
|
```
|
||||||
|
|
||||||
## Timeouts
|
## Timeouts
|
||||||
@ -191,12 +191,12 @@ HTTPX.plugin(:follow_redirects).with(follow_insecure_redirects: false, max_redir
|
|||||||
```ruby
|
```ruby
|
||||||
require "httpx"
|
require "httpx"
|
||||||
|
|
||||||
HTTPX.with(timeout: {connect_timeout: 10, operation_timeout: 3}).get("https://google.com")
|
# full E2E request/response timeout, 10 sec to connect to peer
|
||||||
|
HTTPX.with(timeout: { connect_timeout: 10, request_timeout: 3 }).get("https://google.com")
|
||||||
```
|
```
|
||||||
|
|
||||||
## Retries
|
## Retries
|
||||||
|
|
||||||
|
|
||||||
```ruby
|
```ruby
|
||||||
require "httpx"
|
require "httpx"
|
||||||
HTTPX.plugin(:retries).max_retries(5).get("https://www.google.com")
|
HTTPX.plugin(:retries).max_retries(5).get("https://www.google.com")
|
||||||
@ -214,4 +214,3 @@ HTTPX.get("https://google.com") #=> udp://10.0.1.2:53...
|
|||||||
|
|
||||||
HTTPX.with(debug_level: 1, debug: $stderr).get("https://google.com")
|
HTTPX.with(debug_level: 1, debug: $stderr).get("https://google.com")
|
||||||
```
|
```
|
||||||
|
|
||||||
|
94
Gemfile
94
Gemfile
@ -5,56 +5,42 @@ ruby RUBY_VERSION
|
|||||||
source "https://rubygems.org"
|
source "https://rubygems.org"
|
||||||
gemspec
|
gemspec
|
||||||
|
|
||||||
if RUBY_VERSION < "2.2.0"
|
|
||||||
gem "rake", "~> 12.3"
|
|
||||||
else
|
|
||||||
gem "rake", "~> 13.0"
|
gem "rake", "~> 13.0"
|
||||||
end
|
|
||||||
|
|
||||||
group :test do
|
group :test do
|
||||||
|
if RUBY_VERSION >= "3.2.0"
|
||||||
|
gem "datadog", "~> 2.0"
|
||||||
|
else
|
||||||
|
gem "ddtrace"
|
||||||
|
end
|
||||||
gem "http-form_data", ">= 2.0.0"
|
gem "http-form_data", ">= 2.0.0"
|
||||||
gem "minitest"
|
gem "minitest"
|
||||||
gem "minitest-proveit"
|
gem "minitest-proveit"
|
||||||
gem "ruby-ntlm"
|
|
||||||
gem "sentry-ruby" if RUBY_VERSION >= "2.4.0"
|
|
||||||
gem "spy"
|
|
||||||
if RUBY_VERSION < "2.3.0"
|
|
||||||
gem "webmock", "< 3.15.0"
|
|
||||||
elsif RUBY_VERSION < "2.4.0"
|
|
||||||
gem "webmock", "< 3.17.0"
|
|
||||||
else
|
|
||||||
gem "webmock"
|
|
||||||
end
|
|
||||||
gem "nokogiri"
|
gem "nokogiri"
|
||||||
|
gem "ruby-ntlm"
|
||||||
|
gem "sentry-ruby"
|
||||||
|
gem "spy"
|
||||||
|
gem "webmock"
|
||||||
gem "websocket-driver"
|
gem "websocket-driver"
|
||||||
|
|
||||||
gem "net-ssh", "~> 4.2.0" if RUBY_VERSION < "2.2.0"
|
|
||||||
|
|
||||||
gem "ddtrace"
|
|
||||||
|
|
||||||
platform :mri do
|
platform :mri do
|
||||||
if RUBY_VERSION >= "2.3.0"
|
|
||||||
if RUBY_VERSION < "2.5.0"
|
|
||||||
gem "google-protobuf", "< 3.19.2"
|
|
||||||
elsif RUBY_VERSION < "2.7.0"
|
|
||||||
gem "google-protobuf", "< 3.22.0"
|
|
||||||
end
|
|
||||||
if RUBY_VERSION <= "2.6.0"
|
|
||||||
gem "grpc", "< 1.49.0"
|
|
||||||
else
|
|
||||||
gem "grpc"
|
gem "grpc"
|
||||||
end
|
|
||||||
gem "logging"
|
gem "logging"
|
||||||
gem "marcel", require: false
|
gem "marcel", require: false
|
||||||
gem "mimemagic", require: false
|
gem "mimemagic", require: false
|
||||||
gem "ruby-filemagic", require: false
|
gem "ruby-filemagic", require: false
|
||||||
end
|
|
||||||
|
|
||||||
if RUBY_VERSION >= "3.0.0"
|
if RUBY_VERSION >= "3.0.0"
|
||||||
gem "multi_json", require: false
|
gem "multi_json", require: false
|
||||||
gem "oj", require: false
|
gem "oj", require: false
|
||||||
|
gem "rbs"
|
||||||
gem "yajl-ruby", require: false
|
gem "yajl-ruby", require: false
|
||||||
end
|
end
|
||||||
|
|
||||||
|
if RUBY_VERSION >= "3.4.0"
|
||||||
|
# TODO: remove this once websocket-driver-ruby declares this as dependency
|
||||||
|
gem "base64"
|
||||||
|
end
|
||||||
end
|
end
|
||||||
|
|
||||||
platform :mri, :truffleruby do
|
platform :mri, :truffleruby do
|
||||||
@ -65,63 +51,39 @@ group :test do
|
|||||||
gem "net-ssh-gateway"
|
gem "net-ssh-gateway"
|
||||||
end
|
end
|
||||||
|
|
||||||
platform :mri_21 do
|
|
||||||
gem "rbnacl"
|
|
||||||
end
|
|
||||||
|
|
||||||
platform :mri_23 do
|
|
||||||
if RUBY_VERSION >= "2.3.0"
|
|
||||||
gem "openssl", "< 2.0.6" # force usage of openssl version we patch against
|
|
||||||
end
|
|
||||||
gem "msgpack", "<= 1.3.3"
|
|
||||||
end
|
|
||||||
|
|
||||||
platform :jruby do
|
platform :jruby do
|
||||||
gem "jruby-openssl" # , git: "https://github.com/jruby/jruby-openssl.git", branch: "master"
|
|
||||||
gem "ruby-debug"
|
gem "ruby-debug"
|
||||||
end
|
end
|
||||||
|
|
||||||
gem "aws-sdk-s3"
|
gem "aws-sdk-s3"
|
||||||
gem "faraday"
|
gem "faraday"
|
||||||
gem "idnx" if RUBY_VERSION >= "2.4.0"
|
gem "faraday-multipart"
|
||||||
gem "multipart-post", "< 2.2.0" if RUBY_VERSION < "2.3.0"
|
gem "idnx"
|
||||||
gem "oga"
|
gem "oga"
|
||||||
|
|
||||||
if RUBY_VERSION >= "3.0.0"
|
gem "webrick" if RUBY_VERSION >= "3.0.0"
|
||||||
gem "rbs"
|
# https://github.com/TwP/logging/issues/247
|
||||||
gem "rubocop"
|
gem "syslog" if RUBY_VERSION >= "3.3.0"
|
||||||
gem "rubocop-performance"
|
# https://github.com/ffi/ffi/issues/1103
|
||||||
gem "webrick"
|
# ruby 2.7 only, it seems
|
||||||
|
gem "ffi", "< 1.17.0" if Gem::VERSION < "3.3.22"
|
||||||
end
|
end
|
||||||
|
|
||||||
|
group :lint do
|
||||||
|
gem "rubocop", "~> 1.59.0"
|
||||||
|
gem "rubocop-md"
|
||||||
|
gem "rubocop-performance", "~> 1.19.0"
|
||||||
end
|
end
|
||||||
|
|
||||||
group :coverage do
|
group :coverage do
|
||||||
if RUBY_VERSION < "2.2.0"
|
|
||||||
gem "simplecov", "< 0.11.0"
|
|
||||||
elsif RUBY_VERSION < "2.3"
|
|
||||||
gem "simplecov", "< 0.11.0"
|
|
||||||
elsif RUBY_VERSION < "2.4"
|
|
||||||
gem "simplecov", "< 0.19.0"
|
|
||||||
elsif RUBY_VERSION < "2.5"
|
|
||||||
gem "simplecov", "< 0.21.0"
|
|
||||||
else
|
|
||||||
gem "simplecov"
|
gem "simplecov"
|
||||||
end
|
end
|
||||||
end
|
|
||||||
|
|
||||||
group :assorted do
|
group :assorted do
|
||||||
if RUBY_VERSION < "2.2.0"
|
|
||||||
gem "pry", "~> 0.12.2"
|
|
||||||
else
|
|
||||||
gem "pry"
|
gem "pry"
|
||||||
end
|
|
||||||
|
|
||||||
platform :mri do
|
platform :mri do
|
||||||
if RUBY_VERSION < "2.2.0"
|
|
||||||
gem "pry-byebug", "~> 3.4.3"
|
|
||||||
else
|
|
||||||
gem "debug" if RUBY_VERSION >= "3.1.0"
|
gem "debug" if RUBY_VERSION >= "3.1.0"
|
||||||
gem "pry-byebug"
|
gem "pry-byebug"
|
||||||
end
|
end
|
||||||
end
|
end
|
||||||
end
|
|
||||||
|
48
LICENSE.txt
48
LICENSE.txt
@ -189,51 +189,3 @@
|
|||||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||||
See the License for the specific language governing permissions and
|
See the License for the specific language governing permissions and
|
||||||
limitations under the License.
|
limitations under the License.
|
||||||
|
|
||||||
|
|
||||||
* lib/httpx/domain_name.rb
|
|
||||||
|
|
||||||
This file is derived from the implementation of punycode available at
|
|
||||||
here:
|
|
||||||
|
|
||||||
https://www.verisign.com/en_US/channel-resources/domain-registry-products/idn-sdks/index.xhtml
|
|
||||||
|
|
||||||
Copyright (C) 2000-2002 Verisign Inc., All rights reserved.
|
|
||||||
|
|
||||||
Redistribution and use in source and binary forms, with or
|
|
||||||
without modification, are permitted provided that the following
|
|
||||||
conditions are met:
|
|
||||||
|
|
||||||
1) Redistributions of source code must retain the above copyright
|
|
||||||
notice, this list of conditions and the following disclaimer.
|
|
||||||
|
|
||||||
2) Redistributions in binary form must reproduce the above copyright
|
|
||||||
notice, this list of conditions and the following disclaimer in
|
|
||||||
the documentation and/or other materials provided with the
|
|
||||||
distribution.
|
|
||||||
|
|
||||||
3) Neither the name of the VeriSign Inc. nor the names of its
|
|
||||||
contributors may be used to endorse or promote products derived
|
|
||||||
from this software without specific prior written permission.
|
|
||||||
|
|
||||||
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
|
|
||||||
"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
|
|
||||||
LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
|
|
||||||
FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
|
|
||||||
COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
|
|
||||||
INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
|
|
||||||
BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS
|
|
||||||
OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
|
|
||||||
AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
|
|
||||||
LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
|
|
||||||
ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
|
|
||||||
POSSIBILITY OF SUCH DAMAGE.
|
|
||||||
|
|
||||||
This software is licensed under the BSD open source license. For more
|
|
||||||
information visit www.opensource.org.
|
|
||||||
|
|
||||||
Authors:
|
|
||||||
John Colosi (VeriSign)
|
|
||||||
Srikanth Veeramachaneni (VeriSign)
|
|
||||||
Nagesh Chigurupati (Verisign)
|
|
||||||
Praveen Srinivasan(Verisign)
|
|
34
README.md
34
README.md
@ -19,7 +19,7 @@ And also:
|
|||||||
|
|
||||||
* Compression (gzip, deflate, brotli)
|
* Compression (gzip, deflate, brotli)
|
||||||
* Streaming Requests
|
* Streaming Requests
|
||||||
* Authentication (Basic Auth, Digest Auth, NTLM)
|
* Auth (Basic Auth, Digest Auth, NTLM)
|
||||||
* Expect 100-continue
|
* Expect 100-continue
|
||||||
* Multipart Requests
|
* Multipart Requests
|
||||||
* Advanced Cookie handling
|
* Advanced Cookie handling
|
||||||
@ -46,7 +46,7 @@ And that's the simplest one there is. But you can also do:
|
|||||||
HTTPX.post("http://example.com", form: { user: "john", password: "pass" })
|
HTTPX.post("http://example.com", form: { user: "john", password: "pass" })
|
||||||
|
|
||||||
http = HTTPX.with(headers: { "x-my-name" => "joe" })
|
http = HTTPX.with(headers: { "x-my-name" => "joe" })
|
||||||
http.patch(("http://example.com/file", body: File.open("path/to/file")) # request body is streamed
|
http.patch("http://example.com/file", body: File.open("path/to/file")) # request body is streamed
|
||||||
```
|
```
|
||||||
|
|
||||||
If you want to do some more things with the response, you can get an `HTTPX::Response`:
|
If you want to do some more things with the response, you can get an `HTTPX::Response`:
|
||||||
@ -61,7 +61,7 @@ puts body #=> #<HTTPX::Response ...
|
|||||||
You can also send as many requests as you want simultaneously:
|
You can also send as many requests as you want simultaneously:
|
||||||
|
|
||||||
```ruby
|
```ruby
|
||||||
page1, page2, page3 =`HTTPX.get("https://news.ycombinator.com/news", "https://news.ycombinator.com/news?p=2", "https://news.ycombinator.com/news?p=3")
|
page1, page2, page3 = HTTPX.get("https://news.ycombinator.com/news", "https://news.ycombinator.com/news?p=2", "https://news.ycombinator.com/news?p=3")
|
||||||
```
|
```
|
||||||
|
|
||||||
## Installation
|
## Installation
|
||||||
@ -108,12 +108,12 @@ HTTPX.get(
|
|||||||
```ruby
|
```ruby
|
||||||
response = HTTPX.get("https://www.google.com", params: { q: "me" })
|
response = HTTPX.get("https://www.google.com", params: { q: "me" })
|
||||||
response = HTTPX.post("https://www.nghttp2.org/httpbin/post", form: { name: "John", age: "22" })
|
response = HTTPX.post("https://www.nghttp2.org/httpbin/post", form: { name: "John", age: "22" })
|
||||||
response = HTTPX.plugin(:basic_authentication)
|
response = HTTPX.plugin(:basic_auth)
|
||||||
.basic_authentication("user", "pass")
|
.basic_auth("user", "pass")
|
||||||
.get("https://www.google.com")
|
.get("https://www.google.com")
|
||||||
|
|
||||||
# more complex client objects can be cached, and are thread-safe
|
# more complex client objects can be cached, and are thread-safe
|
||||||
http = HTTPX.plugin(:compression).plugin(:expect).with(headers: { "x-pvt-token" => "TOKEN"})
|
http = HTTPX.plugin(:expect).with(headers: { "x-pvt-token" => "TOKEN" })
|
||||||
http.get("https://example.com") # the above options will apply
|
http.get("https://example.com") # the above options will apply
|
||||||
http.post("https://example2.com", form: { name: "John", age: "22" }) # same, plus the form POST body
|
http.post("https://example2.com", form: { name: "John", age: "22" }) # same, plus the form POST body
|
||||||
```
|
```
|
||||||
@ -122,11 +122,11 @@ http.post("https://example2.com", form: {name: "John", age: "22"}) # same, plus
|
|||||||
|
|
||||||
It ships with most features published as a plugin, making vanilla `httpx` lightweight and dependency-free, while allowing you to "pay for what you use"
|
It ships with most features published as a plugin, making vanilla `httpx` lightweight and dependency-free, while allowing you to "pay for what you use"
|
||||||
|
|
||||||
The plugin system is similar to the ones used by [sequel](https://github.com/jeremyevans/sequel), [roda](https://github.com/jeremyevans/roda) or [shrine](https://github.com/janko-m/shrine).
|
The plugin system is similar to the ones used by [sequel](https://github.com/jeremyevans/sequel), [roda](https://github.com/jeremyevans/roda) or [shrine](https://github.com/shrinerb/shrine).
|
||||||
|
|
||||||
### Advanced DNS features
|
### Advanced DNS features
|
||||||
|
|
||||||
`HTTPX` ships with custom DNS resolver implementations, including a native Happy Eyeballs resolver immplementation, and a DNS-over-HTTPS resolver.
|
`HTTPX` ships with custom DNS resolver implementations, including a native Happy Eyeballs resolver implementation, and a DNS-over-HTTPS resolver.
|
||||||
|
|
||||||
## User-driven test suite
|
## User-driven test suite
|
||||||
|
|
||||||
@ -134,9 +134,9 @@ The test suite runs against [httpbin proxied over nghttp2](https://nghttp2.org/h
|
|||||||
|
|
||||||
## Supported Rubies
|
## Supported Rubies
|
||||||
|
|
||||||
All Rubies greater or equal to 2.1, and always latest JRuby and Truffleruby.
|
All Rubies greater or equal to 2.7, and always latest JRuby and Truffleruby.
|
||||||
|
|
||||||
**Note**: This gem is tested against all latest patch versions, i.e. if you're using 2.2.0 and you experience some issue, please test it against 2.2.10 (latest patch version of 2.2) before creating an issue.
|
**Note**: This gem is tested against all latest patch versions, i.e. if you're using 3.3.0 and you experience some issue, please test it against 3.3.$latest before creating an issue.
|
||||||
|
|
||||||
## Resources
|
## Resources
|
||||||
| | |
|
| | |
|
||||||
@ -149,24 +149,14 @@ All Rubies greater or equal to 2.1, and always latest JRuby and Truffleruby.
|
|||||||
|
|
||||||
## Caveats
|
## Caveats
|
||||||
|
|
||||||
### ALPN support
|
|
||||||
|
|
||||||
ALPN negotiation is required for "auto" HTTP/2 "https" requests. This is available in ruby since version 2.3 .
|
|
||||||
|
|
||||||
### Known bugs
|
|
||||||
|
|
||||||
* Doesn't work with ruby 2.4.0 for Windows (see [#36](https://gitlab.com/os85/httpx/issues/36)).
|
|
||||||
* Using `total_timeout` along with the `:persistent` plugin [does not work as you might expect](https://gitlab.com/os85/httpx/-/wikis/Timeouts#total_timeout).
|
|
||||||
|
|
||||||
## Versioning Policy
|
## Versioning Policy
|
||||||
|
|
||||||
Although 0.x software, `httpx` is considered API-stable and production-ready, i.e. current API or options may be subject to deprecation and emit log warnings, but can only effectively be removed in a major version change.
|
`httpx` follows Semantic Versioning.
|
||||||
|
|
||||||
## Contributing
|
## Contributing
|
||||||
|
|
||||||
* Discuss your contribution in an issue
|
* Discuss your contribution in an issue
|
||||||
* Fork it
|
* Fork it
|
||||||
* Make your changes, add some tests
|
* Make your changes, add some tests (follow the instructions from [here](test/README.md))
|
||||||
* Ensure all tests pass (`docker-compose -f docker-compose.yml -f docker-compose-ruby-{RUBY_VERSION}.yml run httpx bundle exec rake test`)
|
|
||||||
* Open a Merge Request (that's Pull Request in Github-ish)
|
* Open a Merge Request (that's Pull Request in Github-ish)
|
||||||
* Wait for feedback
|
* Wait for feedback
|
||||||
|
1
Rakefile
1
Rakefile
@ -100,6 +100,7 @@ task :prepare_website => %w[rdoc prepare_jekyll_data] do
|
|||||||
header = "---\n" \
|
header = "---\n" \
|
||||||
"layout: #{layout}\n" \
|
"layout: #{layout}\n" \
|
||||||
"title: #{title}\n" \
|
"title: #{title}\n" \
|
||||||
|
"project: httpx\n" \
|
||||||
"---\n\n"
|
"---\n\n"
|
||||||
File.write(path, header + data)
|
File.write(path, header + data)
|
||||||
end
|
end
|
||||||
|
5
doc/release_notes/0_23_1.md
Normal file
5
doc/release_notes/0_23_1.md
Normal file
@ -0,0 +1,5 @@
|
|||||||
|
# 0.23.1
|
||||||
|
|
||||||
|
## Bugfixes
|
||||||
|
|
||||||
|
* fixed regression causing dns candidate names not being tried after first one fails.
|
5
doc/release_notes/0_23_2.md
Normal file
5
doc/release_notes/0_23_2.md
Normal file
@ -0,0 +1,5 @@
|
|||||||
|
# 0.23.2
|
||||||
|
|
||||||
|
## Bugfixes
|
||||||
|
|
||||||
|
* fix missing variable on code path in the native resolver.
|
6
doc/release_notes/0_23_3.md
Normal file
6
doc/release_notes/0_23_3.md
Normal file
@ -0,0 +1,6 @@
|
|||||||
|
# 0.23.3
|
||||||
|
|
||||||
|
## Bugfixes
|
||||||
|
|
||||||
|
* native resolver: fix missing exception variable in the DNS error code path.
|
||||||
|
* native resolver: fixed short DNS packet handling when using TCP.
|
5
doc/release_notes/0_23_4.md
Normal file
5
doc/release_notes/0_23_4.md
Normal file
@ -0,0 +1,5 @@
|
|||||||
|
# 0.23.4
|
||||||
|
|
||||||
|
## Bugfixes
|
||||||
|
|
||||||
|
* fix `Response::Body#read` which rewinds on every call.
|
48
doc/release_notes/0_24_0.md
Normal file
48
doc/release_notes/0_24_0.md
Normal file
@ -0,0 +1,48 @@
|
|||||||
|
# 0.24.0
|
||||||
|
|
||||||
|
## Features
|
||||||
|
|
||||||
|
### `:oauth` plugin
|
||||||
|
|
||||||
|
The `:oauth` plugin manages the handling of a given OAuth session, in that it ships with convenience methods to generate a new access token, which it then injects in all requests.
|
||||||
|
|
||||||
|
More info under https://honeyryderchuck.gitlab.io/httpx/wiki/OAuth
|
||||||
|
|
||||||
|
### session callbacks
|
||||||
|
|
||||||
|
HTTP request/response lifecycle events have now the ability of being intercepted via public API callback methods:
|
||||||
|
|
||||||
|
```ruby
|
||||||
|
HTTPX.on_request_completed do |request|
|
||||||
|
puts "request to #{request.uri} sent"
|
||||||
|
end.get(...)
|
||||||
|
```
|
||||||
|
|
||||||
|
More info under https://honeyryderchuck.gitlab.io/httpx/wiki/Events to know which events and callback methods are supported.
|
||||||
|
|
||||||
|
### `:circuit_breaker` plugin `on_circuit_open` callback
|
||||||
|
|
||||||
|
A callback has been introduced for the `:circuit_breaker` plugin, which is triggered when a circuit is opened.
|
||||||
|
|
||||||
|
```ruby
|
||||||
|
http = HTTPX.plugin(:circuit_breaker).on_circuit_open do |req|
|
||||||
|
puts "circuit opened for #{req.uri}"
|
||||||
|
end
|
||||||
|
http.get(...)
|
||||||
|
```
|
||||||
|
|
||||||
|
## Improvements
|
||||||
|
|
||||||
|
Several `:response_cache` features have been improved:
|
||||||
|
|
||||||
|
* `:response_cache` plugin: response cache store has been made thread-safe.
|
||||||
|
* cached response sharing across threads is made safer, as stringio/tempfile instances are copied instead of shared (without copying the underling string/file).
|
||||||
|
* stale cached responses are eliminated on cache store lookup/store operations.
|
||||||
|
* already closed responses are evicted from the cache store.
|
||||||
|
* fallback for lack of compatible response "date" header has been fixed to return a `Time` object.
|
||||||
|
|
||||||
|
## Bugfixes
|
||||||
|
|
||||||
|
* Ability to recover from errors happening during response chunk processing (required for overriding behaviour and response chunk callbacks); error bubbling up will result in the connection being closed.
|
||||||
|
* Happy eyeballs support for multi-homed early-resolved domain names (such as `localhost` under `/etc/hosts`) was broken, as it would try the first given IP; so, if given `::1` and connection would fail, it wouldn't try `127.0.0.1`, which would have succeeded.
|
||||||
|
* `:digest_authentication` plugin was removing the "algorithm" header on `-sess` declared algorithms, which is required for HTTP digest auth negotiation.
|
12
doc/release_notes/0_24_1.md
Normal file
12
doc/release_notes/0_24_1.md
Normal file
@ -0,0 +1,12 @@
|
|||||||
|
# 0.24.1
|
||||||
|
|
||||||
|
## Improvements
|
||||||
|
|
||||||
|
* datadog adapter: support `:service_name` configuration option.
|
||||||
|
* datadog adapter: set `:distributed_tracing` to `true` by default.
|
||||||
|
* `:proxy` plugin: when the proxy uri uses an unsupported scheme (i.e.: "scp://125.24.2.1"), a more user friendly error is raised (instead of the previous broken stacktrace).
|
||||||
|
|
||||||
|
## Bugfixes
|
||||||
|
|
||||||
|
* datadog adapter: fix tracing enable call, which was wrongly calling `super`.
|
||||||
|
* `:proxy` plugin: fix for bug which was turning off plugins overriding `HTTPX::Connection#send` (such as the datadog adapter).
|
12
doc/release_notes/0_24_2.md
Normal file
12
doc/release_notes/0_24_2.md
Normal file
@ -0,0 +1,12 @@
|
|||||||
|
# 0.24.2
|
||||||
|
|
||||||
|
## Improvements
|
||||||
|
|
||||||
|
* besides an array, `:resolver_options` can now receive a hash for `:nameserver`, which **must** be indexed by IP family (`Socket::AF_INET6` or `Socket::AF_INET`); each group of nameservers will be used for emitting DNS queries of that IP family.
|
||||||
|
* `:authentication` plugin: Added `#bearer_auth` helper, which receives a token, and sets it as `"Bearer $TOKEN` in the `"authorization"` header.
|
||||||
|
* `faraday` adapter: now implements `#build_connection` and `#close`, will now interact with `faraday` native timeouts (`:read`, `:write` and `:connect`).
|
||||||
|
|
||||||
|
|
||||||
|
## Bugfixes
|
||||||
|
|
||||||
|
* fixed native resolver bug when queries involving intermediate alias would be kept after the original query and mess with re-queries.
|
12
doc/release_notes/0_24_3.md
Normal file
12
doc/release_notes/0_24_3.md
Normal file
@ -0,0 +1,12 @@
|
|||||||
|
# 0.24.3
|
||||||
|
|
||||||
|
## Improvements
|
||||||
|
|
||||||
|
* faraday adapter: reraise httpx timeout errors as faraday errors.
|
||||||
|
* faraday adapter: support `:bind` option, which expects a host and port to connect to.
|
||||||
|
|
||||||
|
## Bugfixes
|
||||||
|
|
||||||
|
* faraday adapter: fix `#close` implementation using the wrong ivar.
|
||||||
|
* faraday adapter: fix usage of `request_timeout` translation of faraday timeouts into httpx timeouts.
|
||||||
|
* faraday adapter: `ssl: { verify: false }` was being ignored, and certification verification was still proceeding.
|
18
doc/release_notes/0_24_4.md
Normal file
18
doc/release_notes/0_24_4.md
Normal file
@ -0,0 +1,18 @@
|
|||||||
|
# 0.24.4
|
||||||
|
|
||||||
|
## Improvements
|
||||||
|
|
||||||
|
* `digest_authentication` plugin now supports passing HA1hashed with password HA1s (common to store in htdigest files for example) when setting the`:hashed` kwarg to `true` in the `.digest_auth` call.
|
||||||
|
* ex: `http.digest_auth(user, get_hashed_passwd_from_htdigest(user), hashed: true)`
|
||||||
|
* TLS session resumption is now supported
|
||||||
|
* whenever possible, `httpx` sessions will recycle used connections so that, in the case of TLS connections, the first session will keep being reused, thereby diminishing the overhead of subsequent TLS handshakes on the same host.
|
||||||
|
* TLS sessions are only reused in the scope of the same `httpx` session, unless the `:persistent` plugin is used, in which case, the persisted `httpx` session will always try to resume TLS sessions.
|
||||||
|
|
||||||
|
## Bugfixes
|
||||||
|
|
||||||
|
* When explicitly using IP addresses in the URL host, TLS handshake will now verify if the IP address is included in the certificate.
|
||||||
|
* IP addresses will still not be used for SNI, as per RFC 6066, section 3.
|
||||||
|
* ex: `http.get("https://10.12.0.12/get")`
|
||||||
|
* if you want the prior behavior, set `HTTPX.with(ssl: {verify_hostname: false})`
|
||||||
|
* Turn TLS hostname verification on for `jruby` (it's turned off by default).
|
||||||
|
* if you want the prior behavior, set `HTTPX.with(ssl: {verify_hostname: false})`
|
6
doc/release_notes/0_24_5.md
Normal file
6
doc/release_notes/0_24_5.md
Normal file
@ -0,0 +1,6 @@
|
|||||||
|
# 0.24.5
|
||||||
|
|
||||||
|
## Bugfixes
|
||||||
|
|
||||||
|
* fix for SSL handshake post connection SAN check using IPv6 address.
|
||||||
|
* fix bug in DoH impl when the request returned no answer.
|
5
doc/release_notes/0_24_6.md
Normal file
5
doc/release_notes/0_24_6.md
Normal file
@ -0,0 +1,5 @@
|
|||||||
|
# 0.24.6
|
||||||
|
|
||||||
|
## Bugfixes
|
||||||
|
|
||||||
|
* fix Session class assertions not prepared for class overrides, which could break some plugins which override the Session class on load (such as `datadog` or `webmock` adapters).
|
10
doc/release_notes/0_24_7.md
Normal file
10
doc/release_notes/0_24_7.md
Normal file
@ -0,0 +1,10 @@
|
|||||||
|
# 0.24.6
|
||||||
|
|
||||||
|
## dependencies
|
||||||
|
|
||||||
|
`http-2-next` last supported version for the 0.x series is the last version before v1. This should ensure that older versions of `httpx` won't be affected by any of the recent breaking changes.
|
||||||
|
|
||||||
|
## Bugfixes
|
||||||
|
|
||||||
|
* `grpc`: setup of rpc calls from camel-cased symbols has been fixed. As an improvement, the GRPC-enabled session will now support both snake-cased, as well as camel-cased calls.
|
||||||
|
* `datadog` adapter has now been patched to support the most recent breaking changes of `ddtrace` configuration DSL (`env_to_bool` is no longer supported).
|
60
doc/release_notes/1_0_0.md
Normal file
60
doc/release_notes/1_0_0.md
Normal file
@ -0,0 +1,60 @@
|
|||||||
|
# 1.0.0
|
||||||
|
|
||||||
|
## Breaking changes
|
||||||
|
|
||||||
|
* the minimum supported ruby version is 2.7.0 .
|
||||||
|
* The fallback support for IDNA 2003 has been removed. If you require this feature, install the [idnx gem](https://github.com/HoneyryderChuck/idnx), which `httpx` automatically integrates with when available (and supports IDNA 2008).
|
||||||
|
* `:total_timeout` option has been removed (no session-wide timeout supported, use `:request_timeout`).
|
||||||
|
* `:read_timeout` and `:write_timeout` are now set to 60 seconds by default, and preferred over `:operation_timeout`;
|
||||||
|
* the exception being in the `:stream` plugin, as the response is theoretically endless (so `:read_timeout` is unset).
|
||||||
|
* The `:multipart` plugin is removed, as its functionality and API are now loaded by default (no API changes).
|
||||||
|
* The `:compression` plugin is removed, as its functionality and API are now loaded by default (no API changes).
|
||||||
|
* `:compression_threshold_size` was removed (formats in `"content-encoding"` request header will always encode the request body).
|
||||||
|
* the new `:compress_request_body` and `:decompress_response_body` can be set to `false` to (respectively) disable compression of passed input body, or decompression of the response body.
|
||||||
|
* `:retries` plugin: the `:retry_on` condition will **not** replace default retriable error checks, it will now instead be triggered **only if** no retryable error has been found.
|
||||||
|
|
||||||
|
### plugins
|
||||||
|
|
||||||
|
* `:authentication` plugin becomes `:auth`.
|
||||||
|
* `.authentication` helper becomes `.authorization`.
|
||||||
|
* `:basic_authentication` plugin becomes `:basic_auth`.
|
||||||
|
* `:basic_authentication` helper is removed.
|
||||||
|
* `:digest_authentication` plugin becomes `:digest_auth`.
|
||||||
|
* `:digest_authentication` helper is removed.
|
||||||
|
* `:ntlm_authentication` plugin becomes `:ntlm_auth`.
|
||||||
|
* `:ntlm_authentication` helper is removed.
|
||||||
|
* OAuth plugin: `:oauth_authentication` helper is rename to `:oauth_auth`.
|
||||||
|
* `:compression/brotli` plugin becomes `:brotli`.
|
||||||
|
|
||||||
|
### Support removed for deprecated APIs
|
||||||
|
|
||||||
|
* The deprecated `HTTPX::Client` constant lookup has been removed (use `HTTPX::Session` instead).
|
||||||
|
* The deprecated `HTTPX.timeout({...})` function has been removed (use `HTTPX.with(timeout: {...})` instead).
|
||||||
|
* The deprecated `HTTPX.headers({...})` function has been removed (use `HTTPX.with(headers: {...})` instead).
|
||||||
|
* The deprecated `HTTPX.plugins(...)` function has been removed (use `HTTPX.plugin(...).plugin(...)...` instead).
|
||||||
|
* The deprecated `:transport_options` option, which was only valid for UNIX connections, has been removed (use `:addresses` instead).
|
||||||
|
* The deprecated `def_option(...)` function, previously used to define additional options in plugins, has been removed (use `def option_$new_option` instead).
|
||||||
|
* The deprecated `:loop_timeout` timeout option has been removed.
|
||||||
|
* `:stream` plugin: the deprecated `HTTPX::InstanceMethods::StreamResponse` has been removed (use `HTTPX::StreamResponse` instead).
|
||||||
|
* The deprecated usage of symbols to indicate HTTP verbs (i.e. `HTTPX.request(:get, ...)` or `HTTPX.build_request(:get, ...)`) is not supported anymore (use the upcase string always, i.e. `HTTPX.request("GET", ...)` or `HTTPX.build_request("GET", ...)`, instead).
|
||||||
|
* The deprecated `HTTPX::ErrorResponse#status` method has been removed (use `HTTPX::ErrorResponse#error` instead).
|
||||||
|
|
||||||
|
### dependencies
|
||||||
|
|
||||||
|
* `http-2-next` minimum supported version is 1.0.0.
|
||||||
|
* `:datadog` adapter only supports `ddtrace` gem 1.x or higher.
|
||||||
|
* `:faraday` adapter only supports `faraday` gem 1.x or higher.
|
||||||
|
|
||||||
|
## Improvements
|
||||||
|
|
||||||
|
* `circuit_breaker`: the drip rate of real request during the "half-open" stage of a circuit will reliably distribute real requests (as per the drip rate) over the `max_attempts`, before the circuit is closed.
|
||||||
|
|
||||||
|
## Bugfixes
|
||||||
|
|
||||||
|
* Tempfiles are now correctly identified as file inputs for multipart requests.
|
||||||
|
* fixed `proxy` plugin behaviour when loaded with the `follow_redirects` plugin and processing a 305 response (request needs to be retried on a different proxy).
|
||||||
|
|
||||||
|
## Chore
|
||||||
|
|
||||||
|
* `:grpc` plugin: connection won't buffer requests before HTTP/2 handshake is completed, i.e. works the same as plain `httpx` HTTP/2 connection establishment.
|
||||||
|
* if you are relying on this, you can keep the old behavior this way: `HTTPX.plugin(:grpc, http2_settings: { wait_for_handshake: false })`.
|
5
doc/release_notes/1_0_1.md
Normal file
5
doc/release_notes/1_0_1.md
Normal file
@ -0,0 +1,5 @@
|
|||||||
|
# 1.0.1
|
||||||
|
|
||||||
|
## Bugfixes
|
||||||
|
|
||||||
|
* do not try to inflate empty chunks (it triggered an error during response decoding).
|
7
doc/release_notes/1_0_2.md
Normal file
7
doc/release_notes/1_0_2.md
Normal file
@ -0,0 +1,7 @@
|
|||||||
|
# 1.0.2
|
||||||
|
|
||||||
|
## bugfixes
|
||||||
|
|
||||||
|
* bump `http-2-next` to 1.0.1, which fixes a bug where http/2 connection interprets MAX_CONCURRENT_STREAMS as request cap.
|
||||||
|
* `grpc`: setup of rpc calls from camel-cased symbols has been fixed. As an improvement, the GRPC-enabled session will now support both snake-cased, as well as camel-cased calls.
|
||||||
|
* `datadog` adapter has now been patched to support the most recent breaking changes of `ddtrace` configuration DSL (`env_to_bool` is no longer supported).
|
32
doc/release_notes/1_1_0.md
Normal file
32
doc/release_notes/1_1_0.md
Normal file
@ -0,0 +1,32 @@
|
|||||||
|
# 1.1.0
|
||||||
|
|
||||||
|
## Features
|
||||||
|
|
||||||
|
A function, `#peer_address`, was added to the response object, which returns the IP (either a string or an `IPAddr` object) from the socket used to get the response from.
|
||||||
|
|
||||||
|
```ruby
|
||||||
|
response = HTTPX.get("https://example.com")
|
||||||
|
response.peer_address #=> #<IPAddr: IPv4:93.184.216.34/255.255.255.255>
|
||||||
|
```
|
||||||
|
|
||||||
|
error responses will also expose an IP address via `#peer_address` as long a connection happened before the error.
|
||||||
|
|
||||||
|
## Improvements
|
||||||
|
|
||||||
|
* A performance regression involving the new default timeouts has been fixed, which could cause significant overhead in "multiple requests in sequence" scenarios, and was clearly visible in benchmarks.
|
||||||
|
* this regression will still be seen in jruby due to a bug, which fix will be released in jruby 9.4.5.0.
|
||||||
|
* HTTP/1.1 connections are now set to handle as many requests as they can by default (instead of the past default of max 200, at which point they'd be recycled).
|
||||||
|
* tolerate the inexistence of `openssl` in the installed ruby, like `net-http` does.
|
||||||
|
* `on_connection_opened` and `on_connection_closed` will yield the `OpenSSL::SSL::SSLSocket` instance for `https` backed origins (instead of always the `Socket` instance).
|
||||||
|
|
||||||
|
## Bugfixes
|
||||||
|
|
||||||
|
* when using the `:native` resolver (default option), a default of 1 for ndots is set, for systems which do not set one.
|
||||||
|
* replaced usage of `Float::INFINITY` with `nil` for timeout defaults, as the former can't be used in IO wait functions.
|
||||||
|
* `faraday` adapter timeout setup now maps to `:read_timeout` and `:write_timeout` options from `httpx`.
|
||||||
|
* fixed HTTP/1.1 connection recycling on number of max requests exhausted.
|
||||||
|
* `response.json` will now work when "content-type" header is set to "application/hal+json".
|
||||||
|
|
||||||
|
## Chore
|
||||||
|
|
||||||
|
* when using the `:cookies` plugin, a warning message to install the idnx message will only be emitted if the cookie domain is an IDN (this message was being shown all the time since v1 release).
|
17
doc/release_notes/1_1_1.md
Normal file
17
doc/release_notes/1_1_1.md
Normal file
@ -0,0 +1,17 @@
|
|||||||
|
# 1.1.1
|
||||||
|
|
||||||
|
## improvements
|
||||||
|
|
||||||
|
* (Re-)enabling default retries in DNS name queries; this had been disabled as a result of revamping timeouts, and resulted in queries only being sent once, which is very little for UDP-related traffic, and breaks if using DNS rate-limiting software. Retries the query just once, for now.
|
||||||
|
|
||||||
|
## bugfixes
|
||||||
|
|
||||||
|
* reset timers when adding new intervals, as these may be added as a result on after-select connection handling, and must wait for the next tick cycle (before the patch, they were triggering too soon).
|
||||||
|
* fixed "on close" callback leak on connection reuse, which caused linear performance regression in benchmarks performing one request per connection.
|
||||||
|
* fixed hanging connection when an HTTP/1.1 emitted a "connection: close" header but the server would not emit one (it closes the connection now).
|
||||||
|
* fixed recursive dns cached lookups which may have already expired, and created nil entries in the returned address list.
|
||||||
|
* dns system resolver is now able to retry on failure.
|
||||||
|
|
||||||
|
## chore
|
||||||
|
|
||||||
|
* remove duplicated callback unregistering connections.
|
12
doc/release_notes/1_1_2.md
Normal file
12
doc/release_notes/1_1_2.md
Normal file
@ -0,0 +1,12 @@
|
|||||||
|
# 1.1.2
|
||||||
|
|
||||||
|
## improvements
|
||||||
|
|
||||||
|
* only moving eden connections to idle when they're recycled.
|
||||||
|
|
||||||
|
## bugfixes
|
||||||
|
|
||||||
|
* skip closing a connection which is already closed during reset.
|
||||||
|
* sentry adapter: fixed `super` call which didn't have a super method (this prevented using sentry-enabled sessions with the `:retries` plugin).
|
||||||
|
* sentry adapter: fixing registering of sentry config.
|
||||||
|
* sentry adapter: do not propagate traces when relevant sdk options are disabled (such as `propagate_traces`).
|
18
doc/release_notes/1_1_3.md
Normal file
18
doc/release_notes/1_1_3.md
Normal file
@ -0,0 +1,18 @@
|
|||||||
|
# 1.1.3
|
||||||
|
|
||||||
|
## improvements
|
||||||
|
|
||||||
|
## security
|
||||||
|
|
||||||
|
* when using `:follow_redirects` plugin, the "authorization" header will be removed when following redirect responses to a different origin.
|
||||||
|
|
||||||
|
## bugfixes
|
||||||
|
|
||||||
|
* fixed `:stream` plugin not following redirect responses when used with the `:follow_redirects` plugin.
|
||||||
|
* fixed `:stream` plugin not doing content decoding when responses were p.ex. gzip-compressed.
|
||||||
|
* fixed bug preventing usage of IPv6 loopback or link-local addresses in the request URL in systems with no IPv6 internet connectivity (the request was left hanging).
|
||||||
|
* protect all code which may initiate a new connection from abrupt errors (such as internet turned off), as it was done on the initial request call.
|
||||||
|
|
||||||
|
## chore
|
||||||
|
|
||||||
|
internal usage of `mutex_m` has been removed (`mutex_m` is going to be deprecated in ruby 3.3).
|
6
doc/release_notes/1_1_4.md
Normal file
6
doc/release_notes/1_1_4.md
Normal file
@ -0,0 +1,6 @@
|
|||||||
|
# 1.1.4
|
||||||
|
|
||||||
|
## bugfixes
|
||||||
|
|
||||||
|
* datadog adapter: use `Gem::Version` to invoke the correct configuration API.
|
||||||
|
* stream plugin: do not preempt request enqueuing (this was making integration with the `:follow_redirects` plugin fail when set up with `webmock`).
|
12
doc/release_notes/1_1_5.md
Normal file
12
doc/release_notes/1_1_5.md
Normal file
@ -0,0 +1,12 @@
|
|||||||
|
# 1.1.5
|
||||||
|
|
||||||
|
## improvements
|
||||||
|
|
||||||
|
* pattern matching support for responses has been backported to ruby 2.7 as well.
|
||||||
|
|
||||||
|
## bugfixes
|
||||||
|
|
||||||
|
* `stream` plugin: fix for `HTTPX::StreamResponse#each_line` not yielding the last line of the payload when not delimiter-terminated.
|
||||||
|
* `stream` plugin: fix `webmock` adapter integration when methods calls would happen in the `HTTPX::StreamResponse#each` block.
|
||||||
|
* `stream` plugin: fix `:follow_redirects` plugin integration which was caching the redirect response and using it for method calls inside the `HTTPX::StreamResponse#each` block.
|
||||||
|
* "103 early hints" responses will be ignored when processing the response (it was causing the response returned by sessions to hold its headers, instead of the following 200 response, while keeping the 200 response body).
|
49
doc/release_notes/1_2_0.md
Normal file
49
doc/release_notes/1_2_0.md
Normal file
@ -0,0 +1,49 @@
|
|||||||
|
# 1.2.0
|
||||||
|
|
||||||
|
## Features
|
||||||
|
|
||||||
|
### `:ssrf_filter` plugin
|
||||||
|
|
||||||
|
The `:ssrf_filter` plugin prevents server-side request forgery attacks, by blocking requests to the internal network. This is useful when the URLs used to perform requests aren’t under the developer control (such as when they are inserted via a web application form).
|
||||||
|
|
||||||
|
```ruby
|
||||||
|
http = HTTPX.plugin(:ssrf_filter)
|
||||||
|
|
||||||
|
# this works
|
||||||
|
response = http.get("https://example.com")
|
||||||
|
|
||||||
|
# this doesn't
|
||||||
|
response = http.get("http://localhost:3002")
|
||||||
|
response = http.get("http://[::1]:3002")
|
||||||
|
response = http.get("http://169.254.169.254/latest/meta-data/")
|
||||||
|
```
|
||||||
|
|
||||||
|
More info under https://honeyryderchuck.gitlab.io/httpx/wiki/SSRF-Filter
|
||||||
|
|
||||||
|
### `:callbacks` plugin
|
||||||
|
|
||||||
|
The session callbacks introduced in v0.24.0 are in its own plugin. Older code will still work and emit a deprecation warning.
|
||||||
|
|
||||||
|
More info under https://honeyryderchuck.gitlab.io/httpx/wiki/Callbacks
|
||||||
|
|
||||||
|
### `:redirect_on` option for `:follow_redirects` plugin
|
||||||
|
|
||||||
|
This option allows passing a callback which, when returning `false`, can interrupt the redirect loop.
|
||||||
|
|
||||||
|
```ruby
|
||||||
|
http = HTTPX.plugin(:follow_redirects).with(redirect_on: ->(location_uri) { BLACKLIST_HOSTS.include?(location_uri.host) })
|
||||||
|
```
|
||||||
|
|
||||||
|
### `:close_on_handshake_timeout` timeout
|
||||||
|
|
||||||
|
A new `:timeout` option, `:close_handshake_timeout`, is added, which monitors connection readiness when performing HTTP/2 connection termination handshake.
|
||||||
|
|
||||||
|
## Improvements
|
||||||
|
|
||||||
|
* Internal "eden connections" concept was removed, and connection objects are now kept-and-reused during the lifetime of a session, even when closed. This simplified connection pool implementation and improved performance.
|
||||||
|
* request using `:proxy` and `:retries` plugin enabled sessions will now retry on proxy connection establishment related errors.
|
||||||
|
|
||||||
|
## Bugfixes
|
||||||
|
|
||||||
|
* webmock adapter: mocked responses storing decoded payloads won't try to decode them again (fixes vcr/webmock integrations).
|
||||||
|
* webmock adapter: fix issue related with making real requests over webmock-enabled connection.
|
6
doc/release_notes/1_2_1.md
Normal file
6
doc/release_notes/1_2_1.md
Normal file
@ -0,0 +1,6 @@
|
|||||||
|
# 1.2.1
|
||||||
|
|
||||||
|
## Bugfixes
|
||||||
|
|
||||||
|
* DoH resolver: try resolving other candidates on "domain not found" error (same behaviour as with native resolver).
|
||||||
|
* Allow HTTP/2 connections to exit cleanly when TLS session gets corrupted and termination handshake can't be performed.
|
10
doc/release_notes/1_2_2.md
Normal file
10
doc/release_notes/1_2_2.md
Normal file
@ -0,0 +1,10 @@
|
|||||||
|
# 1.2.2
|
||||||
|
|
||||||
|
## Bugfixes
|
||||||
|
|
||||||
|
* only raise "unknown option" error when option is not supported, not anymore when error happens in the setup of a supported option.
|
||||||
|
* usage of `HTTPX::Session#wrap` within a thread with other sessions using the `:persistent` plugin won't inadvertently terminate its open connections.
|
||||||
|
* terminate connections on `IOError` (`SocketError` does not cover them).
|
||||||
|
* terminate connections on HTTP/2 protocol and handshake errors, which happen during establishment or termination of a HTTP/2 connection (they were being previously kept around, although they'd be irrecoverable).
|
||||||
|
* `:oauth` plugin: fixing check preventing the OAuth metadata server integration path to be exercised.
|
||||||
|
* fix instantiation of the options headers object with the wrong headers class.
|
16
doc/release_notes/1_2_3.md
Normal file
16
doc/release_notes/1_2_3.md
Normal file
@ -0,0 +1,16 @@
|
|||||||
|
# 1.2.3
|
||||||
|
|
||||||
|
## Improvements
|
||||||
|
|
||||||
|
* `:retries` plugin: allow `:max_retries` set to 0 (allows for a soft disable of retries when using the faraday adapter).
|
||||||
|
|
||||||
|
## Bugfixes
|
||||||
|
|
||||||
|
* `:oauth` plugin: fix for default auth method being ignored when setting grant type and scope as options only.
|
||||||
|
* ensure happy eyeballs-initiated cloned connections also set session callbacks (caused issues when server would respond with a 421 response, an event requiring a valid internal callback).
|
||||||
|
* native resolver cleanly transitions from tcp to udp after truncated DNS query (causing issues on follow-up CNAME resolution).
|
||||||
|
* elapsing timeouts now guard against mutation of callbacks while looping (prevents skipping callbacks in situations where a previous one would remove itself from the collection).
|
||||||
|
|
||||||
|
## Chore
|
||||||
|
|
||||||
|
* datadog adapter: do not call `.lazy` on options (avoids deprecation warning, to be removed in ddtrace 2.0)
|
8
doc/release_notes/1_2_4.md
Normal file
8
doc/release_notes/1_2_4.md
Normal file
@ -0,0 +1,8 @@
|
|||||||
|
# 1.2.4
|
||||||
|
|
||||||
|
## Bugfixes
|
||||||
|
|
||||||
|
* fixed issue related to inability to buffer payload to error responses (which may happen on certain error handling situations).
|
||||||
|
* fixed recovery from a lost persistent connection leaving process due to ping being sent while still marked as inactive.
|
||||||
|
* fixed datadog integration, which was not generating new spans on retried requests (when `:retries` plugin is enabled).
|
||||||
|
* fixed splitting strings into key value pairs in cases where the value would contain a "=", such as in certain base64 payloads.
|
7
doc/release_notes/1_2_5.md
Normal file
7
doc/release_notes/1_2_5.md
Normal file
@ -0,0 +1,7 @@
|
|||||||
|
# 1.2.5
|
||||||
|
|
||||||
|
## Bugfixes
|
||||||
|
|
||||||
|
* fix for usage of correct `last-modified` header in `response_cache` plugin.
|
||||||
|
* fix usage of decoding helper methods (i.e. `response.json`) with `response_cache` plugin.
|
||||||
|
* `stream` plugin: reverted back to yielding buffered payloads for streamed responses (broke `down` integration)
|
13
doc/release_notes/1_2_6.md
Normal file
13
doc/release_notes/1_2_6.md
Normal file
@ -0,0 +1,13 @@
|
|||||||
|
# 1.2.6
|
||||||
|
|
||||||
|
## Improvements
|
||||||
|
|
||||||
|
* `native` resolver: when timing out on DNS query for an alias, retry the DNS query for the alias (instead of the original hostname).
|
||||||
|
|
||||||
|
## Bugfixes
|
||||||
|
|
||||||
|
* `faraday` adapter: set `env` options on the request object, so they are available in the request object when yielded.
|
||||||
|
* `follow_redirects` plugin: remove body-related headers (`content-length`, `content-type`) on POST-to-GET redirects.
|
||||||
|
* `follow_redirects` plugin: maintain verb (and body) of original request when the response status code is 307.
|
||||||
|
* `native` resolver: when timing out on TCP-based name resolution, downgrade to UDP before retrying.
|
||||||
|
* `rate_limiter` plugin: do not try fetching the retry-after of error responses.
|
18
doc/release_notes/1_3_0.md
Normal file
18
doc/release_notes/1_3_0.md
Normal file
@ -0,0 +1,18 @@
|
|||||||
|
# 1.3.0
|
||||||
|
|
||||||
|
## Dependencies
|
||||||
|
|
||||||
|
`http-2` v1.0.0 is replacing `http-2-next` as the HTTP/2 parser.
|
||||||
|
|
||||||
|
`http-2-next` was forked from `http-2` 5 years ago; its improvements have been merged back to `http-2` recently though, so `http-2-next` will therefore no longer be maintained.
|
||||||
|
|
||||||
|
## Improvements
|
||||||
|
|
||||||
|
Request-specific options (`:params`, `:form`, `:json` and `:xml`) are now separately kept by the request, which allows them to share `HTTPX::Options`, and reduce the number of copying / allocations.
|
||||||
|
|
||||||
|
This means that `HTTPX::Options` will throw an error if you initialize an object with such keys; this should not happen, as this class is considered internal and you should not be using it directly.
|
||||||
|
|
||||||
|
## Fixes
|
||||||
|
|
||||||
|
* support for the `datadog` gem v2.0.0 in its adapter has been unblocked, now that the gem has been released.
|
||||||
|
* loading the `:cookies` plugin was making the `Session#build_request` private.
|
17
doc/release_notes/1_3_1.md
Normal file
17
doc/release_notes/1_3_1.md
Normal file
@ -0,0 +1,17 @@
|
|||||||
|
# 1.3.1
|
||||||
|
|
||||||
|
## Improvements
|
||||||
|
|
||||||
|
* `:request_timeout` will be applied to all HTTP interactions until the final responses returned to the caller. That includes:
|
||||||
|
* all redirect requests/responses (when using the `:follow_redirects` plugin)
|
||||||
|
* all retried requests/responses (when using the `:retries` plugin)
|
||||||
|
* intermediate requests (such as "100-continue")
|
||||||
|
* faraday adapter: allow further plugins of internal session (ex: `builder.adapter(:httpx) { |sess| sess.plugin(:follow_redirects) }...`)
|
||||||
|
|
||||||
|
## Bugfixes
|
||||||
|
|
||||||
|
* fix connection leak on proxy auth failed (407) handling
|
||||||
|
* fix busy loop on deferred requests for the duration interval
|
||||||
|
* do not further enqueue deferred requests if they have terminated meanwhile.
|
||||||
|
* fix busy loop caused by coalescing connections when one of them is on the DNS resolution phase still.
|
||||||
|
* faraday adapter: on parallel mode, skip calling `on_complete` when not defined.
|
6
doc/release_notes/1_3_2.md
Normal file
6
doc/release_notes/1_3_2.md
Normal file
@ -0,0 +1,6 @@
|
|||||||
|
# 1.3.2
|
||||||
|
|
||||||
|
## Bugfixes
|
||||||
|
|
||||||
|
* Prevent `NoMethodError` in an edge case when the `:proxy` plugin is autoloaded via env vars and webmock adapter are used in tandem, and a real request fails.
|
||||||
|
* raise invalid uri error if passed request uri does not contain the host part (ex: `"https:/get"`)
|
5
doc/release_notes/1_3_3.md
Normal file
5
doc/release_notes/1_3_3.md
Normal file
@ -0,0 +1,5 @@
|
|||||||
|
# 1.3.3
|
||||||
|
|
||||||
|
## Bugfixes
|
||||||
|
|
||||||
|
* fixing a regression introduced in 1.3.2 associated with the webmock adapter, which expects matchable request bodies to be strings
|
6
doc/release_notes/1_3_4.md
Normal file
6
doc/release_notes/1_3_4.md
Normal file
@ -0,0 +1,6 @@
|
|||||||
|
# 1.3.4
|
||||||
|
|
||||||
|
## Bugfixes
|
||||||
|
|
||||||
|
* webmock adapter: fix tempfile usage in multipart requests.
|
||||||
|
* fix: fallback to binary encoding when parsing incoming invalid charset in HTTP "content-type" header.
|
43
doc/release_notes/1_4_0.md
Normal file
43
doc/release_notes/1_4_0.md
Normal file
@ -0,0 +1,43 @@
|
|||||||
|
# 1.4.0
|
||||||
|
|
||||||
|
## Features
|
||||||
|
|
||||||
|
### `:content_digest` plugin
|
||||||
|
|
||||||
|
The `:content_digest` can be used to calculate the digest of request payloads and set them in the `"content-digest"` header; it can also validate the integrity of responses which declare the same `"content-digest"` header.
|
||||||
|
|
||||||
|
More info under https://honeyryderchuck.gitlab.io/httpx/wiki/Content-Digest
|
||||||
|
|
||||||
|
## Per-session connection pools
|
||||||
|
|
||||||
|
This architectural change moves away from per-thread shared connection pools, and into per-session (also thread-safe) connection pools. Unlike before, this enables connections from a session to be reused across threads, as well as limiting the number of connections that can be open on a given origin peer. This fixes long-standing issues, such as reusing connections under a fiber scheduler loop (such as the one from the gem `async`).
|
||||||
|
|
||||||
|
A new `:pool_options` option is introduced, which can be passed a hash with the following sub-options:
|
||||||
|
|
||||||
|
* `:max_connections_per_origin`: maximum number of connections a pool allows (unbounded by default, for backwards compatibility).
|
||||||
|
* `:pool_timeout`: the number of seconds a session will wait for a connection to be checked out (default: 5)
|
||||||
|
|
||||||
|
More info under https://honeyryderchuck.gitlab.io/httpx/wiki/Connection-Pools
|
||||||
|
|
||||||
|
|
||||||
|
## Improvements
|
||||||
|
|
||||||
|
* `:aws_sigv4` plugin: improved digest calculation on compressed request bodies by buffering content to a tempfile.
|
||||||
|
* `HTTPX::Response#json` will parse payload from extended json MIME types (like `application/ld+json`, `application/hal+json`, ...).
|
||||||
|
|
||||||
|
## Bugfixes
|
||||||
|
|
||||||
|
* `:aws_sigv4` plugin: do not try to rewind a request body which yields chunks.
|
||||||
|
* fixed request encoding when `:json` param is passed, and the `oj` gem is used (by using the `:compat` flag).
|
||||||
|
* native resolver: on message truncation, bubble up tcp handshake errors as resolve errors.
|
||||||
|
* allow `HTTPX::Response#json` to accept extended JSON mime types (such as responses with `content-type: application/ld+json`)
|
||||||
|
|
||||||
|
## Chore
|
||||||
|
|
||||||
|
* default options are now fully frozen (in case anyone relies on overriding them).
|
||||||
|
|
||||||
|
### `:xml` plugin
|
||||||
|
|
||||||
|
XML encoding/decoding (via `:xml` request param, and `HTTPX::Response#xml`) is now available via the `:xml` plugin.
|
||||||
|
|
||||||
|
Using `HTTPX::Response#xml` without the plugin will issue a deprecation warning.
|
19
doc/release_notes/1_4_1.md
Normal file
19
doc/release_notes/1_4_1.md
Normal file
@ -0,0 +1,19 @@
|
|||||||
|
# 1.4.1
|
||||||
|
|
||||||
|
## Bugfixes
|
||||||
|
|
||||||
|
* several `datadog` integration bugfixes
|
||||||
|
* only load the `datadog` integration when the `datadog` sdk is loaded (and not other gems that may define the `Datadog` module, like `dogstatsd`)
|
||||||
|
* do not trace if datadog integration is loaded but disabled
|
||||||
|
* distributed headers are now sent along (when the configuration is enabled, which it is by default)
|
||||||
|
* fix for handling multiple `GOAWAY` frames coming from the server (node.js servers seem to send multiple frames on connection timeout)
|
||||||
|
* fix regression for when a url is used with `httpx` which is not `http://` or `https://` (should raise `HTTPX::UnsupportedSchemaError`)
|
||||||
|
* worked around `IO.copy_stream` which was emitting incorrect bytes for HTTP/2 requests with bodies larger than the maximum supported frame size.
|
||||||
|
* multipart requests: make sure that a body declared as `Pathname` is opened for reading in binary mode.
|
||||||
|
* `webmock` integration: ensure that request events are emitted (such as plugins and integrations relying in it, such as `datadog` and the OTel integration)
|
||||||
|
* native resolver: do not propagate successful name resolutions for connections which were already closed.
|
||||||
|
* native resolver: fixed name resolution stalling, in a multi-request to multi-origin scenario, when a resolution timeout would happen.
|
||||||
|
|
||||||
|
## Chore
|
||||||
|
|
||||||
|
* refactor of the happy eyeballs and connection coalescing logic to not rely on callbacks, and instead on instance variable management (makes code more straightforward to read).
|
20
doc/release_notes/1_4_2.md
Normal file
20
doc/release_notes/1_4_2.md
Normal file
@ -0,0 +1,20 @@
|
|||||||
|
# 1.4.2
|
||||||
|
|
||||||
|
## Bugfixes
|
||||||
|
|
||||||
|
* faraday: use default reason when none is matched by Net::HTTP::STATUS_CODES
|
||||||
|
* native resolver: keep sending DNS queries if the socket is available, to avoid busy loops on select
|
||||||
|
* native resolver fixes for Happy Eyeballs v2
|
||||||
|
* do not apply resolution delay if the IPv4 IP was not resolved via DNS
|
||||||
|
* ignore ALIAS if DNS response carries IP answers
|
||||||
|
* do not try to query for names already awaiting answer from the resolver
|
||||||
|
* make sure all types of errors are propagated to connections
|
||||||
|
* make sure next candidate is picked up if receiving NX_DOMAIN_NOT_FOUND error from resolver
|
||||||
|
* raise error happening before any request is flushed to respective connections (avoids loop on non-actionable selector termination).
|
||||||
|
* fix "NoMethodError: undefined method `after' for nil:NilClass", happening for requests flushed into persistent connections which errored, and were retried in a different connection before triggering the timeout callbacks from the previously-closed connection.
|
||||||
|
|
||||||
|
|
||||||
|
## Chore
|
||||||
|
|
||||||
|
* Refactor of timers to allow for explicit and more performant single timer interval cancellation.
|
||||||
|
* default log message restructured to include info about process, thread and caller.
|
11
doc/release_notes/1_4_3.md
Normal file
11
doc/release_notes/1_4_3.md
Normal file
@ -0,0 +1,11 @@
|
|||||||
|
# 1.4.3
|
||||||
|
|
||||||
|
## Bugfixes
|
||||||
|
|
||||||
|
* `webmock` adapter: reassign headers to signature after callbacks are called (these may change the headers before virtual send).
|
||||||
|
* do not close request (and its body) right after sending, instead only on response close
|
||||||
|
* prevents retries from failing under the `:retries` plugin
|
||||||
|
* fixes issue when using `faraday-multipart` request bodies
|
||||||
|
* retry request with HTTP/1 when receiving an HTTP/2 GOAWAY frame with `HTTP_1_1_REQUIRED` error code.
|
||||||
|
* fix wrong method call on HTTP/2 PING frame with unrecognized code.
|
||||||
|
* fix EOFError issues on connection termination for long running connections which may have already been terminated by peer and were wrongly trying to complete the HTTP/2 termination handshake.
|
14
doc/release_notes/1_4_4.md
Normal file
14
doc/release_notes/1_4_4.md
Normal file
@ -0,0 +1,14 @@
|
|||||||
|
# 1.4.4
|
||||||
|
|
||||||
|
## Improvements
|
||||||
|
|
||||||
|
* `:stream` plugin: response will now be partially buffered in order to i.e. inspect response status or headers on the response body without buffering the full response
|
||||||
|
* this fixes an issue in the `down` gem integration when used with the `:max_size` option.
|
||||||
|
* do not unnecessarily probe for connection liveness if no more requests are inflight, including failed ones.
|
||||||
|
* when using persistent connections, do not probe for liveness right after reconnecting after a keep alive timeout.
|
||||||
|
|
||||||
|
## Bugfixes
|
||||||
|
|
||||||
|
* `:persistent` plugin: do not exhaust retry attempts when probing for (and failing) connection liveness.
|
||||||
|
* since the introduction of per-session connection pools, and consequentially due to the possibility of multiple inactive connections for the same origin being in the pool, which may have been terminated by the peer server, requests would fail before being able to establish a new connection.
|
||||||
|
* prevent retrying to connect the TCP socket object when an SSLSocket object is already in place and connecting.
|
126
doc/release_notes/1_5_0.md
Normal file
126
doc/release_notes/1_5_0.md
Normal file
@ -0,0 +1,126 @@
|
|||||||
|
# 1.5.0
|
||||||
|
|
||||||
|
## Features
|
||||||
|
|
||||||
|
### `:stream_bidi` plugin
|
||||||
|
|
||||||
|
The `:stream_bidi` plugin enables bidirectional streaming support (an HTTP/2 only feature!). It builds on top of the `:stream` plugin, and uses its block-based syntax to process incoming frames, while allowing the user to pipe more data to the request (from the same, or another thread/fiber).
|
||||||
|
|
||||||
|
```ruby
|
||||||
|
http = HTTPX.plugin(:stream_bidi)
|
||||||
|
request = http.build_request(
|
||||||
|
"POST",
|
||||||
|
"https://your-origin.com/stream",
|
||||||
|
headers: { "content-type" => "application/x-ndjson" },
|
||||||
|
body: ["{\"message\":\"started\"}\n"]
|
||||||
|
)
|
||||||
|
|
||||||
|
chunks = []
|
||||||
|
|
||||||
|
response = http.request(request, stream: true)
|
||||||
|
|
||||||
|
Thread.start do
|
||||||
|
response.each do |chunk|
|
||||||
|
handle_data(chunk)
|
||||||
|
end
|
||||||
|
end
|
||||||
|
|
||||||
|
# now send data...
|
||||||
|
request << "{\"message\":\"foo\"}\n"
|
||||||
|
request << "{\"message\":\"bar\"}\n"
|
||||||
|
# ...
|
||||||
|
```
|
||||||
|
|
||||||
|
You can read more about it in https://honeyryderchuck.gitlab.io/httpx/wiki/Stream-Bidi
|
||||||
|
|
||||||
|
### `:query` plugin
|
||||||
|
|
||||||
|
The `:query` plugin adds public methods supporting the `QUERY` HTTP verb:
|
||||||
|
|
||||||
|
```ruby
|
||||||
|
http = HTTPX.plugin(:query)
|
||||||
|
|
||||||
|
http.query("https://example.com/gquery", body: "foo=bar") # QUERY /gquery ....
|
||||||
|
```
|
||||||
|
|
||||||
|
You can read more about it in https://honeyryderchuck.gitlab.io/httpx/wiki/Query
|
||||||
|
|
||||||
|
this functionality was added as a plugin for explicit opt-in, as it's experimental (RFC for the new HTTP verb is still in draft).
|
||||||
|
|
||||||
|
### `:response_cache` plugin filesystem based store
|
||||||
|
|
||||||
|
The `:response_cache` plugin supports setting the filesystem as the response cache store (instead of just storing them in memory, which is the default `:store`).
|
||||||
|
|
||||||
|
```ruby
|
||||||
|
# cache store in the filesystem, writes to the temporary directory from the OS
|
||||||
|
http = HTTPX.plugin(:response_cache, response_cache_store: :file_store)
|
||||||
|
# if you want a separate location
|
||||||
|
http = HTTPX.plugin(:response_cache).with(response_cache_store: HTTPX::Plugins::ResponseCache::FileStore.new("/path/to/dir"))
|
||||||
|
```
|
||||||
|
|
||||||
|
You can read more about it in https://honeyryderchuck.gitlab.io/httpx/wiki/Response-Cache#:file_store
|
||||||
|
|
||||||
|
### `:close_on_fork` option
|
||||||
|
|
||||||
|
A new option `:close_on_fork` can be used to ensure that a session object which may have open connections will not leak them in case the process is forked (this can be the case of `:persistent` plugin enabled sessions which have had usage before fork):
|
||||||
|
|
||||||
|
```ruby
|
||||||
|
http = HTTPX.plugin(:persistent, close_on_fork: true)
|
||||||
|
|
||||||
|
# http may have open connections here
|
||||||
|
fork do
|
||||||
|
# http has no connections here
|
||||||
|
end
|
||||||
|
```
|
||||||
|
|
||||||
|
You can read more about it in https://honeyryderchuck.gitlab.io/httpx/wiki/Connection-Pools#Fork-Safety .
|
||||||
|
|
||||||
|
### `:debug_redact` option
|
||||||
|
|
||||||
|
The `:debug_redact` option will, when enabled, replace parts of the debug logs (enabled via `:debug` and `:debug_level` options) which may contain sensitive information, with the `"[REDACTED]"` placeholder.
|
||||||
|
|
||||||
|
You can read more about it in https://honeyryderchuck.gitlab.io/httpx/wiki/Debugging .
|
||||||
|
|
||||||
|
### `:max_connections` pool option
|
||||||
|
|
||||||
|
A new `:max_connections` pool option (settable under `:pool_options`) can be used to define the maximum number **overall** of connections for a pool ("in-transit" or "at-rest"); this complements, and supersedes when used, the already existing `:max_connections_per_origin`, which does the same per connection origin.
|
||||||
|
|
||||||
|
```ruby
|
||||||
|
HTTPX.with(pool_options: { max_connections: 100 })
|
||||||
|
```
|
||||||
|
|
||||||
|
You can read more about it in https://honeyryderchuck.gitlab.io/httpx/wiki/Connection-Pools .
|
||||||
|
|
||||||
|
### Subplugins
|
||||||
|
|
||||||
|
An enhancement to the plugins architecture, it allows plugins to define submodules ("subplugins") which are loaded if another plugin is in use, or is loaded afterwards.
|
||||||
|
|
||||||
|
You can read more about it in https://honeyryderchuck.gitlab.io/httpx/wiki/Custom-Plugins#Subplugins .
|
||||||
|
|
||||||
|
## Improvements
|
||||||
|
|
||||||
|
* `:persistent` plugin: several improvements around reconnections on failure:
|
||||||
|
* reconnections will only happen for "connection broken" errors (and will discard reconnection on timeouts)
|
||||||
|
* reconnections won't exhaust retries
|
||||||
|
* `:response_cache` plugin: several improvements:
|
||||||
|
* return cached response if not stale, send conditional request otherwise (it was always doing the latter).
|
||||||
|
* consider immutable (i.e. `"Cache-Control: immutable"`) responses as never stale.
|
||||||
|
* `:datadog` adapter: decorate spans with more tags (header, kind, component, etc...)
|
||||||
|
* timers operations have been improved to use more efficient algorithms and reduce object creation.
|
||||||
|
|
||||||
|
## Bugfixes
|
||||||
|
|
||||||
|
* ensure that setting request timeouts happens before the request is buffered (the latter could trigger a state transition required by the former).
|
||||||
|
* `:response_cache` plugin: fix `"Vary"` header handling by supporting a new plugin option, `:supported_vary_headers`, which defines which headers are taken into account for cache key calculation.
|
||||||
|
* fixed query string encoded value when passed an empty hash to the `:query` param and the URL already contains query string.
|
||||||
|
* `:callbacks` plugin: ensure the callbacks from a session are copied when a new session is derived from it (via a `.plugin` call, for example).
|
||||||
|
* `:callbacks` plugin: errors raised from hostname resolution should bubble up to user code.
|
||||||
|
* fixed connection coalescing selector monitoring in cases where the coalescable connection is cloned, while other branches were simplified.
|
||||||
|
* clear the connection write buffer in corner cases where the remaining bytes may be interpreted as GOAWAY handshake frame (and may cause unintended writes to connections already identified as broken).
|
||||||
|
* remove idle connections from the selector when an error happens before the state changes (this may happen if the thread is interrupted during name resolution).
|
||||||
|
|
||||||
|
## Chore
|
||||||
|
|
||||||
|
`httpx` makes extensive use of features introduced in ruby 3.4, such as `Module#set_temporary_name` for otherwise plugin-generated anonymous classes (improves debugging and issue reporting), or `String#append_as_bytes` for a small but non-negligible perf boost in buffer operations. It falls back to the previous behaviour when used with ruby 3.3 or lower.
|
||||||
|
|
||||||
|
Also, and in preparation for the incoming ruby 3.5 release, dependency of the `cgi` gem (which will be removed from stdlib) was removed.
|
6
doc/release_notes/1_5_1.md
Normal file
6
doc/release_notes/1_5_1.md
Normal file
@ -0,0 +1,6 @@
|
|||||||
|
# 1.5.1
|
||||||
|
|
||||||
|
## Bugfixes
|
||||||
|
|
||||||
|
* connection errors on persistent connections which have just been checked out from the pool no longer account for retries bookkeeping; the assumption should be that, if a connection has been checked into the pool in an open state, chances are, when it eventually gets checked out, it may be corrupt. This issue was more exacerbated in `:persistent` plugin connections, which by design have a retry of 1, thus failing often immediately after check out without a legitimate request try.
|
||||||
|
* native resolver: fix issue with process interrupts during DNS request, which caused a busy loop when closing the selector.
|
@ -1,7 +1,7 @@
|
|||||||
version: '3'
|
version: '3'
|
||||||
services:
|
services:
|
||||||
httpx:
|
httpx:
|
||||||
image: jruby:9.3
|
image: jruby:9.4
|
||||||
environment:
|
environment:
|
||||||
- JRUBY_OPTS=--debug
|
- JRUBY_OPTS=--debug
|
||||||
entrypoint:
|
entrypoint:
|
||||||
|
@ -1,4 +0,0 @@
|
|||||||
version: '3'
|
|
||||||
services:
|
|
||||||
httpx:
|
|
||||||
image: ruby:2.1
|
|
@ -1,4 +0,0 @@
|
|||||||
version: '3'
|
|
||||||
services:
|
|
||||||
httpx:
|
|
||||||
image: ruby:2.2
|
|
@ -1,8 +0,0 @@
|
|||||||
version: '3'
|
|
||||||
services:
|
|
||||||
httpx:
|
|
||||||
image: ruby:2.3
|
|
||||||
environment:
|
|
||||||
- HTTPBIN_COALESCING_HOST=another
|
|
||||||
links:
|
|
||||||
- "nghttp2:another"
|
|
@ -1,8 +0,0 @@
|
|||||||
version: '3'
|
|
||||||
services:
|
|
||||||
httpx:
|
|
||||||
image: ruby:2.4
|
|
||||||
environment:
|
|
||||||
- HTTPBIN_COALESCING_HOST=another
|
|
||||||
links:
|
|
||||||
- "nghttp2:another"
|
|
@ -1,8 +0,0 @@
|
|||||||
version: '3'
|
|
||||||
services:
|
|
||||||
httpx:
|
|
||||||
image: ruby:2.5
|
|
||||||
environment:
|
|
||||||
- HTTPBIN_COALESCING_HOST=another
|
|
||||||
links:
|
|
||||||
- "nghttp2:another"
|
|
@ -5,13 +5,11 @@ services:
|
|||||||
environment:
|
environment:
|
||||||
- HTTPBIN_COALESCING_HOST=another
|
- HTTPBIN_COALESCING_HOST=another
|
||||||
- HTTPX_RESOLVER_URI=https://doh/dns-query
|
- HTTPX_RESOLVER_URI=https://doh/dns-query
|
||||||
links:
|
|
||||||
- "nghttp2:another"
|
|
||||||
depends_on:
|
depends_on:
|
||||||
- doh
|
- doh
|
||||||
|
|
||||||
doh:
|
doh:
|
||||||
image: registry.gitlab.com/os85/httpx/nghttp2:1
|
image: registry.gitlab.com/os85/httpx/nghttp2:3
|
||||||
depends_on:
|
depends_on:
|
||||||
- doh-proxy
|
- doh-proxy
|
||||||
entrypoint:
|
entrypoint:
|
||||||
|
@ -5,13 +5,11 @@ services:
|
|||||||
environment:
|
environment:
|
||||||
- HTTPBIN_COALESCING_HOST=another
|
- HTTPBIN_COALESCING_HOST=another
|
||||||
- HTTPX_RESOLVER_URI=https://doh/dns-query
|
- HTTPX_RESOLVER_URI=https://doh/dns-query
|
||||||
links:
|
|
||||||
- "nghttp2:another"
|
|
||||||
depends_on:
|
depends_on:
|
||||||
- doh
|
- doh
|
||||||
|
|
||||||
doh:
|
doh:
|
||||||
image: registry.gitlab.com/os85/httpx/nghttp2:1
|
image: registry.gitlab.com/os85/httpx/nghttp2:3
|
||||||
depends_on:
|
depends_on:
|
||||||
- doh-proxy
|
- doh-proxy
|
||||||
entrypoint:
|
entrypoint:
|
||||||
|
@ -5,13 +5,11 @@ services:
|
|||||||
environment:
|
environment:
|
||||||
- HTTPBIN_COALESCING_HOST=another
|
- HTTPBIN_COALESCING_HOST=another
|
||||||
- HTTPX_RESOLVER_URI=https://doh/dns-query
|
- HTTPX_RESOLVER_URI=https://doh/dns-query
|
||||||
links:
|
|
||||||
- "nghttp2:another"
|
|
||||||
depends_on:
|
depends_on:
|
||||||
- doh
|
- doh
|
||||||
|
|
||||||
doh:
|
doh:
|
||||||
image: registry.gitlab.com/os85/httpx/nghttp2:1
|
image: registry.gitlab.com/os85/httpx/nghttp2:3
|
||||||
depends_on:
|
depends_on:
|
||||||
- doh-proxy
|
- doh-proxy
|
||||||
entrypoint:
|
entrypoint:
|
||||||
|
@ -5,13 +5,11 @@ services:
|
|||||||
environment:
|
environment:
|
||||||
- HTTPBIN_COALESCING_HOST=another
|
- HTTPBIN_COALESCING_HOST=another
|
||||||
- HTTPX_RESOLVER_URI=https://doh/dns-query
|
- HTTPX_RESOLVER_URI=https://doh/dns-query
|
||||||
links:
|
|
||||||
- "nghttp2:another"
|
|
||||||
depends_on:
|
depends_on:
|
||||||
- doh
|
- doh
|
||||||
|
|
||||||
doh:
|
doh:
|
||||||
image: registry.gitlab.com/os85/httpx/nghttp2:1
|
image: registry.gitlab.com/os85/httpx/nghttp2:3
|
||||||
depends_on:
|
depends_on:
|
||||||
- doh-proxy
|
- doh-proxy
|
||||||
entrypoint:
|
entrypoint:
|
||||||
|
@ -1,17 +1,15 @@
|
|||||||
version: '3'
|
version: '3'
|
||||||
services:
|
services:
|
||||||
httpx:
|
httpx:
|
||||||
image: ruby:2.6
|
image: ruby:3.3
|
||||||
environment:
|
environment:
|
||||||
- HTTPBIN_COALESCING_HOST=another
|
- HTTPBIN_COALESCING_HOST=another
|
||||||
- HTTPX_RESOLVER_URI=https://doh/dns-query
|
- HTTPX_RESOLVER_URI=https://doh/dns-query
|
||||||
links:
|
|
||||||
- "nghttp2:another"
|
|
||||||
depends_on:
|
depends_on:
|
||||||
- doh
|
- doh
|
||||||
|
|
||||||
doh:
|
doh:
|
||||||
image: registry.gitlab.com/os85/httpx/nghttp2:1
|
image: registry.gitlab.com/os85/httpx/nghttp2:3
|
||||||
depends_on:
|
depends_on:
|
||||||
- doh-proxy
|
- doh-proxy
|
||||||
entrypoint:
|
entrypoint:
|
23
docker-compose-ruby-3.4.yml
Normal file
23
docker-compose-ruby-3.4.yml
Normal file
@ -0,0 +1,23 @@
|
|||||||
|
version: '3'
|
||||||
|
services:
|
||||||
|
httpx:
|
||||||
|
image: ruby:3.4
|
||||||
|
environment:
|
||||||
|
- HTTPBIN_COALESCING_HOST=another
|
||||||
|
- HTTPX_RESOLVER_URI=https://doh/dns-query
|
||||||
|
depends_on:
|
||||||
|
- doh
|
||||||
|
|
||||||
|
doh:
|
||||||
|
image: registry.gitlab.com/os85/httpx/nghttp2:3
|
||||||
|
depends_on:
|
||||||
|
- doh-proxy
|
||||||
|
entrypoint: /usr/local/bin/nghttpx
|
||||||
|
volumes:
|
||||||
|
- ./test/support/ci:/home
|
||||||
|
command: --conf /home/doh-nghttp.conf --no-ocsp --frontend '*,443'
|
||||||
|
|
||||||
|
doh-proxy:
|
||||||
|
image: publicarray/doh-proxy
|
||||||
|
environment:
|
||||||
|
- "UNBOUND_SERVICE_HOST=127.0.0.11"
|
@ -1,7 +1,7 @@
|
|||||||
version: '3'
|
version: '3'
|
||||||
services:
|
services:
|
||||||
httpx:
|
httpx:
|
||||||
image: ghcr.io/graalvm/truffleruby:latest
|
image: ghcr.io/graalvm/truffleruby-community:latest
|
||||||
entrypoint:
|
entrypoint:
|
||||||
- bash
|
- bash
|
||||||
- /home/test/support/ci/build.sh
|
- /home/test/support/ci/build.sh
|
@ -26,6 +26,7 @@ services:
|
|||||||
- AMZ_HOST=aws:4566
|
- AMZ_HOST=aws:4566
|
||||||
- WEBDAV_HOST=webdav
|
- WEBDAV_HOST=webdav
|
||||||
- DD_INSTRUMENTATION_TELEMETRY_ENABLED=false
|
- DD_INSTRUMENTATION_TELEMETRY_ENABLED=false
|
||||||
|
- GRPC_VERBOSITY=ERROR
|
||||||
image: ruby:alpine
|
image: ruby:alpine
|
||||||
privileged: true
|
privileged: true
|
||||||
depends_on:
|
depends_on:
|
||||||
@ -37,13 +38,10 @@ services:
|
|||||||
- aws
|
- aws
|
||||||
- ws-echo-server
|
- ws-echo-server
|
||||||
- webdav
|
- webdav
|
||||||
|
- altsvc-nghttp2
|
||||||
volumes:
|
volumes:
|
||||||
- ./:/home
|
- ./:/home
|
||||||
links:
|
entrypoint: /home/test/support/ci/build.sh
|
||||||
- "altsvc-nghttp2:another2"
|
|
||||||
- "aws:test.aws"
|
|
||||||
entrypoint:
|
|
||||||
/home/test/support/ci/build.sh
|
|
||||||
|
|
||||||
sshproxy:
|
sshproxy:
|
||||||
image: connesc/ssh-gateway
|
image: connesc/ssh-gateway
|
||||||
@ -51,8 +49,6 @@ services:
|
|||||||
- ./test/support/ssh:/config
|
- ./test/support/ssh:/config
|
||||||
depends_on:
|
depends_on:
|
||||||
- nghttp2
|
- nghttp2
|
||||||
links:
|
|
||||||
- "nghttp2:another"
|
|
||||||
|
|
||||||
socksproxy:
|
socksproxy:
|
||||||
image: qautomatron/docker-3proxy
|
image: qautomatron/docker-3proxy
|
||||||
@ -61,8 +57,6 @@ services:
|
|||||||
- "3129:3129"
|
- "3129:3129"
|
||||||
volumes:
|
volumes:
|
||||||
- ./test/support/ci:/etc/3proxy
|
- ./test/support/ci:/etc/3proxy
|
||||||
links:
|
|
||||||
- "nghttp2:another"
|
|
||||||
|
|
||||||
httpproxy:
|
httpproxy:
|
||||||
image: sameersbn/squid:3.5.27-2
|
image: sameersbn/squid:3.5.27-2
|
||||||
@ -72,56 +66,53 @@ services:
|
|||||||
- ./test/support/ci/squid/proxy.conf:/etc/squid/squid.conf
|
- ./test/support/ci/squid/proxy.conf:/etc/squid/squid.conf
|
||||||
- ./test/support/ci/squid/proxy-users-basic.txt:/etc/squid/proxy-users-basic.txt
|
- ./test/support/ci/squid/proxy-users-basic.txt:/etc/squid/proxy-users-basic.txt
|
||||||
- ./test/support/ci/squid/proxy-users-digest.txt:/etc/squid/proxy-users-digest.txt
|
- ./test/support/ci/squid/proxy-users-digest.txt:/etc/squid/proxy-users-digest.txt
|
||||||
links:
|
command: -d 3
|
||||||
- "nghttp2:another"
|
|
||||||
command:
|
|
||||||
-d 3
|
|
||||||
|
|
||||||
http2proxy:
|
http2proxy:
|
||||||
image: registry.gitlab.com/os85/httpx/nghttp2:1
|
image: registry.gitlab.com/os85/httpx/nghttp2:3
|
||||||
ports:
|
ports:
|
||||||
- 3300:80
|
- 3300:80
|
||||||
depends_on:
|
depends_on:
|
||||||
- httpproxy
|
- httpproxy
|
||||||
entrypoint:
|
entrypoint: /usr/local/bin/nghttpx
|
||||||
/usr/local/bin/nghttpx
|
command: --no-ocsp --frontend '*,80;no-tls' --backend 'httpproxy,3128' --http2-proxy
|
||||||
command:
|
|
||||||
--no-ocsp --frontend '*,80;no-tls' --backend 'httpproxy,3128' --http2-proxy
|
|
||||||
|
|
||||||
nghttp2:
|
nghttp2:
|
||||||
image: registry.gitlab.com/os85/httpx/nghttp2:1
|
image: registry.gitlab.com/os85/httpx/nghttp2:3
|
||||||
ports:
|
ports:
|
||||||
- 80:80
|
- 80:80
|
||||||
- 443:443
|
- 443:443
|
||||||
depends_on:
|
depends_on:
|
||||||
- httpbin
|
- httpbin
|
||||||
entrypoint:
|
entrypoint: /usr/local/bin/nghttpx
|
||||||
/usr/local/bin/nghttpx
|
|
||||||
volumes:
|
volumes:
|
||||||
- ./test/support/ci:/home
|
- ./test/support/ci:/home
|
||||||
command:
|
command: --conf /home/nghttp.conf --no-ocsp --frontend '*,80;no-tls' --frontend '*,443'
|
||||||
--conf /home/nghttp.conf --no-ocsp --frontend '*,80;no-tls' --frontend '*,443'
|
networks:
|
||||||
|
default:
|
||||||
|
aliases:
|
||||||
|
- another
|
||||||
|
|
||||||
altsvc-nghttp2:
|
altsvc-nghttp2:
|
||||||
image: registry.gitlab.com/os85/httpx/nghttp2:1
|
image: registry.gitlab.com/os85/httpx/nghttp2:3
|
||||||
ports:
|
ports:
|
||||||
- 81:80
|
- 81:80
|
||||||
- 444:443
|
- 444:443
|
||||||
depends_on:
|
depends_on:
|
||||||
- httpbin
|
- httpbin
|
||||||
entrypoint:
|
entrypoint: /usr/local/bin/nghttpx
|
||||||
/usr/local/bin/nghttpx
|
|
||||||
volumes:
|
volumes:
|
||||||
- ./test/support/ci:/home
|
- ./test/support/ci:/home
|
||||||
command:
|
command: --conf /home/nghttp.conf --no-ocsp --frontend '*,80;no-tls' --frontend '*,443' --altsvc "h2,443,nghttp2"
|
||||||
--conf /home/nghttp.conf --no-ocsp --frontend '*,80;no-tls' --frontend '*,443' --altsvc "h2,443,nghttp2"
|
networks:
|
||||||
|
default:
|
||||||
|
aliases:
|
||||||
|
- another2
|
||||||
httpbin:
|
httpbin:
|
||||||
environment:
|
environment:
|
||||||
- DEBUG=True
|
- DEBUG=True
|
||||||
image: citizenstig/httpbin
|
image: citizenstig/httpbin
|
||||||
command:
|
command: gunicorn --bind=0.0.0.0:8000 --workers=6 --access-logfile - --error-logfile - --log-level debug --capture-output httpbin:app
|
||||||
gunicorn --bind=0.0.0.0:8000 --workers=6 --access-logfile - --error-logfile - --log-level debug --capture-output httpbin:app
|
|
||||||
|
|
||||||
aws:
|
aws:
|
||||||
image: localstack/localstack
|
image: localstack/localstack
|
||||||
@ -133,6 +124,10 @@ services:
|
|||||||
- 4566:4566
|
- 4566:4566
|
||||||
volumes:
|
volumes:
|
||||||
- ./test/support/ci/aws:/docker-entrypoint-initaws.d
|
- ./test/support/ci/aws:/docker-entrypoint-initaws.d
|
||||||
|
networks:
|
||||||
|
default:
|
||||||
|
aliases:
|
||||||
|
- test.aws
|
||||||
|
|
||||||
ws-echo-server:
|
ws-echo-server:
|
||||||
environment:
|
environment:
|
||||||
|
@ -1,11 +1,20 @@
|
|||||||
require "httpx"
|
require "httpx"
|
||||||
|
|
||||||
|
if ARGV.empty?
|
||||||
URLS = %w[https://nghttp2.org/httpbin/get] * 1
|
URLS = %w[https://nghttp2.org/httpbin/get] * 1
|
||||||
|
else
|
||||||
|
URLS = ARGV
|
||||||
|
end
|
||||||
|
|
||||||
responses = HTTPX.get(*URLS)
|
responses = HTTPX.get(*URLS)
|
||||||
Array(responses).each(&:raise_for_status)
|
Array(responses).each do |res|
|
||||||
puts "Status: \n"
|
puts "URI: #{res.uri}"
|
||||||
puts Array(responses).map(&:status)
|
case res
|
||||||
puts "Payload: \n"
|
when HTTPX::ErrorResponse
|
||||||
puts Array(responses).map(&:to_s)
|
puts "error: #{res.error}"
|
||||||
|
puts res.error.backtrace
|
||||||
|
else
|
||||||
|
puts "STATUS: #{res.status}"
|
||||||
|
puts res.to_s[0..2048]
|
||||||
|
end
|
||||||
|
end
|
||||||
|
@ -17,23 +17,49 @@ end
|
|||||||
|
|
||||||
Signal.trap("INFO") { print_status } unless ENV.key?("CI")
|
Signal.trap("INFO") { print_status } unless ENV.key?("CI")
|
||||||
|
|
||||||
|
PAGES = (ARGV.first || 10).to_i
|
||||||
|
|
||||||
Thread.start do
|
Thread.start do
|
||||||
frontpage = HTTPX.get("https://news.ycombinator.com").to_s
|
page_links = []
|
||||||
|
HTTPX.wrap do |http|
|
||||||
|
PAGES.times.each do |i|
|
||||||
|
frontpage = http.get("https://news.ycombinator.com?p=#{i+1}").to_s
|
||||||
|
|
||||||
html = Oga.parse_html(frontpage)
|
html = Oga.parse_html(frontpage)
|
||||||
|
|
||||||
links = html.css('.itemlist a.storylink').map{|link| link.get('href') }
|
links = html.css('.athing .title a').map{|link| link.get('href') }.select { |link| URI(link).absolute? }
|
||||||
|
|
||||||
links = links.select {|l| l.start_with?("https") }
|
links = links.select {|l| l.start_with?("https") }
|
||||||
|
|
||||||
puts links
|
puts "for page #{i+1}: #{links.size} links"
|
||||||
|
page_links.concat(links)
|
||||||
responses = HTTPX.get(*links)
|
|
||||||
|
|
||||||
links.each_with_index do |l, i|
|
|
||||||
puts "#{responses[i].status}: #{l}"
|
|
||||||
end
|
end
|
||||||
|
end
|
||||||
|
|
||||||
|
puts "requesting #{page_links.size} links:"
|
||||||
|
responses = HTTPX.get(*page_links)
|
||||||
|
|
||||||
|
# page_links.each_with_index do |l, i|
|
||||||
|
# puts "#{responses[i].status}: #{l}"
|
||||||
|
# end
|
||||||
|
|
||||||
|
responses, error_responses = responses.partition { |r| r.is_a?(HTTPX::Response) }
|
||||||
|
puts "#{responses.size} responses (from #{page_links.size})"
|
||||||
|
puts "by group:"
|
||||||
|
responses.group_by(&:status).each do |st, res|
|
||||||
|
res.each do |r|
|
||||||
|
puts "#{st}: #{r.uri}"
|
||||||
|
end
|
||||||
|
end unless responses.empty?
|
||||||
|
|
||||||
|
unless error_responses.empty?
|
||||||
|
puts "error responses (#{error_responses.size})"
|
||||||
|
error_responses.group_by{ |r| r.error.class }.each do |kl, res|
|
||||||
|
res.each do |r|
|
||||||
|
puts "#{r.uri}: #{r.error}"
|
||||||
|
puts r.error.backtrace&.join("\n")
|
||||||
|
end
|
||||||
|
end
|
||||||
|
end
|
||||||
|
|
||||||
end.join
|
end.join
|
||||||
|
|
||||||
|
|
||||||
|
|
||||||
|
@ -1,7 +1,7 @@
|
|||||||
require "httpx"
|
require "httpx"
|
||||||
require "oga"
|
require "oga"
|
||||||
|
|
||||||
http = HTTPX.plugin(:compression).plugin(:persistent).with(timeout: { operation_timeut: 5, connect_timeout: 5})
|
http = HTTPX.plugin(:persistent).with(timeout: { request_timeout: 5 })
|
||||||
|
|
||||||
PAGES = (ARGV.first || 10).to_i
|
PAGES = (ARGV.first || 10).to_i
|
||||||
pages = PAGES.times.map do |page|
|
pages = PAGES.times.map do |page|
|
||||||
@ -16,10 +16,11 @@ Array(http.get(*pages)).each_with_index.map do |response, i|
|
|||||||
end
|
end
|
||||||
html = Oga.parse_html(response.to_s)
|
html = Oga.parse_html(response.to_s)
|
||||||
# binding.irb
|
# binding.irb
|
||||||
page_links = html.css('.itemlist a.titlelink').map{|link| link.get('href') }
|
page_links = html.css('.athing .title a').map{|link| link.get('href') }.select { |link| URI(link).absolute? }
|
||||||
puts "page(#{i+1}): #{page_links.size}"
|
puts "page(#{i+1}): #{page_links.size}"
|
||||||
if page_links.size == 0
|
if page_links.size == 0
|
||||||
puts "error(#{response.status}) on page #{i+1}"
|
puts "error(#{response.status}) on page #{i+1}"
|
||||||
|
next
|
||||||
end
|
end
|
||||||
# page_links.each do |link|
|
# page_links.each do |link|
|
||||||
# puts "link: #{link}"
|
# puts "link: #{link}"
|
||||||
@ -31,6 +32,11 @@ end
|
|||||||
links = links.each_with_index do |pages, i|
|
links = links.each_with_index do |pages, i|
|
||||||
puts "Page: #{i+1}\t Links: #{pages.size}"
|
puts "Page: #{i+1}\t Links: #{pages.size}"
|
||||||
pages.each do |page|
|
pages.each do |page|
|
||||||
puts "URL: #{page.uri} (#{page.status})"
|
case page
|
||||||
|
in status:
|
||||||
|
puts "URL: #{page.uri} (#{status})"
|
||||||
|
in error:
|
||||||
|
puts "URL: #{page.uri} (#{error.message})"
|
||||||
|
end
|
||||||
end
|
end
|
||||||
end
|
end
|
||||||
|
@ -7,8 +7,8 @@
|
|||||||
#
|
#
|
||||||
require "httpx"
|
require "httpx"
|
||||||
|
|
||||||
URLS = %w[http://badipv4.test.ipv6friday.org/] * 1
|
# URLS = %w[https://ipv4.test-ipv6.com] * 1
|
||||||
# URLS = %w[http://badipv6.test.ipv6friday.org/] * 1
|
URLS = %w[https://ipv6.test-ipv6.com] * 1
|
||||||
|
|
||||||
responses = HTTPX.get(*URLS, ssl: { verify_mode: OpenSSL::SSL::VERIFY_NONE})
|
responses = HTTPX.get(*URLS, ssl: { verify_mode: OpenSSL::SSL::VERIFY_NONE})
|
||||||
|
|
||||||
|
@ -6,11 +6,9 @@ include HTTPX
|
|||||||
URLS = %w[http://nghttp2.org https://nghttp2.org/blog/]# * 3
|
URLS = %w[http://nghttp2.org https://nghttp2.org/blog/]# * 3
|
||||||
|
|
||||||
client = HTTPX.plugin(:proxy)
|
client = HTTPX.plugin(:proxy)
|
||||||
client = client.with_proxy(uri: "http://61.7.174.110:54132")
|
client = client.with_proxy(uri: "http://134.209.29.120:8080")
|
||||||
responses = client.get(URLS)
|
responses = client.get(*URLS)
|
||||||
puts responses.map(&:status)
|
puts responses.map(&:status)
|
||||||
|
|
||||||
# response = client.get(URLS.first)
|
# response = client.get(URLS.first)
|
||||||
# puts response.status
|
# puts response.status
|
||||||
|
|
||||||
|
|
||||||
|
8
examples/resolv/addrinfo.rb
Normal file
8
examples/resolv/addrinfo.rb
Normal file
@ -0,0 +1,8 @@
|
|||||||
|
require "socket"
|
||||||
|
|
||||||
|
puts Process.pid
|
||||||
|
sleep 10
|
||||||
|
puts Addrinfo.getaddrinfo("www.google.com", 80).inspect
|
||||||
|
sleep 10
|
||||||
|
puts Addrinfo.getaddrinfo("www.google.com", 80).inspect
|
||||||
|
sleep 60
|
40
examples/resolv/resolv_tcp.rb
Normal file
40
examples/resolv/resolv_tcp.rb
Normal file
@ -0,0 +1,40 @@
|
|||||||
|
# frozen_string_literal: true
|
||||||
|
|
||||||
|
require "resolv"
|
||||||
|
require "httpx"
|
||||||
|
|
||||||
|
host = "127.0.0.11"
|
||||||
|
port = 53
|
||||||
|
|
||||||
|
# srv_hostname = "aerserv-bc-us-east.bidswitch.net"
|
||||||
|
record_type = Resolv::DNS::Resource::IN::A
|
||||||
|
|
||||||
|
# # addresses = nil
|
||||||
|
# # Resolv::DNS.open(nameserver: host) do |dns|
|
||||||
|
# # require "pry-byebug"; binding.pry
|
||||||
|
# # addresses = dns.getresources(srv_hostname, record_type)
|
||||||
|
# # end
|
||||||
|
|
||||||
|
# message_id = 1
|
||||||
|
# buffer = HTTPX::Resolver.encode_dns_query(srv_hostname, type: record_type, message_id: message_id)
|
||||||
|
|
||||||
|
# io = TCPSocket.new(host, port)
|
||||||
|
# buffer[0, 2] = [buffer.size, message_id].pack("nn")
|
||||||
|
# io.write(buffer.to_s)
|
||||||
|
# data, _ = io.readpartial(2048)
|
||||||
|
# size = data[0, 2].unpack1("n")
|
||||||
|
# answer = data[2..-1]
|
||||||
|
# answer << io.readpartial(size) if size > answer.bytesize
|
||||||
|
|
||||||
|
# addresses = HTTPX::Resolver.decode_dns_answer(answer)
|
||||||
|
|
||||||
|
# puts "(#{srv_hostname}) addresses: #{addresses}"
|
||||||
|
|
||||||
|
srv_hostname = "www.sfjewjfwigiewpgwwg-native-1.com"
|
||||||
|
socket = UDPSocket.new
|
||||||
|
buffer = HTTPX::Resolver.encode_dns_query(srv_hostname, type: record_type)
|
||||||
|
socket.send(buffer.to_s, 0, host, port)
|
||||||
|
recv, _ = socket.recvfrom(512)
|
||||||
|
puts "received #{recv.bytesize} bytes..."
|
||||||
|
addresses = HTTPX::Resolver.decode_dns_answer(recv)
|
||||||
|
puts "(#{srv_hostname}) addresses: #{addresses}"
|
23
examples/resolv/srv_record.rb
Normal file
23
examples/resolv/srv_record.rb
Normal file
@ -0,0 +1,23 @@
|
|||||||
|
require "httpx"
|
||||||
|
|
||||||
|
host = "1.1.1.1"
|
||||||
|
port = 53
|
||||||
|
|
||||||
|
hostname = "google.com"
|
||||||
|
srv_hostname = "_https._tcp.#{hostname}"
|
||||||
|
record_type = Resolv::DNS::Resource::IN::SRV
|
||||||
|
|
||||||
|
addresses = nil
|
||||||
|
Resolv::DNS.open(nameserver: host) do |dns|
|
||||||
|
addresses = dns.getresources(srv_hostname, record_type)
|
||||||
|
end
|
||||||
|
|
||||||
|
# buffer = HTTPX::Resolver.encode_dns_query(hostname, type: record_type)
|
||||||
|
|
||||||
|
# io = UDPSocket.new(Socket::AF_INET)
|
||||||
|
# size = io.send(buffer.to_s, 0, Socket.sockaddr_in(port, host.to_s))
|
||||||
|
# data, _ = io.recvfrom(2048)
|
||||||
|
|
||||||
|
# addresses = HTTPX::Resolver.decode_dns_answer(data)
|
||||||
|
|
||||||
|
puts "(#{hostname}) addresses: #{addresses}"
|
@ -20,10 +20,10 @@ Gem::Specification.new do |gem|
|
|||||||
|
|
||||||
gem.metadata = {
|
gem.metadata = {
|
||||||
"bug_tracker_uri" => "https://gitlab.com/os85/httpx/issues",
|
"bug_tracker_uri" => "https://gitlab.com/os85/httpx/issues",
|
||||||
"changelog_uri" => "https://os85.gitlab.io/httpx/#release-notes",
|
"changelog_uri" => "https://honeyryderchuck.gitlab.io/httpx/#release-notes",
|
||||||
"documentation_uri" => "https://os85.gitlab.io/httpx/rdoc/",
|
"documentation_uri" => "https://honeyryderchuck.gitlab.io/httpx/rdoc/",
|
||||||
"source_code_uri" => "https://gitlab.com/os85/httpx",
|
"source_code_uri" => "https://gitlab.com/os85/httpx",
|
||||||
"homepage_uri" => "https://os85.gitlab.io/httpx/",
|
"homepage_uri" => "https://honeyryderchuck.gitlab.io/httpx/",
|
||||||
"rubygems_mfa_required" => "true",
|
"rubygems_mfa_required" => "true",
|
||||||
}
|
}
|
||||||
|
|
||||||
@ -32,5 +32,7 @@ Gem::Specification.new do |gem|
|
|||||||
|
|
||||||
gem.require_paths = ["lib"]
|
gem.require_paths = ["lib"]
|
||||||
|
|
||||||
gem.add_runtime_dependency "http-2-next", ">= 0.4.1"
|
gem.add_runtime_dependency "http-2", ">= 1.0.0"
|
||||||
|
|
||||||
|
gem.required_ruby_version = ">= 2.7.0"
|
||||||
end
|
end
|
||||||
|
133
integration_tests/datadog_helpers.rb
Normal file
133
integration_tests/datadog_helpers.rb
Normal file
@ -0,0 +1,133 @@
|
|||||||
|
# frozen_string_literal: true
|
||||||
|
|
||||||
|
module DatadogHelpers
|
||||||
|
DATADOG_VERSION = defined?(DDTrace) ? DDTrace::VERSION : Datadog::VERSION
|
||||||
|
ERROR_TAG = if Gem::Version.new(DATADOG_VERSION::STRING) >= Gem::Version.new("1.8.0")
|
||||||
|
"error.message"
|
||||||
|
else
|
||||||
|
"error.msg"
|
||||||
|
end
|
||||||
|
|
||||||
|
private
|
||||||
|
|
||||||
|
def verify_instrumented_request(status, verb:, uri:, span: fetch_spans.first, service: datadog_service_name.to_s, error: nil)
|
||||||
|
if Gem::Version.new(DATADOG_VERSION::STRING) >= Gem::Version.new("2.0.0")
|
||||||
|
assert span.type == "http"
|
||||||
|
else
|
||||||
|
assert span.span_type == "http"
|
||||||
|
end
|
||||||
|
assert span.name == "#{datadog_service_name}.request"
|
||||||
|
assert span.service == service
|
||||||
|
|
||||||
|
assert span.get_tag("out.host") == uri.host
|
||||||
|
assert span.get_tag("out.port") == 80
|
||||||
|
assert span.get_tag("http.method") == verb
|
||||||
|
assert span.get_tag("http.url") == uri.path
|
||||||
|
|
||||||
|
if status && status >= 400
|
||||||
|
verify_http_error_span(span, status, error)
|
||||||
|
elsif error
|
||||||
|
verify_error_span(span)
|
||||||
|
else
|
||||||
|
assert span.status.zero?
|
||||||
|
assert span.get_tag("http.status_code") == status.to_s
|
||||||
|
# peer service
|
||||||
|
# assert span.get_tag("peer.service") == span.service
|
||||||
|
end
|
||||||
|
end
|
||||||
|
|
||||||
|
def verify_http_error_span(span, status, error)
|
||||||
|
assert span.get_tag("http.status_code") == status.to_s
|
||||||
|
assert span.get_tag("error.type") == error
|
||||||
|
assert !span.get_tag(ERROR_TAG).nil?
|
||||||
|
assert span.status == 1
|
||||||
|
end
|
||||||
|
|
||||||
|
def verify_error_span(span)
|
||||||
|
assert span.get_tag("error.type") == "HTTPX::NativeResolveError"
|
||||||
|
assert !span.get_tag(ERROR_TAG).nil?
|
||||||
|
assert span.status == 1
|
||||||
|
end
|
||||||
|
|
||||||
|
def verify_no_distributed_headers(request_headers)
|
||||||
|
assert !request_headers.key?("x-datadog-parent-id")
|
||||||
|
assert !request_headers.key?("x-datadog-trace-id")
|
||||||
|
assert !request_headers.key?("x-datadog-sampling-priority")
|
||||||
|
end
|
||||||
|
|
||||||
|
def verify_distributed_headers(request_headers, span: fetch_spans.first, sampling_priority: 1)
|
||||||
|
if Gem::Version.new(DATADOG_VERSION::STRING) >= Gem::Version.new("2.0.0")
|
||||||
|
assert request_headers["x-datadog-parent-id"] == span.id.to_s
|
||||||
|
else
|
||||||
|
assert request_headers["x-datadog-parent-id"] == span.span_id.to_s
|
||||||
|
end
|
||||||
|
assert request_headers["x-datadog-trace-id"] == trace_id(span)
|
||||||
|
assert request_headers["x-datadog-sampling-priority"] == sampling_priority.to_s
|
||||||
|
end
|
||||||
|
|
||||||
|
if Gem::Version.new(DATADOG_VERSION::STRING) >= Gem::Version.new("1.17.0")
|
||||||
|
def trace_id(span)
|
||||||
|
Datadog::Tracing::Utils::TraceId.to_low_order(span.trace_id).to_s
|
||||||
|
end
|
||||||
|
else
|
||||||
|
def trace_id(span)
|
||||||
|
span.trace_id.to_s
|
||||||
|
end
|
||||||
|
end
|
||||||
|
|
||||||
|
def verify_analytics_headers(span, sample_rate: nil)
|
||||||
|
assert span.get_metric("_dd1.sr.eausr") == sample_rate
|
||||||
|
end
|
||||||
|
|
||||||
|
def set_datadog(options = {}, &blk)
|
||||||
|
Datadog.configure do |c|
|
||||||
|
c.tracing.instrument(datadog_service_name, options, &blk)
|
||||||
|
end
|
||||||
|
|
||||||
|
tracer # initialize tracer patches
|
||||||
|
end
|
||||||
|
|
||||||
|
def tracer
|
||||||
|
@tracer ||= begin
|
||||||
|
tr = Datadog::Tracing.send(:tracer)
|
||||||
|
def tr.write(trace)
|
||||||
|
@traces ||= []
|
||||||
|
@traces << trace
|
||||||
|
end
|
||||||
|
tr
|
||||||
|
end
|
||||||
|
end
|
||||||
|
|
||||||
|
def trace_with_sampling_priority(priority)
|
||||||
|
tracer.trace("foo.bar") do
|
||||||
|
tracer.active_trace.sampling_priority = priority
|
||||||
|
yield
|
||||||
|
end
|
||||||
|
end
|
||||||
|
|
||||||
|
# Returns spans and caches it (similar to +let(:spans)+).
|
||||||
|
def spans
|
||||||
|
@spans ||= fetch_spans
|
||||||
|
end
|
||||||
|
|
||||||
|
# Retrieves and sorts all spans in the current tracer instance.
|
||||||
|
# This method does not cache its results.
|
||||||
|
def fetch_spans
|
||||||
|
spans = (tracer.instance_variable_get(:@traces) || []).map(&:spans)
|
||||||
|
spans.flatten.sort! do |a, b|
|
||||||
|
if a.name == b.name
|
||||||
|
if a.resource == b.resource
|
||||||
|
if a.start_time == b.start_time
|
||||||
|
a.end_time <=> b.end_time
|
||||||
|
else
|
||||||
|
a.start_time <=> b.start_time
|
||||||
|
end
|
||||||
|
else
|
||||||
|
a.resource <=> b.resource
|
||||||
|
end
|
||||||
|
else
|
||||||
|
a.name <=> b.name
|
||||||
|
end
|
||||||
|
end
|
||||||
|
end
|
||||||
|
end
|
@ -1,51 +1,60 @@
|
|||||||
# frozen_string_literal: true
|
# frozen_string_literal: true
|
||||||
|
|
||||||
|
begin
|
||||||
|
# upcoming 2.0
|
||||||
|
require "datadog"
|
||||||
|
rescue LoadError
|
||||||
require "ddtrace"
|
require "ddtrace"
|
||||||
|
end
|
||||||
|
|
||||||
require "test_helper"
|
require "test_helper"
|
||||||
require "support/http_helpers"
|
require "support/http_helpers"
|
||||||
require "httpx/adapters/datadog"
|
require "httpx/adapters/datadog"
|
||||||
|
require_relative "datadog_helpers"
|
||||||
|
|
||||||
class DatadogTest < Minitest::Test
|
class DatadogTest < Minitest::Test
|
||||||
include HTTPHelpers
|
include HTTPHelpers
|
||||||
|
include DatadogHelpers
|
||||||
|
|
||||||
def test_datadog_successful_get_request
|
def test_datadog_successful_get_request
|
||||||
set_datadog
|
set_datadog
|
||||||
uri = URI(build_uri("/status/200", "http://#{httpbin}"))
|
uri = URI(build_uri("/get", "http://#{httpbin}"))
|
||||||
|
|
||||||
response = HTTPX.get(uri)
|
response = HTTPX.get(uri)
|
||||||
verify_status(response, 200)
|
verify_status(response, 200)
|
||||||
|
|
||||||
assert !fetch_spans.empty?, "expected to have spans"
|
assert !fetch_spans.empty?, "expected to have spans"
|
||||||
verify_instrumented_request(response, verb: "GET", uri: uri)
|
verify_instrumented_request(response.status, verb: "GET", uri: uri)
|
||||||
verify_distributed_headers(response)
|
verify_distributed_headers(request_headers(response))
|
||||||
end
|
end
|
||||||
|
|
||||||
def test_datadog_successful_post_request
|
def test_datadog_successful_post_request
|
||||||
set_datadog
|
set_datadog
|
||||||
uri = URI(build_uri("/status/200", "http://#{httpbin}"))
|
uri = URI(build_uri("/post", "http://#{httpbin}"))
|
||||||
|
|
||||||
response = HTTPX.post(uri, body: "bla")
|
response = HTTPX.post(uri, body: "bla")
|
||||||
verify_status(response, 200)
|
verify_status(response, 200)
|
||||||
|
|
||||||
assert !fetch_spans.empty?, "expected to have spans"
|
assert !fetch_spans.empty?, "expected to have spans"
|
||||||
verify_instrumented_request(response, verb: "POST", uri: uri)
|
verify_instrumented_request(response.status, verb: "POST", uri: uri)
|
||||||
verify_distributed_headers(response)
|
verify_distributed_headers(request_headers(response))
|
||||||
end
|
end
|
||||||
|
|
||||||
def test_datadog_successful_multiple_requests
|
def test_datadog_successful_multiple_requests
|
||||||
set_datadog
|
set_datadog
|
||||||
uri = URI(build_uri("/status/200", "http://#{httpbin}"))
|
get_uri = URI(build_uri("/get", "http://#{httpbin}"))
|
||||||
|
post_uri = URI(build_uri("/post", "http://#{httpbin}"))
|
||||||
|
|
||||||
get_response, post_response = HTTPX.request([["GET", uri], ["POST", uri]])
|
get_response, post_response = HTTPX.request([["GET", get_uri], ["POST", post_uri]])
|
||||||
verify_status(get_response, 200)
|
verify_status(get_response, 200)
|
||||||
verify_status(post_response, 200)
|
verify_status(post_response, 200)
|
||||||
|
|
||||||
assert fetch_spans.size == 2, "expected to have 2 spans"
|
assert fetch_spans.size == 2, "expected to have 2 spans"
|
||||||
get_span, post_span = fetch_spans
|
get_span, post_span = fetch_spans
|
||||||
verify_instrumented_request(get_response, span: get_span, verb: "GET", uri: uri)
|
verify_instrumented_request(get_response.status, span: get_span, verb: "GET", uri: get_uri)
|
||||||
verify_instrumented_request(post_response, span: post_span, verb: "POST", uri: uri)
|
verify_instrumented_request(post_response.status, span: post_span, verb: "POST", uri: post_uri)
|
||||||
verify_distributed_headers(get_response, span: get_span)
|
verify_distributed_headers(request_headers(get_response), span: get_span)
|
||||||
verify_distributed_headers(post_response, span: post_span)
|
verify_distributed_headers(request_headers(post_response), span: post_span)
|
||||||
verify_analytics_headers(get_span)
|
verify_analytics_headers(get_span)
|
||||||
verify_analytics_headers(post_span)
|
verify_analytics_headers(post_span)
|
||||||
end
|
end
|
||||||
@ -58,8 +67,7 @@ class DatadogTest < Minitest::Test
|
|||||||
verify_status(response, 500)
|
verify_status(response, 500)
|
||||||
|
|
||||||
assert !fetch_spans.empty?, "expected to have spans"
|
assert !fetch_spans.empty?, "expected to have spans"
|
||||||
verify_instrumented_request(response, verb: "GET", uri: uri)
|
verify_instrumented_request(response.status, verb: "GET", uri: uri, error: "HTTPX::HTTPError")
|
||||||
verify_distributed_headers(response)
|
|
||||||
end
|
end
|
||||||
|
|
||||||
def test_datadog_client_error_request
|
def test_datadog_client_error_request
|
||||||
@ -70,8 +78,7 @@ class DatadogTest < Minitest::Test
|
|||||||
verify_status(response, 404)
|
verify_status(response, 404)
|
||||||
|
|
||||||
assert !fetch_spans.empty?, "expected to have spans"
|
assert !fetch_spans.empty?, "expected to have spans"
|
||||||
verify_instrumented_request(response, verb: "GET", uri: uri)
|
verify_instrumented_request(response.status, verb: "GET", uri: uri, error: "HTTPX::HTTPError")
|
||||||
verify_distributed_headers(response)
|
|
||||||
end
|
end
|
||||||
|
|
||||||
def test_datadog_some_other_error
|
def test_datadog_some_other_error
|
||||||
@ -82,12 +89,11 @@ class DatadogTest < Minitest::Test
|
|||||||
assert response.is_a?(HTTPX::ErrorResponse), "response should contain errors"
|
assert response.is_a?(HTTPX::ErrorResponse), "response should contain errors"
|
||||||
|
|
||||||
assert !fetch_spans.empty?, "expected to have spans"
|
assert !fetch_spans.empty?, "expected to have spans"
|
||||||
verify_instrumented_request(response, verb: "GET", uri: uri, error: "HTTPX::NativeResolveError")
|
verify_instrumented_request(nil, verb: "GET", uri: uri, error: "HTTPX::NativeResolveError")
|
||||||
verify_distributed_headers(response)
|
|
||||||
end
|
end
|
||||||
|
|
||||||
def test_datadog_host_config
|
def test_datadog_host_config
|
||||||
uri = URI(build_uri("/status/200", "http://#{httpbin}"))
|
uri = URI(build_uri("/get", "http://#{httpbin}"))
|
||||||
set_datadog(describe: /#{uri.host}/) do |http|
|
set_datadog(describe: /#{uri.host}/) do |http|
|
||||||
http.service_name = "httpbin"
|
http.service_name = "httpbin"
|
||||||
http.split_by_domain = false
|
http.split_by_domain = false
|
||||||
@ -97,12 +103,12 @@ class DatadogTest < Minitest::Test
|
|||||||
verify_status(response, 200)
|
verify_status(response, 200)
|
||||||
|
|
||||||
assert !fetch_spans.empty?, "expected to have spans"
|
assert !fetch_spans.empty?, "expected to have spans"
|
||||||
verify_instrumented_request(response, service: "httpbin", verb: "GET", uri: uri)
|
verify_instrumented_request(response.status, service: "httpbin", verb: "GET", uri: uri)
|
||||||
verify_distributed_headers(response)
|
verify_distributed_headers(request_headers(response))
|
||||||
end
|
end
|
||||||
|
|
||||||
def test_datadog_split_by_domain
|
def test_datadog_split_by_domain
|
||||||
uri = URI(build_uri("/status/200", "http://#{httpbin}"))
|
uri = URI(build_uri("/get", "http://#{httpbin}"))
|
||||||
set_datadog do |http|
|
set_datadog do |http|
|
||||||
http.split_by_domain = true
|
http.split_by_domain = true
|
||||||
end
|
end
|
||||||
@ -111,13 +117,13 @@ class DatadogTest < Minitest::Test
|
|||||||
verify_status(response, 200)
|
verify_status(response, 200)
|
||||||
|
|
||||||
assert !fetch_spans.empty?, "expected to have spans"
|
assert !fetch_spans.empty?, "expected to have spans"
|
||||||
verify_instrumented_request(response, service: uri.host, verb: "GET", uri: uri)
|
verify_instrumented_request(response.status, service: uri.host, verb: "GET", uri: uri)
|
||||||
verify_distributed_headers(response)
|
verify_distributed_headers(request_headers(response))
|
||||||
end
|
end
|
||||||
|
|
||||||
def test_datadog_distributed_headers_disabled
|
def test_datadog_distributed_headers_disabled
|
||||||
set_datadog(distributed_tracing: false)
|
set_datadog(distributed_tracing: false)
|
||||||
uri = URI(build_uri("/status/200", "http://#{httpbin}"))
|
uri = URI(build_uri("/get", "http://#{httpbin}"))
|
||||||
|
|
||||||
sampling_priority = 10
|
sampling_priority = 10
|
||||||
response = trace_with_sampling_priority(sampling_priority) do
|
response = trace_with_sampling_priority(sampling_priority) do
|
||||||
@ -127,14 +133,14 @@ class DatadogTest < Minitest::Test
|
|||||||
|
|
||||||
assert !fetch_spans.empty?, "expected to have spans"
|
assert !fetch_spans.empty?, "expected to have spans"
|
||||||
span = fetch_spans.last
|
span = fetch_spans.last
|
||||||
verify_instrumented_request(response, span: span, verb: "GET", uri: uri)
|
verify_instrumented_request(response.status, span: span, verb: "GET", uri: uri)
|
||||||
verify_no_distributed_headers(response)
|
verify_no_distributed_headers(request_headers(response))
|
||||||
verify_analytics_headers(span)
|
verify_analytics_headers(span)
|
||||||
end
|
end
|
||||||
|
|
||||||
def test_datadog_distributed_headers_sampling_priority
|
def test_datadog_distributed_headers_sampling_priority
|
||||||
set_datadog
|
set_datadog
|
||||||
uri = URI(build_uri("/status/200", "http://#{httpbin}"))
|
uri = URI(build_uri("/get", "http://#{httpbin}"))
|
||||||
|
|
||||||
sampling_priority = 10
|
sampling_priority = 10
|
||||||
response = trace_with_sampling_priority(sampling_priority) do
|
response = trace_with_sampling_priority(sampling_priority) do
|
||||||
@ -145,37 +151,51 @@ class DatadogTest < Minitest::Test
|
|||||||
|
|
||||||
assert !fetch_spans.empty?, "expected to have spans"
|
assert !fetch_spans.empty?, "expected to have spans"
|
||||||
span = fetch_spans.last
|
span = fetch_spans.last
|
||||||
verify_instrumented_request(response, span: span, verb: "GET", uri: uri)
|
verify_instrumented_request(response.status, span: span, verb: "GET", uri: uri)
|
||||||
verify_distributed_headers(response, span: span, sampling_priority: sampling_priority)
|
verify_distributed_headers(request_headers(response), span: span, sampling_priority: sampling_priority)
|
||||||
verify_analytics_headers(span)
|
verify_analytics_headers(span)
|
||||||
end
|
end
|
||||||
|
|
||||||
def test_datadog_analytics_enabled
|
def test_datadog_analytics_enabled
|
||||||
set_datadog(analytics_enabled: true)
|
set_datadog(analytics_enabled: true)
|
||||||
uri = URI(build_uri("/status/200", "http://#{httpbin}"))
|
uri = URI(build_uri("/get", "http://#{httpbin}"))
|
||||||
|
|
||||||
response = HTTPX.get(uri)
|
response = HTTPX.get(uri)
|
||||||
verify_status(response, 200)
|
verify_status(response, 200)
|
||||||
|
|
||||||
assert !fetch_spans.empty?, "expected to have spans"
|
assert !fetch_spans.empty?, "expected to have spans"
|
||||||
span = fetch_spans.last
|
span = fetch_spans.last
|
||||||
verify_instrumented_request(response, span: span, verb: "GET", uri: uri)
|
verify_instrumented_request(response.status, span: span, verb: "GET", uri: uri)
|
||||||
verify_analytics_headers(span, sample_rate: 1.0)
|
verify_analytics_headers(span, sample_rate: 1.0)
|
||||||
end
|
end
|
||||||
|
|
||||||
def test_datadog_analytics_sample_rate
|
def test_datadog_analytics_sample_rate
|
||||||
set_datadog(analytics_enabled: true, analytics_sample_rate: 0.5)
|
set_datadog(analytics_enabled: true, analytics_sample_rate: 0.5)
|
||||||
uri = URI(build_uri("/status/200", "http://#{httpbin}"))
|
uri = URI(build_uri("/get", "http://#{httpbin}"))
|
||||||
|
|
||||||
response = HTTPX.get(uri)
|
response = HTTPX.get(uri)
|
||||||
verify_status(response, 200)
|
verify_status(response, 200)
|
||||||
|
|
||||||
assert !fetch_spans.empty?, "expected to have spans"
|
assert !fetch_spans.empty?, "expected to have spans"
|
||||||
span = fetch_spans.last
|
span = fetch_spans.last
|
||||||
verify_instrumented_request(response, span: span, verb: "GET", uri: uri)
|
verify_instrumented_request(response.status, span: span, verb: "GET", uri: uri)
|
||||||
verify_analytics_headers(span, sample_rate: 0.5)
|
verify_analytics_headers(span, sample_rate: 0.5)
|
||||||
end
|
end
|
||||||
|
|
||||||
|
def test_datadog_per_request_span_with_retries
|
||||||
|
set_datadog
|
||||||
|
uri = URI(build_uri("/status/404", "http://#{httpbin}"))
|
||||||
|
|
||||||
|
http = HTTPX.plugin(:retries, max_retries: 2, retry_on: ->(r) { r.status == 404 })
|
||||||
|
response = http.get(uri)
|
||||||
|
verify_status(response, 404)
|
||||||
|
|
||||||
|
assert fetch_spans.size == 3, "expected to 3 spans"
|
||||||
|
fetch_spans.each do |span|
|
||||||
|
verify_instrumented_request(response.status, span: span, verb: "GET", uri: uri, error: "HTTPX::HTTPError")
|
||||||
|
end
|
||||||
|
end
|
||||||
|
|
||||||
private
|
private
|
||||||
|
|
||||||
def setup
|
def setup
|
||||||
@ -186,144 +206,15 @@ class DatadogTest < Minitest::Test
|
|||||||
def teardown
|
def teardown
|
||||||
super
|
super
|
||||||
Datadog.registry[:httpx].reset_configuration!
|
Datadog.registry[:httpx].reset_configuration!
|
||||||
|
Datadog.configuration.tracing[:httpx].enabled = false
|
||||||
end
|
end
|
||||||
|
|
||||||
def verify_instrumented_request(response, verb:, uri:, span: fetch_spans.first, service: "httpx", error: nil)
|
def datadog_service_name
|
||||||
assert span.span_type == "http"
|
:httpx
|
||||||
assert span.name == "httpx.request"
|
|
||||||
assert span.service == service
|
|
||||||
|
|
||||||
assert span.get_tag("out.host") == uri.host
|
|
||||||
assert span.get_tag("out.port") == "80"
|
|
||||||
assert span.get_tag("http.method") == verb
|
|
||||||
assert span.get_tag("http.url") == uri.path
|
|
||||||
|
|
||||||
error_tag = if defined?(::DDTrace) && Gem::Version.new(::DDTrace::VERSION::STRING) >= Gem::Version.new("1.8.0")
|
|
||||||
"error.message"
|
|
||||||
else
|
|
||||||
"error.msg"
|
|
||||||
end
|
end
|
||||||
|
|
||||||
if error
|
def request_headers(response)
|
||||||
assert span.get_tag("error.type") == "HTTPX::NativeResolveError"
|
body = json_body(response)
|
||||||
assert !span.get_tag(error_tag).nil?
|
body["headers"].transform_keys(&:downcase)
|
||||||
assert span.status == 1
|
|
||||||
elsif response.status >= 400
|
|
||||||
assert span.get_tag("http.status_code") == response.status.to_s
|
|
||||||
assert span.get_tag("error.type") == "HTTPX::HTTPError"
|
|
||||||
assert !span.get_tag(error_tag).nil?
|
|
||||||
assert span.status == 1
|
|
||||||
else
|
|
||||||
assert span.status.zero?
|
|
||||||
assert span.get_tag("http.status_code") == response.status.to_s
|
|
||||||
# peer service
|
|
||||||
assert span.get_tag("peer.service") == span.service
|
|
||||||
end
|
|
||||||
end
|
|
||||||
|
|
||||||
def verify_no_distributed_headers(response)
|
|
||||||
request = response.instance_variable_get(:@request)
|
|
||||||
|
|
||||||
assert !request.headers.key?("x-datadog-parent-id")
|
|
||||||
assert !request.headers.key?("x-datadog-trace-id")
|
|
||||||
assert !request.headers.key?("x-datadog-sampling-priority")
|
|
||||||
end
|
|
||||||
|
|
||||||
def verify_distributed_headers(response, span: fetch_spans.first, sampling_priority: 1)
|
|
||||||
request = response.instance_variable_get(:@request)
|
|
||||||
|
|
||||||
assert request.headers["x-datadog-parent-id"] == span.span_id.to_s
|
|
||||||
assert request.headers["x-datadog-trace-id"] == span.trace_id.to_s
|
|
||||||
assert request.headers["x-datadog-sampling-priority"] == sampling_priority.to_s
|
|
||||||
end
|
|
||||||
|
|
||||||
def verify_analytics_headers(span, sample_rate: nil)
|
|
||||||
assert span.get_metric("_dd1.sr.eausr") == sample_rate
|
|
||||||
end
|
|
||||||
|
|
||||||
if defined?(::DDTrace) && Gem::Version.new(::DDTrace::VERSION::STRING) >= Gem::Version.new("1.0.0")
|
|
||||||
|
|
||||||
def set_datadog(options = {}, &blk)
|
|
||||||
Datadog.configure do |c|
|
|
||||||
c.tracing.instrument(:httpx, options, &blk)
|
|
||||||
end
|
|
||||||
|
|
||||||
tracer # initialize tracer patches
|
|
||||||
end
|
|
||||||
|
|
||||||
def tracer
|
|
||||||
@tracer ||= begin
|
|
||||||
tr = Datadog::Tracing.send(:tracer)
|
|
||||||
def tr.write(trace)
|
|
||||||
@traces ||= []
|
|
||||||
@traces << trace
|
|
||||||
end
|
|
||||||
tr
|
|
||||||
end
|
|
||||||
end
|
|
||||||
|
|
||||||
def trace_with_sampling_priority(priority)
|
|
||||||
tracer.trace("foo.bar") do
|
|
||||||
tracer.active_trace.sampling_priority = priority
|
|
||||||
yield
|
|
||||||
end
|
|
||||||
end
|
|
||||||
else
|
|
||||||
|
|
||||||
def set_datadog(options = {}, &blk)
|
|
||||||
Datadog.configure do |c|
|
|
||||||
c.use(:httpx, options, &blk)
|
|
||||||
end
|
|
||||||
|
|
||||||
tracer # initialize tracer patches
|
|
||||||
end
|
|
||||||
|
|
||||||
def tracer
|
|
||||||
@tracer ||= begin
|
|
||||||
tr = Datadog.tracer
|
|
||||||
def tr.write(trace)
|
|
||||||
@spans ||= []
|
|
||||||
@spans << trace
|
|
||||||
end
|
|
||||||
tr
|
|
||||||
end
|
|
||||||
end
|
|
||||||
|
|
||||||
def trace_with_sampling_priority(priority)
|
|
||||||
tracer.trace("foo.bar") do |span|
|
|
||||||
span.context.sampling_priority = priority
|
|
||||||
yield
|
|
||||||
end
|
|
||||||
end
|
|
||||||
end
|
|
||||||
|
|
||||||
# Returns spans and caches it (similar to +let(:spans)+).
|
|
||||||
def spans
|
|
||||||
@spans ||= fetch_spans
|
|
||||||
end
|
|
||||||
|
|
||||||
# Retrieves and sorts all spans in the current tracer instance.
|
|
||||||
# This method does not cache its results.
|
|
||||||
def fetch_spans
|
|
||||||
spans = if defined?(::DDTrace) && Gem::Version.new(::DDTrace::VERSION::STRING) >= Gem::Version.new("1.0.0")
|
|
||||||
(tracer.instance_variable_get(:@traces) || []).map(&:spans)
|
|
||||||
else
|
|
||||||
tracer.instance_variable_get(:@spans) || []
|
|
||||||
end
|
|
||||||
spans.flatten.sort! do |a, b|
|
|
||||||
if a.name == b.name
|
|
||||||
if a.resource == b.resource
|
|
||||||
if a.start_time == b.start_time
|
|
||||||
a.end_time <=> b.end_time
|
|
||||||
else
|
|
||||||
a.start_time <=> b.start_time
|
|
||||||
end
|
|
||||||
else
|
|
||||||
a.resource <=> b.resource
|
|
||||||
end
|
|
||||||
else
|
|
||||||
a.name <=> b.name
|
|
||||||
end
|
|
||||||
end
|
|
||||||
end
|
end
|
||||||
end
|
end
|
||||||
|
198
integration_tests/faraday_datadog_test.rb
Normal file
198
integration_tests/faraday_datadog_test.rb
Normal file
@ -0,0 +1,198 @@
|
|||||||
|
# frozen_string_literal: true
|
||||||
|
|
||||||
|
begin
|
||||||
|
# upcoming 2.0
|
||||||
|
require "datadog"
|
||||||
|
rescue LoadError
|
||||||
|
require "ddtrace"
|
||||||
|
end
|
||||||
|
|
||||||
|
require "test_helper"
|
||||||
|
require "support/http_helpers"
|
||||||
|
require "httpx/adapters/faraday"
|
||||||
|
require_relative "datadog_helpers"
|
||||||
|
|
||||||
|
DATADOG_VERSION = defined?(DDTrace) ? DDTrace::VERSION : Datadog::VERSION
|
||||||
|
|
||||||
|
class FaradayDatadogTest < Minitest::Test
|
||||||
|
include HTTPHelpers
|
||||||
|
include DatadogHelpers
|
||||||
|
include FaradayHelpers
|
||||||
|
|
||||||
|
def test_faraday_datadog_successful_get_request
|
||||||
|
set_datadog
|
||||||
|
uri = URI(build_uri("/status/200"))
|
||||||
|
|
||||||
|
response = faraday_connection.get(uri)
|
||||||
|
verify_status(response, 200)
|
||||||
|
|
||||||
|
assert !fetch_spans.empty?, "expected to have spans"
|
||||||
|
verify_instrumented_request(response.status, verb: "GET", uri: uri)
|
||||||
|
verify_distributed_headers(request_headers(response))
|
||||||
|
end
|
||||||
|
|
||||||
|
def test_faraday_datadog_successful_post_request
|
||||||
|
set_datadog
|
||||||
|
uri = URI(build_uri("/status/200"))
|
||||||
|
|
||||||
|
response = faraday_connection.post(uri, "bla")
|
||||||
|
verify_status(response, 200)
|
||||||
|
|
||||||
|
assert !fetch_spans.empty?, "expected to have spans"
|
||||||
|
verify_instrumented_request(response.status, verb: "POST", uri: uri)
|
||||||
|
verify_distributed_headers(request_headers(response))
|
||||||
|
end
|
||||||
|
|
||||||
|
def test_faraday_datadog_server_error_request
|
||||||
|
set_datadog
|
||||||
|
uri = URI(build_uri("/status/500"))
|
||||||
|
|
||||||
|
ex = assert_raises(Faraday::ServerError) do
|
||||||
|
faraday_connection.tap do |conn|
|
||||||
|
adapter_handler = conn.builder.handlers.last
|
||||||
|
conn.builder.insert_before adapter_handler, Faraday::Response::RaiseError
|
||||||
|
end.get(uri)
|
||||||
|
end
|
||||||
|
|
||||||
|
assert !fetch_spans.empty?, "expected to have spans"
|
||||||
|
verify_instrumented_request(ex.response[:status], verb: "GET", uri: uri, error: "Error 500")
|
||||||
|
|
||||||
|
verify_distributed_headers(request_headers(ex.response))
|
||||||
|
end
|
||||||
|
|
||||||
|
def test_faraday_datadog_client_error_request
|
||||||
|
set_datadog
|
||||||
|
uri = URI(build_uri("/status/404"))
|
||||||
|
|
||||||
|
ex = assert_raises(Faraday::ResourceNotFound) do
|
||||||
|
faraday_connection.tap do |conn|
|
||||||
|
adapter_handler = conn.builder.handlers.last
|
||||||
|
conn.builder.insert_before adapter_handler, Faraday::Response::RaiseError
|
||||||
|
end.get(uri)
|
||||||
|
end
|
||||||
|
|
||||||
|
assert !fetch_spans.empty?, "expected to have spans"
|
||||||
|
verify_instrumented_request(ex.response[:status], verb: "GET", uri: uri, error: "Error 404")
|
||||||
|
verify_distributed_headers(request_headers(ex.response))
|
||||||
|
end
|
||||||
|
|
||||||
|
def test_faraday_datadog_some_other_error
|
||||||
|
set_datadog
|
||||||
|
uri = URI("http://unexisting/")
|
||||||
|
|
||||||
|
assert_raises(HTTPX::NativeResolveError) { faraday_connection.get(uri) }
|
||||||
|
|
||||||
|
assert !fetch_spans.empty?, "expected to have spans"
|
||||||
|
verify_instrumented_request(nil, verb: "GET", uri: uri, error: "HTTPX::NativeResolveError")
|
||||||
|
end
|
||||||
|
|
||||||
|
def test_faraday_datadog_host_config
|
||||||
|
uri = URI(build_uri("/status/200"))
|
||||||
|
set_datadog(describe: /#{uri.host}/) do |http|
|
||||||
|
http.service_name = "httpbin"
|
||||||
|
http.split_by_domain = false
|
||||||
|
end
|
||||||
|
|
||||||
|
response = faraday_connection.get(uri)
|
||||||
|
verify_status(response, 200)
|
||||||
|
|
||||||
|
assert !fetch_spans.empty?, "expected to have spans"
|
||||||
|
verify_instrumented_request(response.status, service: "httpbin", verb: "GET", uri: uri)
|
||||||
|
verify_distributed_headers(request_headers(response))
|
||||||
|
end
|
||||||
|
|
||||||
|
def test_faraday_datadog_split_by_domain
|
||||||
|
uri = URI(build_uri("/status/200"))
|
||||||
|
set_datadog do |http|
|
||||||
|
http.split_by_domain = true
|
||||||
|
end
|
||||||
|
|
||||||
|
response = faraday_connection.get(uri)
|
||||||
|
verify_status(response, 200)
|
||||||
|
|
||||||
|
assert !fetch_spans.empty?, "expected to have spans"
|
||||||
|
verify_instrumented_request(response.status, service: uri.host, verb: "GET", uri: uri)
|
||||||
|
verify_distributed_headers(request_headers(response))
|
||||||
|
end
|
||||||
|
|
||||||
|
def test_faraday_datadog_distributed_headers_disabled
|
||||||
|
set_datadog(distributed_tracing: false)
|
||||||
|
uri = URI(build_uri("/status/200"))
|
||||||
|
|
||||||
|
sampling_priority = 10
|
||||||
|
response = trace_with_sampling_priority(sampling_priority) do
|
||||||
|
faraday_connection.get(uri)
|
||||||
|
end
|
||||||
|
verify_status(response, 200)
|
||||||
|
|
||||||
|
assert !fetch_spans.empty?, "expected to have spans"
|
||||||
|
span = fetch_spans.last
|
||||||
|
verify_instrumented_request(response.status, span: span, verb: "GET", uri: uri)
|
||||||
|
verify_no_distributed_headers(request_headers(response))
|
||||||
|
verify_analytics_headers(span)
|
||||||
|
end unless ENV.key?("CI") # TODO: https://github.com/DataDog/dd-trace-rb/issues/4308
|
||||||
|
|
||||||
|
def test_faraday_datadog_distributed_headers_sampling_priority
|
||||||
|
set_datadog
|
||||||
|
uri = URI(build_uri("/status/200"))
|
||||||
|
|
||||||
|
sampling_priority = 10
|
||||||
|
response = trace_with_sampling_priority(sampling_priority) do
|
||||||
|
faraday_connection.get(uri)
|
||||||
|
end
|
||||||
|
|
||||||
|
verify_status(response, 200)
|
||||||
|
|
||||||
|
assert !fetch_spans.empty?, "expected to have spans"
|
||||||
|
span = fetch_spans.last
|
||||||
|
verify_instrumented_request(response.status, span: span, verb: "GET", uri: uri)
|
||||||
|
verify_distributed_headers(request_headers(response), span: span, sampling_priority: sampling_priority)
|
||||||
|
verify_analytics_headers(span)
|
||||||
|
end unless ENV.key?("CI") # TODO: https://github.com/DataDog/dd-trace-rb/issues/4308
|
||||||
|
|
||||||
|
def test_faraday_datadog_analytics_enabled
|
||||||
|
set_datadog(analytics_enabled: true)
|
||||||
|
uri = URI(build_uri("/status/200"))
|
||||||
|
|
||||||
|
response = faraday_connection.get(uri)
|
||||||
|
verify_status(response, 200)
|
||||||
|
|
||||||
|
assert !fetch_spans.empty?, "expected to have spans"
|
||||||
|
span = fetch_spans.last
|
||||||
|
verify_instrumented_request(response.status, span: span, verb: "GET", uri: uri)
|
||||||
|
verify_analytics_headers(span, sample_rate: 1.0)
|
||||||
|
end
|
||||||
|
|
||||||
|
def test_faraday_datadog_analytics_sample_rate
|
||||||
|
set_datadog(analytics_enabled: true, analytics_sample_rate: 0.5)
|
||||||
|
uri = URI(build_uri("/status/200"))
|
||||||
|
|
||||||
|
response = faraday_connection.get(uri)
|
||||||
|
verify_status(response, 200)
|
||||||
|
|
||||||
|
assert !fetch_spans.empty?, "expected to have spans"
|
||||||
|
span = fetch_spans.last
|
||||||
|
verify_instrumented_request(response.status, span: span, verb: "GET", uri: uri)
|
||||||
|
verify_analytics_headers(span, sample_rate: 0.5)
|
||||||
|
end
|
||||||
|
|
||||||
|
private
|
||||||
|
|
||||||
|
def setup
|
||||||
|
super
|
||||||
|
Datadog.registry[:faraday].reset_configuration!
|
||||||
|
end
|
||||||
|
|
||||||
|
def teardown
|
||||||
|
super
|
||||||
|
Datadog.registry[:faraday].reset_configuration!
|
||||||
|
end
|
||||||
|
|
||||||
|
def datadog_service_name
|
||||||
|
:faraday
|
||||||
|
end
|
||||||
|
|
||||||
|
def origin(orig = httpbin)
|
||||||
|
"http://#{orig}"
|
||||||
|
end
|
||||||
|
end
|
@ -1,6 +1,5 @@
|
|||||||
# frozen_string_literal: true
|
# frozen_string_literal: true
|
||||||
|
|
||||||
if RUBY_VERSION >= "2.4.0"
|
|
||||||
require "logger"
|
require "logger"
|
||||||
require "stringio"
|
require "stringio"
|
||||||
require "sentry-ruby"
|
require "sentry-ruby"
|
||||||
@ -100,7 +99,7 @@ if RUBY_VERSION >= "2.4.0"
|
|||||||
verify_spans(transaction, response, verb: "GET")
|
verify_spans(transaction, response, verb: "GET")
|
||||||
crumb = Sentry.get_current_scope.breadcrumbs.peek
|
crumb = Sentry.get_current_scope.breadcrumbs.peek
|
||||||
assert crumb.category == "httpx"
|
assert crumb.category == "httpx"
|
||||||
assert crumb.data == { error: "name or service not known (unexisting)", method: "GET", url: uri.to_s }
|
assert crumb.data == { error: "name or service not known", method: "GET", url: uri.to_s }
|
||||||
end
|
end
|
||||||
|
|
||||||
private
|
private
|
||||||
@ -134,12 +133,13 @@ if RUBY_VERSION >= "2.4.0"
|
|||||||
|
|
||||||
Sentry.init do |config|
|
Sentry.init do |config|
|
||||||
config.traces_sample_rate = 1.0
|
config.traces_sample_rate = 1.0
|
||||||
config.logger = mock_logger
|
config.sdk_logger = mock_logger
|
||||||
config.dsn = DUMMY_DSN
|
config.dsn = DUMMY_DSN
|
||||||
config.transport.transport_class = Sentry::DummyTransport
|
config.transport.transport_class = Sentry::DummyTransport
|
||||||
config.breadcrumbs_logger = [:http_logger]
|
|
||||||
# so the events will be sent synchronously for testing
|
|
||||||
config.background_worker_threads = 0
|
config.background_worker_threads = 0
|
||||||
|
config.breadcrumbs_logger = [:http_logger]
|
||||||
|
config.enabled_patches << :httpx
|
||||||
|
# so the events will be sent synchronously for testing
|
||||||
end
|
end
|
||||||
end
|
end
|
||||||
|
|
||||||
@ -147,4 +147,3 @@ if RUBY_VERSION >= "2.4.0"
|
|||||||
"https://#{httpbin}"
|
"https://#{httpbin}"
|
||||||
end
|
end
|
||||||
end
|
end
|
||||||
end
|
|
||||||
|
@ -26,6 +26,7 @@ class WebmockTest < Minitest::Test
|
|||||||
end
|
end
|
||||||
|
|
||||||
def teardown
|
def teardown
|
||||||
|
super
|
||||||
WebMock.reset!
|
WebMock.reset!
|
||||||
WebMock.allow_net_connect!
|
WebMock.allow_net_connect!
|
||||||
WebMock.disable!
|
WebMock.disable!
|
||||||
@ -49,6 +50,14 @@ class WebmockTest < Minitest::Test
|
|||||||
assert_equal(@exception_class.new("exception message"), response.error)
|
assert_equal(@exception_class.new("exception message"), response.error)
|
||||||
end
|
end
|
||||||
|
|
||||||
|
def test_response_not_decoded
|
||||||
|
request = stub_request(:get, MOCK_URL_HTTP).to_return(body: "body", headers: { content_encoding: "gzip" })
|
||||||
|
response = HTTPX.get(MOCK_URL_HTTP)
|
||||||
|
|
||||||
|
assert_equal("body", response.body.to_s)
|
||||||
|
assert_requested(request)
|
||||||
|
end
|
||||||
|
|
||||||
def test_to_timeout
|
def test_to_timeout
|
||||||
response = http_request(:get, MOCK_URL_HTTP_TIMEOUT)
|
response = http_request(:get, MOCK_URL_HTTP_TIMEOUT)
|
||||||
assert_requested(@stub_timeout)
|
assert_requested(@stub_timeout)
|
||||||
@ -87,7 +96,7 @@ class WebmockTest < Minitest::Test
|
|||||||
expected_message = "The request GET #{MOCK_URL_HTTP}/ was expected to execute 1 time but it executed 0 times" \
|
expected_message = "The request GET #{MOCK_URL_HTTP}/ was expected to execute 1 time but it executed 0 times" \
|
||||||
"\n\nThe following requests were made:\n\nNo requests were made.\n" \
|
"\n\nThe following requests were made:\n\nNo requests were made.\n" \
|
||||||
"============================================================"
|
"============================================================"
|
||||||
assert_raise_with_message(MiniTest::Assertion, expected_message) do
|
assert_raise_with_message(Minitest::Assertion, expected_message) do
|
||||||
assert_requested(:get, MOCK_URL_HTTP)
|
assert_requested(:get, MOCK_URL_HTTP)
|
||||||
end
|
end
|
||||||
end
|
end
|
||||||
@ -96,7 +105,7 @@ class WebmockTest < Minitest::Test
|
|||||||
expected_message = "The request ANY #{MOCK_URL_HTTP}/ was expected to execute 1 time but it executed 0 times" \
|
expected_message = "The request ANY #{MOCK_URL_HTTP}/ was expected to execute 1 time but it executed 0 times" \
|
||||||
"\n\nThe following requests were made:\n\nNo requests were made.\n" \
|
"\n\nThe following requests were made:\n\nNo requests were made.\n" \
|
||||||
"============================================================"
|
"============================================================"
|
||||||
assert_raise_with_message(MiniTest::Assertion, expected_message) do
|
assert_raise_with_message(Minitest::Assertion, expected_message) do
|
||||||
assert_requested(@stub_http)
|
assert_requested(@stub_http)
|
||||||
end
|
end
|
||||||
end
|
end
|
||||||
@ -146,13 +155,36 @@ class WebmockTest < Minitest::Test
|
|||||||
assert_requested(:get, MOCK_URL_HTTP, query: hash_excluding("a" => %w[b c]))
|
assert_requested(:get, MOCK_URL_HTTP, query: hash_excluding("a" => %w[b c]))
|
||||||
end
|
end
|
||||||
|
|
||||||
|
def test_verification_that_expected_request_with_hash_as_body
|
||||||
|
stub_request(:post, MOCK_URL_HTTP).with(body: { foo: "bar" })
|
||||||
|
http_request(:post, MOCK_URL_HTTP, form: { foo: "bar" })
|
||||||
|
assert_requested(:post, MOCK_URL_HTTP, body: { foo: "bar" })
|
||||||
|
end
|
||||||
|
|
||||||
|
def test_verification_that_expected_request_occured_with_form_file
|
||||||
|
file = File.new(fixture_file_path)
|
||||||
|
stub_request(:post, MOCK_URL_HTTP)
|
||||||
|
http_request(:post, MOCK_URL_HTTP, form: { file: file })
|
||||||
|
# TODO: webmock does not support matching multipart request body
|
||||||
|
assert_requested(:post, MOCK_URL_HTTP)
|
||||||
|
end
|
||||||
|
|
||||||
|
def test_verification_that_expected_request_occured_with_form_tempfile
|
||||||
|
stub_request(:post, MOCK_URL_HTTP)
|
||||||
|
Tempfile.open("tmp") do |file|
|
||||||
|
http_request(:post, MOCK_URL_HTTP, form: { file: file })
|
||||||
|
end
|
||||||
|
# TODO: webmock does not support matching multipart request body
|
||||||
|
assert_requested(:post, MOCK_URL_HTTP)
|
||||||
|
end
|
||||||
|
|
||||||
def test_verification_that_non_expected_request_didnt_occur
|
def test_verification_that_non_expected_request_didnt_occur
|
||||||
expected_message = Regexp.new(
|
expected_message = Regexp.new(
|
||||||
"The request GET #{MOCK_URL_HTTP}/ was not expected to execute but it executed 1 time\n\n" \
|
"The request GET #{MOCK_URL_HTTP}/ was not expected to execute but it executed 1 time\n\n" \
|
||||||
"The following requests were made:\n\nGET #{MOCK_URL_HTTP}/ with headers .+ was made 1 time\n\n" \
|
"The following requests were made:\n\nGET #{MOCK_URL_HTTP}/ with headers .+ was made 1 time\n\n" \
|
||||||
"============================================================"
|
"============================================================"
|
||||||
)
|
)
|
||||||
assert_raise_with_message(MiniTest::Assertion, expected_message) do
|
assert_raise_with_message(Minitest::Assertion, expected_message) do
|
||||||
http_request(:get, "http://www.example.com/")
|
http_request(:get, "http://www.example.com/")
|
||||||
assert_not_requested(:get, "http://www.example.com")
|
assert_not_requested(:get, "http://www.example.com")
|
||||||
end
|
end
|
||||||
@ -164,7 +196,7 @@ class WebmockTest < Minitest::Test
|
|||||||
"The following requests were made:\n\nGET #{MOCK_URL_HTTP}/ with headers .+ was made 1 time\n\n" \
|
"The following requests were made:\n\nGET #{MOCK_URL_HTTP}/ with headers .+ was made 1 time\n\n" \
|
||||||
"============================================================"
|
"============================================================"
|
||||||
)
|
)
|
||||||
assert_raise_with_message(MiniTest::Assertion, expected_message) do
|
assert_raise_with_message(Minitest::Assertion, expected_message) do
|
||||||
http_request(:get, "#{MOCK_URL_HTTP}/")
|
http_request(:get, "#{MOCK_URL_HTTP}/")
|
||||||
refute_requested(:get, MOCK_URL_HTTP)
|
refute_requested(:get, MOCK_URL_HTTP)
|
||||||
end
|
end
|
||||||
@ -176,12 +208,43 @@ class WebmockTest < Minitest::Test
|
|||||||
"The following requests were made:\n\nGET #{MOCK_URL_HTTP}/ with headers .+ was made 1 time\n\n" \
|
"The following requests were made:\n\nGET #{MOCK_URL_HTTP}/ with headers .+ was made 1 time\n\n" \
|
||||||
"============================================================"
|
"============================================================"
|
||||||
)
|
)
|
||||||
assert_raise_with_message(MiniTest::Assertion, expected_message) do
|
assert_raise_with_message(Minitest::Assertion, expected_message) do
|
||||||
http_request(:get, "#{MOCK_URL_HTTP}/")
|
http_request(:get, "#{MOCK_URL_HTTP}/")
|
||||||
assert_not_requested(@stub_http)
|
assert_not_requested(@stub_http)
|
||||||
end
|
end
|
||||||
end
|
end
|
||||||
|
|
||||||
|
def test_webmock_allows_real_request
|
||||||
|
WebMock.allow_net_connect!
|
||||||
|
uri = build_uri("/get?foo=bar")
|
||||||
|
response = HTTPX.get(uri)
|
||||||
|
verify_status(response, 200)
|
||||||
|
verify_body_length(response)
|
||||||
|
assert_requested(:get, uri, query: { "foo" => "bar" })
|
||||||
|
end
|
||||||
|
|
||||||
|
def test_webmock_allows_real_request_with_body
|
||||||
|
WebMock.allow_net_connect!
|
||||||
|
uri = build_uri("/post")
|
||||||
|
response = HTTPX.post(uri, form: { foo: "bar" })
|
||||||
|
verify_status(response, 200)
|
||||||
|
verify_body_length(response)
|
||||||
|
assert_requested(:post, uri, headers: { "Content-Type" => "application/x-www-form-urlencoded" }, body: "foo=bar")
|
||||||
|
end
|
||||||
|
|
||||||
|
def test_webmock_allows_real_request_with_file_body
|
||||||
|
WebMock.allow_net_connect!
|
||||||
|
uri = build_uri("/post")
|
||||||
|
response = HTTPX.post(uri, form: { image: File.new(fixture_file_path) })
|
||||||
|
verify_status(response, 200)
|
||||||
|
verify_body_length(response)
|
||||||
|
body = json_body(response)
|
||||||
|
verify_header(body["headers"], "Content-Type", "multipart/form-data")
|
||||||
|
verify_uploaded_image(body, "image", "image/jpeg")
|
||||||
|
# TODO: webmock does not support matching multipart request body
|
||||||
|
# assert_requested(:post, uri, headers: { "Content-Type" => "multipart/form-data" }, form: { "image" => File.new(fixture_file_path) })
|
||||||
|
end
|
||||||
|
|
||||||
def test_webmock_mix_mock_and_real_request
|
def test_webmock_mix_mock_and_real_request
|
||||||
WebMock.allow_net_connect!
|
WebMock.allow_net_connect!
|
||||||
|
|
||||||
@ -214,6 +277,49 @@ class WebmockTest < Minitest::Test
|
|||||||
assert_not_requested(:get, "http://#{httpbin}")
|
assert_not_requested(:get, "http://#{httpbin}")
|
||||||
end
|
end
|
||||||
|
|
||||||
|
def test_webmock_follow_redirects_with_stream_plugin_each
|
||||||
|
session = HTTPX.plugin(:follow_redirects).plugin(:stream)
|
||||||
|
redirect_url = "#{MOCK_URL_HTTP}/redirect"
|
||||||
|
initial_request = stub_request(:get, MOCK_URL_HTTP).to_return(status: 302, headers: { location: redirect_url }, body: "redirecting")
|
||||||
|
redirect_request = stub_request(:get, redirect_url).to_return(status: 200, body: "body")
|
||||||
|
|
||||||
|
response = session.get(MOCK_URL_HTTP, stream: true)
|
||||||
|
body = "".b
|
||||||
|
response.each do |chunk|
|
||||||
|
next if (300..399).cover?(response.status)
|
||||||
|
|
||||||
|
body << chunk
|
||||||
|
end
|
||||||
|
assert_equal("body", body)
|
||||||
|
assert_requested(initial_request)
|
||||||
|
assert_requested(redirect_request)
|
||||||
|
end
|
||||||
|
|
||||||
|
def test_webmock_with_stream_plugin_each
|
||||||
|
session = HTTPX.plugin(:stream)
|
||||||
|
request = stub_request(:get, MOCK_URL_HTTP).to_return(body: "body")
|
||||||
|
|
||||||
|
body = "".b
|
||||||
|
response = session.get(MOCK_URL_HTTP, stream: true)
|
||||||
|
response.each do |chunk|
|
||||||
|
next if (300..399).cover?(response.status)
|
||||||
|
|
||||||
|
body << chunk
|
||||||
|
end
|
||||||
|
|
||||||
|
assert_equal("body", body)
|
||||||
|
assert_requested(request)
|
||||||
|
end
|
||||||
|
|
||||||
|
def test_webmock_with_stream_plugin_each_line
|
||||||
|
session = HTTPX.plugin(:stream)
|
||||||
|
request = stub_request(:get, MOCK_URL_HTTP).to_return(body: "First line\nSecond line")
|
||||||
|
|
||||||
|
response = session.get(MOCK_URL_HTTP, stream: true)
|
||||||
|
assert_equal(["First line", "Second line"], response.each_line.to_a)
|
||||||
|
assert_requested(request)
|
||||||
|
end
|
||||||
|
|
||||||
private
|
private
|
||||||
|
|
||||||
def assert_raise_with_message(e, message, &block)
|
def assert_raise_with_message(e, message, &block)
|
||||||
@ -228,4 +334,8 @@ class WebmockTest < Minitest::Test
|
|||||||
def http_request(meth, *uris, **options)
|
def http_request(meth, *uris, **options)
|
||||||
HTTPX.__send__(meth, *uris, **options)
|
HTTPX.__send__(meth, *uris, **options)
|
||||||
end
|
end
|
||||||
|
|
||||||
|
def scheme
|
||||||
|
"http://"
|
||||||
|
end
|
||||||
end
|
end
|
||||||
|
82
lib/httpx.rb
82
lib/httpx.rb
@ -2,6 +2,42 @@
|
|||||||
|
|
||||||
require "httpx/version"
|
require "httpx/version"
|
||||||
|
|
||||||
|
# Top-Level Namespace
|
||||||
|
#
|
||||||
|
module HTTPX
|
||||||
|
EMPTY = [].freeze
|
||||||
|
EMPTY_HASH = {}.freeze
|
||||||
|
|
||||||
|
# All plugins should be stored under this module/namespace. Can register and load
|
||||||
|
# plugins.
|
||||||
|
#
|
||||||
|
module Plugins
|
||||||
|
@plugins = {}
|
||||||
|
@plugins_mutex = Thread::Mutex.new
|
||||||
|
|
||||||
|
# Loads a plugin based on a name. If the plugin hasn't been loaded, tries to load
|
||||||
|
# it from the load path under "httpx/plugins/" directory.
|
||||||
|
#
|
||||||
|
def self.load_plugin(name)
|
||||||
|
h = @plugins
|
||||||
|
m = @plugins_mutex
|
||||||
|
unless (plugin = m.synchronize { h[name] })
|
||||||
|
require "httpx/plugins/#{name}"
|
||||||
|
raise "Plugin #{name} hasn't been registered" unless (plugin = m.synchronize { h[name] })
|
||||||
|
end
|
||||||
|
plugin
|
||||||
|
end
|
||||||
|
|
||||||
|
# Registers a plugin (+mod+) in the central store indexed by +name+.
|
||||||
|
#
|
||||||
|
def self.register_plugin(name, mod)
|
||||||
|
h = @plugins
|
||||||
|
m = @plugins_mutex
|
||||||
|
m.synchronize { h[name] = mod }
|
||||||
|
end
|
||||||
|
end
|
||||||
|
end
|
||||||
|
|
||||||
require "httpx/extensions"
|
require "httpx/extensions"
|
||||||
|
|
||||||
require "httpx/errors"
|
require "httpx/errors"
|
||||||
@ -20,55 +56,11 @@ require "httpx/response"
|
|||||||
require "httpx/options"
|
require "httpx/options"
|
||||||
require "httpx/chainable"
|
require "httpx/chainable"
|
||||||
|
|
||||||
require "mutex_m"
|
|
||||||
# Top-Level Namespace
|
|
||||||
#
|
|
||||||
module HTTPX
|
|
||||||
EMPTY = [].freeze
|
|
||||||
|
|
||||||
# All plugins should be stored under this module/namespace. Can register and load
|
|
||||||
# plugins.
|
|
||||||
#
|
|
||||||
module Plugins
|
|
||||||
@plugins = {}
|
|
||||||
@plugins.extend(Mutex_m)
|
|
||||||
|
|
||||||
# Loads a plugin based on a name. If the plugin hasn't been loaded, tries to load
|
|
||||||
# it from the load path under "httpx/plugins/" directory.
|
|
||||||
#
|
|
||||||
def self.load_plugin(name)
|
|
||||||
h = @plugins
|
|
||||||
unless (plugin = h.synchronize { h[name] })
|
|
||||||
require "httpx/plugins/#{name}"
|
|
||||||
raise "Plugin #{name} hasn't been registered" unless (plugin = h.synchronize { h[name] })
|
|
||||||
end
|
|
||||||
plugin
|
|
||||||
end
|
|
||||||
|
|
||||||
# Registers a plugin (+mod+) in the central store indexed by +name+.
|
|
||||||
#
|
|
||||||
def self.register_plugin(name, mod)
|
|
||||||
h = @plugins
|
|
||||||
h.synchronize { h[name] = mod }
|
|
||||||
end
|
|
||||||
end
|
|
||||||
|
|
||||||
# :nocov:
|
|
||||||
def self.const_missing(const_name)
|
|
||||||
super unless const_name == :Client
|
|
||||||
warn "DEPRECATION WARNING: the class #{self}::Client is deprecated. Use #{self}::Session instead."
|
|
||||||
Session
|
|
||||||
end
|
|
||||||
# :nocov:
|
|
||||||
|
|
||||||
extend Chainable
|
|
||||||
end
|
|
||||||
|
|
||||||
require "httpx/session"
|
require "httpx/session"
|
||||||
require "httpx/session_extensions"
|
require "httpx/session_extensions"
|
||||||
|
|
||||||
# load integrations when possible
|
# load integrations when possible
|
||||||
|
|
||||||
require "httpx/adapters/datadog" if defined?(DDTrace) || defined?(Datadog)
|
require "httpx/adapters/datadog" if defined?(DDTrace) || defined?(Datadog::Tracing)
|
||||||
require "httpx/adapters/sentry" if defined?(Sentry)
|
require "httpx/adapters/sentry" if defined?(Sentry)
|
||||||
require "httpx/adapters/webmock" if defined?(WebMock)
|
require "httpx/adapters/webmock" if defined?(WebMock)
|
||||||
|
@ -1,177 +1,211 @@
|
|||||||
# frozen_string_literal: true
|
# frozen_string_literal: true
|
||||||
|
|
||||||
if defined?(DDTrace) && DDTrace::VERSION::STRING >= "1.0.0"
|
|
||||||
require "datadog/tracing/contrib/integration"
|
require "datadog/tracing/contrib/integration"
|
||||||
require "datadog/tracing/contrib/configuration/settings"
|
require "datadog/tracing/contrib/configuration/settings"
|
||||||
require "datadog/tracing/contrib/patcher"
|
require "datadog/tracing/contrib/patcher"
|
||||||
|
|
||||||
TRACING_MODULE = Datadog::Tracing
|
module Datadog::Tracing
|
||||||
else
|
|
||||||
|
|
||||||
require "ddtrace/contrib/integration"
|
|
||||||
require "ddtrace/contrib/configuration/settings"
|
|
||||||
require "ddtrace/contrib/patcher"
|
|
||||||
|
|
||||||
TRACING_MODULE = Datadog
|
|
||||||
end
|
|
||||||
|
|
||||||
module TRACING_MODULE # rubocop:disable Naming/ClassAndModuleCamelCase
|
|
||||||
module Contrib
|
module Contrib
|
||||||
module HTTPX
|
module HTTPX
|
||||||
if defined?(::DDTrace) && ::DDTrace::VERSION::STRING >= "1.0.0"
|
DATADOG_VERSION = defined?(::DDTrace) ? ::DDTrace::VERSION : ::Datadog::VERSION
|
||||||
METADATA_MODULE = TRACING_MODULE::Metadata
|
|
||||||
|
|
||||||
TYPE_OUTBOUND = TRACING_MODULE::Metadata::Ext::HTTP::TYPE_OUTBOUND
|
METADATA_MODULE = Datadog::Tracing::Metadata
|
||||||
|
|
||||||
TAG_PEER_SERVICE = TRACING_MODULE::Metadata::Ext::TAG_PEER_SERVICE
|
TYPE_OUTBOUND = Datadog::Tracing::Metadata::Ext::HTTP::TYPE_OUTBOUND
|
||||||
|
|
||||||
TAG_URL = TRACING_MODULE::Metadata::Ext::HTTP::TAG_URL
|
|
||||||
TAG_METHOD = TRACING_MODULE::Metadata::Ext::HTTP::TAG_METHOD
|
|
||||||
TAG_TARGET_HOST = TRACING_MODULE::Metadata::Ext::NET::TAG_TARGET_HOST
|
|
||||||
TAG_TARGET_PORT = TRACING_MODULE::Metadata::Ext::NET::TAG_TARGET_PORT
|
|
||||||
|
|
||||||
TAG_STATUS_CODE = TRACING_MODULE::Metadata::Ext::HTTP::TAG_STATUS_CODE
|
|
||||||
|
|
||||||
|
TAG_BASE_SERVICE = if Gem::Version.new(DATADOG_VERSION::STRING) < Gem::Version.new("1.15.0")
|
||||||
|
"_dd.base_service"
|
||||||
else
|
else
|
||||||
|
Datadog::Tracing::Contrib::Ext::Metadata::TAG_BASE_SERVICE
|
||||||
METADATA_MODULE = Datadog
|
|
||||||
|
|
||||||
TYPE_OUTBOUND = TRACING_MODULE::Ext::HTTP::TYPE_OUTBOUND
|
|
||||||
TAG_PEER_SERVICE = TRACING_MODULE::Ext::Integration::TAG_PEER_SERVICE
|
|
||||||
TAG_URL = TRACING_MODULE::Ext::HTTP::URL
|
|
||||||
TAG_METHOD = TRACING_MODULE::Ext::HTTP::METHOD
|
|
||||||
TAG_TARGET_HOST = TRACING_MODULE::Ext::NET::TARGET_HOST
|
|
||||||
TAG_TARGET_PORT = TRACING_MODULE::Ext::NET::TARGET_PORT
|
|
||||||
TAG_STATUS_CODE = Datadog::Ext::HTTP::STATUS_CODE
|
|
||||||
PROPAGATOR = TRACING_MODULE::HTTPPropagator
|
|
||||||
|
|
||||||
end
|
end
|
||||||
|
TAG_PEER_HOSTNAME = Datadog::Tracing::Metadata::Ext::TAG_PEER_HOSTNAME
|
||||||
|
|
||||||
|
TAG_KIND = Datadog::Tracing::Metadata::Ext::TAG_KIND
|
||||||
|
TAG_CLIENT = Datadog::Tracing::Metadata::Ext::SpanKind::TAG_CLIENT
|
||||||
|
TAG_COMPONENT = Datadog::Tracing::Metadata::Ext::TAG_COMPONENT
|
||||||
|
TAG_OPERATION = Datadog::Tracing::Metadata::Ext::TAG_OPERATION
|
||||||
|
TAG_URL = Datadog::Tracing::Metadata::Ext::HTTP::TAG_URL
|
||||||
|
TAG_METHOD = Datadog::Tracing::Metadata::Ext::HTTP::TAG_METHOD
|
||||||
|
TAG_TARGET_HOST = Datadog::Tracing::Metadata::Ext::NET::TAG_TARGET_HOST
|
||||||
|
TAG_TARGET_PORT = Datadog::Tracing::Metadata::Ext::NET::TAG_TARGET_PORT
|
||||||
|
|
||||||
|
TAG_STATUS_CODE = Datadog::Tracing::Metadata::Ext::HTTP::TAG_STATUS_CODE
|
||||||
|
|
||||||
# HTTPX Datadog Plugin
|
# HTTPX Datadog Plugin
|
||||||
#
|
#
|
||||||
# Enables tracing for httpx requests. A span will be created for each individual requests,
|
# Enables tracing for httpx requests.
|
||||||
# and it'll trace since the moment it is fed to the connection, until the moment the response is
|
#
|
||||||
# fed back to the session.
|
# A span will be created for each request transaction; the span is created lazily only when
|
||||||
|
# buffering a request, and it is fed the start time stored inside the tracer object.
|
||||||
#
|
#
|
||||||
module Plugin
|
module Plugin
|
||||||
class RequestTracer
|
module RequestTracer
|
||||||
include Contrib::HttpAnnotationHelper
|
extend Contrib::HttpAnnotationHelper
|
||||||
|
|
||||||
|
module_function
|
||||||
|
|
||||||
SPAN_REQUEST = "httpx.request"
|
SPAN_REQUEST = "httpx.request"
|
||||||
|
|
||||||
def initialize(request)
|
# initializes tracing on the +request+.
|
||||||
@request = request
|
def call(request)
|
||||||
|
return unless configuration(request).enabled
|
||||||
|
|
||||||
|
span = nil
|
||||||
|
|
||||||
|
# request objects are reused, when already buffered requests get rerouted to a different
|
||||||
|
# connection due to connection issues, or when they already got a response, but need to
|
||||||
|
# be retried. In such situations, the original span needs to be extended for the former,
|
||||||
|
# while a new is required for the latter.
|
||||||
|
request.on(:idle) do
|
||||||
|
span = nil
|
||||||
|
end
|
||||||
|
# the span is initialized when the request is buffered in the parser, which is the closest
|
||||||
|
# one gets to actually sending the request.
|
||||||
|
request.on(:headers) do
|
||||||
|
next if span
|
||||||
|
|
||||||
|
span = initialize_span(request, now)
|
||||||
end
|
end
|
||||||
|
|
||||||
def call
|
request.on(:response) do |response|
|
||||||
return unless tracing_enabled?
|
unless span
|
||||||
|
next unless response.is_a?(::HTTPX::ErrorResponse) && response.error.respond_to?(:connection)
|
||||||
|
|
||||||
@request.on(:response, &method(:finish))
|
# handles the case when the +error+ happened during name resolution, which means
|
||||||
|
# that the tracing start point hasn't been triggered yet; in such cases, the approximate
|
||||||
|
# initial resolving time is collected from the connection, and used as span start time,
|
||||||
|
# and the tracing object in inserted before the on response callback is called.
|
||||||
|
span = initialize_span(request, response.error.connection.init_time)
|
||||||
|
|
||||||
verb = @request.verb
|
end
|
||||||
uri = @request.uri
|
|
||||||
|
|
||||||
@span = build_span
|
finish(response, span)
|
||||||
|
end
|
||||||
|
end
|
||||||
|
|
||||||
@span.resource = verb
|
def finish(response, span)
|
||||||
|
if response.is_a?(::HTTPX::ErrorResponse)
|
||||||
|
span.set_error(response.error)
|
||||||
|
else
|
||||||
|
span.set_tag(TAG_STATUS_CODE, response.status.to_s)
|
||||||
|
|
||||||
# Add additional request specific tags to the span.
|
span.set_error(::HTTPX::HTTPError.new(response)) if response.status >= 400 && response.status <= 599
|
||||||
|
|
||||||
@span.set_tag(TAG_URL, @request.path)
|
span.set_tags(
|
||||||
@span.set_tag(TAG_METHOD, verb)
|
Datadog.configuration.tracing.header_tags.response_tags(response.headers.to_h)
|
||||||
|
) if Datadog.configuration.tracing.respond_to?(:header_tags)
|
||||||
|
end
|
||||||
|
|
||||||
@span.set_tag(TAG_TARGET_HOST, uri.host)
|
span.finish
|
||||||
@span.set_tag(TAG_TARGET_PORT, uri.port.to_s)
|
end
|
||||||
|
|
||||||
|
# return a span initialized with the +@request+ state.
|
||||||
|
def initialize_span(request, start_time)
|
||||||
|
verb = request.verb
|
||||||
|
uri = request.uri
|
||||||
|
|
||||||
|
config = configuration(request)
|
||||||
|
|
||||||
|
span = create_span(request, config, start_time)
|
||||||
|
|
||||||
|
span.resource = verb
|
||||||
|
|
||||||
|
# Tag original global service name if not used
|
||||||
|
span.set_tag(TAG_BASE_SERVICE, Datadog.configuration.service) if span.service != Datadog.configuration.service
|
||||||
|
|
||||||
|
span.set_tag(TAG_KIND, TAG_CLIENT)
|
||||||
|
|
||||||
|
span.set_tag(TAG_COMPONENT, "httpx")
|
||||||
|
span.set_tag(TAG_OPERATION, "request")
|
||||||
|
|
||||||
|
span.set_tag(TAG_URL, request.path)
|
||||||
|
span.set_tag(TAG_METHOD, verb)
|
||||||
|
|
||||||
|
span.set_tag(TAG_TARGET_HOST, uri.host)
|
||||||
|
span.set_tag(TAG_TARGET_PORT, uri.port)
|
||||||
|
|
||||||
|
span.set_tag(TAG_PEER_HOSTNAME, uri.host)
|
||||||
|
|
||||||
# Tag as an external peer service
|
# Tag as an external peer service
|
||||||
@span.set_tag(TAG_PEER_SERVICE, @span.service)
|
# span.set_tag(TAG_PEER_SERVICE, span.service)
|
||||||
|
|
||||||
propagate_headers if @configuration[:distributed_tracing]
|
if config[:distributed_tracing]
|
||||||
|
propagate_trace_http(
|
||||||
|
Datadog::Tracing.active_trace,
|
||||||
|
request.headers
|
||||||
|
)
|
||||||
|
end
|
||||||
|
|
||||||
# Set analytics sample rate
|
# Set analytics sample rate
|
||||||
if Contrib::Analytics.enabled?(@configuration[:analytics_enabled])
|
if Contrib::Analytics.enabled?(config[:analytics_enabled])
|
||||||
Contrib::Analytics.set_sample_rate(@span, @configuration[:analytics_sample_rate])
|
Contrib::Analytics.set_sample_rate(span, config[:analytics_sample_rate])
|
||||||
end
|
end
|
||||||
|
|
||||||
|
span.set_tags(
|
||||||
|
Datadog.configuration.tracing.header_tags.request_tags(request.headers.to_h)
|
||||||
|
) if Datadog.configuration.tracing.respond_to?(:header_tags)
|
||||||
|
|
||||||
|
span
|
||||||
rescue StandardError => e
|
rescue StandardError => e
|
||||||
Datadog.logger.error("error preparing span for http request: #{e}")
|
Datadog.logger.error("error preparing span for http request: #{e}")
|
||||||
Datadog.logger.error(e.backtrace)
|
Datadog.logger.error(e.backtrace)
|
||||||
end
|
end
|
||||||
|
|
||||||
def finish(response)
|
def now
|
||||||
return unless @span
|
::Datadog::Core::Utils::Time.now.utc
|
||||||
|
|
||||||
if response.is_a?(::HTTPX::ErrorResponse)
|
|
||||||
@span.set_error(response.error)
|
|
||||||
else
|
|
||||||
@span.set_tag(TAG_STATUS_CODE, response.status.to_s)
|
|
||||||
|
|
||||||
@span.set_error(::HTTPX::HTTPError.new(response)) if response.status >= 400 && response.status <= 599
|
|
||||||
end
|
end
|
||||||
|
|
||||||
@span.finish
|
def configuration(request)
|
||||||
|
Datadog.configuration.tracing[:httpx, request.uri.host]
|
||||||
end
|
end
|
||||||
|
|
||||||
private
|
if Gem::Version.new(DATADOG_VERSION::STRING) >= Gem::Version.new("2.0.0")
|
||||||
|
def propagate_trace_http(trace, headers)
|
||||||
|
Datadog::Tracing::Contrib::HTTP.inject(trace, headers)
|
||||||
|
end
|
||||||
|
|
||||||
if defined?(::DDTrace) && ::DDTrace::VERSION::STRING >= "1.0.0"
|
def create_span(request, configuration, start_time)
|
||||||
|
Datadog::Tracing.trace(
|
||||||
def build_span
|
|
||||||
TRACING_MODULE.trace(
|
|
||||||
SPAN_REQUEST,
|
SPAN_REQUEST,
|
||||||
service: service_name(@request.uri.host, configuration, Datadog.configuration_for(self)),
|
service: service_name(request.uri.host, configuration),
|
||||||
span_type: TYPE_OUTBOUND
|
type: TYPE_OUTBOUND,
|
||||||
|
start_time: start_time
|
||||||
)
|
)
|
||||||
end
|
end
|
||||||
|
|
||||||
def propagate_headers
|
|
||||||
TRACING_MODULE::Propagation::HTTP.inject!(TRACING_MODULE.active_trace, @request.headers)
|
|
||||||
end
|
|
||||||
|
|
||||||
def configuration
|
|
||||||
@configuration ||= Datadog.configuration.tracing[:httpx, @request.uri.host]
|
|
||||||
end
|
|
||||||
|
|
||||||
def tracing_enabled?
|
|
||||||
TRACING_MODULE.enabled?
|
|
||||||
end
|
|
||||||
else
|
else
|
||||||
def build_span
|
def propagate_trace_http(trace, headers)
|
||||||
service_name = configuration[:split_by_domain] ? @request.uri.host : configuration[:service_name]
|
Datadog::Tracing::Propagation::HTTP.inject!(trace.to_digest, headers)
|
||||||
configuration[:tracer].trace(
|
end
|
||||||
|
|
||||||
|
def create_span(request, configuration, start_time)
|
||||||
|
Datadog::Tracing.trace(
|
||||||
SPAN_REQUEST,
|
SPAN_REQUEST,
|
||||||
service: service_name,
|
service: service_name(request.uri.host, configuration),
|
||||||
span_type: TYPE_OUTBOUND
|
span_type: TYPE_OUTBOUND,
|
||||||
|
start_time: start_time
|
||||||
)
|
)
|
||||||
end
|
end
|
||||||
|
|
||||||
def propagate_headers
|
|
||||||
Datadog::HTTPPropagator.inject!(@span.context, @request.headers)
|
|
||||||
end
|
|
||||||
|
|
||||||
def configuration
|
|
||||||
@configuration ||= Datadog.configuration[:httpx, @request.uri.host]
|
|
||||||
end
|
|
||||||
|
|
||||||
def tracing_enabled?
|
|
||||||
configuration[:tracer].enabled
|
|
||||||
end
|
|
||||||
end
|
end
|
||||||
end
|
end
|
||||||
|
|
||||||
module RequestMethods
|
module RequestMethods
|
||||||
def __datadog_enable_trace!
|
# intercepts request initialization to inject the tracing logic.
|
||||||
return super if @__datadog_enable_trace
|
def initialize(*)
|
||||||
|
super
|
||||||
|
|
||||||
RequestTracer.new(self).call
|
return unless Datadog::Tracing.enabled?
|
||||||
@__datadog_enable_trace = true
|
|
||||||
|
RequestTracer.call(self)
|
||||||
end
|
end
|
||||||
end
|
end
|
||||||
|
|
||||||
module ConnectionMethods
|
module ConnectionMethods
|
||||||
def send(request)
|
attr_reader :init_time
|
||||||
request.__datadog_enable_trace!
|
|
||||||
|
|
||||||
|
def initialize(*)
|
||||||
super
|
super
|
||||||
|
|
||||||
|
@init_time = ::Datadog::Core::Utils::Time.now.utc
|
||||||
end
|
end
|
||||||
end
|
end
|
||||||
end
|
end
|
||||||
@ -179,7 +213,7 @@ module TRACING_MODULE # rubocop:disable Naming/ClassAndModuleCamelCase
|
|||||||
module Configuration
|
module Configuration
|
||||||
# Default settings for httpx
|
# Default settings for httpx
|
||||||
#
|
#
|
||||||
class Settings < TRACING_MODULE::Contrib::Configuration::Settings
|
class Settings < Datadog::Tracing::Contrib::Configuration::Settings
|
||||||
DEFAULT_ERROR_HANDLER = lambda do |response|
|
DEFAULT_ERROR_HANDLER = lambda do |response|
|
||||||
Datadog::Ext::HTTP::ERROR_RANGE.cover?(response.status)
|
Datadog::Ext::HTTP::ERROR_RANGE.cover?(response.status)
|
||||||
end
|
end
|
||||||
@ -188,6 +222,25 @@ module TRACING_MODULE # rubocop:disable Naming/ClassAndModuleCamelCase
|
|||||||
option :distributed_tracing, default: true
|
option :distributed_tracing, default: true
|
||||||
option :split_by_domain, default: false
|
option :split_by_domain, default: false
|
||||||
|
|
||||||
|
if Gem::Version.new(DATADOG_VERSION::STRING) >= Gem::Version.new("1.13.0")
|
||||||
|
option :enabled do |o|
|
||||||
|
o.type :bool
|
||||||
|
o.env "DD_TRACE_HTTPX_ENABLED"
|
||||||
|
o.default true
|
||||||
|
end
|
||||||
|
|
||||||
|
option :analytics_enabled do |o|
|
||||||
|
o.type :bool
|
||||||
|
o.env "DD_TRACE_HTTPX_ANALYTICS_ENABLED"
|
||||||
|
o.default false
|
||||||
|
end
|
||||||
|
|
||||||
|
option :analytics_sample_rate do |o|
|
||||||
|
o.type :float
|
||||||
|
o.env "DD_TRACE_HTTPX_ANALYTICS_SAMPLE_RATE"
|
||||||
|
o.default 1.0
|
||||||
|
end
|
||||||
|
else
|
||||||
option :enabled do |o|
|
option :enabled do |o|
|
||||||
o.default { env_to_bool("DD_TRACE_HTTPX_ENABLED", true) }
|
o.default { env_to_bool("DD_TRACE_HTTPX_ENABLED", true) }
|
||||||
o.lazy
|
o.lazy
|
||||||
@ -202,15 +255,49 @@ module TRACING_MODULE # rubocop:disable Naming/ClassAndModuleCamelCase
|
|||||||
o.default { env_to_float(%w[DD_TRACE_HTTPX_ANALYTICS_SAMPLE_RATE DD_HTTPX_ANALYTICS_SAMPLE_RATE], 1.0) }
|
o.default { env_to_float(%w[DD_TRACE_HTTPX_ANALYTICS_SAMPLE_RATE DD_HTTPX_ANALYTICS_SAMPLE_RATE], 1.0) }
|
||||||
o.lazy
|
o.lazy
|
||||||
end
|
end
|
||||||
|
end
|
||||||
|
|
||||||
|
if defined?(Datadog::Tracing::Contrib::SpanAttributeSchema)
|
||||||
|
option :service_name do |o|
|
||||||
|
o.default do
|
||||||
|
Datadog::Tracing::Contrib::SpanAttributeSchema.fetch_service_name(
|
||||||
|
"DD_TRACE_HTTPX_SERVICE_NAME",
|
||||||
|
"httpx"
|
||||||
|
)
|
||||||
|
end
|
||||||
|
o.lazy unless Gem::Version.new(DATADOG_VERSION::STRING) >= Gem::Version.new("1.13.0")
|
||||||
|
end
|
||||||
|
else
|
||||||
|
option :service_name do |o|
|
||||||
|
o.default do
|
||||||
|
ENV.fetch("DD_TRACE_HTTPX_SERVICE_NAME", "httpx")
|
||||||
|
end
|
||||||
|
o.lazy unless Gem::Version.new(DATADOG_VERSION::STRING) >= Gem::Version.new("1.13.0")
|
||||||
|
end
|
||||||
|
end
|
||||||
|
|
||||||
|
option :distributed_tracing, default: true
|
||||||
|
|
||||||
|
if Gem::Version.new(DATADOG_VERSION::STRING) >= Gem::Version.new("1.15.0")
|
||||||
|
option :error_handler do |o|
|
||||||
|
o.type :proc
|
||||||
|
o.default_proc(&DEFAULT_ERROR_HANDLER)
|
||||||
|
end
|
||||||
|
elsif Gem::Version.new(DATADOG_VERSION::STRING) >= Gem::Version.new("1.13.0")
|
||||||
|
option :error_handler do |o|
|
||||||
|
o.type :proc
|
||||||
|
o.experimental_default_proc(&DEFAULT_ERROR_HANDLER)
|
||||||
|
end
|
||||||
|
else
|
||||||
option :error_handler, default: DEFAULT_ERROR_HANDLER
|
option :error_handler, default: DEFAULT_ERROR_HANDLER
|
||||||
end
|
end
|
||||||
end
|
end
|
||||||
|
end
|
||||||
|
|
||||||
# Patcher enables patching of 'httpx' with datadog components.
|
# Patcher enables patching of 'httpx' with datadog components.
|
||||||
#
|
#
|
||||||
module Patcher
|
module Patcher
|
||||||
include TRACING_MODULE::Contrib::Patcher
|
include Datadog::Tracing::Contrib::Patcher
|
||||||
|
|
||||||
module_function
|
module_function
|
||||||
|
|
||||||
@ -233,7 +320,6 @@ module TRACING_MODULE # rubocop:disable Naming/ClassAndModuleCamelCase
|
|||||||
class Integration
|
class Integration
|
||||||
include Contrib::Integration
|
include Contrib::Integration
|
||||||
|
|
||||||
# MINIMUM_VERSION = Gem::Version.new('0.11.0')
|
|
||||||
MINIMUM_VERSION = Gem::Version.new("0.10.2")
|
MINIMUM_VERSION = Gem::Version.new("0.10.2")
|
||||||
|
|
||||||
register_as :httpx
|
register_as :httpx
|
||||||
@ -250,15 +336,9 @@ module TRACING_MODULE # rubocop:disable Naming/ClassAndModuleCamelCase
|
|||||||
super && version >= MINIMUM_VERSION
|
super && version >= MINIMUM_VERSION
|
||||||
end
|
end
|
||||||
|
|
||||||
if defined?(::DDTrace) && ::DDTrace::VERSION::STRING >= "1.0.0"
|
|
||||||
def new_configuration
|
def new_configuration
|
||||||
Configuration::Settings.new
|
Configuration::Settings.new
|
||||||
end
|
end
|
||||||
else
|
|
||||||
def default_configuration
|
|
||||||
Configuration::Settings.new
|
|
||||||
end
|
|
||||||
end
|
|
||||||
|
|
||||||
def patcher
|
def patcher
|
||||||
Patcher
|
Patcher
|
||||||
|
@ -7,69 +7,112 @@ require "faraday"
|
|||||||
module Faraday
|
module Faraday
|
||||||
class Adapter
|
class Adapter
|
||||||
class HTTPX < Faraday::Adapter
|
class HTTPX < Faraday::Adapter
|
||||||
# :nocov:
|
|
||||||
SSL_ERROR = if defined?(Faraday::SSLError)
|
|
||||||
Faraday::SSLError
|
|
||||||
else
|
|
||||||
Faraday::Error::SSLError
|
|
||||||
end
|
|
||||||
|
|
||||||
CONNECTION_FAILED_ERROR = if defined?(Faraday::ConnectionFailed)
|
|
||||||
Faraday::ConnectionFailed
|
|
||||||
else
|
|
||||||
Faraday::Error::ConnectionFailed
|
|
||||||
end
|
|
||||||
# :nocov:
|
|
||||||
|
|
||||||
unless Faraday::RequestOptions.method_defined?(:stream_response?)
|
|
||||||
module RequestOptionsExtensions
|
|
||||||
refine Faraday::RequestOptions do
|
|
||||||
def stream_response?
|
|
||||||
false
|
|
||||||
end
|
|
||||||
end
|
|
||||||
end
|
|
||||||
using RequestOptionsExtensions
|
|
||||||
end
|
|
||||||
|
|
||||||
module RequestMixin
|
module RequestMixin
|
||||||
using ::HTTPX::HashExtensions
|
def build_connection(env)
|
||||||
|
return @connection if defined?(@connection)
|
||||||
|
|
||||||
|
@connection = ::HTTPX.plugin(:persistent).plugin(ReasonPlugin)
|
||||||
|
@connection = @connection.with(@connection_options) unless @connection_options.empty?
|
||||||
|
connection_opts = options_from_env(env)
|
||||||
|
|
||||||
|
if (bind = env.request.bind)
|
||||||
|
@bind = TCPSocket.new(bind[:host], bind[:port])
|
||||||
|
connection_opts[:io] = @bind
|
||||||
|
end
|
||||||
|
@connection = @connection.with(connection_opts)
|
||||||
|
|
||||||
|
if (proxy = env.request.proxy)
|
||||||
|
proxy_options = { uri: proxy.uri }
|
||||||
|
proxy_options[:username] = proxy.user if proxy.user
|
||||||
|
proxy_options[:password] = proxy.password if proxy.password
|
||||||
|
|
||||||
|
@connection = @connection.plugin(:proxy).with(proxy: proxy_options)
|
||||||
|
end
|
||||||
|
@connection = @connection.plugin(OnDataPlugin) if env.request.stream_response?
|
||||||
|
|
||||||
|
@connection = @config_block.call(@connection) || @connection if @config_block
|
||||||
|
@connection
|
||||||
|
end
|
||||||
|
|
||||||
|
def close
|
||||||
|
@connection.close if @connection
|
||||||
|
@bind.close if @bind
|
||||||
|
end
|
||||||
|
|
||||||
private
|
private
|
||||||
|
|
||||||
|
def connect(env, &blk)
|
||||||
|
connection(env, &blk)
|
||||||
|
rescue ::HTTPX::TLSError => e
|
||||||
|
raise Faraday::SSLError, e
|
||||||
|
rescue Errno::ECONNABORTED,
|
||||||
|
Errno::ECONNREFUSED,
|
||||||
|
Errno::ECONNRESET,
|
||||||
|
Errno::EHOSTUNREACH,
|
||||||
|
Errno::EINVAL,
|
||||||
|
Errno::ENETUNREACH,
|
||||||
|
Errno::EPIPE,
|
||||||
|
::HTTPX::ConnectionError => e
|
||||||
|
raise Faraday::ConnectionFailed, e
|
||||||
|
end
|
||||||
|
|
||||||
def build_request(env)
|
def build_request(env)
|
||||||
meth = env[:method]
|
meth = env[:method]
|
||||||
|
|
||||||
request_options = {
|
request_options = {
|
||||||
headers: env.request_headers,
|
headers: env.request_headers,
|
||||||
body: env.body,
|
body: env.body,
|
||||||
|
**options_from_env(env),
|
||||||
}
|
}
|
||||||
[meth.to_s.upcase, env.url, request_options]
|
[meth.to_s.upcase, env.url, request_options]
|
||||||
end
|
end
|
||||||
|
|
||||||
def options_from_env(env)
|
def options_from_env(env)
|
||||||
timeout_options = {
|
timeout_options = {}
|
||||||
connect_timeout: env.request.open_timeout,
|
req_opts = env.request
|
||||||
operation_timeout: env.request.timeout,
|
if (sec = request_timeout(:read, req_opts))
|
||||||
}.compact
|
timeout_options[:read_timeout] = sec
|
||||||
|
end
|
||||||
|
|
||||||
options = {
|
if (sec = request_timeout(:write, req_opts))
|
||||||
ssl: {},
|
timeout_options[:write_timeout] = sec
|
||||||
|
end
|
||||||
|
|
||||||
|
if (sec = request_timeout(:open, req_opts))
|
||||||
|
timeout_options[:connect_timeout] = sec
|
||||||
|
end
|
||||||
|
|
||||||
|
{
|
||||||
|
ssl: ssl_options_from_env(env),
|
||||||
timeout: timeout_options,
|
timeout: timeout_options,
|
||||||
}
|
}
|
||||||
|
end
|
||||||
|
|
||||||
options[:ssl][:verify_mode] = OpenSSL::SSL::VERIFY_PEER if env.ssl.verify
|
if defined?(::OpenSSL)
|
||||||
options[:ssl][:ca_file] = env.ssl.ca_file if env.ssl.ca_file
|
def ssl_options_from_env(env)
|
||||||
options[:ssl][:ca_path] = env.ssl.ca_path if env.ssl.ca_path
|
ssl_options = {}
|
||||||
options[:ssl][:cert_store] = env.ssl.cert_store if env.ssl.cert_store
|
|
||||||
options[:ssl][:cert] = env.ssl.client_cert if env.ssl.client_cert
|
|
||||||
options[:ssl][:key] = env.ssl.client_key if env.ssl.client_key
|
|
||||||
options[:ssl][:ssl_version] = env.ssl.version if env.ssl.version
|
|
||||||
options[:ssl][:verify_depth] = env.ssl.verify_depth if env.ssl.verify_depth
|
|
||||||
options[:ssl][:min_version] = env.ssl.min_version if env.ssl.min_version
|
|
||||||
options[:ssl][:max_version] = env.ssl.max_version if env.ssl.max_version
|
|
||||||
|
|
||||||
options
|
unless env.ssl.verify.nil?
|
||||||
|
ssl_options[:verify_mode] = env.ssl.verify ? OpenSSL::SSL::VERIFY_PEER : OpenSSL::SSL::VERIFY_NONE
|
||||||
|
end
|
||||||
|
|
||||||
|
ssl_options[:ca_file] = env.ssl.ca_file if env.ssl.ca_file
|
||||||
|
ssl_options[:ca_path] = env.ssl.ca_path if env.ssl.ca_path
|
||||||
|
ssl_options[:cert_store] = env.ssl.cert_store if env.ssl.cert_store
|
||||||
|
ssl_options[:cert] = env.ssl.client_cert if env.ssl.client_cert
|
||||||
|
ssl_options[:key] = env.ssl.client_key if env.ssl.client_key
|
||||||
|
ssl_options[:ssl_version] = env.ssl.version if env.ssl.version
|
||||||
|
ssl_options[:verify_depth] = env.ssl.verify_depth if env.ssl.verify_depth
|
||||||
|
ssl_options[:min_version] = env.ssl.min_version if env.ssl.min_version
|
||||||
|
ssl_options[:max_version] = env.ssl.max_version if env.ssl.max_version
|
||||||
|
ssl_options
|
||||||
|
end
|
||||||
|
else
|
||||||
|
# :nocov:
|
||||||
|
def ssl_options_from_env(*)
|
||||||
|
{}
|
||||||
|
end
|
||||||
|
# :nocov:
|
||||||
end
|
end
|
||||||
end
|
end
|
||||||
|
|
||||||
@ -100,30 +143,15 @@ module Faraday
|
|||||||
end
|
end
|
||||||
|
|
||||||
module ReasonPlugin
|
module ReasonPlugin
|
||||||
if RUBY_VERSION < "2.5"
|
|
||||||
def self.load_dependencies(*)
|
|
||||||
require "webrick"
|
|
||||||
end
|
|
||||||
else
|
|
||||||
def self.load_dependencies(*)
|
def self.load_dependencies(*)
|
||||||
require "net/http/status"
|
require "net/http/status"
|
||||||
end
|
end
|
||||||
end
|
|
||||||
module ResponseMethods
|
|
||||||
if RUBY_VERSION < "2.5"
|
|
||||||
def reason
|
|
||||||
WEBrick::HTTPStatus::StatusMessage.fetch(@status)
|
|
||||||
end
|
|
||||||
else
|
|
||||||
def reason
|
|
||||||
Net::HTTP::STATUS_CODES.fetch(@status)
|
|
||||||
end
|
|
||||||
end
|
|
||||||
end
|
|
||||||
end
|
|
||||||
|
|
||||||
def self.session
|
module ResponseMethods
|
||||||
@session ||= ::HTTPX.plugin(:compression).plugin(:persistent).plugin(ReasonPlugin)
|
def reason
|
||||||
|
Net::HTTP::STATUS_CODES.fetch(@status, "Non-Standard status code")
|
||||||
|
end
|
||||||
|
end
|
||||||
end
|
end
|
||||||
|
|
||||||
class ParallelManager
|
class ParallelManager
|
||||||
@ -158,8 +186,9 @@ module Faraday
|
|||||||
|
|
||||||
include RequestMixin
|
include RequestMixin
|
||||||
|
|
||||||
def initialize
|
def initialize(options)
|
||||||
@handlers = []
|
@handlers = []
|
||||||
|
@connection_options = options
|
||||||
end
|
end
|
||||||
|
|
||||||
def enqueue(request)
|
def enqueue(request)
|
||||||
@ -173,10 +202,7 @@ module Faraday
|
|||||||
|
|
||||||
env = @handlers.last.env
|
env = @handlers.last.env
|
||||||
|
|
||||||
session = HTTPX.session.with(options_from_env(env))
|
connect(env) do |session|
|
||||||
session = session.plugin(:proxy).with(proxy: { uri: env.request.proxy }) if env.request.proxy
|
|
||||||
session = session.plugin(OnDataPlugin) if env.request.stream_response?
|
|
||||||
|
|
||||||
requests = @handlers.map { |handler| session.build_request(*build_request(handler.env)) }
|
requests = @handlers.map { |handler| session.build_request(*build_request(handler.env)) }
|
||||||
|
|
||||||
if env.request.stream_response?
|
if env.request.stream_response?
|
||||||
@ -189,24 +215,38 @@ module Faraday
|
|||||||
Array(responses).each_with_index do |response, index|
|
Array(responses).each_with_index do |response, index|
|
||||||
handler = @handlers[index]
|
handler = @handlers[index]
|
||||||
handler.on_response.call(response)
|
handler.on_response.call(response)
|
||||||
handler.on_complete.call(handler.env)
|
handler.on_complete.call(handler.env) if handler.on_complete
|
||||||
end
|
end
|
||||||
end
|
end
|
||||||
|
rescue ::HTTPX::TimeoutError => e
|
||||||
|
raise Faraday::TimeoutError, e
|
||||||
|
end
|
||||||
|
|
||||||
|
# from Faraday::Adapter#connection
|
||||||
|
def connection(env)
|
||||||
|
conn = build_connection(env)
|
||||||
|
return conn unless block_given?
|
||||||
|
|
||||||
|
yield conn
|
||||||
|
end
|
||||||
|
|
||||||
|
private
|
||||||
|
|
||||||
|
# from Faraday::Adapter#request_timeout
|
||||||
|
def request_timeout(type, options)
|
||||||
|
key = Faraday::Adapter::TIMEOUT_KEYS[type]
|
||||||
|
options[key] || options[:timeout]
|
||||||
|
end
|
||||||
end
|
end
|
||||||
|
|
||||||
self.supports_parallel = true
|
self.supports_parallel = true
|
||||||
|
|
||||||
class << self
|
class << self
|
||||||
def setup_parallel_manager
|
def setup_parallel_manager(options = {})
|
||||||
ParallelManager.new
|
ParallelManager.new(options)
|
||||||
end
|
end
|
||||||
end
|
end
|
||||||
|
|
||||||
def initialize(app, options = {})
|
|
||||||
super(app)
|
|
||||||
@session_options = options
|
|
||||||
end
|
|
||||||
|
|
||||||
def call(env)
|
def call(env)
|
||||||
super
|
super
|
||||||
if parallel?(env)
|
if parallel?(env)
|
||||||
@ -224,12 +264,17 @@ module Faraday
|
|||||||
return handler
|
return handler
|
||||||
end
|
end
|
||||||
|
|
||||||
session = HTTPX.session
|
response = connect_and_request(env)
|
||||||
session = session.with(@session_options) unless @session_options.empty?
|
save_response(env, response.status, response.body.to_s, response.headers, response.reason) do |response_headers|
|
||||||
session = session.with(options_from_env(env))
|
response_headers.merge!(response.headers)
|
||||||
session = session.plugin(:proxy).with(proxy: { uri: env.request.proxy }) if env.request.proxy
|
end
|
||||||
session = session.plugin(OnDataPlugin) if env.request.stream_response?
|
@app.call(env)
|
||||||
|
end
|
||||||
|
|
||||||
|
private
|
||||||
|
|
||||||
|
def connect_and_request(env)
|
||||||
|
connect(env) do |session|
|
||||||
request = session.build_request(*build_request(env))
|
request = session.build_request(*build_request(env))
|
||||||
|
|
||||||
request.response_on_data = env.request.on_data if env.request.stream_response?
|
request.response_on_data = env.request.on_data if env.request.stream_response?
|
||||||
@ -237,25 +282,12 @@ module Faraday
|
|||||||
response = session.request(request)
|
response = session.request(request)
|
||||||
# do not call #raise_for_status for HTTP 4xx or 5xx, as faraday has a middleware for that.
|
# do not call #raise_for_status for HTTP 4xx or 5xx, as faraday has a middleware for that.
|
||||||
response.raise_for_status unless response.is_a?(::HTTPX::Response)
|
response.raise_for_status unless response.is_a?(::HTTPX::Response)
|
||||||
save_response(env, response.status, response.body.to_s, response.headers, response.reason) do |response_headers|
|
response
|
||||||
response_headers.merge!(response.headers)
|
|
||||||
end
|
end
|
||||||
@app.call(env)
|
rescue ::HTTPX::TimeoutError => e
|
||||||
rescue ::HTTPX::TLSError => e
|
raise Faraday::TimeoutError, e
|
||||||
raise SSL_ERROR, e
|
|
||||||
rescue Errno::ECONNABORTED,
|
|
||||||
Errno::ECONNREFUSED,
|
|
||||||
Errno::ECONNRESET,
|
|
||||||
Errno::EHOSTUNREACH,
|
|
||||||
Errno::EINVAL,
|
|
||||||
Errno::ENETUNREACH,
|
|
||||||
Errno::EPIPE,
|
|
||||||
::HTTPX::ConnectionError => e
|
|
||||||
raise CONNECTION_FAILED_ERROR, e
|
|
||||||
end
|
end
|
||||||
|
|
||||||
private
|
|
||||||
|
|
||||||
def parallel?(env)
|
def parallel?(env)
|
||||||
env[:parallel_manager]
|
env[:parallel_manager]
|
||||||
end
|
end
|
||||||
|
@ -27,6 +27,11 @@ module HTTPX::Plugins
|
|||||||
def set_sentry_trace_header(request, sentry_span)
|
def set_sentry_trace_header(request, sentry_span)
|
||||||
return unless sentry_span
|
return unless sentry_span
|
||||||
|
|
||||||
|
config = ::Sentry.configuration
|
||||||
|
url = request.uri.to_s
|
||||||
|
|
||||||
|
return unless config.propagate_traces && config.trace_propagation_targets.any? { |target| url.match?(target) }
|
||||||
|
|
||||||
trace = ::Sentry.get_current_client.generate_sentry_trace(sentry_span)
|
trace = ::Sentry.get_current_client.generate_sentry_trace(sentry_span)
|
||||||
request.headers[::Sentry::SENTRY_TRACE_HEADER_NAME] = trace if trace
|
request.headers[::Sentry::SENTRY_TRACE_HEADER_NAME] = trace if trace
|
||||||
end
|
end
|
||||||
@ -91,7 +96,7 @@ module HTTPX::Plugins
|
|||||||
|
|
||||||
module RequestMethods
|
module RequestMethods
|
||||||
def __sentry_enable_trace!
|
def __sentry_enable_trace!
|
||||||
return super if @__sentry_enable_trace
|
return if @__sentry_enable_trace
|
||||||
|
|
||||||
Tracer.call(self)
|
Tracer.call(self)
|
||||||
@__sentry_enable_trace = true
|
@__sentry_enable_trace = true
|
||||||
@ -108,7 +113,7 @@ module HTTPX::Plugins
|
|||||||
end
|
end
|
||||||
end
|
end
|
||||||
|
|
||||||
Sentry.register_patch do
|
Sentry.register_patch(:httpx) do
|
||||||
sentry_session = HTTPX.plugin(HTTPX::Plugins::Sentry)
|
sentry_session = HTTPX.plugin(HTTPX::Plugins::Sentry)
|
||||||
|
|
||||||
HTTPX.send(:remove_const, :Session)
|
HTTPX.send(:remove_const, :Session)
|
||||||
|
@ -2,13 +2,8 @@
|
|||||||
|
|
||||||
module WebMock
|
module WebMock
|
||||||
module HttpLibAdapters
|
module HttpLibAdapters
|
||||||
if RUBY_VERSION < "2.5"
|
|
||||||
require "webrick/httpstatus"
|
|
||||||
HTTP_REASONS = WEBrick::HTTPStatus::StatusMessage
|
|
||||||
else
|
|
||||||
require "net/http/status"
|
require "net/http/status"
|
||||||
HTTP_REASONS = Net::HTTP::STATUS_CODES
|
HTTP_REASONS = Net::HTTP::STATUS_CODES
|
||||||
end
|
|
||||||
|
|
||||||
#
|
#
|
||||||
# HTTPX plugin for webmock.
|
# HTTPX plugin for webmock.
|
||||||
@ -25,7 +20,7 @@ module WebMock
|
|||||||
WebMock::RequestSignature.new(
|
WebMock::RequestSignature.new(
|
||||||
request.verb.downcase.to_sym,
|
request.verb.downcase.to_sym,
|
||||||
uri.to_s,
|
uri.to_s,
|
||||||
body: request.body.each.to_a.join,
|
body: request.body.to_s,
|
||||||
headers: request.headers.to_h
|
headers: request.headers.to_h
|
||||||
)
|
)
|
||||||
end
|
end
|
||||||
@ -43,27 +38,53 @@ module WebMock
|
|||||||
|
|
||||||
return build_error_response(request, webmock_response.exception) if webmock_response.exception
|
return build_error_response(request, webmock_response.exception) if webmock_response.exception
|
||||||
|
|
||||||
response = request.options.response_class.new(request,
|
request.options.response_class.new(request,
|
||||||
webmock_response.status[0],
|
webmock_response.status[0],
|
||||||
"2.0",
|
"2.0",
|
||||||
webmock_response.headers)
|
webmock_response.headers).tap do |res|
|
||||||
response << webmock_response.body.dup
|
res.mocked = true
|
||||||
response
|
end
|
||||||
end
|
end
|
||||||
|
|
||||||
def build_error_response(request, exception)
|
def build_error_response(request, exception)
|
||||||
HTTPX::ErrorResponse.new(request, exception, request.options)
|
HTTPX::ErrorResponse.new(request, exception)
|
||||||
end
|
end
|
||||||
end
|
end
|
||||||
|
|
||||||
module InstanceMethods
|
module InstanceMethods
|
||||||
def build_connection(*)
|
private
|
||||||
connection = super
|
|
||||||
|
def do_init_connection(connection, selector)
|
||||||
|
super
|
||||||
|
|
||||||
connection.once(:unmock_connection) do
|
connection.once(:unmock_connection) do
|
||||||
pool.__send__(:resolve_connection, connection)
|
next unless connection.current_session == self
|
||||||
pool.__send__(:unregister_connection, connection) unless connection.addresses
|
|
||||||
|
unless connection.addresses
|
||||||
|
# reset Happy Eyeballs, fail early
|
||||||
|
connection.sibling = nil
|
||||||
|
|
||||||
|
deselect_connection(connection, selector)
|
||||||
end
|
end
|
||||||
connection
|
resolve_connection(connection, selector)
|
||||||
|
end
|
||||||
|
end
|
||||||
|
end
|
||||||
|
|
||||||
|
module ResponseMethods
|
||||||
|
attr_accessor :mocked
|
||||||
|
|
||||||
|
def initialize(*)
|
||||||
|
super
|
||||||
|
@mocked = false
|
||||||
|
end
|
||||||
|
end
|
||||||
|
|
||||||
|
module ResponseBodyMethods
|
||||||
|
def decode_chunk(chunk)
|
||||||
|
return chunk if @response.mocked
|
||||||
|
|
||||||
|
super
|
||||||
end
|
end
|
||||||
end
|
end
|
||||||
|
|
||||||
@ -85,6 +106,10 @@ module WebMock
|
|||||||
super
|
super
|
||||||
end
|
end
|
||||||
|
|
||||||
|
def terminate
|
||||||
|
force_reset
|
||||||
|
end
|
||||||
|
|
||||||
def send(request)
|
def send(request)
|
||||||
request_signature = Plugin.build_webmock_request_signature(request)
|
request_signature = Plugin.build_webmock_request_signature(request)
|
||||||
WebMock::RequestRegistry.instance.requested_signatures.put(request_signature)
|
WebMock::RequestRegistry.instance.requested_signatures.put(request_signature)
|
||||||
@ -93,8 +118,16 @@ module WebMock
|
|||||||
response = Plugin.build_from_webmock_response(request, mock_response)
|
response = Plugin.build_from_webmock_response(request, mock_response)
|
||||||
WebMock::CallbackRegistry.invoke_callbacks({ lib: :httpx }, request_signature, mock_response)
|
WebMock::CallbackRegistry.invoke_callbacks({ lib: :httpx }, request_signature, mock_response)
|
||||||
log { "mocking #{request.uri} with #{mock_response.inspect}" }
|
log { "mocking #{request.uri} with #{mock_response.inspect}" }
|
||||||
|
request.transition(:headers)
|
||||||
|
request.transition(:body)
|
||||||
|
request.transition(:trailers)
|
||||||
|
request.transition(:done)
|
||||||
|
response.finish!
|
||||||
request.response = response
|
request.response = response
|
||||||
request.emit(:response, response)
|
request.emit(:response, response)
|
||||||
|
request_signature.headers = request.headers.to_h
|
||||||
|
|
||||||
|
response << mock_response.body.dup unless response.is_a?(HTTPX::ErrorResponse)
|
||||||
elsif WebMock.net_connect_allowed?(request_signature.uri)
|
elsif WebMock.net_connect_allowed?(request_signature.uri)
|
||||||
if WebMock::CallbackRegistry.any_callbacks?
|
if WebMock::CallbackRegistry.any_callbacks?
|
||||||
request.on(:response) do |resp|
|
request.on(:response) do |resp|
|
||||||
|
@ -4,7 +4,59 @@ require "strscan"
|
|||||||
|
|
||||||
module HTTPX
|
module HTTPX
|
||||||
module AltSvc
|
module AltSvc
|
||||||
@altsvc_mutex = Mutex.new
|
# makes connections able to accept requests destined to primary service.
|
||||||
|
module ConnectionMixin
|
||||||
|
using URIExtensions
|
||||||
|
|
||||||
|
def send(request)
|
||||||
|
request.headers["alt-used"] = @origin.authority if @parser && !@write_buffer.full? && match_altsvcs?(request.uri)
|
||||||
|
|
||||||
|
super
|
||||||
|
end
|
||||||
|
|
||||||
|
def match?(uri, options)
|
||||||
|
return false if !used? && (@state == :closing || @state == :closed)
|
||||||
|
|
||||||
|
match_altsvcs?(uri) && match_altsvc_options?(uri, options)
|
||||||
|
end
|
||||||
|
|
||||||
|
private
|
||||||
|
|
||||||
|
# checks if this is connection is an alternative service of
|
||||||
|
# +uri+
|
||||||
|
def match_altsvcs?(uri)
|
||||||
|
@origins.any? { |origin| altsvc_match?(uri, origin) } ||
|
||||||
|
AltSvc.cached_altsvc(@origin).any? do |altsvc|
|
||||||
|
origin = altsvc["origin"]
|
||||||
|
altsvc_match?(origin, uri.origin)
|
||||||
|
end
|
||||||
|
end
|
||||||
|
|
||||||
|
def match_altsvc_options?(uri, options)
|
||||||
|
return @options == options unless @options.ssl.all? do |k, v|
|
||||||
|
v == (k == :hostname ? uri.host : options.ssl[k])
|
||||||
|
end
|
||||||
|
|
||||||
|
@options.options_equals?(options, Options::REQUEST_BODY_IVARS + %i[@ssl])
|
||||||
|
end
|
||||||
|
|
||||||
|
def altsvc_match?(uri, other_uri)
|
||||||
|
other_uri = URI(other_uri)
|
||||||
|
|
||||||
|
uri.origin == other_uri.origin || begin
|
||||||
|
case uri.scheme
|
||||||
|
when "h2"
|
||||||
|
(other_uri.scheme == "https" || other_uri.scheme == "h2") &&
|
||||||
|
uri.host == other_uri.host &&
|
||||||
|
uri.port == other_uri.port
|
||||||
|
else
|
||||||
|
false
|
||||||
|
end
|
||||||
|
end
|
||||||
|
end
|
||||||
|
end
|
||||||
|
|
||||||
|
@altsvc_mutex = Thread::Mutex.new
|
||||||
@altsvcs = Hash.new { |h, k| h[k] = [] }
|
@altsvcs = Hash.new { |h, k| h[k] = [] }
|
||||||
|
|
||||||
module_function
|
module_function
|
||||||
@ -46,7 +98,7 @@ module HTTPX
|
|||||||
|
|
||||||
altsvc = response.headers["alt-svc"]
|
altsvc = response.headers["alt-svc"]
|
||||||
|
|
||||||
# https://tools.ietf.org/html/rfc7838#section-3
|
# https://datatracker.ietf.org/doc/html/rfc7838#section-3
|
||||||
# A field value containing the special value "clear" indicates that the
|
# A field value containing the special value "clear" indicates that the
|
||||||
# origin requests all alternatives for that origin to be invalidated
|
# origin requests all alternatives for that origin to be invalidated
|
||||||
# (including those specified in the same response, in case of an
|
# (including those specified in the same response, in case of an
|
||||||
@ -79,9 +131,9 @@ module HTTPX
|
|||||||
scanner.skip(/;/)
|
scanner.skip(/;/)
|
||||||
break if scanner.eos? || scanner.scan(/ *, */)
|
break if scanner.eos? || scanner.scan(/ *, */)
|
||||||
end
|
end
|
||||||
alt_params = Hash[alt_params.map { |field| field.split("=") }]
|
alt_params = Hash[alt_params.map { |field| field.split("=", 2) }]
|
||||||
|
|
||||||
alt_proto, alt_authority = alt_service.split("=")
|
alt_proto, alt_authority = alt_service.split("=", 2)
|
||||||
alt_origin = parse_altsvc_origin(alt_proto, alt_authority)
|
alt_origin = parse_altsvc_origin(alt_proto, alt_authority)
|
||||||
return unless alt_origin
|
return unless alt_origin
|
||||||
|
|
||||||
@ -98,29 +150,14 @@ module HTTPX
|
|||||||
end
|
end
|
||||||
end
|
end
|
||||||
|
|
||||||
# :nocov:
|
|
||||||
if RUBY_VERSION < "2.2"
|
|
||||||
def parse_altsvc_origin(alt_proto, alt_origin)
|
def parse_altsvc_origin(alt_proto, alt_origin)
|
||||||
alt_scheme = parse_altsvc_scheme(alt_proto) or return
|
alt_scheme = parse_altsvc_scheme(alt_proto)
|
||||||
|
|
||||||
|
return unless alt_scheme
|
||||||
|
|
||||||
alt_origin = alt_origin[1..-2] if alt_origin.start_with?("\"") && alt_origin.end_with?("\"")
|
|
||||||
if alt_origin.start_with?(":")
|
|
||||||
alt_origin = "#{alt_scheme}://dummy#{alt_origin}"
|
|
||||||
uri = URI.parse(alt_origin)
|
|
||||||
uri.host = nil
|
|
||||||
uri
|
|
||||||
else
|
|
||||||
URI.parse("#{alt_scheme}://#{alt_origin}")
|
|
||||||
end
|
|
||||||
end
|
|
||||||
else
|
|
||||||
def parse_altsvc_origin(alt_proto, alt_origin)
|
|
||||||
alt_scheme = parse_altsvc_scheme(alt_proto) or return
|
|
||||||
alt_origin = alt_origin[1..-2] if alt_origin.start_with?("\"") && alt_origin.end_with?("\"")
|
alt_origin = alt_origin[1..-2] if alt_origin.start_with?("\"") && alt_origin.end_with?("\"")
|
||||||
|
|
||||||
URI.parse("#{alt_scheme}://#{alt_origin}")
|
URI.parse("#{alt_scheme}://#{alt_origin}")
|
||||||
end
|
end
|
||||||
end
|
end
|
||||||
# :nocov:
|
|
||||||
end
|
|
||||||
end
|
end
|
||||||
|
27
lib/httpx/base64.rb
Normal file
27
lib/httpx/base64.rb
Normal file
@ -0,0 +1,27 @@
|
|||||||
|
# frozen_string_literal: true
|
||||||
|
|
||||||
|
if RUBY_VERSION < "3.3.0"
|
||||||
|
require "base64"
|
||||||
|
elsif !defined?(Base64)
|
||||||
|
module HTTPX
|
||||||
|
# require "base64" will not be a default gem after ruby 3.4.0
|
||||||
|
module Base64
|
||||||
|
module_function
|
||||||
|
|
||||||
|
def decode64(str)
|
||||||
|
str.unpack1("m")
|
||||||
|
end
|
||||||
|
|
||||||
|
def strict_encode64(bin)
|
||||||
|
[bin].pack("m0")
|
||||||
|
end
|
||||||
|
|
||||||
|
def urlsafe_encode64(bin, padding: true)
|
||||||
|
str = strict_encode64(bin)
|
||||||
|
str.chomp!("==") or str.chomp!("=") unless padding
|
||||||
|
str.tr!("+/", "-_")
|
||||||
|
str
|
||||||
|
end
|
||||||
|
end
|
||||||
|
end
|
||||||
|
end
|
@ -3,11 +3,17 @@
|
|||||||
require "forwardable"
|
require "forwardable"
|
||||||
|
|
||||||
module HTTPX
|
module HTTPX
|
||||||
|
# Internal class to abstract a string buffer, by wrapping a string and providing the
|
||||||
|
# minimum possible API and functionality required.
|
||||||
|
#
|
||||||
|
# buffer = Buffer.new(640)
|
||||||
|
# buffer.full? #=> false
|
||||||
|
# buffer << "aa"
|
||||||
|
# buffer.capacity #=> 638
|
||||||
|
#
|
||||||
class Buffer
|
class Buffer
|
||||||
extend Forwardable
|
extend Forwardable
|
||||||
|
|
||||||
def_delegator :@buffer, :<<
|
|
||||||
|
|
||||||
def_delegator :@buffer, :to_s
|
def_delegator :@buffer, :to_s
|
||||||
|
|
||||||
def_delegator :@buffer, :to_str
|
def_delegator :@buffer, :to_str
|
||||||
@ -22,11 +28,24 @@ module HTTPX
|
|||||||
|
|
||||||
attr_reader :limit
|
attr_reader :limit
|
||||||
|
|
||||||
|
if RUBY_VERSION >= "3.4.0"
|
||||||
|
def initialize(limit)
|
||||||
|
@buffer = String.new("", encoding: Encoding::BINARY, capacity: limit)
|
||||||
|
@limit = limit
|
||||||
|
end
|
||||||
|
|
||||||
|
def <<(chunk)
|
||||||
|
@buffer.append_as_bytes(chunk)
|
||||||
|
end
|
||||||
|
else
|
||||||
def initialize(limit)
|
def initialize(limit)
|
||||||
@buffer = "".b
|
@buffer = "".b
|
||||||
@limit = limit
|
@limit = limit
|
||||||
end
|
end
|
||||||
|
|
||||||
|
def_delegator :@buffer, :<<
|
||||||
|
end
|
||||||
|
|
||||||
def full?
|
def full?
|
||||||
@buffer.bytesize >= @limit
|
@buffer.bytesize >= @limit
|
||||||
end
|
end
|
||||||
|
@ -4,6 +4,7 @@ module HTTPX
|
|||||||
module Callbacks
|
module Callbacks
|
||||||
def on(type, &action)
|
def on(type, &action)
|
||||||
callbacks(type) << action
|
callbacks(type) << action
|
||||||
|
action
|
||||||
end
|
end
|
||||||
|
|
||||||
def once(type, &block)
|
def once(type, &block)
|
||||||
@ -13,17 +14,13 @@ module HTTPX
|
|||||||
end
|
end
|
||||||
end
|
end
|
||||||
|
|
||||||
def only(type, &block)
|
|
||||||
callbacks(type).clear
|
|
||||||
on(type, &block)
|
|
||||||
end
|
|
||||||
|
|
||||||
def emit(type, *args)
|
def emit(type, *args)
|
||||||
|
log { "emit #{type.inspect} callbacks" } if respond_to?(:log)
|
||||||
callbacks(type).delete_if { |pr| :delete == pr.call(*args) } # rubocop:disable Style/YodaCondition
|
callbacks(type).delete_if { |pr| :delete == pr.call(*args) } # rubocop:disable Style/YodaCondition
|
||||||
end
|
end
|
||||||
|
|
||||||
def callbacks_for?(type)
|
def callbacks_for?(type)
|
||||||
@callbacks.key?(type) && @callbacks[type].any?
|
@callbacks && @callbacks.key?(type) && @callbacks[type].any?
|
||||||
end
|
end
|
||||||
|
|
||||||
protected
|
protected
|
||||||
|
@ -1,6 +1,8 @@
|
|||||||
# frozen_string_literal: true
|
# frozen_string_literal: true
|
||||||
|
|
||||||
module HTTPX
|
module HTTPX
|
||||||
|
# Session mixin, implements most of the APIs that the users call.
|
||||||
|
# delegates to a default session when extended.
|
||||||
module Chainable
|
module Chainable
|
||||||
%w[head get post put delete trace options connect patch].each do |meth|
|
%w[head get post put delete trace options connect patch].each do |meth|
|
||||||
class_eval(<<-MOD, __FILE__, __LINE__ + 1)
|
class_eval(<<-MOD, __FILE__, __LINE__ + 1)
|
||||||
@ -10,80 +12,95 @@ module HTTPX
|
|||||||
MOD
|
MOD
|
||||||
end
|
end
|
||||||
|
|
||||||
|
# delegates to the default session (see HTTPX::Session#request).
|
||||||
def request(*args, **options)
|
def request(*args, **options)
|
||||||
branch(default_options).request(*args, **options)
|
branch(default_options).request(*args, **options)
|
||||||
end
|
end
|
||||||
|
|
||||||
# :nocov:
|
|
||||||
def timeout(**args)
|
|
||||||
warn ":#{__method__} is deprecated, use :with_timeout instead"
|
|
||||||
with(timeout: args)
|
|
||||||
end
|
|
||||||
|
|
||||||
def headers(headers)
|
|
||||||
warn ":#{__method__} is deprecated, use :with_headers instead"
|
|
||||||
with(headers: headers)
|
|
||||||
end
|
|
||||||
# :nocov:
|
|
||||||
|
|
||||||
def accept(type)
|
def accept(type)
|
||||||
with(headers: { "accept" => String(type) })
|
with(headers: { "accept" => String(type) })
|
||||||
end
|
end
|
||||||
|
|
||||||
|
# delegates to the default session (see HTTPX::Session#wrap).
|
||||||
def wrap(&blk)
|
def wrap(&blk)
|
||||||
branch(default_options).wrap(&blk)
|
branch(default_options).wrap(&blk)
|
||||||
end
|
end
|
||||||
|
|
||||||
|
# returns a new instance loaded with the +pl+ plugin and +options+.
|
||||||
def plugin(pl, options = nil, &blk)
|
def plugin(pl, options = nil, &blk)
|
||||||
klass = is_a?(Session) ? self.class : Session
|
klass = is_a?(S) ? self.class : Session
|
||||||
klass = Class.new(klass)
|
klass = Class.new(klass)
|
||||||
klass.instance_variable_set(:@default_options, klass.default_options.merge(default_options))
|
klass.instance_variable_set(:@default_options, klass.default_options.merge(default_options))
|
||||||
klass.plugin(pl, options, &blk).new
|
klass.plugin(pl, options, &blk).new
|
||||||
end
|
end
|
||||||
|
|
||||||
# deprecated
|
# returns a new instance loaded with +options+.
|
||||||
# :nocov:
|
|
||||||
def plugins(pls)
|
|
||||||
warn ":#{__method__} is deprecated, use :plugin instead"
|
|
||||||
klass = is_a?(Session) ? self.class : Session
|
|
||||||
klass = Class.new(klass)
|
|
||||||
klass.instance_variable_set(:@default_options, klass.default_options.merge(default_options))
|
|
||||||
klass.plugins(pls).new
|
|
||||||
end
|
|
||||||
# :nocov:
|
|
||||||
|
|
||||||
def with(options, &blk)
|
def with(options, &blk)
|
||||||
branch(default_options.merge(options), &blk)
|
branch(default_options.merge(options), &blk)
|
||||||
end
|
end
|
||||||
|
|
||||||
private
|
private
|
||||||
|
|
||||||
|
# returns default instance of HTTPX::Options.
|
||||||
def default_options
|
def default_options
|
||||||
@options || Session.default_options
|
@options || Session.default_options
|
||||||
end
|
end
|
||||||
|
|
||||||
|
# returns a default instance of HTTPX::Session.
|
||||||
def branch(options, &blk)
|
def branch(options, &blk)
|
||||||
return self.class.new(options, &blk) if is_a?(Session)
|
return self.class.new(options, &blk) if is_a?(S)
|
||||||
|
|
||||||
Session.new(options, &blk)
|
Session.new(options, &blk)
|
||||||
end
|
end
|
||||||
|
|
||||||
def method_missing(meth, *args, **options)
|
def method_missing(meth, *args, **options, &blk)
|
||||||
return super unless meth =~ /\Awith_(.+)/
|
case meth
|
||||||
|
when /\Awith_(.+)/
|
||||||
|
|
||||||
option = Regexp.last_match(1)
|
option = Regexp.last_match(1)
|
||||||
|
|
||||||
return super unless option
|
return super unless option
|
||||||
|
|
||||||
with(option.to_sym => (args.first || options))
|
with(option.to_sym => args.first || options)
|
||||||
|
when /\Aon_(.+)/
|
||||||
|
callback = Regexp.last_match(1)
|
||||||
|
|
||||||
|
return super unless %w[
|
||||||
|
connection_opened connection_closed
|
||||||
|
request_error
|
||||||
|
request_started request_body_chunk request_completed
|
||||||
|
response_started response_body_chunk response_completed
|
||||||
|
].include?(callback)
|
||||||
|
|
||||||
|
warn "DEPRECATION WARNING: calling `.#{meth}` on plain HTTPX sessions is deprecated. " \
|
||||||
|
"Use `HTTPX.plugin(:callbacks).#{meth}` instead."
|
||||||
|
|
||||||
|
plugin(:callbacks).__send__(meth, *args, **options, &blk)
|
||||||
|
else
|
||||||
|
super
|
||||||
|
end
|
||||||
end
|
end
|
||||||
|
|
||||||
def respond_to_missing?(meth, *)
|
def respond_to_missing?(meth, *)
|
||||||
return super unless meth =~ /\Awith_(.+)/
|
case meth
|
||||||
|
when /\Awith_(.+)/
|
||||||
option = Regexp.last_match(1)
|
option = Regexp.last_match(1)
|
||||||
|
|
||||||
default_options.respond_to?(option) || super
|
default_options.respond_to?(option) || super
|
||||||
|
when /\Aon_(.+)/
|
||||||
|
callback = Regexp.last_match(1)
|
||||||
|
|
||||||
|
%w[
|
||||||
|
connection_opened connection_closed
|
||||||
|
request_error
|
||||||
|
request_started request_body_chunk request_completed
|
||||||
|
response_started response_body_chunk response_completed
|
||||||
|
].include?(callback) || super
|
||||||
|
else
|
||||||
|
super
|
||||||
end
|
end
|
||||||
end
|
end
|
||||||
end
|
end
|
||||||
|
|
||||||
|
extend Chainable
|
||||||
|
end
|
||||||
|
@ -33,7 +33,6 @@ module HTTPX
|
|||||||
include Callbacks
|
include Callbacks
|
||||||
|
|
||||||
using URIExtensions
|
using URIExtensions
|
||||||
using NumericExtensions
|
|
||||||
|
|
||||||
require "httpx/connection/http2"
|
require "httpx/connection/http2"
|
||||||
require "httpx/connection/http1"
|
require "httpx/connection/http1"
|
||||||
@ -42,21 +41,33 @@ module HTTPX
|
|||||||
|
|
||||||
def_delegator :@write_buffer, :empty?
|
def_delegator :@write_buffer, :empty?
|
||||||
|
|
||||||
attr_reader :type, :io, :origin, :origins, :state, :pending, :options
|
attr_reader :type, :io, :origin, :origins, :state, :pending, :options, :ssl_session, :sibling
|
||||||
|
|
||||||
attr_writer :timers
|
attr_writer :current_selector
|
||||||
|
|
||||||
attr_accessor :family
|
attr_accessor :current_session, :family
|
||||||
|
|
||||||
def initialize(type, uri, options)
|
protected :sibling
|
||||||
@type = type
|
|
||||||
|
def initialize(uri, options)
|
||||||
|
@current_session = @current_selector =
|
||||||
|
@parser = @sibling = @coalesced_connection =
|
||||||
|
@io = @ssl_session = @timeout =
|
||||||
|
@connected_at = @response_received_at = nil
|
||||||
|
|
||||||
|
@exhausted = @cloned = @main_sibling = false
|
||||||
|
|
||||||
|
@options = Options.new(options)
|
||||||
|
@type = initialize_type(uri, @options)
|
||||||
@origins = [uri.origin]
|
@origins = [uri.origin]
|
||||||
@origin = Utils.to_uri(uri.origin)
|
@origin = Utils.to_uri(uri.origin)
|
||||||
@options = Options.new(options)
|
|
||||||
@window_size = @options.window_size
|
@window_size = @options.window_size
|
||||||
@read_buffer = Buffer.new(@options.buffer_size)
|
@read_buffer = Buffer.new(@options.buffer_size)
|
||||||
@write_buffer = Buffer.new(@options.buffer_size)
|
@write_buffer = Buffer.new(@options.buffer_size)
|
||||||
@pending = []
|
@pending = []
|
||||||
|
@inflight = 0
|
||||||
|
@keep_alive_timeout = @options.timeout[:keep_alive_timeout]
|
||||||
|
|
||||||
on(:error, &method(:on_error))
|
on(:error, &method(:on_error))
|
||||||
if @options.io
|
if @options.io
|
||||||
# if there's an already open IO, get its
|
# if there's an already open IO, get its
|
||||||
@ -67,14 +78,39 @@ module HTTPX
|
|||||||
else
|
else
|
||||||
transition(:idle)
|
transition(:idle)
|
||||||
end
|
end
|
||||||
|
on(:close) do
|
||||||
|
next if @exhausted # it'll reset
|
||||||
|
|
||||||
@inflight = 0
|
# may be called after ":close" above, so after the connection has been checked back in.
|
||||||
@keep_alive_timeout = @options.timeout[:keep_alive_timeout]
|
# next unless @current_session
|
||||||
@total_timeout = @options.timeout[:total_timeout]
|
|
||||||
|
next unless @current_session
|
||||||
|
|
||||||
|
@current_session.deselect_connection(self, @current_selector, @cloned)
|
||||||
|
end
|
||||||
|
on(:terminate) do
|
||||||
|
next if @exhausted # it'll reset
|
||||||
|
|
||||||
|
current_session = @current_session
|
||||||
|
current_selector = @current_selector
|
||||||
|
|
||||||
|
# may be called after ":close" above, so after the connection has been checked back in.
|
||||||
|
next unless current_session && current_selector
|
||||||
|
|
||||||
|
current_session.deselect_connection(self, current_selector)
|
||||||
|
end
|
||||||
|
|
||||||
|
on(:altsvc) do |alt_origin, origin, alt_params|
|
||||||
|
build_altsvc_connection(alt_origin, origin, alt_params)
|
||||||
|
end
|
||||||
|
|
||||||
self.addresses = @options.addresses if @options.addresses
|
self.addresses = @options.addresses if @options.addresses
|
||||||
end
|
end
|
||||||
|
|
||||||
|
def peer
|
||||||
|
@origin
|
||||||
|
end
|
||||||
|
|
||||||
# this is a semi-private method, to be used by the resolver
|
# this is a semi-private method, to be used by the resolver
|
||||||
# to initiate the io object.
|
# to initiate the io object.
|
||||||
def addresses=(addrs)
|
def addresses=(addrs)
|
||||||
@ -90,11 +126,8 @@ module HTTPX
|
|||||||
end
|
end
|
||||||
|
|
||||||
def match?(uri, options)
|
def match?(uri, options)
|
||||||
return false if @state == :closing || @state == :closed
|
return false if !used? && (@state == :closing || @state == :closed)
|
||||||
|
|
||||||
return false if exhausted?
|
|
||||||
|
|
||||||
(
|
|
||||||
(
|
(
|
||||||
@origins.include?(uri.origin) &&
|
@origins.include?(uri.origin) &&
|
||||||
# if there is more than one origin to match, it means that this connection
|
# if there is more than one origin to match, it means that this connection
|
||||||
@ -103,14 +136,17 @@ module HTTPX
|
|||||||
# SSL certificate
|
# SSL certificate
|
||||||
(@origins.size == 1 || @origin == uri.origin || (@io.is_a?(SSL) && @io.verify_hostname(uri.host)))
|
(@origins.size == 1 || @origin == uri.origin || (@io.is_a?(SSL) && @io.verify_hostname(uri.host)))
|
||||||
) && @options == options
|
) && @options == options
|
||||||
) || (match_altsvcs?(uri) && match_altsvc_options?(uri, options))
|
end
|
||||||
|
|
||||||
|
def expired?
|
||||||
|
return false unless @io
|
||||||
|
|
||||||
|
@io.expired?
|
||||||
end
|
end
|
||||||
|
|
||||||
def mergeable?(connection)
|
def mergeable?(connection)
|
||||||
return false if @state == :closing || @state == :closed || !@io
|
return false if @state == :closing || @state == :closed || !@io
|
||||||
|
|
||||||
return false if exhausted?
|
|
||||||
|
|
||||||
return false unless connection.addresses
|
return false unless connection.addresses
|
||||||
|
|
||||||
(
|
(
|
||||||
@ -119,6 +155,14 @@ module HTTPX
|
|||||||
) && @options == connection.options
|
) && @options == connection.options
|
||||||
end
|
end
|
||||||
|
|
||||||
|
# coalesces +self+ into +connection+.
|
||||||
|
def coalesce!(connection)
|
||||||
|
@coalesced_connection = connection
|
||||||
|
|
||||||
|
close_sibling
|
||||||
|
connection.merge(self)
|
||||||
|
end
|
||||||
|
|
||||||
# coalescable connections need to be mergeable!
|
# coalescable connections need to be mergeable!
|
||||||
# but internally, #mergeable? is called before #coalescable?
|
# but internally, #mergeable? is called before #coalescable?
|
||||||
def coalescable?(connection)
|
def coalescable?(connection)
|
||||||
@ -133,11 +177,17 @@ module HTTPX
|
|||||||
end
|
end
|
||||||
|
|
||||||
def create_idle(options = {})
|
def create_idle(options = {})
|
||||||
self.class.new(@type, @origin, @options.merge(options))
|
self.class.new(@origin, @options.merge(options))
|
||||||
end
|
end
|
||||||
|
|
||||||
def merge(connection)
|
def merge(connection)
|
||||||
@origins |= connection.instance_variable_get(:@origins)
|
@origins |= connection.instance_variable_get(:@origins)
|
||||||
|
if connection.ssl_session
|
||||||
|
@ssl_session = connection.ssl_session
|
||||||
|
@io.session_new_cb do |sess|
|
||||||
|
@ssl_session = sess
|
||||||
|
end if @io
|
||||||
|
end
|
||||||
connection.purge_pending do |req|
|
connection.purge_pending do |req|
|
||||||
send(req)
|
send(req)
|
||||||
end
|
end
|
||||||
@ -155,22 +205,10 @@ module HTTPX
|
|||||||
end
|
end
|
||||||
end
|
end
|
||||||
|
|
||||||
# checks if this is connection is an alternative service of
|
def io_connected?
|
||||||
# +uri+
|
return @coalesced_connection.io_connected? if @coalesced_connection
|
||||||
def match_altsvcs?(uri)
|
|
||||||
@origins.any? { |origin| uri.altsvc_match?(origin) } ||
|
|
||||||
AltSvc.cached_altsvc(@origin).any? do |altsvc|
|
|
||||||
origin = altsvc["origin"]
|
|
||||||
origin.altsvc_match?(uri.origin)
|
|
||||||
end
|
|
||||||
end
|
|
||||||
|
|
||||||
def match_altsvc_options?(uri, options)
|
@io && @io.state == :connected
|
||||||
return @options == options unless @options.ssl[:hostname] == uri.host
|
|
||||||
|
|
||||||
dup_options = @options.merge(ssl: { hostname: nil })
|
|
||||||
dup_options.ssl.delete(:hostname)
|
|
||||||
dup_options == options
|
|
||||||
end
|
end
|
||||||
|
|
||||||
def connecting?
|
def connecting?
|
||||||
@ -178,7 +216,12 @@ module HTTPX
|
|||||||
end
|
end
|
||||||
|
|
||||||
def inflight?
|
def inflight?
|
||||||
@parser && !@parser.empty? && !@write_buffer.empty?
|
@parser && (
|
||||||
|
# parser may be dealing with other requests (possibly started from a different fiber)
|
||||||
|
!@parser.empty? ||
|
||||||
|
# connection may be doing connection termination handshake
|
||||||
|
!@write_buffer.empty?
|
||||||
|
)
|
||||||
end
|
end
|
||||||
|
|
||||||
def interests
|
def interests
|
||||||
@ -194,6 +237,9 @@ module HTTPX
|
|||||||
|
|
||||||
return @parser.interests if @parser
|
return @parser.interests if @parser
|
||||||
|
|
||||||
|
nil
|
||||||
|
rescue StandardError => e
|
||||||
|
emit(:error, e)
|
||||||
nil
|
nil
|
||||||
end
|
end
|
||||||
|
|
||||||
@ -203,16 +249,22 @@ module HTTPX
|
|||||||
|
|
||||||
def call
|
def call
|
||||||
case @state
|
case @state
|
||||||
|
when :idle
|
||||||
|
connect
|
||||||
|
consume
|
||||||
when :closed
|
when :closed
|
||||||
return
|
return
|
||||||
when :closing
|
when :closing
|
||||||
consume
|
consume
|
||||||
transition(:closed)
|
transition(:closed)
|
||||||
emit(:close)
|
|
||||||
when :open
|
when :open
|
||||||
consume
|
consume
|
||||||
end
|
end
|
||||||
nil
|
nil
|
||||||
|
rescue StandardError => e
|
||||||
|
@write_buffer.clear
|
||||||
|
emit(:error, e)
|
||||||
|
raise e
|
||||||
end
|
end
|
||||||
|
|
||||||
def close
|
def close
|
||||||
@ -221,24 +273,38 @@ module HTTPX
|
|||||||
@parser.close if @parser
|
@parser.close if @parser
|
||||||
end
|
end
|
||||||
|
|
||||||
|
def terminate
|
||||||
|
case @state
|
||||||
|
when :idle
|
||||||
|
purge_after_closed
|
||||||
|
emit(:terminate)
|
||||||
|
when :closed
|
||||||
|
@connected_at = nil
|
||||||
|
end
|
||||||
|
|
||||||
|
close
|
||||||
|
end
|
||||||
|
|
||||||
# bypasses the state machine to force closing of connections still connecting.
|
# bypasses the state machine to force closing of connections still connecting.
|
||||||
# **only** used for Happy Eyeballs v2.
|
# **only** used for Happy Eyeballs v2.
|
||||||
def force_reset
|
def force_reset(cloned = false)
|
||||||
@state = :closing
|
@state = :closing
|
||||||
|
@cloned = cloned
|
||||||
transition(:closed)
|
transition(:closed)
|
||||||
emit(:close)
|
|
||||||
end
|
end
|
||||||
|
|
||||||
def reset
|
def reset
|
||||||
|
return if @state == :closing || @state == :closed
|
||||||
|
|
||||||
transition(:closing)
|
transition(:closing)
|
||||||
|
|
||||||
transition(:closed)
|
transition(:closed)
|
||||||
emit(:close)
|
|
||||||
end
|
end
|
||||||
|
|
||||||
def send(request)
|
def send(request)
|
||||||
if @parser && !@write_buffer.full?
|
return @coalesced_connection.send(request) if @coalesced_connection
|
||||||
request.headers["alt-used"] = @origin.authority if match_altsvcs?(request.uri)
|
|
||||||
|
|
||||||
|
if @parser && !@write_buffer.full?
|
||||||
if @response_received_at && @keep_alive_timeout &&
|
if @response_received_at && @keep_alive_timeout &&
|
||||||
Utils.elapsed_time(@response_received_at) > @keep_alive_timeout
|
Utils.elapsed_time(@response_received_at) > @keep_alive_timeout
|
||||||
# when pushing a request into an existing connection, we have to check whether there
|
# when pushing a request into an existing connection, we have to check whether there
|
||||||
@ -246,8 +312,9 @@ module HTTPX
|
|||||||
# for such cases, we want to ping for availability before deciding to shovel requests.
|
# for such cases, we want to ping for availability before deciding to shovel requests.
|
||||||
log(level: 3) { "keep alive timeout expired, pinging connection..." }
|
log(level: 3) { "keep alive timeout expired, pinging connection..." }
|
||||||
@pending << request
|
@pending << request
|
||||||
parser.ping
|
|
||||||
transition(:active) if @state == :inactive
|
transition(:active) if @state == :inactive
|
||||||
|
parser.ping
|
||||||
|
request.ping!
|
||||||
return
|
return
|
||||||
end
|
end
|
||||||
|
|
||||||
@ -258,28 +325,26 @@ module HTTPX
|
|||||||
end
|
end
|
||||||
|
|
||||||
def timeout
|
def timeout
|
||||||
if @total_timeout
|
return if @state == :closed || @state == :inactive
|
||||||
return @total_timeout unless @connected_at
|
|
||||||
|
|
||||||
elapsed_time = @total_timeout - Utils.elapsed_time(@connected_at)
|
return @timeout if @timeout
|
||||||
|
|
||||||
if elapsed_time.negative?
|
|
||||||
ex = TotalTimeoutError.new(@total_timeout, "Timed out after #{@total_timeout} seconds")
|
|
||||||
ex.set_backtrace(caller)
|
|
||||||
on_error(ex)
|
|
||||||
return
|
|
||||||
end
|
|
||||||
|
|
||||||
return elapsed_time
|
|
||||||
end
|
|
||||||
|
|
||||||
return @timeout if defined?(@timeout)
|
|
||||||
|
|
||||||
return @options.timeout[:connect_timeout] if @state == :idle
|
return @options.timeout[:connect_timeout] if @state == :idle
|
||||||
|
|
||||||
@options.timeout[:operation_timeout]
|
@options.timeout[:operation_timeout]
|
||||||
end
|
end
|
||||||
|
|
||||||
|
def idling
|
||||||
|
purge_after_closed
|
||||||
|
@write_buffer.clear
|
||||||
|
transition(:idle)
|
||||||
|
@parser = nil if @parser
|
||||||
|
end
|
||||||
|
|
||||||
|
def used?
|
||||||
|
@connected_at
|
||||||
|
end
|
||||||
|
|
||||||
def deactivate
|
def deactivate
|
||||||
transition(:inactive)
|
transition(:inactive)
|
||||||
end
|
end
|
||||||
@ -288,28 +353,65 @@ module HTTPX
|
|||||||
@state == :open || @state == :inactive
|
@state == :open || @state == :inactive
|
||||||
end
|
end
|
||||||
|
|
||||||
def raise_timeout_error(interval)
|
def handle_socket_timeout(interval)
|
||||||
error = HTTPX::TimeoutError.new(interval, "timed out while waiting on select")
|
error = OperationTimeoutError.new(interval, "timed out while waiting on select")
|
||||||
error.set_backtrace(caller)
|
error.set_backtrace(caller)
|
||||||
on_error(error)
|
on_error(error)
|
||||||
end
|
end
|
||||||
|
|
||||||
|
def sibling=(connection)
|
||||||
|
@sibling = connection
|
||||||
|
|
||||||
|
return unless connection
|
||||||
|
|
||||||
|
@main_sibling = connection.sibling.nil?
|
||||||
|
|
||||||
|
return unless @main_sibling
|
||||||
|
|
||||||
|
connection.sibling = self
|
||||||
|
end
|
||||||
|
|
||||||
|
def handle_connect_error(error)
|
||||||
|
return handle_error(error) unless @sibling && @sibling.connecting?
|
||||||
|
|
||||||
|
@sibling.merge(self)
|
||||||
|
|
||||||
|
force_reset(true)
|
||||||
|
end
|
||||||
|
|
||||||
|
def disconnect
|
||||||
|
return unless @current_session && @current_selector
|
||||||
|
|
||||||
|
emit(:close)
|
||||||
|
@current_session = nil
|
||||||
|
@current_selector = nil
|
||||||
|
end
|
||||||
|
|
||||||
|
# :nocov:
|
||||||
|
def inspect
|
||||||
|
"#<#{self.class}:#{object_id} " \
|
||||||
|
"@origin=#{@origin} " \
|
||||||
|
"@state=#{@state} " \
|
||||||
|
"@pending=#{@pending.size} " \
|
||||||
|
"@io=#{@io}>"
|
||||||
|
end
|
||||||
|
# :nocov:
|
||||||
|
|
||||||
private
|
private
|
||||||
|
|
||||||
def connect
|
def connect
|
||||||
transition(:open)
|
transition(:open)
|
||||||
end
|
end
|
||||||
|
|
||||||
def exhausted?
|
|
||||||
@parser && parser.exhausted?
|
|
||||||
end
|
|
||||||
|
|
||||||
def consume
|
def consume
|
||||||
return unless @io
|
return unless @io
|
||||||
|
|
||||||
catch(:called) do
|
catch(:called) do
|
||||||
epiped = false
|
epiped = false
|
||||||
loop do
|
loop do
|
||||||
|
# connection may have
|
||||||
|
return if @state == :idle
|
||||||
|
|
||||||
parser.consume
|
parser.consume
|
||||||
|
|
||||||
# we exit if there's no more requests to process
|
# we exit if there's no more requests to process
|
||||||
@ -339,8 +441,10 @@ module HTTPX
|
|||||||
#
|
#
|
||||||
loop do
|
loop do
|
||||||
siz = @io.read(@window_size, @read_buffer)
|
siz = @io.read(@window_size, @read_buffer)
|
||||||
log(level: 3, color: :cyan) { "IO READ: #{siz} bytes..." }
|
log(level: 3, color: :cyan) { "IO READ: #{siz} bytes... (wsize: #{@window_size}, rbuffer: #{@read_buffer.bytesize})" }
|
||||||
unless siz
|
unless siz
|
||||||
|
@write_buffer.clear
|
||||||
|
|
||||||
ex = EOFError.new("descriptor closed")
|
ex = EOFError.new("descriptor closed")
|
||||||
ex.set_backtrace(caller)
|
ex.set_backtrace(caller)
|
||||||
on_error(ex)
|
on_error(ex)
|
||||||
@ -395,6 +499,8 @@ module HTTPX
|
|||||||
end
|
end
|
||||||
log(level: 3, color: :cyan) { "IO WRITE: #{siz} bytes..." }
|
log(level: 3, color: :cyan) { "IO WRITE: #{siz} bytes..." }
|
||||||
unless siz
|
unless siz
|
||||||
|
@write_buffer.clear
|
||||||
|
|
||||||
ex = EOFError.new("descriptor closed")
|
ex = EOFError.new("descriptor closed")
|
||||||
ex.set_backtrace(caller)
|
ex.set_backtrace(caller)
|
||||||
on_error(ex)
|
on_error(ex)
|
||||||
@ -440,17 +546,22 @@ module HTTPX
|
|||||||
|
|
||||||
def send_request_to_parser(request)
|
def send_request_to_parser(request)
|
||||||
@inflight += 1
|
@inflight += 1
|
||||||
parser.send(request)
|
request.peer_address = @io.ip
|
||||||
|
|
||||||
set_request_timeouts(request)
|
set_request_timeouts(request)
|
||||||
|
|
||||||
|
parser.send(request)
|
||||||
|
|
||||||
return unless @state == :inactive
|
return unless @state == :inactive
|
||||||
|
|
||||||
transition(:active)
|
transition(:active)
|
||||||
|
# mark request as ping, as this inactive connection may have been
|
||||||
|
# closed by the server, and we don't want that to influence retry
|
||||||
|
# bookkeeping.
|
||||||
|
request.ping!
|
||||||
end
|
end
|
||||||
|
|
||||||
def build_parser(protocol = @io.protocol)
|
def build_parser(protocol = @io.protocol)
|
||||||
parser = self.class.parser_type(protocol).new(@write_buffer, @options)
|
parser = parser_type(protocol).new(@write_buffer, @options)
|
||||||
set_parser_callbacks(parser)
|
set_parser_callbacks(parser)
|
||||||
parser
|
parser
|
||||||
end
|
end
|
||||||
@ -462,6 +573,7 @@ module HTTPX
|
|||||||
end
|
end
|
||||||
@response_received_at = Utils.now
|
@response_received_at = Utils.now
|
||||||
@inflight -= 1
|
@inflight -= 1
|
||||||
|
response.finish!
|
||||||
request.emit(:response, response)
|
request.emit(:response, response)
|
||||||
end
|
end
|
||||||
parser.on(:altsvc) do |alt_origin, origin, alt_params|
|
parser.on(:altsvc) do |alt_origin, origin, alt_params|
|
||||||
@ -474,32 +586,49 @@ module HTTPX
|
|||||||
request.emit(:promise, parser, stream)
|
request.emit(:promise, parser, stream)
|
||||||
end
|
end
|
||||||
parser.on(:exhausted) do
|
parser.on(:exhausted) do
|
||||||
emit(:exhausted)
|
@exhausted = true
|
||||||
|
current_session = @current_session
|
||||||
|
current_selector = @current_selector
|
||||||
|
begin
|
||||||
|
parser.close
|
||||||
|
@pending.concat(parser.pending)
|
||||||
|
ensure
|
||||||
|
@current_session = current_session
|
||||||
|
@current_selector = current_selector
|
||||||
|
end
|
||||||
|
|
||||||
|
case @state
|
||||||
|
when :closed
|
||||||
|
idling
|
||||||
|
@exhausted = false
|
||||||
|
when :closing
|
||||||
|
once(:closed) do
|
||||||
|
idling
|
||||||
|
@exhausted = false
|
||||||
|
end
|
||||||
|
end
|
||||||
end
|
end
|
||||||
parser.on(:origin) do |origin|
|
parser.on(:origin) do |origin|
|
||||||
@origins |= [origin]
|
@origins |= [origin]
|
||||||
end
|
end
|
||||||
parser.on(:close) do |force|
|
parser.on(:close) do |force|
|
||||||
transition(:closing)
|
if force
|
||||||
if force || @state == :idle
|
reset
|
||||||
transition(:closed)
|
emit(:terminate)
|
||||||
emit(:close)
|
|
||||||
end
|
end
|
||||||
end
|
end
|
||||||
parser.on(:close_handshake) do
|
parser.on(:close_handshake) do
|
||||||
consume
|
consume
|
||||||
end
|
end
|
||||||
parser.on(:reset) do
|
parser.on(:reset) do
|
||||||
if parser.empty?
|
@pending.concat(parser.pending) unless parser.empty?
|
||||||
|
current_session = @current_session
|
||||||
|
current_selector = @current_selector
|
||||||
reset
|
reset
|
||||||
else
|
unless @pending.empty?
|
||||||
transition(:closing)
|
idling
|
||||||
transition(:closed)
|
@current_session = current_session
|
||||||
emit(:reset)
|
@current_selector = current_selector
|
||||||
|
|
||||||
@parser.reset if @parser
|
|
||||||
transition(:idle)
|
|
||||||
transition(:open)
|
|
||||||
end
|
end
|
||||||
end
|
end
|
||||||
parser.on(:current_timeout) do
|
parser.on(:current_timeout) do
|
||||||
@ -508,17 +637,30 @@ module HTTPX
|
|||||||
parser.on(:timeout) do |tout|
|
parser.on(:timeout) do |tout|
|
||||||
@timeout = tout
|
@timeout = tout
|
||||||
end
|
end
|
||||||
parser.on(:error) do |request, ex|
|
parser.on(:error) do |request, error|
|
||||||
case ex
|
case error
|
||||||
when MisdirectedRequestError
|
when :http_1_1_required
|
||||||
emit(:misdirected, request)
|
current_session = @current_session
|
||||||
else
|
current_selector = @current_selector
|
||||||
response = ErrorResponse.new(request, ex, @options)
|
parser.close
|
||||||
|
|
||||||
|
other_connection = current_session.find_connection(@origin, current_selector,
|
||||||
|
@options.merge(ssl: { alpn_protocols: %w[http/1.1] }))
|
||||||
|
other_connection.merge(self)
|
||||||
|
request.transition(:idle)
|
||||||
|
other_connection.send(request)
|
||||||
|
next
|
||||||
|
when OperationTimeoutError
|
||||||
|
# request level timeouts should take precedence
|
||||||
|
next unless request.active_timeouts.empty?
|
||||||
|
end
|
||||||
|
|
||||||
|
@inflight -= 1
|
||||||
|
response = ErrorResponse.new(request, error)
|
||||||
request.response = response
|
request.response = response
|
||||||
request.emit(:response, response)
|
request.emit(:response, response)
|
||||||
end
|
end
|
||||||
end
|
end
|
||||||
end
|
|
||||||
|
|
||||||
def transition(nextstate)
|
def transition(nextstate)
|
||||||
handle_transition(nextstate)
|
handle_transition(nextstate)
|
||||||
@ -531,19 +673,22 @@ module HTTPX
|
|||||||
Errno::ENETUNREACH,
|
Errno::ENETUNREACH,
|
||||||
Errno::EPIPE,
|
Errno::EPIPE,
|
||||||
Errno::ENOENT,
|
Errno::ENOENT,
|
||||||
SocketError => e
|
SocketError,
|
||||||
|
IOError => e
|
||||||
# connect errors, exit gracefully
|
# connect errors, exit gracefully
|
||||||
error = ConnectionError.new(e.message)
|
error = ConnectionError.new(e.message)
|
||||||
error.set_backtrace(e.backtrace)
|
error.set_backtrace(e.backtrace)
|
||||||
connecting? && callbacks_for?(:connect_error) ? emit(:connect_error, error) : handle_error(error)
|
handle_connect_error(error) if connecting?
|
||||||
@state = :closed
|
@state = :closed
|
||||||
emit(:close)
|
purge_after_closed
|
||||||
rescue TLSError => e
|
disconnect
|
||||||
|
rescue TLSError, ::HTTP2::Error::ProtocolError, ::HTTP2::Error::HandshakeError => e
|
||||||
# connect errors, exit gracefully
|
# connect errors, exit gracefully
|
||||||
handle_error(e)
|
handle_error(e)
|
||||||
connecting? && callbacks_for?(:connect_error) ? emit(:connect_error, e) : handle_error(e)
|
handle_connect_error(e) if connecting?
|
||||||
@state = :closed
|
@state = :closed
|
||||||
emit(:close)
|
purge_after_closed
|
||||||
|
disconnect
|
||||||
end
|
end
|
||||||
|
|
||||||
def handle_transition(nextstate)
|
def handle_transition(nextstate)
|
||||||
@ -551,11 +696,12 @@ module HTTPX
|
|||||||
when :idle
|
when :idle
|
||||||
@timeout = @current_timeout = @options.timeout[:connect_timeout]
|
@timeout = @current_timeout = @options.timeout[:connect_timeout]
|
||||||
|
|
||||||
|
@connected_at = @response_received_at = nil
|
||||||
when :open
|
when :open
|
||||||
return if @state == :closed
|
return if @state == :closed
|
||||||
|
|
||||||
@io.connect
|
@io.connect
|
||||||
emit(:tcp_open, self) if @io.state == :connected
|
close_sibling if @io.state == :connected
|
||||||
|
|
||||||
return unless @io.connected?
|
return unless @io.connected?
|
||||||
|
|
||||||
@ -567,52 +713,136 @@ module HTTPX
|
|||||||
emit(:open)
|
emit(:open)
|
||||||
when :inactive
|
when :inactive
|
||||||
return unless @state == :open
|
return unless @state == :open
|
||||||
when :closing
|
|
||||||
return unless @state == :open
|
|
||||||
|
|
||||||
|
# do not deactivate connection in use
|
||||||
|
return if @inflight.positive?
|
||||||
|
when :closing
|
||||||
|
return unless @state == :idle || @state == :open
|
||||||
|
|
||||||
|
unless @write_buffer.empty?
|
||||||
|
# preset state before handshake, as error callbacks
|
||||||
|
# may take it back here.
|
||||||
|
@state = nextstate
|
||||||
|
# handshakes, try sending
|
||||||
|
consume
|
||||||
|
@write_buffer.clear
|
||||||
|
return
|
||||||
|
end
|
||||||
when :closed
|
when :closed
|
||||||
return unless @state == :closing
|
return unless @state == :closing
|
||||||
return unless @write_buffer.empty?
|
return unless @write_buffer.empty?
|
||||||
|
|
||||||
purge_after_closed
|
purge_after_closed
|
||||||
|
disconnect if @pending.empty?
|
||||||
|
|
||||||
when :already_open
|
when :already_open
|
||||||
nextstate = :open
|
nextstate = :open
|
||||||
|
# the first check for given io readiness must still use a timeout.
|
||||||
|
# connect is the reasonable choice in such a case.
|
||||||
|
@timeout = @options.timeout[:connect_timeout]
|
||||||
send_pending
|
send_pending
|
||||||
when :active
|
when :active
|
||||||
return unless @state == :inactive
|
return unless @state == :inactive
|
||||||
|
|
||||||
nextstate = :open
|
nextstate = :open
|
||||||
emit(:activate)
|
|
||||||
|
# activate
|
||||||
|
@current_session.select_connection(self, @current_selector)
|
||||||
end
|
end
|
||||||
|
log(level: 3) { "#{@state} -> #{nextstate}" }
|
||||||
@state = nextstate
|
@state = nextstate
|
||||||
end
|
end
|
||||||
|
|
||||||
|
def close_sibling
|
||||||
|
return unless @sibling
|
||||||
|
|
||||||
|
if @sibling.io_connected?
|
||||||
|
reset
|
||||||
|
# TODO: transition connection to closed
|
||||||
|
end
|
||||||
|
|
||||||
|
unless @sibling.state == :closed
|
||||||
|
merge(@sibling) unless @main_sibling
|
||||||
|
@sibling.force_reset(true)
|
||||||
|
end
|
||||||
|
|
||||||
|
@sibling = nil
|
||||||
|
end
|
||||||
|
|
||||||
def purge_after_closed
|
def purge_after_closed
|
||||||
@io.close if @io
|
@io.close if @io
|
||||||
@read_buffer.clear
|
@read_buffer.clear
|
||||||
remove_instance_variable(:@timeout) if defined?(@timeout)
|
@timeout = nil
|
||||||
|
end
|
||||||
|
|
||||||
|
def initialize_type(uri, options)
|
||||||
|
options.transport || begin
|
||||||
|
case uri.scheme
|
||||||
|
when "http"
|
||||||
|
"tcp"
|
||||||
|
when "https"
|
||||||
|
"ssl"
|
||||||
|
else
|
||||||
|
raise UnsupportedSchemeError, "#{uri}: #{uri.scheme}: unsupported URI scheme"
|
||||||
|
end
|
||||||
|
end
|
||||||
|
end
|
||||||
|
|
||||||
|
# returns an HTTPX::Connection for the negotiated Alternative Service (or none).
|
||||||
|
def build_altsvc_connection(alt_origin, origin, alt_params)
|
||||||
|
# do not allow security downgrades on altsvc negotiation
|
||||||
|
return if @origin.scheme == "https" && alt_origin.scheme != "https"
|
||||||
|
|
||||||
|
altsvc = AltSvc.cached_altsvc_set(origin, alt_params.merge("origin" => alt_origin))
|
||||||
|
|
||||||
|
# altsvc already exists, somehow it wasn't advertised, probably noop
|
||||||
|
return unless altsvc
|
||||||
|
|
||||||
|
alt_options = @options.merge(ssl: @options.ssl.merge(hostname: URI(origin).host))
|
||||||
|
|
||||||
|
connection = @current_session.find_connection(alt_origin, @current_selector, alt_options)
|
||||||
|
|
||||||
|
# advertised altsvc is the same origin being used, ignore
|
||||||
|
return if connection == self
|
||||||
|
|
||||||
|
connection.extend(AltSvc::ConnectionMixin) unless connection.is_a?(AltSvc::ConnectionMixin)
|
||||||
|
|
||||||
|
log(level: 1) { "#{origin} alt-svc: #{alt_origin}" }
|
||||||
|
|
||||||
|
connection.merge(self)
|
||||||
|
terminate
|
||||||
|
rescue UnsupportedSchemeError
|
||||||
|
altsvc["noop"] = true
|
||||||
|
nil
|
||||||
end
|
end
|
||||||
|
|
||||||
def build_socket(addrs = nil)
|
def build_socket(addrs = nil)
|
||||||
transport_type = case @type
|
case @type
|
||||||
when "tcp" then TCP
|
when "tcp"
|
||||||
when "ssl" then SSL
|
TCP.new(peer, addrs, @options)
|
||||||
when "unix" then UNIX
|
when "ssl"
|
||||||
|
SSL.new(peer, addrs, @options) do |sock|
|
||||||
|
sock.ssl_session = @ssl_session
|
||||||
|
sock.session_new_cb do |sess|
|
||||||
|
@ssl_session = sess
|
||||||
|
|
||||||
|
sock.ssl_session = sess
|
||||||
|
end
|
||||||
|
end
|
||||||
|
when "unix"
|
||||||
|
path = Array(addrs).first
|
||||||
|
|
||||||
|
path = String(path) if path
|
||||||
|
|
||||||
|
UNIX.new(peer, path, @options)
|
||||||
else
|
else
|
||||||
raise Error, "unsupported transport (#{@type})"
|
raise Error, "unsupported transport (#{@type})"
|
||||||
end
|
end
|
||||||
transport_type.new(@origin, addrs, @options)
|
|
||||||
end
|
end
|
||||||
|
|
||||||
def on_error(error)
|
def on_error(error, request = nil)
|
||||||
if error.instance_of?(TimeoutError)
|
if error.is_a?(OperationTimeoutError)
|
||||||
|
|
||||||
if @total_timeout && @connected_at &&
|
|
||||||
Utils.elapsed_time(@connected_at) > @total_timeout
|
|
||||||
ex = TotalTimeoutError.new(@total_timeout, "Timed out after #{@total_timeout} seconds")
|
|
||||||
ex.set_backtrace(error.backtrace)
|
|
||||||
error = ex
|
|
||||||
else
|
|
||||||
# inactive connections do not contribute to the select loop, therefore
|
# inactive connections do not contribute to the select loop, therefore
|
||||||
# they should not fail due to such errors.
|
# they should not fail due to such errors.
|
||||||
return if @state == :inactive
|
return if @state == :inactive
|
||||||
@ -624,35 +854,62 @@ module HTTPX
|
|||||||
|
|
||||||
error = error.to_connection_error if connecting?
|
error = error.to_connection_error if connecting?
|
||||||
end
|
end
|
||||||
end
|
handle_error(error, request)
|
||||||
handle_error(error)
|
|
||||||
reset
|
reset
|
||||||
end
|
end
|
||||||
|
|
||||||
def handle_error(error)
|
def handle_error(error, request = nil)
|
||||||
parser.handle_error(error) if @parser && parser.respond_to?(:handle_error)
|
parser.handle_error(error, request) if @parser && parser.respond_to?(:handle_error)
|
||||||
while (request = @pending.shift)
|
while (req = @pending.shift)
|
||||||
response = ErrorResponse.new(request, error, request.options)
|
next if request && req == request
|
||||||
|
|
||||||
|
response = ErrorResponse.new(req, error)
|
||||||
|
req.response = response
|
||||||
|
req.emit(:response, response)
|
||||||
|
end
|
||||||
|
|
||||||
|
return unless request
|
||||||
|
|
||||||
|
@inflight -= 1
|
||||||
|
response = ErrorResponse.new(request, error)
|
||||||
request.response = response
|
request.response = response
|
||||||
request.emit(:response, response)
|
request.emit(:response, response)
|
||||||
end
|
end
|
||||||
end
|
|
||||||
|
|
||||||
def set_request_timeouts(request)
|
def set_request_timeouts(request)
|
||||||
write_timeout = request.write_timeout
|
set_request_write_timeout(request)
|
||||||
request.once(:headers) do
|
set_request_read_timeout(request)
|
||||||
@timers.after(write_timeout) { write_timeout_callback(request, write_timeout) }
|
set_request_request_timeout(request)
|
||||||
end unless write_timeout.nil? || write_timeout.infinite?
|
end
|
||||||
|
|
||||||
|
def set_request_read_timeout(request)
|
||||||
read_timeout = request.read_timeout
|
read_timeout = request.read_timeout
|
||||||
request.once(:done) do
|
|
||||||
@timers.after(read_timeout) { read_timeout_callback(request, read_timeout) }
|
|
||||||
end unless read_timeout.nil? || read_timeout.infinite?
|
|
||||||
|
|
||||||
|
return if read_timeout.nil? || read_timeout.infinite?
|
||||||
|
|
||||||
|
set_request_timeout(:read_timeout, request, read_timeout, :done, :response) do
|
||||||
|
read_timeout_callback(request, read_timeout)
|
||||||
|
end
|
||||||
|
end
|
||||||
|
|
||||||
|
def set_request_write_timeout(request)
|
||||||
|
write_timeout = request.write_timeout
|
||||||
|
|
||||||
|
return if write_timeout.nil? || write_timeout.infinite?
|
||||||
|
|
||||||
|
set_request_timeout(:write_timeout, request, write_timeout, :headers, %i[done response]) do
|
||||||
|
write_timeout_callback(request, write_timeout)
|
||||||
|
end
|
||||||
|
end
|
||||||
|
|
||||||
|
def set_request_request_timeout(request)
|
||||||
request_timeout = request.request_timeout
|
request_timeout = request.request_timeout
|
||||||
request.once(:headers) do
|
|
||||||
@timers.after(request_timeout) { read_timeout_callback(request, request_timeout, RequestTimeoutError) }
|
return if request_timeout.nil? || request_timeout.infinite?
|
||||||
end unless request_timeout.nil? || request_timeout.infinite?
|
|
||||||
|
set_request_timeout(:request_timeout, request, request_timeout, :headers, :complete) do
|
||||||
|
read_timeout_callback(request, request_timeout, RequestTimeoutError)
|
||||||
|
end
|
||||||
end
|
end
|
||||||
|
|
||||||
def write_timeout_callback(request, write_timeout)
|
def write_timeout_callback(request, write_timeout)
|
||||||
@ -660,7 +917,8 @@ module HTTPX
|
|||||||
|
|
||||||
@write_buffer.clear
|
@write_buffer.clear
|
||||||
error = WriteTimeoutError.new(request, nil, write_timeout)
|
error = WriteTimeoutError.new(request, nil, write_timeout)
|
||||||
on_error(error)
|
|
||||||
|
on_error(error, request)
|
||||||
end
|
end
|
||||||
|
|
||||||
def read_timeout_callback(request, read_timeout, error_type = ReadTimeoutError)
|
def read_timeout_callback(request, read_timeout, error_type = ReadTimeoutError)
|
||||||
@ -670,10 +928,25 @@ module HTTPX
|
|||||||
|
|
||||||
@write_buffer.clear
|
@write_buffer.clear
|
||||||
error = error_type.new(request, request.response, read_timeout)
|
error = error_type.new(request, request.response, read_timeout)
|
||||||
on_error(error)
|
|
||||||
|
on_error(error, request)
|
||||||
|
end
|
||||||
|
|
||||||
|
def set_request_timeout(label, request, timeout, start_event, finish_events, &callback)
|
||||||
|
request.set_timeout_callback(start_event) do
|
||||||
|
timer = @current_selector.after(timeout, callback)
|
||||||
|
request.active_timeouts << label
|
||||||
|
|
||||||
|
Array(finish_events).each do |event|
|
||||||
|
# clean up request timeouts if the connection errors out
|
||||||
|
request.set_timeout_callback(event) do
|
||||||
|
timer.cancel
|
||||||
|
request.active_timeouts.delete(label)
|
||||||
|
end
|
||||||
|
end
|
||||||
|
end
|
||||||
end
|
end
|
||||||
|
|
||||||
class << self
|
|
||||||
def parser_type(protocol)
|
def parser_type(protocol)
|
||||||
case protocol
|
case protocol
|
||||||
when "h2" then HTTP2
|
when "h2" then HTTP2
|
||||||
@ -684,4 +957,3 @@ module HTTPX
|
|||||||
end
|
end
|
||||||
end
|
end
|
||||||
end
|
end
|
||||||
end
|
|
||||||
|
@ -7,15 +7,17 @@ module HTTPX
|
|||||||
include Callbacks
|
include Callbacks
|
||||||
include Loggable
|
include Loggable
|
||||||
|
|
||||||
MAX_REQUESTS = 100
|
MAX_REQUESTS = 200
|
||||||
CRLF = "\r\n"
|
CRLF = "\r\n"
|
||||||
|
|
||||||
attr_reader :pending, :requests
|
attr_reader :pending, :requests
|
||||||
|
|
||||||
|
attr_accessor :max_concurrent_requests
|
||||||
|
|
||||||
def initialize(buffer, options)
|
def initialize(buffer, options)
|
||||||
@options = Options.new(options)
|
@options = options
|
||||||
@max_concurrent_requests = @options.max_concurrent_requests || MAX_REQUESTS
|
@max_concurrent_requests = @options.max_concurrent_requests || MAX_REQUESTS
|
||||||
@max_requests = @options.max_requests || MAX_REQUESTS
|
@max_requests = @options.max_requests
|
||||||
@parser = Parser::HTTP1.new(self)
|
@parser = Parser::HTTP1.new(self)
|
||||||
@buffer = buffer
|
@buffer = buffer
|
||||||
@version = [1, 1]
|
@version = [1, 1]
|
||||||
@ -47,6 +49,7 @@ module HTTPX
|
|||||||
@max_requests = @options.max_requests || MAX_REQUESTS
|
@max_requests = @options.max_requests || MAX_REQUESTS
|
||||||
@parser.reset!
|
@parser.reset!
|
||||||
@handshake_completed = false
|
@handshake_completed = false
|
||||||
|
@pending.concat(@requests) unless @requests.empty?
|
||||||
end
|
end
|
||||||
|
|
||||||
def close
|
def close
|
||||||
@ -90,7 +93,7 @@ module HTTPX
|
|||||||
concurrent_requests_limit = [@max_concurrent_requests, requests_limit].min
|
concurrent_requests_limit = [@max_concurrent_requests, requests_limit].min
|
||||||
@requests.each_with_index do |request, idx|
|
@requests.each_with_index do |request, idx|
|
||||||
break if idx >= concurrent_requests_limit
|
break if idx >= concurrent_requests_limit
|
||||||
next if request.state == :done
|
next unless request.can_buffer?
|
||||||
|
|
||||||
handle(request)
|
handle(request)
|
||||||
end
|
end
|
||||||
@ -116,7 +119,7 @@ module HTTPX
|
|||||||
@parser.http_version.join("."),
|
@parser.http_version.join("."),
|
||||||
headers)
|
headers)
|
||||||
log(color: :yellow) { "-> HEADLINE: #{response.status} HTTP/#{@parser.http_version.join(".")}" }
|
log(color: :yellow) { "-> HEADLINE: #{response.status} HTTP/#{@parser.http_version.join(".")}" }
|
||||||
log(color: :yellow) { response.headers.each.map { |f, v| "-> HEADER: #{f}: #{v}" }.join("\n") }
|
log(color: :yellow) { response.headers.each.map { |f, v| "-> HEADER: #{f}: #{log_redact(v)}" }.join("\n") }
|
||||||
|
|
||||||
@request.response = response
|
@request.response = response
|
||||||
on_complete if response.finished?
|
on_complete if response.finished?
|
||||||
@ -128,38 +131,46 @@ module HTTPX
|
|||||||
response = @request.response
|
response = @request.response
|
||||||
log(level: 2) { "trailer headers received" }
|
log(level: 2) { "trailer headers received" }
|
||||||
|
|
||||||
log(color: :yellow) { h.each.map { |f, v| "-> HEADER: #{f}: #{v.join(", ")}" }.join("\n") }
|
log(color: :yellow) { h.each.map { |f, v| "-> HEADER: #{f}: #{log_redact(v.join(", "))}" }.join("\n") }
|
||||||
response.merge_headers(h)
|
response.merge_headers(h)
|
||||||
end
|
end
|
||||||
|
|
||||||
def on_data(chunk)
|
def on_data(chunk)
|
||||||
return unless @request
|
request = @request
|
||||||
|
|
||||||
|
return unless request
|
||||||
|
|
||||||
log(color: :green) { "-> DATA: #{chunk.bytesize} bytes..." }
|
log(color: :green) { "-> DATA: #{chunk.bytesize} bytes..." }
|
||||||
log(level: 2, color: :green) { "-> #{chunk.inspect}" }
|
log(level: 2, color: :green) { "-> #{log_redact(chunk.inspect)}" }
|
||||||
response = @request.response
|
response = request.response
|
||||||
|
|
||||||
response << chunk
|
response << chunk
|
||||||
|
rescue StandardError => e
|
||||||
|
error_response = ErrorResponse.new(request, e)
|
||||||
|
request.response = error_response
|
||||||
|
dispatch
|
||||||
end
|
end
|
||||||
|
|
||||||
def on_complete
|
def on_complete
|
||||||
return unless @request
|
request = @request
|
||||||
|
|
||||||
|
return unless request
|
||||||
|
|
||||||
log(level: 2) { "parsing complete" }
|
log(level: 2) { "parsing complete" }
|
||||||
dispatch
|
dispatch
|
||||||
end
|
end
|
||||||
|
|
||||||
def dispatch
|
def dispatch
|
||||||
if @request.expects?
|
request = @request
|
||||||
|
|
||||||
|
if request.expects?
|
||||||
@parser.reset!
|
@parser.reset!
|
||||||
return handle(@request)
|
return handle(request)
|
||||||
end
|
end
|
||||||
|
|
||||||
request = @request
|
|
||||||
@request = nil
|
@request = nil
|
||||||
@requests.shift
|
@requests.shift
|
||||||
response = request.response
|
response = request.response
|
||||||
response.finish!
|
|
||||||
emit(:response, request, response)
|
emit(:response, request, response)
|
||||||
|
|
||||||
if @parser.upgrade?
|
if @parser.upgrade?
|
||||||
@ -169,12 +180,23 @@ module HTTPX
|
|||||||
|
|
||||||
@parser.reset!
|
@parser.reset!
|
||||||
@max_requests -= 1
|
@max_requests -= 1
|
||||||
manage_connection(response)
|
if response.is_a?(ErrorResponse)
|
||||||
|
disable
|
||||||
send(@pending.shift) unless @pending.empty?
|
else
|
||||||
|
manage_connection(request, response)
|
||||||
end
|
end
|
||||||
|
|
||||||
def handle_error(ex)
|
if exhausted?
|
||||||
|
@pending.concat(@requests)
|
||||||
|
@requests.clear
|
||||||
|
|
||||||
|
emit(:exhausted)
|
||||||
|
else
|
||||||
|
send(@pending.shift) unless @pending.empty?
|
||||||
|
end
|
||||||
|
end
|
||||||
|
|
||||||
|
def handle_error(ex, request = nil)
|
||||||
if (ex.is_a?(EOFError) || ex.is_a?(TimeoutError)) && @request && @request.response &&
|
if (ex.is_a?(EOFError) || ex.is_a?(TimeoutError)) && @request && @request.response &&
|
||||||
!@request.response.headers.key?("content-length") &&
|
!@request.response.headers.key?("content-length") &&
|
||||||
!@request.response.headers.key?("transfer-encoding")
|
!@request.response.headers.key?("transfer-encoding")
|
||||||
@ -188,23 +210,28 @@ module HTTPX
|
|||||||
if @pipelining
|
if @pipelining
|
||||||
catch(:called) { disable }
|
catch(:called) { disable }
|
||||||
else
|
else
|
||||||
@requests.each do |request|
|
@requests.each do |req|
|
||||||
emit(:error, request, ex)
|
next if request && request == req
|
||||||
|
|
||||||
|
emit(:error, req, ex)
|
||||||
end
|
end
|
||||||
@pending.each do |request|
|
@pending.each do |req|
|
||||||
emit(:error, request, ex)
|
next if request && request == req
|
||||||
|
|
||||||
|
emit(:error, req, ex)
|
||||||
end
|
end
|
||||||
end
|
end
|
||||||
end
|
end
|
||||||
|
|
||||||
def ping
|
def ping
|
||||||
|
reset
|
||||||
emit(:reset)
|
emit(:reset)
|
||||||
emit(:exhausted)
|
emit(:exhausted)
|
||||||
end
|
end
|
||||||
|
|
||||||
private
|
private
|
||||||
|
|
||||||
def manage_connection(response)
|
def manage_connection(request, response)
|
||||||
connection = response.headers["connection"]
|
connection = response.headers["connection"]
|
||||||
case connection
|
case connection
|
||||||
when /keep-alive/i
|
when /keep-alive/i
|
||||||
@ -221,7 +248,7 @@ module HTTPX
|
|||||||
return unless keep_alive
|
return unless keep_alive
|
||||||
|
|
||||||
parameters = Hash[keep_alive.split(/ *, */).map do |pair|
|
parameters = Hash[keep_alive.split(/ *, */).map do |pair|
|
||||||
pair.split(/ *= */)
|
pair.split(/ *= */, 2)
|
||||||
end]
|
end]
|
||||||
@max_requests = parameters["max"].to_i - 1 if parameters.key?("max")
|
@max_requests = parameters["max"].to_i - 1 if parameters.key?("max")
|
||||||
|
|
||||||
@ -234,7 +261,7 @@ module HTTPX
|
|||||||
disable
|
disable
|
||||||
when nil
|
when nil
|
||||||
# In HTTP/1.1, it's keep alive by default
|
# In HTTP/1.1, it's keep alive by default
|
||||||
return if response.version == "1.1"
|
return if response.version == "1.1" && request.headers["connection"] != "close"
|
||||||
|
|
||||||
disable
|
disable
|
||||||
end
|
end
|
||||||
@ -242,6 +269,7 @@ module HTTPX
|
|||||||
|
|
||||||
def disable
|
def disable
|
||||||
disable_pipelining
|
disable_pipelining
|
||||||
|
reset
|
||||||
emit(:reset)
|
emit(:reset)
|
||||||
throw(:called)
|
throw(:called)
|
||||||
end
|
end
|
||||||
@ -272,9 +300,10 @@ module HTTPX
|
|||||||
request.body.chunk!
|
request.body.chunk!
|
||||||
end
|
end
|
||||||
|
|
||||||
connection = request.headers["connection"]
|
extra_headers = {}
|
||||||
|
|
||||||
connection ||= if request.options.persistent
|
unless request.headers.key?("connection")
|
||||||
|
connection_value = if request.persistent?
|
||||||
# when in a persistent connection, the request can't be at
|
# when in a persistent connection, the request can't be at
|
||||||
# the edge of a renegotiation
|
# the edge of a renegotiation
|
||||||
if @requests.index(request) + 1 < @max_requests
|
if @requests.index(request) + 1 < @max_requests
|
||||||
@ -294,7 +323,8 @@ module HTTPX
|
|||||||
end
|
end
|
||||||
end
|
end
|
||||||
|
|
||||||
extra_headers = { "connection" => connection }
|
extra_headers["connection"] = connection_value
|
||||||
|
end
|
||||||
extra_headers["host"] = request.authority unless request.headers.key?("host")
|
extra_headers["host"] = request.authority unless request.headers.key?("host")
|
||||||
extra_headers
|
extra_headers
|
||||||
end
|
end
|
||||||
@ -331,7 +361,7 @@ module HTTPX
|
|||||||
|
|
||||||
while (chunk = request.drain_body)
|
while (chunk = request.drain_body)
|
||||||
log(color: :green) { "<- DATA: #{chunk.bytesize} bytes..." }
|
log(color: :green) { "<- DATA: #{chunk.bytesize} bytes..." }
|
||||||
log(level: 2, color: :green) { "<- #{chunk.inspect}" }
|
log(level: 2, color: :green) { "<- #{log_redact(chunk.inspect)}" }
|
||||||
@buffer << chunk
|
@buffer << chunk
|
||||||
throw(:buffer_full, request) if @buffer.full?
|
throw(:buffer_full, request) if @buffer.full?
|
||||||
end
|
end
|
||||||
@ -350,18 +380,17 @@ module HTTPX
|
|||||||
end
|
end
|
||||||
|
|
||||||
def join_headers2(headers)
|
def join_headers2(headers)
|
||||||
buffer = "".b
|
|
||||||
headers.each do |field, value|
|
headers.each do |field, value|
|
||||||
buffer << "#{capitalized(field)}: #{value}" << CRLF
|
field = capitalized(field)
|
||||||
log(color: :yellow) { "<- HEADER: #{buffer.chomp}" }
|
log(color: :yellow) { "<- HEADER: #{[field, log_redact(value)].join(": ")}" }
|
||||||
@buffer << buffer
|
@buffer << "#{field}: #{value}#{CRLF}"
|
||||||
buffer.clear
|
|
||||||
end
|
end
|
||||||
end
|
end
|
||||||
|
|
||||||
UPCASED = {
|
UPCASED = {
|
||||||
"www-authenticate" => "WWW-Authenticate",
|
"www-authenticate" => "WWW-Authenticate",
|
||||||
"http2-settings" => "HTTP2-Settings",
|
"http2-settings" => "HTTP2-Settings",
|
||||||
|
"content-md5" => "Content-MD5",
|
||||||
}.freeze
|
}.freeze
|
||||||
|
|
||||||
def capitalized(field)
|
def capitalized(field)
|
||||||
|
@ -1,18 +1,24 @@
|
|||||||
# frozen_string_literal: true
|
# frozen_string_literal: true
|
||||||
|
|
||||||
require "securerandom"
|
require "securerandom"
|
||||||
require "http/2/next"
|
require "http/2"
|
||||||
|
|
||||||
module HTTPX
|
module HTTPX
|
||||||
class Connection::HTTP2
|
class Connection::HTTP2
|
||||||
include Callbacks
|
include Callbacks
|
||||||
include Loggable
|
include Loggable
|
||||||
|
|
||||||
MAX_CONCURRENT_REQUESTS = HTTP2Next::DEFAULT_MAX_CONCURRENT_STREAMS
|
MAX_CONCURRENT_REQUESTS = ::HTTP2::DEFAULT_MAX_CONCURRENT_STREAMS
|
||||||
|
|
||||||
class Error < Error
|
class Error < Error
|
||||||
def initialize(id, code)
|
def initialize(id, error)
|
||||||
super("stream #{id} closed with error: #{code}")
|
super("stream #{id} closed with error: #{error}")
|
||||||
|
end
|
||||||
|
end
|
||||||
|
|
||||||
|
class PingError < Error
|
||||||
|
def initialize
|
||||||
|
super(0, :ping_error)
|
||||||
end
|
end
|
||||||
end
|
end
|
||||||
|
|
||||||
@ -25,7 +31,7 @@ module HTTPX
|
|||||||
attr_reader :streams, :pending
|
attr_reader :streams, :pending
|
||||||
|
|
||||||
def initialize(buffer, options)
|
def initialize(buffer, options)
|
||||||
@options = Options.new(options)
|
@options = options
|
||||||
@settings = @options.http2_settings
|
@settings = @options.http2_settings
|
||||||
@pending = []
|
@pending = []
|
||||||
@streams = {}
|
@streams = {}
|
||||||
@ -35,7 +41,7 @@ module HTTPX
|
|||||||
@handshake_completed = false
|
@handshake_completed = false
|
||||||
@wait_for_handshake = @settings.key?(:wait_for_handshake) ? @settings.delete(:wait_for_handshake) : true
|
@wait_for_handshake = @settings.key?(:wait_for_handshake) ? @settings.delete(:wait_for_handshake) : true
|
||||||
@max_concurrent_requests = @options.max_concurrent_requests || MAX_CONCURRENT_REQUESTS
|
@max_concurrent_requests = @options.max_concurrent_requests || MAX_CONCURRENT_REQUESTS
|
||||||
@max_requests = @options.max_requests || 0
|
@max_requests = @options.max_requests
|
||||||
init_connection
|
init_connection
|
||||||
end
|
end
|
||||||
|
|
||||||
@ -52,10 +58,12 @@ module HTTPX
|
|||||||
if @connection.state == :closed
|
if @connection.state == :closed
|
||||||
return unless @handshake_completed
|
return unless @handshake_completed
|
||||||
|
|
||||||
|
return if @buffer.empty?
|
||||||
|
|
||||||
return :w
|
return :w
|
||||||
end
|
end
|
||||||
|
|
||||||
unless (@connection.state == :connected && @handshake_completed)
|
unless @connection.state == :connected && @handshake_completed
|
||||||
return @buffer.empty? ? :r : :rw
|
return @buffer.empty? ? :r : :rw
|
||||||
end
|
end
|
||||||
|
|
||||||
@ -73,8 +81,11 @@ module HTTPX
|
|||||||
end
|
end
|
||||||
|
|
||||||
def close
|
def close
|
||||||
@connection.goaway unless @connection.state == :closed
|
unless @connection.state == :closed
|
||||||
emit(:close)
|
@connection.goaway
|
||||||
|
emit(:timeout, @options.timeout[:close_handshake_timeout])
|
||||||
|
end
|
||||||
|
emit(:close, true)
|
||||||
end
|
end
|
||||||
|
|
||||||
def empty?
|
def empty?
|
||||||
@ -82,29 +93,17 @@ module HTTPX
|
|||||||
end
|
end
|
||||||
|
|
||||||
def exhausted?
|
def exhausted?
|
||||||
return false if @max_requests.zero? && @connection.active_stream_count.zero?
|
!@max_requests.positive?
|
||||||
|
|
||||||
@connection.active_stream_count >= @max_requests
|
|
||||||
end
|
end
|
||||||
|
|
||||||
def <<(data)
|
def <<(data)
|
||||||
@connection << data
|
@connection << data
|
||||||
end
|
end
|
||||||
|
|
||||||
def can_buffer_more_requests?
|
def send(request, head = false)
|
||||||
if @handshake_completed
|
|
||||||
@streams.size < @max_concurrent_requests &&
|
|
||||||
@streams.size < @max_requests
|
|
||||||
else
|
|
||||||
!@wait_for_handshake &&
|
|
||||||
@streams.size < @max_concurrent_requests
|
|
||||||
end
|
|
||||||
end
|
|
||||||
|
|
||||||
def send(request)
|
|
||||||
unless can_buffer_more_requests?
|
unless can_buffer_more_requests?
|
||||||
@pending << request
|
head ? @pending.unshift(request) : @pending << request
|
||||||
return
|
return false
|
||||||
end
|
end
|
||||||
unless (stream = @streams[request])
|
unless (stream = @streams[request])
|
||||||
stream = @connection.new_stream
|
stream = @connection.new_stream
|
||||||
@ -114,47 +113,57 @@ module HTTPX
|
|||||||
end
|
end
|
||||||
handle(request, stream)
|
handle(request, stream)
|
||||||
true
|
true
|
||||||
rescue HTTP2Next::Error::StreamLimitExceeded
|
rescue ::HTTP2::Error::StreamLimitExceeded
|
||||||
@pending.unshift(request)
|
@pending.unshift(request)
|
||||||
emit(:exhausted)
|
false
|
||||||
end
|
end
|
||||||
|
|
||||||
def consume
|
def consume
|
||||||
@streams.each do |request, stream|
|
@streams.each do |request, stream|
|
||||||
next if request.state == :done
|
next unless request.can_buffer?
|
||||||
|
|
||||||
handle(request, stream)
|
handle(request, stream)
|
||||||
end
|
end
|
||||||
end
|
end
|
||||||
|
|
||||||
def handle_error(ex)
|
def handle_error(ex, request = nil)
|
||||||
if ex.instance_of?(TimeoutError) && !@handshake_completed && @connection.state != :closed
|
if ex.is_a?(OperationTimeoutError) && !@handshake_completed && @connection.state != :closed
|
||||||
@connection.goaway(:settings_timeout, "closing due to settings timeout")
|
@connection.goaway(:settings_timeout, "closing due to settings timeout")
|
||||||
emit(:close_handshake)
|
emit(:close_handshake)
|
||||||
settings_ex = SettingsTimeoutError.new(ex.timeout, ex.message)
|
settings_ex = SettingsTimeoutError.new(ex.timeout, ex.message)
|
||||||
settings_ex.set_backtrace(ex.backtrace)
|
settings_ex.set_backtrace(ex.backtrace)
|
||||||
ex = settings_ex
|
ex = settings_ex
|
||||||
end
|
end
|
||||||
@streams.each_key do |request|
|
@streams.each_key do |req|
|
||||||
emit(:error, request, ex)
|
next if request && request == req
|
||||||
|
|
||||||
|
emit(:error, req, ex)
|
||||||
end
|
end
|
||||||
@pending.each do |request|
|
while (req = @pending.shift)
|
||||||
emit(:error, request, ex)
|
next if request && request == req
|
||||||
|
|
||||||
|
emit(:error, req, ex)
|
||||||
end
|
end
|
||||||
end
|
end
|
||||||
|
|
||||||
def ping
|
def ping
|
||||||
ping = SecureRandom.gen_random(8)
|
ping = SecureRandom.gen_random(8)
|
||||||
@connection.ping(ping)
|
@connection.ping(ping.dup)
|
||||||
ensure
|
ensure
|
||||||
@pings << ping
|
@pings << ping
|
||||||
end
|
end
|
||||||
|
|
||||||
private
|
private
|
||||||
|
|
||||||
|
def can_buffer_more_requests?
|
||||||
|
(@handshake_completed || !@wait_for_handshake) &&
|
||||||
|
@streams.size < @max_concurrent_requests &&
|
||||||
|
@streams.size < @max_requests
|
||||||
|
end
|
||||||
|
|
||||||
def send_pending
|
def send_pending
|
||||||
while (request = @pending.shift)
|
while (request = @pending.shift)
|
||||||
break unless send(request)
|
break unless send(request, true)
|
||||||
end
|
end
|
||||||
end
|
end
|
||||||
|
|
||||||
@ -171,8 +180,7 @@ module HTTPX
|
|||||||
end
|
end
|
||||||
|
|
||||||
def init_connection
|
def init_connection
|
||||||
@connection = HTTP2Next::Client.new(@settings)
|
@connection = ::HTTP2::Client.new(@settings)
|
||||||
@connection.max_streams = @max_requests if @connection.respond_to?(:max_streams=) && @max_requests.positive?
|
|
||||||
@connection.on(:frame, &method(:on_frame))
|
@connection.on(:frame, &method(:on_frame))
|
||||||
@connection.on(:frame_sent, &method(:on_frame_sent))
|
@connection.on(:frame_sent, &method(:on_frame_sent))
|
||||||
@connection.on(:frame_received, &method(:on_frame_received))
|
@connection.on(:frame_received, &method(:on_frame_received))
|
||||||
@ -218,12 +226,12 @@ module HTTPX
|
|||||||
extra_headers = set_protocol_headers(request)
|
extra_headers = set_protocol_headers(request)
|
||||||
|
|
||||||
if request.headers.key?("host")
|
if request.headers.key?("host")
|
||||||
log { "forbidden \"host\" header found (#{request.headers["host"]}), will use it as authority..." }
|
log { "forbidden \"host\" header found (#{log_redact(request.headers["host"])}), will use it as authority..." }
|
||||||
extra_headers[":authority"] = request.headers["host"]
|
extra_headers[":authority"] = request.headers["host"]
|
||||||
end
|
end
|
||||||
|
|
||||||
log(level: 1, color: :yellow) do
|
log(level: 1, color: :yellow) do
|
||||||
request.headers.merge(extra_headers).each.map { |k, v| "#{stream.id}: -> HEADER: #{k}: #{v}" }.join("\n")
|
"\n#{request.headers.merge(extra_headers).each.map { |k, v| "#{stream.id}: -> HEADER: #{k}: #{log_redact(v)}" }.join("\n")}"
|
||||||
end
|
end
|
||||||
stream.headers(request.headers.each(extra_headers), end_stream: request.body.empty?)
|
stream.headers(request.headers.each(extra_headers), end_stream: request.body.empty?)
|
||||||
end
|
end
|
||||||
@ -235,7 +243,7 @@ module HTTPX
|
|||||||
end
|
end
|
||||||
|
|
||||||
log(level: 1, color: :yellow) do
|
log(level: 1, color: :yellow) do
|
||||||
request.trailers.each.map { |k, v| "#{stream.id}: -> HEADER: #{k}: #{v}" }.join("\n")
|
request.trailers.each.map { |k, v| "#{stream.id}: -> HEADER: #{k}: #{log_redact(v)}" }.join("\n")
|
||||||
end
|
end
|
||||||
stream.headers(request.trailers.each, end_stream: true)
|
stream.headers(request.trailers.each, end_stream: true)
|
||||||
end
|
end
|
||||||
@ -246,13 +254,13 @@ module HTTPX
|
|||||||
chunk = @drains.delete(request) || request.drain_body
|
chunk = @drains.delete(request) || request.drain_body
|
||||||
while chunk
|
while chunk
|
||||||
next_chunk = request.drain_body
|
next_chunk = request.drain_body
|
||||||
log(level: 1, color: :green) { "#{stream.id}: -> DATA: #{chunk.bytesize} bytes..." }
|
send_chunk(request, stream, chunk, next_chunk)
|
||||||
log(level: 2, color: :green) { "#{stream.id}: -> #{chunk.inspect}" }
|
|
||||||
stream.data(chunk, end_stream: !(next_chunk || request.trailers? || request.callbacks_for?(:trailers)))
|
|
||||||
if next_chunk && (@buffer.full? || request.body.unbounded_body?)
|
if next_chunk && (@buffer.full? || request.body.unbounded_body?)
|
||||||
@drains[request] = next_chunk
|
@drains[request] = next_chunk
|
||||||
throw(:buffer_full)
|
throw(:buffer_full)
|
||||||
end
|
end
|
||||||
|
|
||||||
chunk = next_chunk
|
chunk = next_chunk
|
||||||
end
|
end
|
||||||
|
|
||||||
@ -261,6 +269,16 @@ module HTTPX
|
|||||||
on_stream_refuse(stream, request, error)
|
on_stream_refuse(stream, request, error)
|
||||||
end
|
end
|
||||||
|
|
||||||
|
def send_chunk(request, stream, chunk, next_chunk)
|
||||||
|
log(level: 1, color: :green) { "#{stream.id}: -> DATA: #{chunk.bytesize} bytes..." }
|
||||||
|
log(level: 2, color: :green) { "#{stream.id}: -> #{log_redact(chunk.inspect)}" }
|
||||||
|
stream.data(chunk, end_stream: end_stream?(request, next_chunk))
|
||||||
|
end
|
||||||
|
|
||||||
|
def end_stream?(request, next_chunk)
|
||||||
|
!(next_chunk || request.trailers? || request.callbacks_for?(:trailers))
|
||||||
|
end
|
||||||
|
|
||||||
######
|
######
|
||||||
# HTTP/2 Callbacks
|
# HTTP/2 Callbacks
|
||||||
######
|
######
|
||||||
@ -274,7 +292,7 @@ module HTTPX
|
|||||||
end
|
end
|
||||||
|
|
||||||
log(color: :yellow) do
|
log(color: :yellow) do
|
||||||
h.map { |k, v| "#{stream.id}: <- HEADER: #{k}: #{v}" }.join("\n")
|
h.map { |k, v| "#{stream.id}: <- HEADER: #{k}: #{log_redact(v)}" }.join("\n")
|
||||||
end
|
end
|
||||||
_, status = h.shift
|
_, status = h.shift
|
||||||
headers = request.options.headers_class.new(h)
|
headers = request.options.headers_class.new(h)
|
||||||
@ -287,14 +305,14 @@ module HTTPX
|
|||||||
|
|
||||||
def on_stream_trailers(stream, response, h)
|
def on_stream_trailers(stream, response, h)
|
||||||
log(color: :yellow) do
|
log(color: :yellow) do
|
||||||
h.map { |k, v| "#{stream.id}: <- HEADER: #{k}: #{v}" }.join("\n")
|
h.map { |k, v| "#{stream.id}: <- HEADER: #{k}: #{log_redact(v)}" }.join("\n")
|
||||||
end
|
end
|
||||||
response.merge_headers(h)
|
response.merge_headers(h)
|
||||||
end
|
end
|
||||||
|
|
||||||
def on_stream_data(stream, request, data)
|
def on_stream_data(stream, request, data)
|
||||||
log(level: 1, color: :green) { "#{stream.id}: <- DATA: #{data.bytesize} bytes..." }
|
log(level: 1, color: :green) { "#{stream.id}: <- DATA: #{data.bytesize} bytes..." }
|
||||||
log(level: 2, color: :green) { "#{stream.id}: <- #{data.inspect}" }
|
log(level: 2, color: :green) { "#{stream.id}: <- #{log_redact(data.inspect)}" }
|
||||||
request.response << data
|
request.response << data
|
||||||
end
|
end
|
||||||
|
|
||||||
@ -311,25 +329,33 @@ module HTTPX
|
|||||||
@streams.delete(request)
|
@streams.delete(request)
|
||||||
|
|
||||||
if error
|
if error
|
||||||
|
case error
|
||||||
|
when :http_1_1_required
|
||||||
|
emit(:error, request, error)
|
||||||
|
else
|
||||||
ex = Error.new(stream.id, error)
|
ex = Error.new(stream.id, error)
|
||||||
ex.set_backtrace(caller)
|
ex.set_backtrace(caller)
|
||||||
response = ErrorResponse.new(request, ex, request.options)
|
response = ErrorResponse.new(request, ex)
|
||||||
|
request.response = response
|
||||||
emit(:response, request, response)
|
emit(:response, request, response)
|
||||||
|
end
|
||||||
else
|
else
|
||||||
response = request.response
|
response = request.response
|
||||||
if response && response.is_a?(Response) && response.status == 421
|
if response && response.is_a?(Response) && response.status == 421
|
||||||
ex = MisdirectedRequestError.new(response)
|
emit(:error, request, :http_1_1_required)
|
||||||
ex.set_backtrace(caller)
|
|
||||||
emit(:error, request, ex)
|
|
||||||
else
|
else
|
||||||
emit(:response, request, response)
|
emit(:response, request, response)
|
||||||
end
|
end
|
||||||
end
|
end
|
||||||
send(@pending.shift) unless @pending.empty?
|
send(@pending.shift) unless @pending.empty?
|
||||||
|
|
||||||
return unless @streams.empty? && exhausted?
|
return unless @streams.empty? && exhausted?
|
||||||
|
|
||||||
|
if @pending.empty?
|
||||||
close
|
close
|
||||||
emit(:exhausted) unless @pending.empty?
|
else
|
||||||
|
emit(:exhausted)
|
||||||
|
end
|
||||||
end
|
end
|
||||||
|
|
||||||
def on_frame(bytes)
|
def on_frame(bytes)
|
||||||
@ -339,14 +365,7 @@ module HTTPX
|
|||||||
def on_settings(*)
|
def on_settings(*)
|
||||||
@handshake_completed = true
|
@handshake_completed = true
|
||||||
emit(:current_timeout)
|
emit(:current_timeout)
|
||||||
|
@max_concurrent_requests = [@max_concurrent_requests, @connection.remote_settings[:settings_max_concurrent_streams]].min
|
||||||
if @max_requests.zero?
|
|
||||||
@max_requests = @connection.remote_settings[:settings_max_concurrent_streams]
|
|
||||||
|
|
||||||
@connection.max_streams = @max_requests if @connection.respond_to?(:max_streams=) && @max_requests.positive?
|
|
||||||
end
|
|
||||||
|
|
||||||
@max_concurrent_requests = [@max_concurrent_requests, @max_requests].min
|
|
||||||
send_pending
|
send_pending
|
||||||
end
|
end
|
||||||
|
|
||||||
@ -354,7 +373,12 @@ module HTTPX
|
|||||||
is_connection_closed = @connection.state == :closed
|
is_connection_closed = @connection.state == :closed
|
||||||
if error
|
if error
|
||||||
@buffer.clear if is_connection_closed
|
@buffer.clear if is_connection_closed
|
||||||
if error == :no_error
|
case error
|
||||||
|
when :http_1_1_required
|
||||||
|
while (request = @pending.shift)
|
||||||
|
emit(:error, request, error)
|
||||||
|
end
|
||||||
|
when :no_error
|
||||||
ex = GoawayError.new
|
ex = GoawayError.new
|
||||||
@pending.unshift(*@streams.keys)
|
@pending.unshift(*@streams.keys)
|
||||||
@drains.clear
|
@drains.clear
|
||||||
@ -362,9 +386,12 @@ module HTTPX
|
|||||||
else
|
else
|
||||||
ex = Error.new(0, error)
|
ex = Error.new(0, error)
|
||||||
end
|
end
|
||||||
|
|
||||||
|
if ex
|
||||||
ex.set_backtrace(caller)
|
ex.set_backtrace(caller)
|
||||||
handle_error(ex)
|
handle_error(ex)
|
||||||
end
|
end
|
||||||
|
end
|
||||||
return unless is_connection_closed && @streams.empty?
|
return unless is_connection_closed && @streams.empty?
|
||||||
|
|
||||||
emit(:close, is_connection_closed)
|
emit(:close, is_connection_closed)
|
||||||
@ -373,8 +400,15 @@ module HTTPX
|
|||||||
def on_frame_sent(frame)
|
def on_frame_sent(frame)
|
||||||
log(level: 2) { "#{frame[:stream]}: frame was sent!" }
|
log(level: 2) { "#{frame[:stream]}: frame was sent!" }
|
||||||
log(level: 2, color: :blue) do
|
log(level: 2, color: :blue) do
|
||||||
payload = frame
|
payload =
|
||||||
payload = payload.merge(payload: frame[:payload].bytesize) if frame[:type] == :data
|
case frame[:type]
|
||||||
|
when :data
|
||||||
|
frame.merge(payload: frame[:payload].bytesize)
|
||||||
|
when :headers, :ping
|
||||||
|
frame.merge(payload: log_redact(frame[:payload]))
|
||||||
|
else
|
||||||
|
frame
|
||||||
|
end
|
||||||
"#{frame[:stream]}: #{payload}"
|
"#{frame[:stream]}: #{payload}"
|
||||||
end
|
end
|
||||||
end
|
end
|
||||||
@ -382,15 +416,22 @@ module HTTPX
|
|||||||
def on_frame_received(frame)
|
def on_frame_received(frame)
|
||||||
log(level: 2) { "#{frame[:stream]}: frame was received!" }
|
log(level: 2) { "#{frame[:stream]}: frame was received!" }
|
||||||
log(level: 2, color: :magenta) do
|
log(level: 2, color: :magenta) do
|
||||||
payload = frame
|
payload =
|
||||||
payload = payload.merge(payload: frame[:payload].bytesize) if frame[:type] == :data
|
case frame[:type]
|
||||||
|
when :data
|
||||||
|
frame.merge(payload: frame[:payload].bytesize)
|
||||||
|
when :headers, :ping
|
||||||
|
frame.merge(payload: log_redact(frame[:payload]))
|
||||||
|
else
|
||||||
|
frame
|
||||||
|
end
|
||||||
"#{frame[:stream]}: #{payload}"
|
"#{frame[:stream]}: #{payload}"
|
||||||
end
|
end
|
||||||
end
|
end
|
||||||
|
|
||||||
def on_altsvc(origin, frame)
|
def on_altsvc(origin, frame)
|
||||||
log(level: 2) { "#{frame[:stream]}: altsvc frame was received" }
|
log(level: 2) { "#{frame[:stream]}: altsvc frame was received" }
|
||||||
log(level: 2) { "#{frame[:stream]}: #{frame.inspect}" }
|
log(level: 2) { "#{frame[:stream]}: #{log_redact(frame.inspect)}" }
|
||||||
alt_origin = URI.parse("#{frame[:proto]}://#{frame[:host]}:#{frame[:port]}")
|
alt_origin = URI.parse("#{frame[:proto]}://#{frame[:host]}:#{frame[:port]}")
|
||||||
params = { "ma" => frame[:max_age] }
|
params = { "ma" => frame[:max_age] }
|
||||||
emit(:altsvc, origin, alt_origin, origin, params)
|
emit(:altsvc, origin, alt_origin, origin, params)
|
||||||
@ -405,11 +446,9 @@ module HTTPX
|
|||||||
end
|
end
|
||||||
|
|
||||||
def on_pong(ping)
|
def on_pong(ping)
|
||||||
if @pings.delete(ping.to_s)
|
raise PingError unless @pings.delete(ping.to_s)
|
||||||
|
|
||||||
emit(:pong)
|
emit(:pong)
|
||||||
else
|
|
||||||
close(:protocol_error, "ping payload did not match")
|
|
||||||
end
|
|
||||||
end
|
end
|
||||||
end
|
end
|
||||||
end
|
end
|
||||||
|
@ -51,8 +51,6 @@ module HTTPX
|
|||||||
# non-canonical domain.
|
# non-canonical domain.
|
||||||
attr_reader :domain
|
attr_reader :domain
|
||||||
|
|
||||||
DOT = "." # :nodoc:
|
|
||||||
|
|
||||||
class << self
|
class << self
|
||||||
def new(domain)
|
def new(domain)
|
||||||
return domain if domain.is_a?(self)
|
return domain if domain.is_a?(self)
|
||||||
@ -63,8 +61,12 @@ module HTTPX
|
|||||||
# Normalizes a _domain_ using the Punycode algorithm as necessary.
|
# Normalizes a _domain_ using the Punycode algorithm as necessary.
|
||||||
# The result will be a downcased, ASCII-only string.
|
# The result will be a downcased, ASCII-only string.
|
||||||
def normalize(domain)
|
def normalize(domain)
|
||||||
domain = domain.ascii_only? ? domain : domain.chomp(DOT).unicode_normalize(:nfc)
|
unless domain.ascii_only?
|
||||||
Punycode.encode_hostname(domain).downcase
|
domain = domain.chomp(".").unicode_normalize(:nfc)
|
||||||
|
domain = Punycode.encode_hostname(domain)
|
||||||
|
end
|
||||||
|
|
||||||
|
domain.downcase
|
||||||
end
|
end
|
||||||
end
|
end
|
||||||
|
|
||||||
@ -73,7 +75,7 @@ module HTTPX
|
|||||||
def initialize(hostname)
|
def initialize(hostname)
|
||||||
hostname = String(hostname)
|
hostname = String(hostname)
|
||||||
|
|
||||||
raise ArgumentError, "domain name must not start with a dot: #{hostname}" if hostname.start_with?(DOT)
|
raise ArgumentError, "domain name must not start with a dot: #{hostname}" if hostname.start_with?(".")
|
||||||
|
|
||||||
begin
|
begin
|
||||||
@ipaddr = IPAddr.new(hostname)
|
@ipaddr = IPAddr.new(hostname)
|
||||||
@ -84,7 +86,7 @@ module HTTPX
|
|||||||
end
|
end
|
||||||
|
|
||||||
@hostname = DomainName.normalize(hostname)
|
@hostname = DomainName.normalize(hostname)
|
||||||
tld = if (last_dot = @hostname.rindex(DOT))
|
tld = if (last_dot = @hostname.rindex("."))
|
||||||
@hostname[(last_dot + 1)..-1]
|
@hostname[(last_dot + 1)..-1]
|
||||||
else
|
else
|
||||||
@hostname
|
@hostname
|
||||||
@ -94,7 +96,7 @@ module HTTPX
|
|||||||
@domain = if last_dot
|
@domain = if last_dot
|
||||||
# fallback - accept cookies down to second level
|
# fallback - accept cookies down to second level
|
||||||
# cf. http://www.dkim-reputation.org/regdom-libs/
|
# cf. http://www.dkim-reputation.org/regdom-libs/
|
||||||
if (penultimate_dot = @hostname.rindex(DOT, last_dot - 1))
|
if (penultimate_dot = @hostname.rindex(".", last_dot - 1))
|
||||||
@hostname[(penultimate_dot + 1)..-1]
|
@hostname[(penultimate_dot + 1)..-1]
|
||||||
else
|
else
|
||||||
@hostname
|
@hostname
|
||||||
@ -126,17 +128,12 @@ module HTTPX
|
|||||||
@domain && self <= domain && domain <= @domain
|
@domain && self <= domain && domain <= @domain
|
||||||
end
|
end
|
||||||
|
|
||||||
# def ==(other)
|
|
||||||
# other = DomainName.new(other)
|
|
||||||
# other.hostname == @hostname
|
|
||||||
# end
|
|
||||||
|
|
||||||
def <=>(other)
|
def <=>(other)
|
||||||
other = DomainName.new(other)
|
other = DomainName.new(other)
|
||||||
othername = other.hostname
|
othername = other.hostname
|
||||||
if othername == @hostname
|
if othername == @hostname
|
||||||
0
|
0
|
||||||
elsif @hostname.end_with?(othername) && @hostname[-othername.size - 1, 1] == DOT
|
elsif @hostname.end_with?(othername) && @hostname[-othername.size - 1, 1] == "."
|
||||||
# The other is higher
|
# The other is higher
|
||||||
-1
|
-1
|
||||||
else
|
else
|
||||||
|
@ -1,20 +1,27 @@
|
|||||||
# frozen_string_literal: true
|
# frozen_string_literal: true
|
||||||
|
|
||||||
module HTTPX
|
module HTTPX
|
||||||
|
# the default exception class for exceptions raised by HTTPX.
|
||||||
class Error < StandardError; end
|
class Error < StandardError; end
|
||||||
|
|
||||||
class UnsupportedSchemeError < Error; end
|
class UnsupportedSchemeError < Error; end
|
||||||
|
|
||||||
class ConnectionError < Error; end
|
class ConnectionError < Error; end
|
||||||
|
|
||||||
|
# Error raised when there was a timeout. Its subclasses allow for finer-grained
|
||||||
|
# control of which timeout happened.
|
||||||
class TimeoutError < Error
|
class TimeoutError < Error
|
||||||
|
# The timeout value which caused this error to be raised.
|
||||||
attr_reader :timeout
|
attr_reader :timeout
|
||||||
|
|
||||||
|
# initializes the timeout exception with the +timeout+ causing the error, and the
|
||||||
|
# error +message+ for it.
|
||||||
def initialize(timeout, message)
|
def initialize(timeout, message)
|
||||||
@timeout = timeout
|
@timeout = timeout
|
||||||
super(message)
|
super(message)
|
||||||
end
|
end
|
||||||
|
|
||||||
|
# clones this error into a HTTPX::ConnectionTimeoutError.
|
||||||
def to_connection_error
|
def to_connection_error
|
||||||
ex = ConnectTimeoutError.new(@timeout, message)
|
ex = ConnectTimeoutError.new(@timeout, message)
|
||||||
ex.set_backtrace(backtrace)
|
ex.set_backtrace(backtrace)
|
||||||
@ -22,13 +29,22 @@ module HTTPX
|
|||||||
end
|
end
|
||||||
end
|
end
|
||||||
|
|
||||||
class TotalTimeoutError < TimeoutError; end
|
# Raise when it can't acquire a connection from the pool.
|
||||||
|
class PoolTimeoutError < TimeoutError; end
|
||||||
|
|
||||||
|
# Error raised when there was a timeout establishing the connection to a server.
|
||||||
|
# This may be raised due to timeouts during TCP and TLS (when applicable) connection
|
||||||
|
# establishment.
|
||||||
class ConnectTimeoutError < TimeoutError; end
|
class ConnectTimeoutError < TimeoutError; end
|
||||||
|
|
||||||
|
# Error raised when there was a timeout while sending a request, or receiving a response
|
||||||
|
# from the server.
|
||||||
class RequestTimeoutError < TimeoutError
|
class RequestTimeoutError < TimeoutError
|
||||||
|
# The HTTPX::Request request object this exception refers to.
|
||||||
attr_reader :request
|
attr_reader :request
|
||||||
|
|
||||||
|
# initializes the exception with the +request+ and +response+ it refers to, and the
|
||||||
|
# +timeout+ causing the error, and the
|
||||||
def initialize(request, response, timeout)
|
def initialize(request, response, timeout)
|
||||||
@request = request
|
@request = request
|
||||||
@response = response
|
@response = response
|
||||||
@ -40,19 +56,31 @@ module HTTPX
|
|||||||
end
|
end
|
||||||
end
|
end
|
||||||
|
|
||||||
|
# Error raised when there was a timeout while receiving a response from the server.
|
||||||
class ReadTimeoutError < RequestTimeoutError; end
|
class ReadTimeoutError < RequestTimeoutError; end
|
||||||
|
|
||||||
|
# Error raised when there was a timeout while sending a request from the server.
|
||||||
class WriteTimeoutError < RequestTimeoutError; end
|
class WriteTimeoutError < RequestTimeoutError; end
|
||||||
|
|
||||||
|
# Error raised when there was a timeout while waiting for the HTTP/2 settings frame from the server.
|
||||||
class SettingsTimeoutError < TimeoutError; end
|
class SettingsTimeoutError < TimeoutError; end
|
||||||
|
|
||||||
|
# Error raised when there was a timeout while resolving a domain to an IP.
|
||||||
class ResolveTimeoutError < TimeoutError; end
|
class ResolveTimeoutError < TimeoutError; end
|
||||||
|
|
||||||
|
# Error raise when there was a timeout waiting for readiness of the socket the request is related to.
|
||||||
|
class OperationTimeoutError < TimeoutError; end
|
||||||
|
|
||||||
|
# Error raised when there was an error while resolving a domain to an IP.
|
||||||
class ResolveError < Error; end
|
class ResolveError < Error; end
|
||||||
|
|
||||||
|
# Error raised when there was an error while resolving a domain to an IP
|
||||||
|
# using a HTTPX::Resolver::Native resolver.
|
||||||
class NativeResolveError < ResolveError
|
class NativeResolveError < ResolveError
|
||||||
attr_reader :connection, :host
|
attr_reader :connection, :host
|
||||||
|
|
||||||
|
# initializes the exception with the +connection+ it refers to, the +host+ domain
|
||||||
|
# which failed to resolve, and the error +message+.
|
||||||
def initialize(connection, host, message = "Can't resolve #{host}")
|
def initialize(connection, host, message = "Can't resolve #{host}")
|
||||||
@connection = connection
|
@connection = connection
|
||||||
@host = host
|
@host = host
|
||||||
@ -60,18 +88,22 @@ module HTTPX
|
|||||||
end
|
end
|
||||||
end
|
end
|
||||||
|
|
||||||
|
# The exception class for HTTP responses with 4xx or 5xx status.
|
||||||
class HTTPError < Error
|
class HTTPError < Error
|
||||||
|
# The HTTPX::Response response object this exception refers to.
|
||||||
attr_reader :response
|
attr_reader :response
|
||||||
|
|
||||||
|
# Creates the instance and assigns the HTTPX::Response +response+.
|
||||||
def initialize(response)
|
def initialize(response)
|
||||||
@response = response
|
@response = response
|
||||||
super("HTTP Error: #{@response.status} #{@response.headers}\n#{@response.body}")
|
super("HTTP Error: #{@response.status} #{@response.headers}\n#{@response.body}")
|
||||||
end
|
end
|
||||||
|
|
||||||
|
# The HTTP response status.
|
||||||
|
#
|
||||||
|
# error.status #=> 404
|
||||||
def status
|
def status
|
||||||
@response.status
|
@response.status
|
||||||
end
|
end
|
||||||
end
|
end
|
||||||
|
|
||||||
class MisdirectedRequestError < HTTPError; end
|
|
||||||
end
|
end
|
||||||
|
@ -3,96 +3,6 @@
|
|||||||
require "uri"
|
require "uri"
|
||||||
|
|
||||||
module HTTPX
|
module HTTPX
|
||||||
unless Method.method_defined?(:curry)
|
|
||||||
|
|
||||||
# Backport
|
|
||||||
#
|
|
||||||
# Ruby 2.1 and lower implement curry only for Procs.
|
|
||||||
#
|
|
||||||
# Why not using Refinements? Because they don't work for Method (tested with ruby 2.1.9).
|
|
||||||
#
|
|
||||||
module CurryMethods
|
|
||||||
# Backport for the Method#curry method, which is part of ruby core since 2.2 .
|
|
||||||
#
|
|
||||||
def curry(*args)
|
|
||||||
to_proc.curry(*args)
|
|
||||||
end
|
|
||||||
end
|
|
||||||
Method.__send__(:include, CurryMethods)
|
|
||||||
end
|
|
||||||
|
|
||||||
unless String.method_defined?(:+@)
|
|
||||||
# Backport for +"", to initialize unfrozen strings from the string literal.
|
|
||||||
#
|
|
||||||
module LiteralStringExtensions
|
|
||||||
def +@
|
|
||||||
frozen? ? dup : self
|
|
||||||
end
|
|
||||||
end
|
|
||||||
String.__send__(:include, LiteralStringExtensions)
|
|
||||||
end
|
|
||||||
|
|
||||||
unless Numeric.method_defined?(:positive?)
|
|
||||||
# Ruby 2.3 Backport (Numeric#positive?)
|
|
||||||
#
|
|
||||||
module PosMethods
|
|
||||||
def positive?
|
|
||||||
self > 0
|
|
||||||
end
|
|
||||||
end
|
|
||||||
Numeric.__send__(:include, PosMethods)
|
|
||||||
end
|
|
||||||
|
|
||||||
unless Numeric.method_defined?(:negative?)
|
|
||||||
# Ruby 2.3 Backport (Numeric#negative?)
|
|
||||||
#
|
|
||||||
module NegMethods
|
|
||||||
def negative?
|
|
||||||
self < 0
|
|
||||||
end
|
|
||||||
end
|
|
||||||
Numeric.__send__(:include, NegMethods)
|
|
||||||
end
|
|
||||||
|
|
||||||
module NumericExtensions
|
|
||||||
# Ruby 2.4 backport
|
|
||||||
refine Numeric do
|
|
||||||
def infinite?
|
|
||||||
self == Float::INFINITY
|
|
||||||
end unless Numeric.method_defined?(:infinite?)
|
|
||||||
end
|
|
||||||
end
|
|
||||||
|
|
||||||
module StringExtensions
|
|
||||||
refine String do
|
|
||||||
# Ruby 2.5 backport
|
|
||||||
def delete_suffix!(suffix)
|
|
||||||
suffix = Backports.coerce_to_str(suffix)
|
|
||||||
chomp! if frozen?
|
|
||||||
len = suffix.length
|
|
||||||
if len > 0 && index(suffix, -len)
|
|
||||||
self[-len..-1] = ''
|
|
||||||
self
|
|
||||||
else
|
|
||||||
nil
|
|
||||||
end
|
|
||||||
end unless String.method_defined?(:delete_suffix!)
|
|
||||||
end
|
|
||||||
end
|
|
||||||
|
|
||||||
module HashExtensions
|
|
||||||
refine Hash do
|
|
||||||
# Ruby 2.4 backport
|
|
||||||
def compact
|
|
||||||
h = {}
|
|
||||||
each do |key, value|
|
|
||||||
h[key] = value unless value == nil
|
|
||||||
end
|
|
||||||
h
|
|
||||||
end unless Hash.method_defined?(:compact)
|
|
||||||
end
|
|
||||||
end
|
|
||||||
|
|
||||||
module ArrayExtensions
|
module ArrayExtensions
|
||||||
module FilterMap
|
module FilterMap
|
||||||
refine Array do
|
refine Array do
|
||||||
@ -108,16 +18,6 @@ module HTTPX
|
|||||||
end unless Array.method_defined?(:filter_map)
|
end unless Array.method_defined?(:filter_map)
|
||||||
end
|
end
|
||||||
|
|
||||||
module Sum
|
|
||||||
refine Array do
|
|
||||||
# Ruby 2.6 backport
|
|
||||||
def sum(accumulator = 0, &block)
|
|
||||||
values = block_given? ? map(&block) : self
|
|
||||||
values.inject(accumulator, :+)
|
|
||||||
end
|
|
||||||
end unless Array.method_defined?(:sum)
|
|
||||||
end
|
|
||||||
|
|
||||||
module Intersect
|
module Intersect
|
||||||
refine Array do
|
refine Array do
|
||||||
# Ruby 3.1 backport
|
# Ruby 3.1 backport
|
||||||
@ -133,30 +33,6 @@ module HTTPX
|
|||||||
end
|
end
|
||||||
end
|
end
|
||||||
|
|
||||||
module IOExtensions
|
|
||||||
refine IO do
|
|
||||||
# Ruby 2.3 backport
|
|
||||||
# provides a fallback for rubies where IO#wait isn't implemented,
|
|
||||||
# but IO#wait_readable and IO#wait_writable are.
|
|
||||||
def wait(timeout = nil, _mode = :read_write)
|
|
||||||
r, w = IO.select([self], [self], nil, timeout)
|
|
||||||
|
|
||||||
return unless r || w
|
|
||||||
|
|
||||||
self
|
|
||||||
end unless IO.method_defined?(:wait) && IO.instance_method(:wait).arity == 2
|
|
||||||
end
|
|
||||||
end
|
|
||||||
|
|
||||||
module RegexpExtensions
|
|
||||||
refine(Regexp) do
|
|
||||||
# Ruby 2.4 backport
|
|
||||||
def match?(*args)
|
|
||||||
!match(*args).nil?
|
|
||||||
end
|
|
||||||
end
|
|
||||||
end
|
|
||||||
|
|
||||||
module URIExtensions
|
module URIExtensions
|
||||||
# uri 0.11 backport, ships with ruby 3.1
|
# uri 0.11 backport, ships with ruby 3.1
|
||||||
refine URI::Generic do
|
refine URI::Generic do
|
||||||
@ -178,21 +54,6 @@ module HTTPX
|
|||||||
def origin
|
def origin
|
||||||
"#{scheme}://#{authority}"
|
"#{scheme}://#{authority}"
|
||||||
end unless URI::HTTP.method_defined?(:origin)
|
end unless URI::HTTP.method_defined?(:origin)
|
||||||
|
|
||||||
def altsvc_match?(uri)
|
|
||||||
uri = URI.parse(uri)
|
|
||||||
|
|
||||||
origin == uri.origin || begin
|
|
||||||
case scheme
|
|
||||||
when "h2"
|
|
||||||
(uri.scheme == "https" || uri.scheme == "h2") &&
|
|
||||||
host == uri.host &&
|
|
||||||
(port || default_port) == (uri.port || uri.default_port)
|
|
||||||
else
|
|
||||||
false
|
|
||||||
end
|
|
||||||
end
|
|
||||||
end
|
|
||||||
end
|
end
|
||||||
end
|
end
|
||||||
end
|
end
|
||||||
|
@ -11,20 +11,32 @@ module HTTPX
|
|||||||
end
|
end
|
||||||
|
|
||||||
def initialize(headers = nil)
|
def initialize(headers = nil)
|
||||||
|
if headers.nil? || headers.empty?
|
||||||
|
@headers = headers.to_h
|
||||||
|
return
|
||||||
|
end
|
||||||
|
|
||||||
@headers = {}
|
@headers = {}
|
||||||
return unless headers
|
|
||||||
|
|
||||||
headers.each do |field, value|
|
headers.each do |field, value|
|
||||||
array_value(value).each do |v|
|
field = downcased(field)
|
||||||
add(downcased(field), v)
|
|
||||||
|
value = array_value(value)
|
||||||
|
|
||||||
|
current = @headers[field]
|
||||||
|
|
||||||
|
if current.nil?
|
||||||
|
@headers[field] = value
|
||||||
|
else
|
||||||
|
current.concat(value)
|
||||||
end
|
end
|
||||||
end
|
end
|
||||||
end
|
end
|
||||||
|
|
||||||
# cloned initialization
|
# cloned initialization
|
||||||
def initialize_clone(orig)
|
def initialize_clone(orig, **kwargs)
|
||||||
super
|
super
|
||||||
@headers = orig.instance_variable_get(:@headers).clone
|
@headers = orig.instance_variable_get(:@headers).clone(**kwargs)
|
||||||
end
|
end
|
||||||
|
|
||||||
# dupped initialization
|
# dupped initialization
|
||||||
@ -39,17 +51,6 @@ module HTTPX
|
|||||||
super
|
super
|
||||||
end
|
end
|
||||||
|
|
||||||
def same_headers?(headers)
|
|
||||||
@headers.empty? || begin
|
|
||||||
headers.each do |k, v|
|
|
||||||
next unless key?(k)
|
|
||||||
|
|
||||||
return false unless v == self[k]
|
|
||||||
end
|
|
||||||
true
|
|
||||||
end
|
|
||||||
end
|
|
||||||
|
|
||||||
# merges headers with another header-quack.
|
# merges headers with another header-quack.
|
||||||
# the merge rule is, if the header already exists,
|
# the merge rule is, if the header already exists,
|
||||||
# ignore what the +other+ headers has. Otherwise, set
|
# ignore what the +other+ headers has. Otherwise, set
|
||||||
@ -119,6 +120,10 @@ module HTTPX
|
|||||||
other == to_hash
|
other == to_hash
|
||||||
end
|
end
|
||||||
|
|
||||||
|
def empty?
|
||||||
|
@headers.empty?
|
||||||
|
end
|
||||||
|
|
||||||
# the headers store in Hash format
|
# the headers store in Hash format
|
||||||
def to_hash
|
def to_hash
|
||||||
Hash[to_a]
|
Hash[to_a]
|
||||||
@ -137,7 +142,8 @@ module HTTPX
|
|||||||
|
|
||||||
# :nocov:
|
# :nocov:
|
||||||
def inspect
|
def inspect
|
||||||
to_hash.inspect
|
"#<#{self.class}:#{object_id} " \
|
||||||
|
"#{to_hash.inspect}>"
|
||||||
end
|
end
|
||||||
# :nocov:
|
# :nocov:
|
||||||
|
|
||||||
@ -160,12 +166,7 @@ module HTTPX
|
|||||||
private
|
private
|
||||||
|
|
||||||
def array_value(value)
|
def array_value(value)
|
||||||
case value
|
Array(value)
|
||||||
when Array
|
|
||||||
value.map { |val| String(val).strip }
|
|
||||||
else
|
|
||||||
[String(value).strip]
|
|
||||||
end
|
|
||||||
end
|
end
|
||||||
|
|
||||||
def downcased(field)
|
def downcased(field)
|
||||||
|
@ -4,4 +4,8 @@ require "socket"
|
|||||||
require "httpx/io/udp"
|
require "httpx/io/udp"
|
||||||
require "httpx/io/tcp"
|
require "httpx/io/tcp"
|
||||||
require "httpx/io/unix"
|
require "httpx/io/unix"
|
||||||
|
|
||||||
|
begin
|
||||||
require "httpx/io/ssl"
|
require "httpx/io/ssl"
|
||||||
|
rescue LoadError
|
||||||
|
end
|
||||||
|
@ -4,26 +4,49 @@ require "openssl"
|
|||||||
|
|
||||||
module HTTPX
|
module HTTPX
|
||||||
TLSError = OpenSSL::SSL::SSLError
|
TLSError = OpenSSL::SSL::SSLError
|
||||||
IPRegex = Regexp.union(Resolv::IPv4::Regex, Resolv::IPv6::Regex)
|
|
||||||
|
|
||||||
class SSL < TCP
|
class SSL < TCP
|
||||||
using RegexpExtensions unless Regexp.method_defined?(:match?)
|
# rubocop:disable Style/MutableConstant
|
||||||
|
TLS_OPTIONS = { alpn_protocols: %w[h2 http/1.1].freeze }
|
||||||
|
# https://github.com/jruby/jruby-openssl/issues/284
|
||||||
|
# TODO: remove when dropping support for jruby-openssl < 0.15.4
|
||||||
|
TLS_OPTIONS[:verify_hostname] = true if RUBY_ENGINE == "jruby" && JOpenSSL::VERSION < "0.15.4"
|
||||||
|
# rubocop:enable Style/MutableConstant
|
||||||
|
TLS_OPTIONS.freeze
|
||||||
|
|
||||||
TLS_OPTIONS = if OpenSSL::SSL::SSLContext.instance_methods.include?(:alpn_protocols)
|
attr_writer :ssl_session
|
||||||
{ alpn_protocols: %w[h2 http/1.1].freeze }.freeze
|
|
||||||
else
|
|
||||||
{}.freeze
|
|
||||||
end
|
|
||||||
|
|
||||||
def initialize(_, _, options)
|
def initialize(_, _, options)
|
||||||
super
|
super
|
||||||
@ctx = OpenSSL::SSL::SSLContext.new
|
|
||||||
ctx_options = TLS_OPTIONS.merge(options.ssl)
|
ctx_options = TLS_OPTIONS.merge(options.ssl)
|
||||||
@sni_hostname = ctx_options.delete(:hostname) || @hostname
|
@sni_hostname = ctx_options.delete(:hostname) || @hostname
|
||||||
@ctx.set_params(ctx_options) unless ctx_options.empty?
|
|
||||||
@state = :negotiated if @keep_open
|
|
||||||
|
|
||||||
@hostname_is_ip = IPRegex.match?(@sni_hostname)
|
if @keep_open && @io.is_a?(OpenSSL::SSL::SSLSocket)
|
||||||
|
# externally initiated ssl socket
|
||||||
|
@ctx = @io.context
|
||||||
|
@state = :negotiated
|
||||||
|
else
|
||||||
|
@ctx = OpenSSL::SSL::SSLContext.new
|
||||||
|
@ctx.set_params(ctx_options) unless ctx_options.empty?
|
||||||
|
unless @ctx.session_cache_mode.nil? # a dummy method on JRuby
|
||||||
|
@ctx.session_cache_mode =
|
||||||
|
OpenSSL::SSL::SSLContext::SESSION_CACHE_CLIENT | OpenSSL::SSL::SSLContext::SESSION_CACHE_NO_INTERNAL_STORE
|
||||||
|
end
|
||||||
|
|
||||||
|
yield(self) if block_given?
|
||||||
|
end
|
||||||
|
|
||||||
|
@verify_hostname = @ctx.verify_hostname
|
||||||
|
end
|
||||||
|
|
||||||
|
if OpenSSL::SSL::SSLContext.method_defined?(:session_new_cb=)
|
||||||
|
def session_new_cb(&pr)
|
||||||
|
@ctx.session_new_cb = proc { |_, sess| pr.call(sess) }
|
||||||
|
end
|
||||||
|
else
|
||||||
|
# session_new_cb not implemented under JRuby
|
||||||
|
def session_new_cb; end
|
||||||
end
|
end
|
||||||
|
|
||||||
def protocol
|
def protocol
|
||||||
@ -32,6 +55,20 @@ module HTTPX
|
|||||||
super
|
super
|
||||||
end
|
end
|
||||||
|
|
||||||
|
if RUBY_ENGINE == "jruby"
|
||||||
|
# in jruby, alpn_protocol may return ""
|
||||||
|
# https://github.com/jruby/jruby-openssl/issues/287
|
||||||
|
def protocol
|
||||||
|
proto = @io.alpn_protocol
|
||||||
|
|
||||||
|
return super if proto.nil? || proto.empty?
|
||||||
|
|
||||||
|
proto
|
||||||
|
rescue StandardError
|
||||||
|
super
|
||||||
|
end
|
||||||
|
end
|
||||||
|
|
||||||
def can_verify_peer?
|
def can_verify_peer?
|
||||||
@ctx.verify_mode == OpenSSL::SSL::VERIFY_PEER
|
@ctx.verify_mode == OpenSSL::SSL::VERIFY_PEER
|
||||||
end
|
end
|
||||||
@ -43,59 +80,47 @@ module HTTPX
|
|||||||
OpenSSL::SSL.verify_certificate_identity(@io.peer_cert, host)
|
OpenSSL::SSL.verify_certificate_identity(@io.peer_cert, host)
|
||||||
end
|
end
|
||||||
|
|
||||||
def close
|
|
||||||
super
|
|
||||||
# allow reconnections
|
|
||||||
# connect only works if initial @io is a socket
|
|
||||||
@io = @io.io if @io.respond_to?(:io)
|
|
||||||
end
|
|
||||||
|
|
||||||
def connected?
|
def connected?
|
||||||
@state == :negotiated
|
@state == :negotiated
|
||||||
end
|
end
|
||||||
|
|
||||||
|
def expired?
|
||||||
|
super || ssl_session_expired?
|
||||||
|
end
|
||||||
|
|
||||||
|
def ssl_session_expired?
|
||||||
|
@ssl_session.nil? || Process.clock_gettime(Process::CLOCK_REALTIME) >= (@ssl_session.time.to_f + @ssl_session.timeout)
|
||||||
|
end
|
||||||
|
|
||||||
def connect
|
def connect
|
||||||
|
return if @state == :negotiated
|
||||||
|
|
||||||
|
unless @state == :connected
|
||||||
super
|
super
|
||||||
return if @state == :negotiated ||
|
return unless @state == :connected
|
||||||
@state != :connected
|
end
|
||||||
|
|
||||||
unless @io.is_a?(OpenSSL::SSL::SSLSocket)
|
unless @io.is_a?(OpenSSL::SSL::SSLSocket)
|
||||||
|
if (hostname_is_ip = (@ip == @sni_hostname))
|
||||||
|
# IPv6 address would be "[::1]", must turn to "0000:0000:0000:0000:0000:0000:0000:0001" for cert SAN check
|
||||||
|
@sni_hostname = @ip.to_string
|
||||||
|
# IP addresses in SNI is not valid per RFC 6066, section 3.
|
||||||
|
@ctx.verify_hostname = false
|
||||||
|
end
|
||||||
|
|
||||||
@io = OpenSSL::SSL::SSLSocket.new(@io, @ctx)
|
@io = OpenSSL::SSL::SSLSocket.new(@io, @ctx)
|
||||||
@io.hostname = @sni_hostname unless @hostname_is_ip
|
|
||||||
|
@io.hostname = @sni_hostname unless hostname_is_ip
|
||||||
|
@io.session = @ssl_session unless ssl_session_expired?
|
||||||
@io.sync_close = true
|
@io.sync_close = true
|
||||||
end
|
end
|
||||||
try_ssl_connect
|
try_ssl_connect
|
||||||
end
|
end
|
||||||
|
|
||||||
if RUBY_VERSION < "2.3"
|
|
||||||
# :nocov:
|
|
||||||
def try_ssl_connect
|
def try_ssl_connect
|
||||||
@io.connect_nonblock
|
ret = @io.connect_nonblock(exception: false)
|
||||||
@io.post_connection_check(@sni_hostname) if @ctx.verify_mode != OpenSSL::SSL::VERIFY_NONE && !@hostname_is_ip
|
log(level: 3, color: :cyan) { "TLS CONNECT: #{ret}..." }
|
||||||
transition(:negotiated)
|
case ret
|
||||||
@interests = :w
|
|
||||||
rescue ::IO::WaitReadable
|
|
||||||
@interests = :r
|
|
||||||
rescue ::IO::WaitWritable
|
|
||||||
@interests = :w
|
|
||||||
end
|
|
||||||
|
|
||||||
def read(_, buffer)
|
|
||||||
super
|
|
||||||
rescue ::IO::WaitWritable
|
|
||||||
buffer.clear
|
|
||||||
0
|
|
||||||
end
|
|
||||||
|
|
||||||
def write(*)
|
|
||||||
super
|
|
||||||
rescue ::IO::WaitReadable
|
|
||||||
0
|
|
||||||
end
|
|
||||||
# :nocov:
|
|
||||||
else
|
|
||||||
def try_ssl_connect
|
|
||||||
case @io.connect_nonblock(exception: false)
|
|
||||||
when :wait_readable
|
when :wait_readable
|
||||||
@interests = :r
|
@interests = :r
|
||||||
return
|
return
|
||||||
@ -103,33 +128,18 @@ module HTTPX
|
|||||||
@interests = :w
|
@interests = :w
|
||||||
return
|
return
|
||||||
end
|
end
|
||||||
@io.post_connection_check(@sni_hostname) if @ctx.verify_mode != OpenSSL::SSL::VERIFY_NONE && !@hostname_is_ip
|
@io.post_connection_check(@sni_hostname) if @ctx.verify_mode != OpenSSL::SSL::VERIFY_NONE && @verify_hostname
|
||||||
transition(:negotiated)
|
transition(:negotiated)
|
||||||
@interests = :w
|
@interests = :w
|
||||||
end
|
end
|
||||||
|
|
||||||
# :nocov:
|
|
||||||
if OpenSSL::VERSION < "2.0.6"
|
|
||||||
def read(size, buffer)
|
|
||||||
@io.read_nonblock(size, buffer)
|
|
||||||
buffer.bytesize
|
|
||||||
rescue ::IO::WaitReadable,
|
|
||||||
::IO::WaitWritable
|
|
||||||
buffer.clear
|
|
||||||
0
|
|
||||||
rescue EOFError
|
|
||||||
nil
|
|
||||||
end
|
|
||||||
end
|
|
||||||
# :nocov:
|
|
||||||
end
|
|
||||||
|
|
||||||
private
|
private
|
||||||
|
|
||||||
def transition(nextstate)
|
def transition(nextstate)
|
||||||
case nextstate
|
case nextstate
|
||||||
when :negotiated
|
when :negotiated
|
||||||
return unless @state == :connected
|
return unless @state == :connected
|
||||||
|
|
||||||
when :closed
|
when :closed
|
||||||
return unless @state == :negotiated ||
|
return unless @state == :negotiated ||
|
||||||
@state == :connected
|
@state == :connected
|
||||||
|
@ -17,7 +17,7 @@ module HTTPX
|
|||||||
@state = :idle
|
@state = :idle
|
||||||
@addresses = []
|
@addresses = []
|
||||||
@hostname = origin.host
|
@hostname = origin.host
|
||||||
@options = Options.new(options)
|
@options = options
|
||||||
@fallback_protocol = @options.fallback_protocol
|
@fallback_protocol = @options.fallback_protocol
|
||||||
@port = origin.port
|
@port = origin.port
|
||||||
@interests = :w
|
@interests = :w
|
||||||
@ -38,7 +38,10 @@ module HTTPX
|
|||||||
add_addresses(addresses)
|
add_addresses(addresses)
|
||||||
end
|
end
|
||||||
@ip_index = @addresses.size - 1
|
@ip_index = @addresses.size - 1
|
||||||
# @io ||= build_socket
|
end
|
||||||
|
|
||||||
|
def socket
|
||||||
|
@io
|
||||||
end
|
end
|
||||||
|
|
||||||
def add_addresses(addrs)
|
def add_addresses(addrs)
|
||||||
@ -72,10 +75,20 @@ module HTTPX
|
|||||||
@io = build_socket
|
@io = build_socket
|
||||||
end
|
end
|
||||||
try_connect
|
try_connect
|
||||||
|
rescue Errno::EHOSTUNREACH,
|
||||||
|
Errno::ENETUNREACH => e
|
||||||
|
raise e if @ip_index <= 0
|
||||||
|
|
||||||
|
log { "failed connecting to #{@ip} (#{e.message}), evict from cache and trying next..." }
|
||||||
|
Resolver.cached_lookup_evict(@hostname, @ip)
|
||||||
|
|
||||||
|
@ip_index -= 1
|
||||||
|
@io = build_socket
|
||||||
|
retry
|
||||||
rescue Errno::ECONNREFUSED,
|
rescue Errno::ECONNREFUSED,
|
||||||
Errno::EADDRNOTAVAIL,
|
Errno::EADDRNOTAVAIL,
|
||||||
Errno::EHOSTUNREACH,
|
SocketError,
|
||||||
SocketError => e
|
IOError => e
|
||||||
raise e if @ip_index <= 0
|
raise e if @ip_index <= 0
|
||||||
|
|
||||||
log { "failed connecting to #{@ip} (#{e.message}), trying next..." }
|
log { "failed connecting to #{@ip} (#{e.message}), trying next..." }
|
||||||
@ -91,48 +104,10 @@ module HTTPX
|
|||||||
retry
|
retry
|
||||||
end
|
end
|
||||||
|
|
||||||
if RUBY_VERSION < "2.3"
|
|
||||||
# :nocov:
|
|
||||||
def try_connect
|
def try_connect
|
||||||
@io.connect_nonblock(Socket.sockaddr_in(@port, @ip.to_s))
|
ret = @io.connect_nonblock(Socket.sockaddr_in(@port, @ip.to_s), exception: false)
|
||||||
rescue ::IO::WaitWritable, Errno::EALREADY
|
log(level: 3, color: :cyan) { "TCP CONNECT: #{ret}..." }
|
||||||
@interests = :w
|
case ret
|
||||||
rescue ::IO::WaitReadable
|
|
||||||
@interests = :r
|
|
||||||
rescue Errno::EISCONN
|
|
||||||
transition(:connected)
|
|
||||||
@interests = :w
|
|
||||||
else
|
|
||||||
transition(:connected)
|
|
||||||
@interests = :w
|
|
||||||
end
|
|
||||||
private :try_connect
|
|
||||||
|
|
||||||
def read(size, buffer)
|
|
||||||
@io.read_nonblock(size, buffer)
|
|
||||||
log { "READ: #{buffer.bytesize} bytes..." }
|
|
||||||
buffer.bytesize
|
|
||||||
rescue ::IO::WaitReadable
|
|
||||||
buffer.clear
|
|
||||||
0
|
|
||||||
rescue EOFError
|
|
||||||
nil
|
|
||||||
end
|
|
||||||
|
|
||||||
def write(buffer)
|
|
||||||
siz = @io.write_nonblock(buffer)
|
|
||||||
log { "WRITE: #{siz} bytes..." }
|
|
||||||
buffer.shift!(siz)
|
|
||||||
siz
|
|
||||||
rescue ::IO::WaitWritable
|
|
||||||
0
|
|
||||||
rescue EOFError
|
|
||||||
nil
|
|
||||||
end
|
|
||||||
# :nocov:
|
|
||||||
else
|
|
||||||
def try_connect
|
|
||||||
case @io.connect_nonblock(Socket.sockaddr_in(@port, @ip.to_s), exception: false)
|
|
||||||
when :wait_readable
|
when :wait_readable
|
||||||
@interests = :r
|
@interests = :r
|
||||||
return
|
return
|
||||||
@ -169,7 +144,6 @@ module HTTPX
|
|||||||
buffer.shift!(siz)
|
buffer.shift!(siz)
|
||||||
siz
|
siz
|
||||||
end
|
end
|
||||||
end
|
|
||||||
|
|
||||||
def close
|
def close
|
||||||
return if @keep_open || closed?
|
return if @keep_open || closed?
|
||||||
@ -189,9 +163,25 @@ module HTTPX
|
|||||||
@state == :idle || @state == :closed
|
@state == :idle || @state == :closed
|
||||||
end
|
end
|
||||||
|
|
||||||
|
def expired?
|
||||||
|
# do not mess with external sockets
|
||||||
|
return false if @options.io
|
||||||
|
|
||||||
|
return true unless @addresses
|
||||||
|
|
||||||
|
resolver_addresses = Resolver.nolookup_resolve(@hostname)
|
||||||
|
|
||||||
|
(Array(resolver_addresses) & @addresses).empty?
|
||||||
|
end
|
||||||
|
|
||||||
# :nocov:
|
# :nocov:
|
||||||
def inspect
|
def inspect
|
||||||
"#<#{self.class}: #{@ip}:#{@port} (state: #{@state})>"
|
"#<#{self.class}:#{object_id} " \
|
||||||
|
"#{@ip}:#{@port} " \
|
||||||
|
"@state=#{@state} " \
|
||||||
|
"@hostname=#{@hostname} " \
|
||||||
|
"@addresses=#{@addresses} " \
|
||||||
|
"@state=#{@state}>"
|
||||||
end
|
end
|
||||||
# :nocov:
|
# :nocov:
|
||||||
|
|
||||||
@ -219,12 +209,9 @@ module HTTPX
|
|||||||
end
|
end
|
||||||
|
|
||||||
def log_transition_state(nextstate)
|
def log_transition_state(nextstate)
|
||||||
case nextstate
|
label = host
|
||||||
when :connected
|
label = "#{label}(##{@io.fileno})" if nextstate == :connected
|
||||||
"Connected to #{host} (##{@io.fileno})"
|
"#{label} #{@state} -> #{nextstate}"
|
||||||
else
|
|
||||||
"#{host} #{@state} -> #{nextstate}"
|
|
||||||
end
|
|
||||||
end
|
end
|
||||||
end
|
end
|
||||||
end
|
end
|
||||||
|
@ -23,45 +23,19 @@ module HTTPX
|
|||||||
true
|
true
|
||||||
end
|
end
|
||||||
|
|
||||||
if RUBY_VERSION < "2.3"
|
|
||||||
# :nocov:
|
|
||||||
def close
|
def close
|
||||||
@io.close
|
@io.close
|
||||||
rescue StandardError
|
|
||||||
nil
|
|
||||||
end
|
|
||||||
# :nocov:
|
|
||||||
else
|
|
||||||
def close
|
|
||||||
@io.close
|
|
||||||
end
|
|
||||||
end
|
end
|
||||||
|
|
||||||
# :nocov:
|
if RUBY_ENGINE == "jruby"
|
||||||
if (RUBY_ENGINE == "truffleruby" && RUBY_ENGINE_VERSION < "21.1.0") ||
|
# In JRuby, sendmsg_nonblock is not implemented
|
||||||
RUBY_VERSION < "2.3"
|
|
||||||
def write(buffer)
|
def write(buffer)
|
||||||
siz = @io.sendmsg_nonblock(buffer.to_s, 0, Socket.sockaddr_in(@port, @host.to_s))
|
siz = @io.send(buffer.to_s, 0, @host, @port)
|
||||||
log { "WRITE: #{siz} bytes..." }
|
log { "WRITE: #{siz} bytes..." }
|
||||||
buffer.shift!(siz)
|
buffer.shift!(siz)
|
||||||
siz
|
siz
|
||||||
rescue ::IO::WaitWritable
|
|
||||||
0
|
|
||||||
rescue EOFError
|
|
||||||
nil
|
|
||||||
end
|
|
||||||
|
|
||||||
def read(size, buffer)
|
|
||||||
data, _ = @io.recvfrom_nonblock(size)
|
|
||||||
buffer.replace(data)
|
|
||||||
log { "READ: #{buffer.bytesize} bytes..." }
|
|
||||||
buffer.bytesize
|
|
||||||
rescue ::IO::WaitReadable
|
|
||||||
0
|
|
||||||
rescue IOError
|
|
||||||
end
|
end
|
||||||
else
|
else
|
||||||
|
|
||||||
def write(buffer)
|
def write(buffer)
|
||||||
siz = @io.sendmsg_nonblock(buffer.to_s, 0, Socket.sockaddr_in(@port, @host.to_s), exception: false)
|
siz = @io.sendmsg_nonblock(buffer.to_s, 0, Socket.sockaddr_in(@port, @host.to_s), exception: false)
|
||||||
return 0 if siz == :wait_writable
|
return 0 if siz == :wait_writable
|
||||||
@ -72,6 +46,7 @@ module HTTPX
|
|||||||
buffer.shift!(siz)
|
buffer.shift!(siz)
|
||||||
siz
|
siz
|
||||||
end
|
end
|
||||||
|
end
|
||||||
|
|
||||||
def read(size, buffer)
|
def read(size, buffer)
|
||||||
ret = @io.recvfrom_nonblock(size, 0, buffer, exception: false)
|
ret = @io.recvfrom_nonblock(size, 0, buffer, exception: false)
|
||||||
@ -84,14 +59,4 @@ module HTTPX
|
|||||||
rescue IOError
|
rescue IOError
|
||||||
end
|
end
|
||||||
end
|
end
|
||||||
|
|
||||||
# In JRuby, sendmsg_nonblock is not implemented
|
|
||||||
def write(buffer)
|
|
||||||
siz = @io.send(buffer.to_s, 0, @host, @port)
|
|
||||||
log { "WRITE: #{siz} bytes..." }
|
|
||||||
buffer.shift!(siz)
|
|
||||||
siz
|
|
||||||
end if RUBY_ENGINE == "jruby"
|
|
||||||
# :nocov:
|
|
||||||
end
|
|
||||||
end
|
end
|
||||||
|
@ -8,11 +8,11 @@ module HTTPX
|
|||||||
|
|
||||||
alias_method :host, :path
|
alias_method :host, :path
|
||||||
|
|
||||||
def initialize(origin, addresses, options)
|
def initialize(origin, path, options)
|
||||||
@addresses = []
|
@addresses = []
|
||||||
@hostname = origin.host
|
@hostname = origin.host
|
||||||
@state = :idle
|
@state = :idle
|
||||||
@options = Options.new(options)
|
@options = options
|
||||||
@fallback_protocol = @options.fallback_protocol
|
@fallback_protocol = @options.fallback_protocol
|
||||||
if @options.io
|
if @options.io
|
||||||
@io = case @options.io
|
@io = case @options.io
|
||||||
@ -26,15 +26,10 @@ module HTTPX
|
|||||||
@path = @io.path
|
@path = @io.path
|
||||||
@keep_open = true
|
@keep_open = true
|
||||||
@state = :connected
|
@state = :connected
|
||||||
|
elsif path
|
||||||
|
@path = path
|
||||||
else
|
else
|
||||||
if @options.transport_options
|
raise Error, "No path given where to store the socket"
|
||||||
# :nocov:
|
|
||||||
warn ":transport_options is deprecated, use :addresses instead"
|
|
||||||
@path = @options.transport_options[:path]
|
|
||||||
# :nocov:
|
|
||||||
else
|
|
||||||
@path = addresses.first
|
|
||||||
end
|
|
||||||
end
|
end
|
||||||
@io ||= build_socket
|
@io ||= build_socket
|
||||||
end
|
end
|
||||||
@ -53,12 +48,16 @@ module HTTPX
|
|||||||
transition(:connected)
|
transition(:connected)
|
||||||
rescue Errno::EINPROGRESS,
|
rescue Errno::EINPROGRESS,
|
||||||
Errno::EALREADY,
|
Errno::EALREADY,
|
||||||
::IO::WaitReadable
|
IO::WaitReadable
|
||||||
|
end
|
||||||
|
|
||||||
|
def expired?
|
||||||
|
false
|
||||||
end
|
end
|
||||||
|
|
||||||
# :nocov:
|
# :nocov:
|
||||||
def inspect
|
def inspect
|
||||||
"#<#{self.class}(path: #{@path}): (state: #{@state})>"
|
"#<#{self.class}:#{object_id} @path=#{@path}) @state=#{@state})>"
|
||||||
end
|
end
|
||||||
# :nocov:
|
# :nocov:
|
||||||
|
|
||||||
|
Some files were not shown because too many files have changed in this diff Show More
Loading…
x
Reference in New Issue
Block a user