#!/usr/bin/env python3
# -*- coding: utf-8 -*-
#***************************************************************************
#                                  _   _ ____  _
#  Project                     ___| | | |  _ \| |
#                             / __| | | | |_) | |
#                            | (__| |_| |  _ <| |___
#                             \___|\___/|_| \_\_____|
#
# Copyright (C) Daniel Stenberg, <daniel@haxx.se>, et al.
#
# This software is licensed as described in the file COPYING, which
# you should have received as part of this distribution. The terms
# are also available at https://curl.se/docs/copyright.html.
#
# You may opt to use, copy, modify, merge, publish, distribute and/or sell
# copies of the Software, and permit persons to whom the Software is
# furnished to do so, under the terms of the COPYING file.
#
# This software is distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY
# KIND, either express or implied.
#
# SPDX-License-Identifier: curl
#
###########################################################################
#
import logging
import os
import pytest

from testenv import Env, CurlClient, Caddy


log = logging.getLogger(__name__)


@pytest.mark.skipif(condition=not Env.has_caddy(), reason="missing caddy")
@pytest.mark.skipif(condition=not Env.have_ssl_curl(), reason="curl without SSL")
class TestCaddy:
    """Download/upload tests of curl against a Caddy web server.

    A class-scoped Caddy instance is started once and populated with
    test files of various sizes; the tests exercise sequential and
    parallel transfers over http/1.1, h2 and h3.
    """

    @pytest.fixture(autouse=True, scope='class')
    def caddy(self, env):
        # start a Caddy server for the whole test class, stop it afterwards
        caddy = Caddy(env=env)
        assert caddy.start()
        yield caddy
        caddy.stop()

    def _make_docs_file(self, docs_dir: str, fname: str, fsize: int) -> int:
        """Create `fname` in `docs_dir`, filled with exactly `fsize` 'x' bytes.

        Returns the number of bytes written. The last chunk is truncated so
        the file size is exact even when `fsize` is not a 1 KiB multiple
        (previously the file was rounded up to the next 1 KiB boundary).
        """
        fpath = os.path.join(docs_dir, fname)
        data1k = 1024*'x'
        flen = 0
        with open(fpath, 'w') as fd:
            while flen < fsize:
                chunk = data1k[:min(len(data1k), fsize - flen)]
                fd.write(chunk)
                flen += len(chunk)
        return flen

    @pytest.fixture(autouse=True, scope='class')
    def _class_scope(self, env, caddy):
        # create the download files served by caddy, once per class
        self._make_docs_file(docs_dir=caddy.docs_dir, fname='data1.data', fsize=1024*1024)
        self._make_docs_file(docs_dir=caddy.docs_dir, fname='data5.data', fsize=5*1024*1024)
        self._make_docs_file(docs_dir=caddy.docs_dir, fname='data10.data', fsize=10*1024*1024)
        self._make_docs_file(docs_dir=caddy.docs_dir, fname='data100.data', fsize=100*1024*1024)

    # download 1 file
    @pytest.mark.parametrize("proto", ['http/1.1', 'h2', 'h3'])
    def test_08_01_download_1(self, env: Env, caddy: Caddy, repeat, proto):
        if proto == 'h3' and not env.have_h3_curl():
            pytest.skip("h3 not supported in curl")
        if proto == 'h3' and env.curl_uses_lib('msh3'):
            pytest.skip("msh3 itself crashes")
        curl = CurlClient(env=env)
        url = f'https://{env.domain1}:{caddy.port}/data.json'
        r = curl.http_download(urls=[url], alpn_proto=proto)
        r.check_response(count=1, http_status=200)

    # download 1MB files sequentially
    @pytest.mark.parametrize("proto", ['http/1.1', 'h2', 'h3'])
    def test_08_02_download_1mb_sequential(self, env: Env, caddy: Caddy,
                                           repeat, proto):
        if proto == 'h3' and not env.have_h3_curl():
            pytest.skip("h3 not supported in curl")
        if proto == 'h3' and env.curl_uses_lib('msh3'):
            pytest.skip("msh3 itself crashes")
        count = 50
        curl = CurlClient(env=env)
        urln = f'https://{env.domain1}:{caddy.port}/data1.data?[0-{count-1}]'
        r = curl.http_download(urls=[urln], alpn_proto=proto)
        # sequential transfers should reuse a single connection
        r.check_response(count=count, http_status=200, connect_count=1)

    # download 1MB files parallel
    @pytest.mark.parametrize("proto", ['http/1.1', 'h2', 'h3'])
    def test_08_03_download_1mb_parallel(self, env: Env, caddy: Caddy,
                                         repeat, proto):
        if proto == 'h3' and not env.have_h3_curl():
            pytest.skip("h3 not supported in curl")
        if proto == 'h3' and env.curl_uses_lib('msh3'):
            pytest.skip("msh3 itself crashes")
        count = 20
        curl = CurlClient(env=env)
        urln = f'https://{env.domain1}:{caddy.port}/data1.data?[0-{count-1}]'
        r = curl.http_download(urls=[urln], alpn_proto=proto, extra_args=[
            '--parallel'
        ])
        r.check_response(count=count, http_status=200)
        if proto == 'http/1.1':
            # http/1.1 parallel transfers will open multiple connections
            assert r.total_connects > 1, r.dump_logs()
        else:
            # h2/h3 multiplex all transfers on one connection
            assert r.total_connects == 1, r.dump_logs()

    # download 5MB files sequentially
    @pytest.mark.skipif(condition=Env().slow_network, reason="not suitable for slow network tests")
    @pytest.mark.skipif(condition=Env().ci_run, reason="not suitable for CI runs")
    @pytest.mark.parametrize("proto", ['h2', 'h3'])
    def test_08_04a_download_10mb_sequential(self, env: Env, caddy: Caddy,
                                             repeat, proto):
        if proto == 'h3' and not env.have_h3_curl():
            pytest.skip("h3 not supported in curl")
        if proto == 'h3' and env.curl_uses_lib('msh3'):
            pytest.skip("msh3 itself crashes")
        count = 40
        curl = CurlClient(env=env)
        urln = f'https://{env.domain1}:{caddy.port}/data5.data?[0-{count-1}]'
        r = curl.http_download(urls=[urln], alpn_proto=proto)
        r.check_response(count=count, http_status=200, connect_count=1)

    # download 10MB files sequentially
    @pytest.mark.skipif(condition=Env().slow_network, reason="not suitable for slow network tests")
    @pytest.mark.skipif(condition=Env().ci_run, reason="not suitable for CI runs")
    @pytest.mark.parametrize("proto", ['h2', 'h3'])
    def test_08_04b_download_10mb_sequential(self, env: Env, caddy: Caddy,
                                             repeat, proto):
        if proto == 'h3' and not env.have_h3_curl():
            pytest.skip("h3 not supported in curl")
        if proto == 'h3' and env.curl_uses_lib('msh3'):
            pytest.skip("msh3 itself crashes")
        count = 20
        curl = CurlClient(env=env)
        urln = f'https://{env.domain1}:{caddy.port}/data10.data?[0-{count-1}]'
        r = curl.http_download(urls=[urln], alpn_proto=proto)
        r.check_response(count=count, http_status=200, connect_count=1)

    # download 10MB files parallel
    # NOTE(review): despite the `_1mb_` in the method name, this test
    # downloads data10.data (10MB files); name kept for CI compatibility.
    @pytest.mark.skipif(condition=Env().slow_network, reason="not suitable for slow network tests")
    @pytest.mark.parametrize("proto", ['http/1.1', 'h2', 'h3'])
    @pytest.mark.skipif(condition=Env().ci_run, reason="not suitable for CI runs")
    def test_08_05_download_1mb_parallel(self, env: Env, caddy: Caddy,
                                         repeat, proto):
        if proto == 'h3' and not env.have_h3_curl():
            pytest.skip("h3 not supported in curl")
        if proto == 'h3' and env.curl_uses_lib('msh3'):
            pytest.skip("msh3 itself crashes")
        count = 50
        curl = CurlClient(env=env)
        urln = f'https://{env.domain1}:{caddy.port}/data10.data?[0-{count-1}]'
        r = curl.http_download(urls=[urln], alpn_proto=proto, extra_args=[
            '--parallel'
        ])
        r.check_response(count=count, http_status=200)
        if proto == 'http/1.1':
            # http/1.1 parallel transfers will open multiple connections
            assert r.total_connects > 1, r.dump_logs()
        else:
            # h2/h3 multiplex all transfers on one connection
            assert r.total_connects == 1, r.dump_logs()

    # upload data parallel; caddy 2.7.0 and newer reject the uploads
    # with 405, older versions answered 200
    @pytest.mark.skipif(condition=Env().ci_run, reason="not suitable for CI runs")
    @pytest.mark.parametrize("proto", ['h2', 'h3'])
    def test_08_06_upload_parallel(self, env: Env, caddy, repeat, proto):
        if proto == 'h3' and not env.have_h3():
            pytest.skip("h3 not supported")
        if proto == 'h3' and env.curl_uses_lib('msh3'):
            pytest.skip("msh3 stalls here")
        # limit since we use a separate connection in h1
        count = 20
        data = '0123456789'
        curl = CurlClient(env=env)
        url = f'https://{env.domain1}:{caddy.port}/data10.data?[0-{count-1}]'
        r = curl.http_upload(urls=[url], data=data, alpn_proto=proto,
                             extra_args=['--parallel'])
        exp_status = 405 if env.caddy_is_at_least('2.7.0') else 200
        r.check_stats(count=count, http_status=exp_status, exitcode=0)