Update networking layer with curl and Emscripten implementations

This commit is contained in:
2025-11-08 01:50:36 +11:00
parent a17925904d
commit f6874dc55a
4105 changed files with 694617 additions and 179 deletions
+39
View File
@@ -0,0 +1,39 @@
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
#***************************************************************************
# _ _ ____ _
# Project ___| | | | _ \| |
# / __| | | | |_) | |
# | (__| |_| | _ <| |___
# \___|\___/|_| \_\_____|
#
# Copyright (C) Daniel Stenberg, <daniel@haxx.se>, et al.
#
# This software is licensed as described in the file COPYING, which
# you should have received as part of this distribution. The terms
# are also available at https://curl.se/docs/copyright.html.
#
# You may opt to use, copy, modify, merge, publish, distribute and/or sell
# copies of the Software, and permit persons to whom the Software is
# furnished to do so, under the terms of the COPYING file.
#
# This software is distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY
# KIND, either express or implied.
#
# SPDX-License-Identifier: curl
#
###########################################################################
# ruff: noqa: F401, E402
import pytest

# Enable pytest assert rewriting for the testenv helper modules, so
# failing asserts inside them produce detailed introspection output.
# Must run before those modules are imported below.
pytest.register_assert_rewrite("testenv.env", "testenv.curl", "testenv.caddy",
                               "testenv.httpd", "testenv.nghttpx")

# re-export the public test environment API
from .env import Env
from .certs import TestCA, Credentials
from .caddy import Caddy
from .httpd import Httpd
from .curl import CurlClient, ExecResult, RunProfile
from .client import LocalClient
from .nghttpx import Nghttpx, NghttpxQuic, NghttpxFwd
from .vsftpd import VsFTPD
from .dante import Dante
+206
View File
@@ -0,0 +1,206 @@
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
#***************************************************************************
# _ _ ____ _
# Project ___| | | | _ \| |
# / __| | | | |_) | |
# | (__| |_| | _ <| |___
# \___|\___/|_| \_\_____|
#
# Copyright (C) Daniel Stenberg, <daniel@haxx.se>, et al.
#
# This software is licensed as described in the file COPYING, which
# you should have received as part of this distribution. The terms
# are also available at https://curl.se/docs/copyright.html.
#
# You may opt to use, copy, modify, merge, publish, distribute and/or sell
# copies of the Software, and permit persons to whom the Software is
# furnished to do so, under the terms of the COPYING file.
#
# This software is distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY
# KIND, either express or implied.
#
# SPDX-License-Identifier: curl
#
###########################################################################
#
import logging
import os
import socket
import subprocess
import time
from datetime import timedelta, datetime
from json import JSONEncoder
from typing import Dict
from .curl import CurlClient
from .env import Env
from .ports import alloc_ports_and_do
log = logging.getLogger(__name__)
class Caddy:
    """Controls a Caddy web server instance for the test suite.

    Handles port allocation, Caddyfile generation and the server
    process lifecycle, including liveness/death polling via curl.
    """

    # named ports this server needs; the value is the socket type used
    # by the port allocator to probe availability
    PORT_SPECS = {
        'caddy': socket.SOCK_STREAM,
        'caddys': socket.SOCK_STREAM,
    }

    def __init__(self, env: Env):
        self.env = env
        # the CADDY environment variable overrides the configured binary
        self._caddy = os.environ['CADDY'] if 'CADDY' in os.environ else env.caddy
        self._caddy_dir = os.path.join(env.gen_dir, 'caddy')
        self._docs_dir = os.path.join(self._caddy_dir, 'docs')
        self._conf_file = os.path.join(self._caddy_dir, 'Caddyfile')
        self._error_log = os.path.join(self._caddy_dir, 'caddy.log')
        self._tmp_dir = os.path.join(self._caddy_dir, 'tmp')
        self._process = None
        self._http_port = 0
        self._https_port = 0
        self._rmf(self._error_log)

    @property
    def docs_dir(self):
        # document root served by caddy
        return self._docs_dir

    @property
    def port(self) -> int:
        # the HTTPS port clients should use
        return self._https_port

    def clear_logs(self):
        self._rmf(self._error_log)

    def is_running(self):
        """Return True if the caddy process is alive."""
        if self._process:
            self._process.poll()
            return self._process.returncode is None
        return False

    def start_if_needed(self):
        if not self.is_running():
            return self.start()
        return True

    def initial_start(self):
        """Allocate ports and start; retried with other ports on failure."""
        def startup(ports: Dict[str, int]) -> bool:
            self._http_port = ports['caddy']
            self._https_port = ports['caddys']
            if self.start():
                self.env.update_ports(ports)
                return True
            self.stop()
            self._http_port = 0
            self._https_port = 0
            return False
        return alloc_ports_and_do(Caddy.PORT_SPECS, startup,
                                  self.env.gen_root, max_tries=3)

    def start(self, wait_live=True):
        """Write the config and start the server process.

        With `wait_live`, block until the server answers requests.
        """
        assert self._http_port > 0 and self._https_port > 0
        self._mkpath(self._tmp_dir)
        if self._process:
            self.stop()
        self._write_config()
        args = [
            self._caddy, 'run'
        ]
        # Popen duplicates the fd into the child process, so close our
        # copy right away instead of leaking one handle per start
        with open(self._error_log, 'a') as caddyerr:
            self._process = subprocess.Popen(args=args, cwd=self._caddy_dir, stderr=caddyerr)
        if self._process.returncode is not None:
            return False
        return not wait_live or self.wait_live(timeout=timedelta(seconds=Env.SERVER_TIMEOUT))

    def stop(self, wait_dead=True):
        """Terminate the server process, killing it if it will not die."""
        self._mkpath(self._tmp_dir)
        if self._process:
            self._process.terminate()
            try:
                self._process.wait(timeout=1)
            except Exception:
                self._process.kill()
            self._process = None
            return not wait_dead or self.wait_dead(timeout=timedelta(seconds=5))
        return True

    def restart(self):
        self.stop()
        return self.start()

    def wait_dead(self, timeout: timedelta):
        """Poll until the server no longer answers requests, or timeout."""
        curl = CurlClient(env=self.env, run_dir=self._tmp_dir)
        try_until = datetime.now() + timeout
        while datetime.now() < try_until:
            check_url = f'https://{self.env.domain1}:{self.port}/'
            r = curl.http_get(url=check_url)
            if r.exit_code != 0:
                return True
            log.debug(f'waiting for caddy to stop responding: {r}')
            time.sleep(.1)
        log.debug(f"Server still responding after {timeout}")
        return False

    def wait_live(self, timeout: timedelta):
        """Poll until the server answers requests, or timeout."""
        curl = CurlClient(env=self.env, run_dir=self._tmp_dir)
        try_until = datetime.now() + timeout
        while datetime.now() < try_until:
            check_url = f'https://{self.env.domain1}:{self.port}/'
            r = curl.http_get(url=check_url)
            if r.exit_code == 0:
                return True
            time.sleep(.1)
        log.error(f"Caddy still not responding after {timeout}")
        return False

    def _rmf(self, path):
        # remove a file, if it exists
        if os.path.exists(path):
            return os.remove(path)

    def _mkpath(self, path):
        if not os.path.exists(path):
            return os.makedirs(path)

    def _write_config(self):
        """Generate docs/data.json and the Caddyfile for this instance."""
        domain1 = self.env.domain1
        creds1 = self.env.get_credentials(domain1)
        assert creds1  # convince pytype this isn't None
        domain2 = self.env.domain2
        creds2 = self.env.get_credentials(domain2)
        assert creds2  # convince pytype this isn't None
        self._mkpath(self._docs_dir)
        self._mkpath(self._tmp_dir)
        with open(os.path.join(self._docs_dir, 'data.json'), 'w') as fd:
            data = {
                'server': f'{domain1}',
            }
            fd.write(JSONEncoder().encode(data))
        with open(self._conf_file, 'w') as fd:
            conf = [  # base server config
                '{',
                f'  http_port {self._http_port}',
                f'  https_port {self._https_port}',
                '  log default {',
                '    level ERROR',
                '}',
                f'  servers :{self._https_port} {{',
                '    protocols h3 h2 h1',
                '  }',
                '}',
                f'{domain1}:{self._https_port} {{',
                '  file_server * {',
                f'    root {self._docs_dir}',
                '  }',
                f'  tls {creds1.cert_file} {creds1.pkey_file}',
                '}',
            ]
            if self.env.http_port > 0:
                conf.extend([
                    f'{domain2} {{',
                    f'  reverse_proxy /* http://localhost:{self.env.http_port} {{',
                    '  }',
                    f'  tls {creds2.cert_file} {creds2.pkey_file}',
                    '}',
                ])
            fd.write("\n".join(conf))
+569
View File
@@ -0,0 +1,569 @@
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
#***************************************************************************
# _ _ ____ _
# Project ___| | | | _ \| |
# / __| | | | |_) | |
# | (__| |_| | _ <| |___
# \___|\___/|_| \_\_____|
#
# Copyright (C) Daniel Stenberg, <daniel@haxx.se>, et al.
#
# This software is licensed as described in the file COPYING, which
# you should have received as part of this distribution. The terms
# are also available at https://curl.se/docs/copyright.html.
#
# You may opt to use, copy, modify, merge, publish, distribute and/or sell
# copies of the Software, and permit persons to whom the Software is
# furnished to do so, under the terms of the COPYING file.
#
# This software is distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY
# KIND, either express or implied.
#
# SPDX-License-Identifier: curl
#
###########################################################################
#
import base64
import ipaddress
import os
import re
from datetime import timedelta, datetime, timezone
from typing import List, Any, Optional
from cryptography import x509
from cryptography.hazmat.backends import default_backend
from cryptography.hazmat.primitives import hashes
from cryptography.hazmat.primitives._serialization import PublicFormat
from cryptography.hazmat.primitives.asymmetric import ec, rsa
from cryptography.hazmat.primitives.asymmetric.ec import EllipticCurvePrivateKey
from cryptography.hazmat.primitives.asymmetric.rsa import RSAPrivateKey
from cryptography.hazmat.primitives.serialization import Encoding, PrivateFormat, NoEncryption, load_pem_private_key
from cryptography.x509 import ExtendedKeyUsageOID, NameOID
# elliptic curves supported for test key generation, keyed by their
# upper-cased curve name (e.g. 'SECP256R1')
EC_SUPPORTED = {}
EC_SUPPORTED.update([(curve.name.upper(), curve) for curve in [
    ec.SECP192R1,
    ec.SECP224R1,
    ec.SECP256R1,
    ec.SECP384R1,
]])
def _private_key(key_type):
    """Generate a fresh private key.

    `key_type` may be an int (RSA key size), a string such as 'rsa2048'
    or a supported EC curve name (e.g. 'secp256r1'), or an EC curve
    object. Raises ValueError for an unrecognized string key type
    (previously this fell through into cryptography and produced an
    obscure error there).
    """
    if isinstance(key_type, str):
        key_type = key_type.upper()
        m = re.match(r'^(RSA)?(\d+)$', key_type)
        if m:
            # 'rsa2048' or plain '2048' -> RSA with that key size
            key_type = int(m.group(2))

    if isinstance(key_type, int):
        return rsa.generate_private_key(
            public_exponent=65537,
            key_size=key_type,
            backend=default_backend()
        )
    if not isinstance(key_type, ec.EllipticCurve) and key_type in EC_SUPPORTED:
        key_type = EC_SUPPORTED[key_type]
    elif isinstance(key_type, str):
        # neither an RSA size nor a known EC curve name: fail clearly
        raise ValueError(f'unsupported key type: {key_type}')
    return ec.generate_private_key(
        curve=key_type,
        backend=default_backend()
    )
class CertificateSpec:
    """Describes a certificate to issue: a server cert (has `domains`),
    a client cert (`client` is True) or a CA cert (only `name` set).
    `sub_specs` describe certificates to be issued by this one."""

    def __init__(self, name: Optional[str] = None,
                 domains: Optional[List[str]] = None,
                 email: Optional[str] = None,
                 key_type: Optional[str] = None,
                 single_file: bool = False,
                 valid_from: timedelta = timedelta(days=-1),
                 valid_to: timedelta = timedelta(days=89),
                 client: bool = False,
                 check_valid: bool = True,
                 sub_specs: Optional[List['CertificateSpec']] = None):
        self._name = name
        self.domains = domains
        self.email = email
        self.key_type = key_type
        self.client = client
        self.single_file = single_file
        # validity window, relative to creation time
        self.valid_from = valid_from
        self.valid_to = valid_to
        self.check_valid = check_valid
        self.sub_specs = sub_specs

    @property
    def name(self) -> Optional[str]:
        # explicit name wins, else the first domain names the cert
        if self._name:
            return self._name
        return self.domains[0] if self.domains else None

    @property
    def type(self) -> Optional[str]:
        # classification used when issuing: server > client > ca
        if self.domains:
            return "server"
        if self.client:
            return "client"
        if self.name:
            return "ca"
        return None
class Credentials:
    """A certificate and its private key, with optional issuer and the
    file paths it has been persisted to.

    When attached to a CertStore, it can also act as a CA and issue
    further certificates via issue_cert()/issue_certs().
    """

    def __init__(self,
                 name: str,
                 cert: Any,
                 pkey: Any,
                 issuer: Optional['Credentials'] = None):
        self._name = name
        self._cert = cert
        self._pkey = pkey
        self._issuer = issuer
        # set via set_files()/set_store() once persisted by a CertStore
        self._cert_file = None
        self._pkey_file = None
        self._store = None
        self._combined_file = None

    @property
    def name(self) -> str:
        return self._name

    @property
    def subject(self) -> x509.Name:
        return self._cert.subject

    @property
    def key_type(self):
        # e.g. 'rsa2048' for RSA keys, the curve name for EC keys
        if isinstance(self._pkey, RSAPrivateKey):
            return f"rsa{self._pkey.key_size}"
        elif isinstance(self._pkey, EllipticCurvePrivateKey):
            return f"{self._pkey.curve.name}"
        else:
            raise Exception(f"unknown key type: {self._pkey}")

    @property
    def private_key(self) -> Any:
        return self._pkey

    def pub_sha256_b64(self) -> Any:
        """Base64 of SHA-256 over the public key (SPKI DER), the form
        used for public key pinning."""
        pubkey = self._pkey.public_key()
        sha256 = hashes.Hash(algorithm=hashes.SHA256())
        sha256.update(pubkey.public_bytes(
            encoding=Encoding.DER,
            format=PublicFormat.SubjectPublicKeyInfo
        ))
        return base64.b64encode(sha256.finalize()).decode('utf8')

    @property
    def certificate(self) -> Any:
        return self._cert

    @property
    def cert_pem(self) -> bytes:
        return self._cert.public_bytes(Encoding.PEM)

    @property
    def pkey_pem(self) -> bytes:
        # RSA keys serialize in traditional format, EC keys as PKCS#8
        return self._pkey.private_bytes(
            Encoding.PEM,
            PrivateFormat.TraditionalOpenSSL if self.key_type.startswith('rsa') else PrivateFormat.PKCS8,
            NoEncryption())

    @property
    def issuer(self) -> Optional['Credentials']:
        return self._issuer

    def set_store(self, store: 'CertStore'):
        self._store = store

    def set_files(self, cert_file: str, pkey_file: Optional[str] = None,
                  combined_file: Optional[str] = None):
        self._cert_file = cert_file
        self._pkey_file = pkey_file
        self._combined_file = combined_file

    @property
    def cert_file(self) -> str:
        return self._cert_file

    @property
    def pkey_file(self) -> Optional[str]:
        return self._pkey_file

    @property
    def combined_file(self) -> Optional[str]:
        return self._combined_file

    def get_first(self, name) -> Optional['Credentials']:
        """First credentials registered under `name`, or None."""
        creds = self._store.get_credentials_for_name(name) if self._store else []
        return creds[0] if len(creds) else None

    def get_credentials_for_name(self, name) -> List['Credentials']:
        return self._store.get_credentials_for_name(name) if self._store else []

    def issue_certs(self, specs: List[CertificateSpec],
                    chain: Optional[List['Credentials']] = None) -> List['Credentials']:
        return [self.issue_cert(spec=spec, chain=chain) for spec in specs]

    def issue_cert(self, spec: CertificateSpec,
                   chain: Optional[List['Credentials']] = None) -> 'Credentials':
        """Issue (or load from the store) a certificate for `spec`,
        signed by these credentials; recurses into `spec.sub_specs`."""
        key_type = spec.key_type if spec.key_type else self.key_type
        creds = None
        if self._store:
            # reuse previously created credentials, if present and valid
            creds = self._store.load_credentials(
                name=spec.name, key_type=key_type, single_file=spec.single_file,
                issuer=self, check_valid=spec.check_valid)
        if creds is None:
            creds = TestCA.create_credentials(spec=spec, issuer=self, key_type=key_type,
                                              valid_from=spec.valid_from, valid_to=spec.valid_to)
            if self._store:
                self._store.save(creds, single_file=spec.single_file)
                if spec.type == "ca":
                    self._store.save_chain(creds, "ca", with_root=True)
        if spec.sub_specs:
            if self._store:
                # sub credentials get their own store directory
                sub_store = CertStore(fpath=os.path.join(self._store.path, creds.name))
                creds.set_store(sub_store)
            subchain = chain.copy() if chain else []
            subchain.append(self)
            creds.issue_certs(spec.sub_specs, chain=subchain)
        return creds
class CertStore:
    """Persists credentials (certs, keys, chains) below a directory and
    caches loaded/saved credentials by name."""

    def __init__(self, fpath: str):
        self._store_dir = fpath
        if not os.path.exists(self._store_dir):
            os.makedirs(self._store_dir)
        # name -> list of Credentials registered under that name
        self._creds_by_name = {}

    @property
    def path(self) -> str:
        return self._store_dir

    def save(self, creds: Credentials, name: Optional[str] = None,
             chain: Optional[List[Credentials]] = None,
             single_file: bool = False) -> None:
        """Write cert, key and combined PEM files for `creds`.

        With `single_file`, the private key is appended to the cert
        file instead of getting its own file. `chain` certs are
        appended to the cert and combined files.
        """
        name = name if name is not None else creds.name
        cert_file = self.get_cert_file(name=name, key_type=creds.key_type)
        pkey_file = self.get_pkey_file(name=name, key_type=creds.key_type)
        comb_file = self.get_combined_file(name=name, key_type=creds.key_type)
        if single_file:
            pkey_file = None
        with open(cert_file, "wb") as fd:
            fd.write(creds.cert_pem)
            if chain:
                for c in chain:
                    fd.write(c.cert_pem)
            if pkey_file is None:
                # single-file mode: the key lives in the cert file
                fd.write(creds.pkey_pem)
        if pkey_file is not None:
            with open(pkey_file, "wb") as fd:
                fd.write(creds.pkey_pem)
        # the combined cert+key file is always written
        with open(comb_file, "wb") as fd:
            fd.write(creds.cert_pem)
            if chain:
                for c in chain:
                    fd.write(c.cert_pem)
            fd.write(creds.pkey_pem)
        creds.set_files(cert_file, pkey_file, comb_file)
        self._add_credentials(name, creds)

    def save_chain(self, creds: Credentials, infix: str, with_root=False):
        """Write '<name>-<infix>.pem' containing the issuer chain of
        `creds`, optionally including the root."""
        name = creds.name
        chain = [creds]
        while creds.issuer is not None:
            creds = creds.issuer
            chain.append(creds)
        if not with_root and len(chain) > 1:
            # drop the root, which sits at the end of the chain
            chain = chain[:-1]
        chain_file = os.path.join(self._store_dir, f'{name}-{infix}.pem')
        with open(chain_file, "wb") as fd:
            for c in chain:
                fd.write(c.cert_pem)

    def _add_credentials(self, name: str, creds: Credentials):
        # register in the by-name cache
        if name not in self._creds_by_name:
            self._creds_by_name[name] = []
        self._creds_by_name[name].append(creds)

    def get_credentials_for_name(self, name) -> List[Credentials]:
        return self._creds_by_name[name] if name in self._creds_by_name else []

    def get_cert_file(self, name: str, key_type=None) -> str:
        key_infix = ".{0}".format(key_type) if key_type is not None else ""
        return os.path.join(self._store_dir, f'{name}{key_infix}.cert.pem')

    def get_pkey_file(self, name: str, key_type=None) -> str:
        key_infix = ".{0}".format(key_type) if key_type is not None else ""
        return os.path.join(self._store_dir, f'{name}{key_infix}.pkey.pem')

    def get_combined_file(self, name: str, key_type=None) -> str:
        # key_type accepted for signature symmetry with the other
        # get_*_file methods; the combined file carries no key infix
        return os.path.join(self._store_dir, f'{name}.pem')

    def load_pem_cert(self, fpath: str) -> x509.Certificate:
        with open(fpath) as fd:
            return x509.load_pem_x509_certificate("".join(fd.readlines()).encode())

    def load_pem_pkey(self, fpath: str):
        # test keys are stored unencrypted
        with open(fpath) as fd:
            return load_pem_private_key("".join(fd.readlines()).encode(), password=None)

    def load_credentials(self, name: str, key_type=None,
                         single_file: bool = False,
                         issuer: Optional[Credentials] = None,
                         check_valid: bool = False):
        """Load previously saved credentials; returns None when absent
        or (with `check_valid`) outside their validity window."""
        cert_file = self.get_cert_file(name=name, key_type=key_type)
        pkey_file = cert_file if single_file else self.get_pkey_file(name=name, key_type=key_type)
        comb_file = self.get_combined_file(name=name, key_type=key_type)
        if os.path.isfile(cert_file) and os.path.isfile(pkey_file):
            cert = self.load_pem_cert(cert_file)
            pkey = self.load_pem_pkey(pkey_file)
            try:
                # newer cryptography provides tz-aware validity accessors
                now = datetime.now(tz=timezone.utc)
                if check_valid and \
                        ((cert.not_valid_after_utc < now) or
                         (cert.not_valid_before_utc > now)):
                    return None
            except AttributeError:  # older python
                now = datetime.now()
                if check_valid and \
                        ((cert.not_valid_after < now) or
                         (cert.not_valid_before > now)):
                    return None
            creds = Credentials(name=name, cert=cert, pkey=pkey, issuer=issuer)
            creds.set_store(self)
            creds.set_files(cert_file, pkey_file, comb_file)
            self._add_credentials(name, creds)
            return creds
        return None
class TestCA:
    """Creates a test certificate authority and issues server, client
    and intermediate CA certificates from it."""

    @classmethod
    def create_root(cls, name: str, store_dir: str, key_type: str = "rsa2048") -> Credentials:
        """Load the root CA credentials from `store_dir`, creating and
        persisting them if not present."""
        store = CertStore(fpath=store_dir)
        creds = store.load_credentials(name="ca", key_type=key_type, issuer=None)
        if creds is None:
            creds = TestCA._make_ca_credentials(name=name, key_type=key_type)
            store.save(creds, name="ca")
            creds.set_store(store)
        return creds

    @staticmethod
    def create_credentials(spec: CertificateSpec, issuer: Credentials, key_type: Any,
                           valid_from: timedelta = timedelta(days=-1),
                           valid_to: timedelta = timedelta(days=89),
                           ) -> Credentials:
        """
        Create a certificate signed by this CA for the given domains.
        :returns: the certificate and private key PEM file paths
        """
        if spec.domains and len(spec.domains):
            creds = TestCA._make_server_credentials(name=spec.name, domains=spec.domains,
                                                    issuer=issuer, valid_from=valid_from,
                                                    valid_to=valid_to, key_type=key_type)
        elif spec.client:
            creds = TestCA._make_client_credentials(name=spec.name, issuer=issuer,
                                                    email=spec.email, valid_from=valid_from,
                                                    valid_to=valid_to, key_type=key_type)
        elif spec.name:
            creds = TestCA._make_ca_credentials(name=spec.name, issuer=issuer,
                                                valid_from=valid_from, valid_to=valid_to,
                                                key_type=key_type)
        else:
            raise Exception(f"unrecognized certificate specification: {spec}")
        return creds

    @staticmethod
    def _make_x509_name(org_name: Optional[str] = None, common_name: Optional[str] = None,
                        parent: Optional[x509.Name] = None) -> x509.Name:
        """Build an X.509 name; a parent's attributes are appended so
        sub-CAs appear as organizational units of their parent."""
        name_pieces = []
        if org_name:
            oid = NameOID.ORGANIZATIONAL_UNIT_NAME if parent else NameOID.ORGANIZATION_NAME
            name_pieces.append(x509.NameAttribute(oid, org_name))
        elif common_name:
            name_pieces.append(x509.NameAttribute(NameOID.COMMON_NAME, common_name))
        if parent:
            name_pieces.extend(list(parent))
        return x509.Name(name_pieces)

    @staticmethod
    def _make_csr(
            subject: x509.Name,
            pkey: Any,
            issuer_subject: Optional[Credentials],
            valid_from_delta: Optional[timedelta] = None,
            valid_until_delta: Optional[timedelta] = None
    ) -> x509.CertificateBuilder:
        """Create a certificate builder with subject, issuer, validity
        window and subject key identifier applied."""
        pubkey = pkey.public_key()
        # self-signed if no issuer subject was given
        issuer_subject = issuer_subject if issuer_subject is not None else subject
        valid_from = datetime.now()
        # fix: this tested `valid_until_delta`, which raised a TypeError
        # when only the until-delta was given and ignored a lone from-delta
        if valid_from_delta is not None:
            valid_from += valid_from_delta
        valid_until = datetime.now()
        if valid_until_delta is not None:
            valid_until += valid_until_delta
        return (
            x509.CertificateBuilder()
            .subject_name(subject)
            .issuer_name(issuer_subject)
            .public_key(pubkey)
            .not_valid_before(valid_from)
            .not_valid_after(valid_until)
            .serial_number(x509.random_serial_number())
            .add_extension(
                x509.SubjectKeyIdentifier.from_public_key(pubkey),
                critical=False,
            )
        )

    @staticmethod
    def _add_ca_usages(csr: Any) -> Any:
        """Add the extensions appropriate for a CA certificate."""
        return csr.add_extension(
            x509.BasicConstraints(ca=True, path_length=9),
            critical=True,
        ).add_extension(
            x509.KeyUsage(
                digital_signature=True,
                content_commitment=False,
                key_encipherment=False,
                data_encipherment=False,
                key_agreement=False,
                key_cert_sign=True,
                crl_sign=True,
                encipher_only=False,
                decipher_only=False),
            critical=True
        ).add_extension(
            x509.ExtendedKeyUsage([
                ExtendedKeyUsageOID.CLIENT_AUTH,
                ExtendedKeyUsageOID.SERVER_AUTH,
                ExtendedKeyUsageOID.CODE_SIGNING,
            ]),
            critical=True
        )

    @staticmethod
    def _add_leaf_usages(csr: Any, domains: List[str], issuer: Credentials) -> Any:
        """Add the extensions for a server leaf certificate; `domains`
        entries may be 'dns:<name>', IP addresses or plain DNS names."""
        names = []
        for name in domains:
            m = re.match(r'dns:(.+)', name)
            if m:
                names.append(x509.DNSName(m.group(1)))
            else:
                try:
                    names.append(x509.IPAddress(ipaddress.ip_address(name)))
                # TODO: specify specific exceptions here
                except:  # noqa: E722
                    names.append(x509.DNSName(name))
        return csr.add_extension(
            x509.BasicConstraints(ca=False, path_length=None),
            critical=True,
        ).add_extension(
            x509.AuthorityKeyIdentifier.from_issuer_subject_key_identifier(
                issuer.certificate.extensions.get_extension_for_class(
                    x509.SubjectKeyIdentifier).value),
            critical=False
        ).add_extension(
            x509.SubjectAlternativeName(names), critical=True,
        ).add_extension(
            x509.ExtendedKeyUsage([
                ExtendedKeyUsageOID.SERVER_AUTH,
            ]),
            critical=False
        )

    @staticmethod
    def _add_client_usages(csr: Any, issuer: Credentials, rfc82name: Optional[str] = None) -> Any:
        """Add the extensions for a client leaf certificate, with an
        optional RFC 822 (email) subject alternative name."""
        cert = csr.add_extension(
            x509.BasicConstraints(ca=False, path_length=None),
            critical=True,
        ).add_extension(
            x509.AuthorityKeyIdentifier.from_issuer_subject_key_identifier(
                issuer.certificate.extensions.get_extension_for_class(
                    x509.SubjectKeyIdentifier).value),
            critical=False
        )
        # fix: CertificateBuilder is immutable -- add_extension() returns a
        # new builder, so its result must be re-assigned or the SAN and
        # ExtendedKeyUsage extensions are silently dropped
        if rfc82name:
            cert = cert.add_extension(
                x509.SubjectAlternativeName([x509.RFC822Name(rfc82name)]),
                critical=True,
            )
        cert = cert.add_extension(
            x509.ExtendedKeyUsage([
                ExtendedKeyUsageOID.CLIENT_AUTH,
            ]),
            critical=True
        )
        return cert

    @staticmethod
    def _make_ca_credentials(name, key_type: Any,
                             issuer: Optional[Credentials] = None,
                             valid_from: timedelta = timedelta(days=-1),
                             valid_to: timedelta = timedelta(days=89),
                             ) -> Credentials:
        """Create a (root or intermediate) CA certificate; self-signed
        when `issuer` is None."""
        pkey = _private_key(key_type=key_type)
        if issuer is not None:
            issuer_subject = issuer.certificate.subject
            issuer_key = issuer.private_key
        else:
            # root CA: self-signed with its own key
            issuer_subject = None
            issuer_key = pkey
        subject = TestCA._make_x509_name(org_name=name, parent=issuer.subject if issuer else None)
        csr = TestCA._make_csr(subject=subject,
                               issuer_subject=issuer_subject, pkey=pkey,
                               valid_from_delta=valid_from, valid_until_delta=valid_to)
        csr = TestCA._add_ca_usages(csr)
        cert = csr.sign(private_key=issuer_key,
                        algorithm=hashes.SHA256(),
                        backend=default_backend())
        return Credentials(name=name, cert=cert, pkey=pkey, issuer=issuer)

    @staticmethod
    def _make_server_credentials(name: str, domains: List[str], issuer: Credentials,
                                 key_type: Any,
                                 valid_from: timedelta = timedelta(days=-1),
                                 valid_to: timedelta = timedelta(days=89),
                                 ) -> Credentials:
        """Create a server certificate for `domains`, signed by `issuer`."""
        pkey = _private_key(key_type=key_type)
        subject = TestCA._make_x509_name(common_name=name, parent=issuer.subject)
        csr = TestCA._make_csr(subject=subject,
                               issuer_subject=issuer.certificate.subject, pkey=pkey,
                               valid_from_delta=valid_from, valid_until_delta=valid_to)
        csr = TestCA._add_leaf_usages(csr, domains=domains, issuer=issuer)
        cert = csr.sign(private_key=issuer.private_key,
                        algorithm=hashes.SHA256(),
                        backend=default_backend())
        return Credentials(name=name, cert=cert, pkey=pkey, issuer=issuer)

    @staticmethod
    def _make_client_credentials(name: str,
                                 issuer: Credentials, email: Optional[str],
                                 key_type: Any,
                                 valid_from: timedelta = timedelta(days=-1),
                                 valid_to: timedelta = timedelta(days=89),
                                 ) -> Credentials:
        """Create a client certificate, signed by `issuer`, with an
        optional email subject alternative name."""
        pkey = _private_key(key_type=key_type)
        subject = TestCA._make_x509_name(common_name=name, parent=issuer.subject)
        csr = TestCA._make_csr(subject=subject,
                               issuer_subject=issuer.certificate.subject, pkey=pkey,
                               valid_from_delta=valid_from, valid_until_delta=valid_to)
        csr = TestCA._add_client_usages(csr, issuer=issuer, rfc82name=email)
        cert = csr.sign(private_key=issuer.private_key,
                        algorithm=hashes.SHA256(),
                        backend=default_backend())
        return Credentials(name=name, cert=cert, pkey=pkey, issuer=issuer)
+121
View File
@@ -0,0 +1,121 @@
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
#***************************************************************************
# _ _ ____ _
# Project ___| | | | _ \| |
# / __| | | | |_) | |
# | (__| |_| | _ <| |___
# \___|\___/|_| \_\_____|
#
# Copyright (C) Daniel Stenberg, <daniel@haxx.se>, et al.
#
# This software is licensed as described in the file COPYING, which
# you should have received as part of this distribution. The terms
# are also available at https://curl.se/docs/copyright.html.
#
# You may opt to use, copy, modify, merge, publish, distribute and/or sell
# copies of the Software, and permit persons to whom the Software is
# furnished to do so, under the terms of the COPYING file.
#
# This software is distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY
# KIND, either express or implied.
#
# SPDX-License-Identifier: curl
#
###########################################################################
#
import logging
import os
import shutil
import subprocess
from datetime import datetime
from typing import Optional, Dict
from . import ExecResult
from .env import Env
log = logging.getLogger(__name__)
class LocalClient:
    """Runs a libtest client binary and captures its output.

    stdout/stderr of each run are written to files in the run
    directory and returned as part of the ExecResult.
    """

    def __init__(self, name: str, env: Env, run_dir: Optional[str] = None,
                 timeout: Optional[float] = None,
                 run_env: Optional[Dict[str, str]] = None):
        self.name = name
        # all libtest clients live in one multi-call executable,
        # selected by passing `name` as the first argument
        self.path = os.path.join(env.build_dir, 'tests/libtest/libtests')
        self.env = env
        self._run_env = run_env
        self._timeout = timeout if timeout else env.test_timeout
        # the CURL environment variable overrides the configured binary
        self._curl = os.environ['CURL'] if 'CURL' in os.environ else env.curl
        self._run_dir = run_dir if run_dir else os.path.join(env.gen_dir, name)
        self._stdoutfile = f'{self._run_dir}/stdout'
        self._stderrfile = f'{self._run_dir}/stderr'
        # start with a clean run directory
        self._rmrf(self._run_dir)
        self._mkpath(self._run_dir)

    @property
    def run_dir(self) -> str:
        return self._run_dir

    @property
    def stderr_file(self) -> str:
        return self._stderrfile

    def exists(self) -> bool:
        """True if the client binary has been built."""
        return os.path.exists(self.path)

    def download_file(self, i: int) -> str:
        """Path of the i-th download file written by the client."""
        return os.path.join(self._run_dir, f'download_{i}.data')

    def _rmf(self, path):
        # remove a file, if it exists
        if os.path.exists(path):
            return os.remove(path)

    def _rmrf(self, path):
        # remove a directory tree, if it exists
        if os.path.exists(path):
            return shutil.rmtree(path)

    def _mkpath(self, path):
        if not os.path.exists(path):
            return os.makedirs(path)

    def run(self, args):
        """Run the client with `args` and return an ExecResult.

        A timeout does not raise; it is reported via exit_code -1 and
        exception 'TimeoutExpired' in the result.
        """
        self._rmf(self._stdoutfile)
        self._rmf(self._stderrfile)
        start = datetime.now()
        exception = None
        myargs = [self.path, self.name]
        myargs.extend(args)
        run_env = None
        if self._run_env:
            run_env = self._run_env.copy()
            # pass selected variables from our environment through
            for key in ['CURL_DEBUG']:
                if key in os.environ and key not in run_env:
                    run_env[key] = os.environ[key]
        try:
            with open(self._stdoutfile, 'w') as cout, open(self._stderrfile, 'w') as cerr:
                p = subprocess.run(myargs, stderr=cerr, stdout=cout,
                                   cwd=self._run_dir, shell=False,
                                   input=None, env=run_env,
                                   timeout=self._timeout)
                exitcode = p.returncode
        except subprocess.TimeoutExpired:
            log.warning(f'Timeout after {self._timeout}s: {args}')
            exitcode = -1
            exception = 'TimeoutExpired'
        # read back the captured output; `with` closes the handles that
        # were previously being leaked here
        with open(self._stdoutfile) as fd:
            coutput = fd.readlines()
        with open(self._stderrfile) as fd:
            cerrput = fd.readlines()
        return ExecResult(args=myargs, exit_code=exitcode, exception=exception,
                          stdout=coutput, stderr=cerrput,
                          duration=datetime.now() - start)

    def dump_logs(self):
        """Return stdout+stderr of the last run as one string, for
        attaching to failure reports."""
        lines = []
        lines.append('>>--stdout ----------------------------------------------\n')
        with open(self._stdoutfile) as fd:
            lines.extend(fd.readlines())
        lines.append('>>--stderr ----------------------------------------------\n')
        with open(self._stderrfile) as fd:
            lines.extend(fd.readlines())
        lines.append('<<-------------------------------------------------------\n')
        return ''.join(lines)
File diff suppressed because it is too large Load Diff
+174
View File
@@ -0,0 +1,174 @@
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
#***************************************************************************
# _ _ ____ _
# Project ___| | | | _ \| |
# / __| | | | |_) | |
# | (__| |_| | _ <| |___
# \___|\___/|_| \_\_____|
#
# Copyright (C) Daniel Stenberg, <daniel@haxx.se>, et al.
#
# This software is licensed as described in the file COPYING, which
# you should have received as part of this distribution. The terms
# are also available at https://curl.se/docs/copyright.html.
#
# You may opt to use, copy, modify, merge, publish, distribute and/or sell
# copies of the Software, and permit persons to whom the Software is
# furnished to do so, under the terms of the COPYING file.
#
# This software is distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY
# KIND, either express or implied.
#
# SPDX-License-Identifier: curl
#
###########################################################################
#
import logging
import os
import socket
import subprocess
import time
from datetime import timedelta, datetime
from typing import Dict
from . import CurlClient
from .env import Env
from .ports import alloc_ports_and_do
log = logging.getLogger(__name__)
class Dante:
    """Controls a danted SOCKS proxy instance for the test suite:
    port allocation, config generation and process lifecycle."""

    def __init__(self, env: Env):
        self.env = env
        self._cmd = env.danted
        self._port = 0
        self.name = 'danted'
        self._port_skey = 'danted'
        # named ports to allocate; value is the socket type to probe with
        self._port_specs = {
            'danted': socket.SOCK_STREAM,
        }
        self._dante_dir = os.path.join(env.gen_dir, self.name)
        self._run_dir = os.path.join(self._dante_dir, 'run')
        self._tmp_dir = os.path.join(self._dante_dir, 'tmp')
        self._conf_file = os.path.join(self._dante_dir, 'test.conf')
        self._dante_log = os.path.join(self._dante_dir, 'dante.log')
        self._error_log = os.path.join(self._dante_dir, 'error.log')
        self._pid_file = os.path.join(self._dante_dir, 'dante.pid')
        self._process = None
        self.clear_logs()

    @property
    def port(self) -> int:
        # the SOCKS port clients should connect to
        return self._port

    def clear_logs(self):
        self._rmf(self._error_log)
        self._rmf(self._dante_log)

    def exists(self):
        """True if the danted binary is available."""
        return os.path.exists(self._cmd)

    def is_running(self):
        """True if the danted process is alive."""
        if self._process:
            self._process.poll()
            return self._process.returncode is None
        return False

    def start_if_needed(self):
        if not self.is_running():
            return self.start()
        return True

    def stop(self, wait_dead=True):
        """Terminate the proxy process, killing it if it will not die."""
        self._mkpath(self._tmp_dir)
        if self._process:
            self._process.terminate()
            try:
                self._process.wait(timeout=2)
            except subprocess.TimeoutExpired:
                # SIGTERM was ignored; do not let teardown raise, make
                # sure the process is gone (matches Caddy.stop())
                self._process.kill()
            self._process = None
            return not wait_dead or True
        return True

    def restart(self):
        self.stop()
        return self.start()

    def initial_start(self):
        """Allocate a port and start; retried with other ports on failure."""
        def startup(ports: Dict[str, int]) -> bool:
            self._port = ports[self._port_skey]
            if self.start():
                self.env.update_ports(ports)
                return True
            self.stop()
            self._port = 0
            return False
        return alloc_ports_and_do(self._port_specs, startup,
                                  self.env.gen_root, max_tries=3)

    def start(self, wait_live=True):
        """Write the config and start danted, then wait until it
        answers proxied requests."""
        assert self._port > 0
        self._mkpath(self._tmp_dir)
        if self._process:
            self.stop()
        self._write_config()
        args = [
            self._cmd,
            '-f', f'{self._conf_file}',
            '-p', f'{self._pid_file}',
            '-d', '0',
        ]
        # Popen duplicates the fd into the child process, so close our
        # copy right away instead of leaking one handle per start
        with open(self._error_log, 'a') as procerr:
            self._process = subprocess.Popen(args=args, stderr=procerr)
        if self._process.returncode is not None:
            return False
        return self.wait_live(timeout=timedelta(seconds=Env.SERVER_TIMEOUT))

    def wait_live(self, timeout: timedelta):
        """Poll an HTTP request through the proxy until it succeeds,
        or timeout."""
        curl = CurlClient(env=self.env, run_dir=self._tmp_dir,
                          timeout=timeout.total_seconds(), socks_args=[
                              '--socks5', f'127.0.0.1:{self._port}'
                          ])
        try_until = datetime.now() + timeout
        while datetime.now() < try_until:
            r = curl.http_get(url=f'http://{self.env.domain1}:{self.env.http_port}/')
            if r.exit_code == 0:
                return True
            time.sleep(.1)
        log.error(f"Server still not responding after {timeout}")
        return False

    def _rmf(self, path):
        # remove a file, if it exists
        if os.path.exists(path):
            return os.remove(path)

    def _mkpath(self, path):
        if not os.path.exists(path):
            return os.makedirs(path)

    def _write_config(self):
        """Write the danted configuration file."""
        conf = [
            f'errorlog: {self._error_log}',
            f'logoutput: {self._dante_log}',
            f'internal: 127.0.0.1 port = {self._port}',
            'external: 127.0.0.1',
            'clientmethod: none',
            'socksmethod: none',
            'client pass {',
            '  from: 127.0.0.0/24 to: 0.0.0.0/0',
            '  log: error',
            '}',
            'socks pass {',
            '  from: 0.0.0.0/0 to: 0.0.0.0/0',
            '  command: bindreply connect udpreply',
            '  log: error',
            '}',
            '\n',
        ]
        with open(self._conf_file, 'w') as fd:
            fd.write("\n".join(conf))
# ===========================================================================
# diff-viewer residue removed: start of the next added file (+774 lines)
# ===========================================================================
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
#***************************************************************************
# _ _ ____ _
# Project ___| | | | _ \| |
# / __| | | | |_) | |
# | (__| |_| | _ <| |___
# \___|\___/|_| \_\_____|
#
# Copyright (C) Daniel Stenberg, <daniel@haxx.se>, et al.
#
# This software is licensed as described in the file COPYING, which
# you should have received as part of this distribution. The terms
# are also available at https://curl.se/docs/copyright.html.
#
# You may opt to use, copy, modify, merge, publish, distribute and/or sell
# copies of the Software, and permit persons to whom the Software is
# furnished to do so, under the terms of the COPYING file.
#
# This software is distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY
# KIND, either express or implied.
#
# SPDX-License-Identifier: curl
#
###########################################################################
#
import gzip
import logging
import os
import re
import shutil
import subprocess
import tempfile
from configparser import ConfigParser, ExtendedInterpolation
from datetime import timedelta
from typing import Optional, Dict, List
import pytest
from filelock import FileLock
from .certs import CertificateSpec, Credentials, TestCA
log = logging.getLogger(__name__)
def init_config_from(conf_path):
    """Load an INI file into a ConfigParser, or return None if absent."""
    if not os.path.isfile(conf_path):
        return None
    parser = ConfigParser(interpolation=ExtendedInterpolation())
    parser.read(conf_path)
    return parser
# Resolve the relevant directories at import time. The build tree is
# first assumed to be the parent of the current working directory; if no
# config.ini is found there, fall back to the source/project tree.
TESTS_HTTPD_PATH = os.path.dirname(os.path.dirname(__file__))
PROJ_PATH = os.path.dirname(os.path.dirname(TESTS_HTTPD_PATH))
TOP_PATH = os.path.join(os.getcwd(), os.path.pardir)
CONFIG_PATH = os.path.join(TOP_PATH, 'tests', 'http', 'config.ini')
if not os.path.exists(CONFIG_PATH):
    # not in the build dir: try the project tree instead
    ALT_CONFIG_PATH = os.path.join(PROJ_PATH, 'tests', 'http', 'config.ini')
    if not os.path.exists(ALT_CONFIG_PATH):
        raise Exception(f'unable to find config.ini in {CONFIG_PATH} nor {ALT_CONFIG_PATH}')
    TOP_PATH = PROJ_PATH
    CONFIG_PATH = ALT_CONFIG_PATH
# parsed config.ini with paths to httpd/apxs/nghttpx/caddy/vsftpd/danted
DEF_CONFIG = init_config_from(CONFIG_PATH)
# the curl binary and its companion info tool from the build tree
CURL = os.path.join(TOP_PATH, 'src', 'curl')
CURLINFO = os.path.join(TOP_PATH, 'src', 'curlinfo')
class NghttpxUtil:
    """Probes an nghttpx binary for its version and caches the result."""

    # last binary path probed and the full version line it reported
    CMD = None
    VERSION_FULL = None

    @classmethod
    def version(cls, cmd):
        """Return the full `nghttpx ...` version line of binary `cmd`.

        Returns None when `cmd` is None. Raises RuntimeError when the
        binary fails to run or its output has no parsable version line.
        The result is cached per `cmd`.
        """
        if cmd is None:
            return None
        if cls.VERSION_FULL is None or cmd != cls.CMD:
            p = subprocess.run(args=[cmd, '--version'],
                               capture_output=True, text=True)
            if p.returncode != 0:
                raise RuntimeError(f'{cmd} --version failed with exit code: {p.returncode}')
            cls.CMD = cmd
            # Invalidate first: the original kept a previously cached
            # line around and returned it when a *different* binary's
            # output could not be parsed.
            cls.VERSION_FULL = None
            for line in p.stdout.splitlines(keepends=False):
                if line.startswith('nghttpx '):
                    cls.VERSION_FULL = line
                    break
            if cls.VERSION_FULL is None:
                raise RuntimeError(f'{cmd}: unable to determine version')
        return cls.VERSION_FULL

    @staticmethod
    def version_with_h3(version):
        """True when the version line mentions ngtcp2, i.e. h3 support."""
        return re.match(r'.* ngtcp2/\d+\.\d+\.\d+.*', version) is not None
class EnvConfig:
def __init__(self, pytestconfig: Optional[pytest.Config] = None,
testrun_uid=None,
worker_id=None):
self.pytestconfig = pytestconfig
self.testrun_uid = testrun_uid
self.worker_id = worker_id if worker_id is not None else 'master'
self.tests_dir = TESTS_HTTPD_PATH
self.gen_root = self.gen_dir = os.path.join(self.tests_dir, 'gen')
if self.worker_id != 'master':
self.gen_dir = os.path.join(self.gen_dir, self.worker_id)
self.project_dir = os.path.dirname(os.path.dirname(self.tests_dir))
self.build_dir = TOP_PATH
self.config = DEF_CONFIG
# check cur and its features
self.curl = CURL
self.curlinfo = CURLINFO
if 'CURL' in os.environ:
self.curl = os.environ['CURL']
self.curl_props = {
'version_string': '',
'version': '',
'os': '',
'fullname': '',
'features_string': '',
'features': set(),
'protocols_string': '',
'protocols': set(),
'libs': set(),
'lib_versions': set(),
}
self.curl_is_debug = False
self.curl_protos = []
p = subprocess.run(args=[self.curl, '-V'],
capture_output=True, text=True)
if p.returncode != 0:
raise RuntimeError(f'{self.curl} -V failed with exit code: {p.returncode}')
if p.stderr.startswith('WARNING:'):
self.curl_is_debug = True
for line in p.stdout.splitlines(keepends=False):
if line.startswith('curl '):
self.curl_props['version_string'] = line
m = re.match(r'^curl (?P<version>\S+) (?P<os>\S+) (?P<libs>.*)$', line)
if m:
self.curl_props['fullname'] = m.group(0)
self.curl_props['version'] = m.group('version')
self.curl_props['os'] = m.group('os')
self.curl_props['lib_versions'] = {
lib.lower() for lib in m.group('libs').split(' ')
}
self.curl_props['libs'] = {
re.sub(r'/[a-z0-9.-]*', '', lib) for lib in self.curl_props['lib_versions']
}
if line.startswith('Features: '):
self.curl_props['features_string'] = line[10:]
self.curl_props['features'] = {
feat.lower() for feat in line[10:].split(' ')
}
if line.startswith('Protocols: '):
self.curl_props['protocols_string'] = line[11:]
self.curl_props['protocols'] = {
prot.lower() for prot in line[11:].split(' ')
}
p = subprocess.run(args=[self.curlinfo],
capture_output=True, text=True)
if p.returncode != 0:
raise RuntimeError(f'{self.curlinfo} failed with exit code: {p.returncode}')
self.curl_is_verbose = 'verbose-strings: ON' in p.stdout
self.ports = {}
self.httpd = self.config['httpd']['httpd']
self.apxs = self.config['httpd']['apxs']
if len(self.apxs) == 0:
self.apxs = None
self._httpd_version = None
self.examples_pem = {
'key': 'xxx',
'cert': 'xxx',
}
self.htdocs_dir = os.path.join(self.gen_dir, 'htdocs')
self.tld = 'http.curl.se'
self.domain1 = f"one.{self.tld}"
self.domain1brotli = f"brotli.one.{self.tld}"
self.domain2 = f"two.{self.tld}"
self.ftp_domain = f"ftp.{self.tld}"
self.proxy_domain = f"proxy.{self.tld}"
self.expired_domain = f"expired.{self.tld}"
self.cert_specs = [
CertificateSpec(domains=[self.domain1, self.domain1brotli, 'localhost', '127.0.0.1'], key_type='rsa2048'),
CertificateSpec(name='domain1-no-ip', domains=[self.domain1, self.domain1brotli], key_type='rsa2048'),
CertificateSpec(name='domain1-very-bad', domains=[self.domain1, 'dns:127.0.0.1'], key_type='rsa2048'),
CertificateSpec(domains=[self.domain2], key_type='rsa2048'),
CertificateSpec(domains=[self.ftp_domain], key_type='rsa2048'),
CertificateSpec(domains=[self.proxy_domain, '127.0.0.1'], key_type='rsa2048'),
CertificateSpec(domains=[self.expired_domain], key_type='rsa2048',
valid_from=timedelta(days=-100), valid_to=timedelta(days=-10)),
CertificateSpec(name="clientsX", sub_specs=[
CertificateSpec(name="user1", client=True),
]),
]
self.nghttpx = self.config['nghttpx']['nghttpx']
if len(self.nghttpx.strip()) == 0:
self.nghttpx = None
self._nghttpx_version = None
self.nghttpx_with_h3 = False
if self.nghttpx is not None:
try:
self._nghttpx_version = NghttpxUtil.version(self.nghttpx)
self.nghttpx_with_h3 = NghttpxUtil.version_with_h3(self._nghttpx_version)
except RuntimeError:
# not a working nghttpx
log.exception('checking nghttpx version')
self.nghttpx = None
self.caddy = self.config['caddy']['caddy']
self._caddy_version = None
if len(self.caddy.strip()) == 0:
self.caddy = None
if self.caddy is not None:
try:
p = subprocess.run(args=[self.caddy, 'version'],
capture_output=True, text=True)
if p.returncode != 0:
# not a working caddy
self.caddy = None
m = re.match(r'v?(\d+\.\d+\.\d+).*', p.stdout)
if m:
self._caddy_version = m.group(1)
else:
raise RuntimeError(f'Unable to determine cadd version from: {p.stdout}')
# TODO: specify specific exceptions here
except: # noqa: E722
self.caddy = None
self.vsftpd = self.config['vsftpd']['vsftpd']
if self.vsftpd == '':
self.vsftpd = None
self._vsftpd_version = None
if self.vsftpd is not None:
try:
with tempfile.TemporaryFile('w+') as tmp:
p = subprocess.run(args=[self.vsftpd, '-v'],
capture_output=True, text=True, stdin=tmp)
if p.returncode != 0:
# not a working vsftpd
self.vsftpd = None
if p.stderr:
ver_text = p.stderr
else:
# Oddly, some versions of vsftpd write to stdin (!)
# instead of stderr, which is odd but works. If there
# is nothing on stderr, read the file on stdin and use
# any data there instead.
tmp.seek(0)
ver_text = tmp.read()
m = re.match(r'vsftpd: version (\d+\.\d+\.\d+)', ver_text)
if m:
self._vsftpd_version = m.group(1)
elif len(p.stderr) == 0:
# vsftp does not use stdout or stderr for printing its version... -.-
self._vsftpd_version = 'unknown'
else:
raise Exception(f'Unable to determine VsFTPD version from: {p.stderr}')
except Exception:
self.vsftpd = None
self.danted = self.config['danted']['danted']
if self.danted == '':
self.danted = None
self._danted_version = None
if self.danted is not None:
try:
p = subprocess.run(args=[self.danted, '-v'],
capture_output=True, text=True)
assert p.returncode == 0
if p.returncode != 0:
# not a working vsftpd
self.danted = None
m = re.match(r'^Dante v(\d+\.\d+\.\d+).*', p.stdout)
if not m:
m = re.match(r'^Dante v(\d+\.\d+\.\d+).*', p.stderr)
if m:
self._danted_version = m.group(1)
else:
self.danted = None
raise Exception(f'Unable to determine danted version from: {p.stderr}')
except Exception:
self.danted = None
self._tcpdump = shutil.which('tcpdump')
@property
def httpd_version(self):
if self._httpd_version is None and self.apxs is not None:
try:
p = subprocess.run(args=[self.apxs, '-q', 'HTTPD_VERSION'],
capture_output=True, text=True)
if p.returncode != 0:
log.error(f'{self.apxs} failed to query HTTPD_VERSION: {p}')
else:
self._httpd_version = p.stdout.strip()
except Exception:
log.exception(f'{self.apxs} failed to run')
return self._httpd_version
def versiontuple(self, v):
v = re.sub(r'(\d+\.\d+(\.\d+)?)(-\S+)?', r'\1', v)
return tuple(map(int, v.split('.')))
def httpd_is_at_least(self, minv):
if self.httpd_version is None:
return False
hv = self.versiontuple(self.httpd_version)
return hv >= self.versiontuple(minv)
def caddy_is_at_least(self, minv):
if self.caddy_version is None:
return False
hv = self.versiontuple(self.caddy_version)
return hv >= self.versiontuple(minv)
def is_complete(self) -> bool:
return os.path.isfile(self.httpd) and \
self.apxs is not None and \
os.path.isfile(self.apxs)
def get_incomplete_reason(self) -> Optional[str]:
if self.httpd is None or len(self.httpd.strip()) == 0:
return 'httpd not configured, see `--with-test-httpd=<path>`'
if not os.path.isfile(self.httpd):
return f'httpd ({self.httpd}) not found'
if self.apxs is None:
return "command apxs not found (commonly provided in apache2-dev)"
if not os.path.isfile(self.apxs):
return f"apxs ({self.apxs}) not found"
return None
@property
def nghttpx_version(self):
return self._nghttpx_version
@property
def caddy_version(self):
return self._caddy_version
@property
def vsftpd_version(self):
return self._vsftpd_version
@property
def tcpdmp(self) -> Optional[str]:
return self._tcpdump
def clear_locks(self):
ca_lock = os.path.join(self.gen_root, 'ca/ca.lock')
if os.path.exists(ca_lock):
os.remove(ca_lock)
class Env:
    """Facade over the probed test environment (`EnvConfig`).

    Class-level helpers answer capability questions ("does curl have
    h3?", "is caddy available?"); instances add per-testrun state such
    as the issued CA and timeouts. All static methods delegate to the
    shared `Env.CONFIG` probe.
    """

    SERVER_TIMEOUT = 30 # seconds to wait for server to come up/reload

    # the single, shared environment probe; replaceable via __init__
    CONFIG = EnvConfig()

    @staticmethod
    def setup_incomplete() -> bool:
        """True when mandatory tooling (httpd/apxs) is missing."""
        return not Env.CONFIG.is_complete()

    @staticmethod
    def incomplete_reason() -> Optional[str]:
        return Env.CONFIG.get_incomplete_reason()

    @staticmethod
    def have_nghttpx() -> bool:
        return Env.CONFIG.nghttpx is not None

    @staticmethod
    def have_h3_server() -> bool:
        return Env.CONFIG.nghttpx_with_h3

    @staticmethod
    def have_ssl_curl() -> bool:
        return Env.curl_has_feature('ssl') or Env.curl_has_feature('multissl')

    @staticmethod
    def have_h2_curl() -> bool:
        return 'http2' in Env.CONFIG.curl_props['features']

    @staticmethod
    def have_h3_curl() -> bool:
        return 'http3' in Env.CONFIG.curl_props['features']

    @staticmethod
    def curl_uses_lib(libname: str) -> bool:
        return libname.lower() in Env.CONFIG.curl_props['libs']

    @staticmethod
    def curl_uses_any_libs(libs: List[str]) -> bool:
        for libname in libs:
            if libname.lower() in Env.CONFIG.curl_props['libs']:
                return True
        return False

    @staticmethod
    def curl_uses_ossl_quic() -> bool:
        # h3 without ngtcp2 but with nghttp3 implies OpenSSL's own QUIC
        if Env.have_h3_curl():
            return not Env.curl_uses_lib('ngtcp2') and Env.curl_uses_lib('nghttp3')
        return False

    @staticmethod
    def curl_version_string() -> str:
        return Env.CONFIG.curl_props['version_string']

    @staticmethod
    def curl_features_string() -> str:
        return Env.CONFIG.curl_props['features_string']

    @staticmethod
    def curl_has_feature(feature: str) -> bool:
        return feature.lower() in Env.CONFIG.curl_props['features']

    @staticmethod
    def curl_protocols_string() -> str:
        return Env.CONFIG.curl_props['protocols_string']

    @staticmethod
    def curl_has_protocol(protocol: str) -> bool:
        return protocol.lower() in Env.CONFIG.curl_props['protocols']

    @staticmethod
    def curl_lib_version(libname: str) -> str:
        """Version of a lib curl was built with, or 'unknown'."""
        prefix = f'{libname.lower()}/'
        for lversion in Env.CONFIG.curl_props['lib_versions']:
            if lversion.startswith(prefix):
                return lversion[len(prefix):]
        return 'unknown'

    @staticmethod
    def curl_lib_version_at_least(libname: str, min_version) -> bool:
        lversion = Env.curl_lib_version(libname)
        if lversion != 'unknown':
            return Env.CONFIG.versiontuple(min_version) <= \
                Env.CONFIG.versiontuple(lversion)
        return False

    @staticmethod
    def curl_lib_version_before(libname: str, lib_version) -> bool:
        lversion = Env.curl_lib_version(libname)
        if lversion != 'unknown':
            # strip any suffix after x.y.z before comparing
            if m := re.match(r'(\d+\.\d+\.\d+).*', lversion):
                lversion = m.group(1)
            return Env.CONFIG.versiontuple(lib_version) > \
                Env.CONFIG.versiontuple(lversion)
        return False

    @staticmethod
    def curl_os() -> str:
        return Env.CONFIG.curl_props['os']

    @staticmethod
    def curl_fullname() -> str:
        return Env.CONFIG.curl_props['fullname']

    @staticmethod
    def curl_version() -> str:
        return Env.CONFIG.curl_props['version']

    @staticmethod
    def curl_is_debug() -> bool:
        return Env.CONFIG.curl_is_debug

    @staticmethod
    def curl_is_verbose() -> bool:
        return Env.CONFIG.curl_is_verbose

    @staticmethod
    def curl_can_early_data() -> bool:
        # TLS early data support depends on the TLS backend (and version)
        if Env.curl_uses_lib('gnutls'):
            return Env.curl_lib_version_at_least('gnutls', '3.6.13')
        return Env.curl_uses_any_libs(['wolfssl', 'quictls', 'openssl'])

    @staticmethod
    def curl_can_h3_early_data() -> bool:
        return Env.curl_can_early_data() and \
            Env.curl_uses_lib('ngtcp2')

    @staticmethod
    def have_h3() -> bool:
        return Env.have_h3_curl() and Env.have_h3_server()

    @staticmethod
    def httpd_version() -> str:
        return Env.CONFIG.httpd_version

    @staticmethod
    def nghttpx_version() -> str:
        return Env.CONFIG.nghttpx_version

    @staticmethod
    def caddy_version() -> str:
        return Env.CONFIG.caddy_version

    @staticmethod
    def caddy_is_at_least(minv) -> bool:
        return Env.CONFIG.caddy_is_at_least(minv)

    @staticmethod
    def httpd_is_at_least(minv) -> bool:
        return Env.CONFIG.httpd_is_at_least(minv)

    @staticmethod
    def has_caddy() -> bool:
        return Env.CONFIG.caddy is not None

    @staticmethod
    def has_vsftpd() -> bool:
        return Env.CONFIG.vsftpd is not None

    @staticmethod
    def vsftpd_version() -> str:
        return Env.CONFIG.vsftpd_version

    @staticmethod
    def has_danted() -> bool:
        return Env.CONFIG.danted is not None

    @staticmethod
    def tcpdump() -> Optional[str]:
        return Env.CONFIG.tcpdmp

    def __init__(self, pytestconfig=None, env_config=None):
        # an explicitly passed env_config replaces the shared probe
        if env_config:
            Env.CONFIG = env_config
        self._verbose = pytestconfig.option.verbose \
            if pytestconfig is not None else 0
        self._ca = None
        self._test_timeout = 300.0 if self._verbose > 1 else 60.0 # seconds

    def issue_certs(self):
        """Create the test CA and all configured certs (once).

        The file lock serializes parallel (xdist) workers sharing the
        same CA directory.
        """
        if self._ca is None:
            ca_dir = os.path.join(self.CONFIG.gen_root, 'ca')
            os.makedirs(ca_dir, exist_ok=True)
            lock_file = os.path.join(ca_dir, 'ca.lock')
            with FileLock(lock_file):
                self._ca = TestCA.create_root(name=self.CONFIG.tld,
                                              store_dir=ca_dir,
                                              key_type="rsa2048")
                self._ca.issue_certs(self.CONFIG.cert_specs)

    def setup(self):
        """Create generation directories and issue certificates."""
        os.makedirs(self.gen_dir, exist_ok=True)
        os.makedirs(self.htdocs_dir, exist_ok=True)
        self.issue_certs()

    def get_credentials(self, domain) -> Optional[Credentials]:
        """First issued credentials matching `domain`, or None."""
        creds = self.ca.get_credentials_for_name(domain)
        if len(creds) > 0:
            return creds[0]
        return None

    @property
    def verbose(self) -> int:
        return self._verbose

    @property
    def test_timeout(self) -> Optional[float]:
        return self._test_timeout

    @test_timeout.setter
    def test_timeout(self, val: Optional[float]):
        self._test_timeout = val

    @property
    def gen_dir(self) -> str:
        return self.CONFIG.gen_dir

    @property
    def gen_root(self) -> str:
        return self.CONFIG.gen_root

    @property
    def project_dir(self) -> str:
        return self.CONFIG.project_dir

    @property
    def build_dir(self) -> str:
        return self.CONFIG.build_dir

    @property
    def ca(self):
        return self._ca

    @property
    def htdocs_dir(self) -> str:
        return self.CONFIG.htdocs_dir

    @property
    def tld(self) -> str:
        return self.CONFIG.tld

    @property
    def domain1(self) -> str:
        return self.CONFIG.domain1

    @property
    def domain1brotli(self) -> str:
        return self.CONFIG.domain1brotli

    @property
    def domain2(self) -> str:
        return self.CONFIG.domain2

    @property
    def ftp_domain(self) -> str:
        return self.CONFIG.ftp_domain

    @property
    def proxy_domain(self) -> str:
        return self.CONFIG.proxy_domain

    @property
    def expired_domain(self) -> str:
        return self.CONFIG.expired_domain

    @property
    def ports(self) -> Dict[str, int]:
        return self.CONFIG.ports

    def update_ports(self, ports: Dict[str, int]):
        """Merge newly allocated server ports into the shared config."""
        self.CONFIG.ports.update(ports)

    @property
    def http_port(self) -> int:
        # 0 when httpd has not been started yet
        return self.CONFIG.ports.get('http', 0)

    @property
    def https_port(self) -> int:
        return self.CONFIG.ports['https']

    @property
    def https_only_tcp_port(self) -> int:
        return self.CONFIG.ports['https-tcp-only']

    @property
    def nghttpx_https_port(self) -> int:
        return self.CONFIG.ports['nghttpx_https']

    @property
    def h3_port(self) -> int:
        # h3 is served on the same port number as https (UDP vs TCP)
        return self.https_port

    @property
    def proxy_port(self) -> int:
        return self.CONFIG.ports['proxy']

    @property
    def proxys_port(self) -> int:
        return self.CONFIG.ports['proxys']

    @property
    def ftp_port(self) -> int:
        return self.CONFIG.ports['ftp']

    @property
    def ftps_port(self) -> int:
        return self.CONFIG.ports['ftps']

    @property
    def h2proxys_port(self) -> int:
        return self.CONFIG.ports['h2proxys']

    def pts_port(self, proto: str = 'http/1.1') -> int:
        # proxy tunnel port
        return self.CONFIG.ports['h2proxys' if proto == 'h2' else 'proxys']

    @property
    def caddy(self) -> str:
        return self.CONFIG.caddy

    @property
    def caddy_https_port(self) -> int:
        return self.CONFIG.ports['caddys']

    @property
    def caddy_http_port(self) -> int:
        return self.CONFIG.ports['caddy']

    @property
    def danted(self) -> str:
        return self.CONFIG.danted

    @property
    def vsftpd(self) -> str:
        return self.CONFIG.vsftpd

    @property
    def ws_port(self) -> int:
        return self.CONFIG.ports['ws']

    @property
    def curl(self) -> str:
        return self.CONFIG.curl

    @property
    def httpd(self) -> str:
        return self.CONFIG.httpd

    @property
    def apxs(self) -> str:
        return self.CONFIG.apxs

    @property
    def nghttpx(self) -> Optional[str]:
        return self.CONFIG.nghttpx

    @property
    def slow_network(self) -> bool:
        # set via env vars that throttle the debug socket filters
        return "CURL_DBG_SOCK_WBLOCK" in os.environ or \
            "CURL_DBG_SOCK_WPARTIAL" in os.environ

    @property
    def ci_run(self) -> bool:
        return "CURL_CI" in os.environ

    def port_for(self, alpn_proto: Optional[str] = None):
        """Server port matching the given ALPN protocol (default https)."""
        if alpn_proto is None or \
                alpn_proto in ['h2', 'http/1.1', 'http/1.0', 'http/0.9']:
            return self.https_port
        if alpn_proto in ['h3']:
            return self.h3_port
        return self.http_port

    def authority_for(self, domain: str, alpn_proto: Optional[str] = None):
        """'host:port' authority for `domain` and the given protocol."""
        return f'{domain}:{self.port_for(alpn_proto=alpn_proto)}'

    def make_data_file(self, indir: str, fname: str, fsize: int,
                       line_length: int = 1024) -> str:
        """Create a text file of `fsize` bytes with numbered lines.

        Each line is `line_length` bytes: a 9-digit line number, a dash,
        filler digits and a newline. Returns the file path.
        """
        if line_length < 11:
            raise RuntimeError('line_length less than 11 not supported')
        fpath = os.path.join(indir, fname)
        s10 = "0123456789"
        s = round((line_length / 10) + 1) * s10
        s = s[0:line_length-11]
        with open(fpath, 'w') as fd:
            for i in range(int(fsize / line_length)):
                fd.write(f"{i:09d}-{s}\n")
            remain = int(fsize % line_length)
            if remain != 0:
                i = int(fsize / line_length) + 1
                fd.write(f"{i:09d}-{s}"[0:remain-1] + "\n")
        return fpath

    def make_data_gzipbomb(self, indir: str, fname: str, fsize: int) -> str:
        """Create a small decoy file plus a gzip of `fsize` zero bytes.

        Also writes a type-map (.var) so the server can negotiate the
        gzip-encoded variant. Returns the path of the decoy file.
        """
        fpath = os.path.join(indir, fname)
        gzpath = f'{fpath}.gz'
        varpath = f'{fpath}.var'
        with open(fpath, 'w') as fd:
            fd.write('not what we are looking for!\n')
        count = int(fsize / 1024)
        zero1k = bytearray(1024)
        with gzip.open(gzpath, 'wb') as fd:
            for _ in range(count):
                fd.write(zero1k)
        with open(varpath, 'w') as fd:
            fd.write(f'URI: {fname}\n')
            fd.write('\n')
            fd.write(f'URI: {fname}.gz\n')
            fd.write('Content-Type: text/plain\n')
            fd.write('Content-Encoding: x-gzip\n')
            fd.write('\n')
        return fpath
# ===========================================================================
# diff-viewer residue removed: start of the next added file (+592 lines)
# ===========================================================================
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
#***************************************************************************
# _ _ ____ _
# Project ___| | | | _ \| |
# / __| | | | |_) | |
# | (__| |_| | _ <| |___
# \___|\___/|_| \_\_____|
#
# Copyright (C) Daniel Stenberg, <daniel@haxx.se>, et al.
#
# This software is licensed as described in the file COPYING, which
# you should have received as part of this distribution. The terms
# are also available at https://curl.se/docs/copyright.html.
#
# You may opt to use, copy, modify, merge, publish, distribute and/or sell
# copies of the Software, and permit persons to whom the Software is
# furnished to do so, under the terms of the COPYING file.
#
# This software is distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY
# KIND, either express or implied.
#
# SPDX-License-Identifier: curl
#
###########################################################################
#
import inspect
import logging
import os
import shutil
import socket
import subprocess
from datetime import timedelta, datetime
from json import JSONEncoder
import time
from typing import List, Union, Optional, Dict
import copy
from .curl import CurlClient, ExecResult
from .env import Env
from .ports import alloc_ports_and_do
log = logging.getLogger(__name__)
class Httpd:
    """Manages an apache httpd instance acting as the main test server."""

    # httpd modules loaded into the generated config (when present on disk)
    MODULES = [
        'log_config', 'logio', 'unixd', 'version', 'watchdog',
        'authn_core', 'authn_file',
        'authz_user', 'authz_core', 'authz_host',
        'auth_basic', 'auth_digest',
        'alias', 'env', 'filter', 'headers', 'mime', 'setenvif', 'negotiation',
        'socache_shmcb',
        'rewrite', 'http2', 'ssl', 'proxy', 'proxy_http', 'proxy_connect',
        'brotli',
        'mpm_event',
    ]
    # places where distributions commonly install apache modules
    COMMON_MODULES_DIRS = [
        '/usr/lib/apache2/modules', # debian
        '/usr/libexec/apache2/', # macos
    ]

    # path of the curl test handler module; set externally when built
    MOD_CURLTEST = None

    # server sockets the test suite allocates for httpd
    PORT_SPECS = {
        'http': socket.SOCK_STREAM,
        'https': socket.SOCK_STREAM,
        'https-tcp-only': socket.SOCK_STREAM,
        'proxy': socket.SOCK_STREAM,
        'proxys': socket.SOCK_STREAM,
    }
def __init__(self, env: Env):
    """Set up the directory layout and locate the apache module dir.

    Raises when `apxs` is not configured, fails to run, or reports a
    module directory that does not exist.
    """
    self.env = env
    # everything generated for this server lives below gen/apache
    self._apache_dir = os.path.join(env.gen_dir, 'apache')
    self._run_dir = os.path.join(self._apache_dir, 'run')
    self._lock_dir = os.path.join(self._apache_dir, 'locks')
    self._docs_dir = os.path.join(self._apache_dir, 'docs')
    self._conf_dir = os.path.join(self._apache_dir, 'conf')
    self._conf_file = os.path.join(self._conf_dir, 'test.conf')
    self._logs_dir = os.path.join(self._apache_dir, 'logs')
    self._error_log = os.path.join(self._logs_dir, 'error_log')
    self._tmp_dir = os.path.join(self._apache_dir, 'tmp')
    self._basic_passwords = os.path.join(self._conf_dir, 'basic.passwords')
    self._digest_passwords = os.path.join(self._conf_dir, 'digest.passwords')
    self._mods_dir = None
    self._auth_digest = True
    self._proxy_auth_basic = False
    # name used to lookup credentials for env.domain1
    self._domain1_cred_name = env.domain1
    self._extra_configs = {}
    # snapshot of the config the running server was started with,
    # used to decide whether a reload is needed
    self._loaded_extra_configs = None
    self._loaded_proxy_auth = None
    self._loaded_domain1_cred_name = None
    assert env.apxs
    # query apxs for the directory holding the apache modules
    p = subprocess.run(args=[env.apxs, '-q', 'libexecdir'],
                       capture_output=True, text=True)
    if p.returncode != 0:
        raise Exception(f'{env.apxs} failed to query libexecdir: {p}')
    self._mods_dir = p.stdout.strip()
    if self._mods_dir is None:
        raise Exception('apache modules dir cannot be found')
    if not os.path.exists(self._mods_dir):
        raise Exception(f'apache modules dir does not exist: {self._mods_dir}')
    self._maybe_running = False
    self.ports = {}
    self._rmf(self._error_log)
    self._init_curltest()
@property
def docs_dir(self):
    """Root directory this server serves documents from."""
    return self._docs_dir
def clear_logs(self):
    """Remove the server error log, e.g. before a fresh start."""
    self._rmf(self._error_log)
def exists(self):
    """True when the configured httpd binary is present on disk."""
    return os.path.exists(self.env.httpd)
def set_extra_config(self, domain: str, lines: Optional[Union[str, List[str]]]):
    """Attach extra config lines for `domain`, or drop them with None."""
    if lines is not None:
        self._extra_configs[domain] = lines
    else:
        self._extra_configs.pop(domain, None)
def reset_config(self):
    """Return all tweakable config options to their defaults."""
    self._extra_configs = {}
    self.set_proxy_auth(False)
    self._domain1_cred_name = self.env.domain1
def set_proxy_auth(self, active: bool):
    """Enable/disable Basic auth on the forward proxy vhosts."""
    self._proxy_auth_basic = active
def set_domain1_cred_name(self, name):
    """Select which certificate credentials the domain1 vhosts use."""
    self._domain1_cred_name = name
def _run(self, args, intext=''):
    """Run an apache-related command and capture its output.

    Returns an ExecResult with exit code, decoded stdout/stderr lines
    and the wall-clock duration of the command.
    """
    env = os.environ.copy()
    env['APACHE_RUN_DIR'] = self._run_dir
    env['APACHE_RUN_USER'] = os.environ['USER']
    env['APACHE_LOCK_DIR'] = self._lock_dir
    env['APACHE_CONFDIR'] = self._apache_dir
    # Take the start time *before* launching the command; the original
    # sampled it after subprocess.run() returned, which made every
    # reported duration effectively zero.
    start = datetime.now()
    p = subprocess.run(args, stderr=subprocess.PIPE, stdout=subprocess.PIPE,
                       cwd=self.env.gen_dir,
                       input=intext.encode() if intext else None,
                       env=env)
    return ExecResult(args=args, exit_code=p.returncode,
                      stdout=p.stdout.decode().splitlines(),
                      stderr=p.stderr.decode().splitlines(),
                      duration=datetime.now() - start)
def _cmd_httpd(self, cmd: str):
    """Invoke the httpd binary with `-k <cmd>` against our config."""
    return self._run(args=[
        self.env.httpd,
        '-d', self._apache_dir,
        '-f', self._conf_file,
        '-k', cmd,
    ])
def initial_start(self):
    """First-ever start: allocate all server ports, then boot.

    `alloc_ports_and_do` retries with fresh ports (up to 3 times).
    """
    def boot(ports: Dict[str, int]) -> bool:
        self.ports.update(ports)
        if not self.start():
            # failed: roll back and let the allocator retry
            self.stop()
            self.ports.clear()
            return False
        self.env.update_ports(ports)
        return True

    return alloc_ports_and_do(Httpd.PORT_SPECS, boot,
                              self.env.gen_root, max_tries=3)
def start(self):
    """Write the config and start httpd; True when it answers requests.

    A start is rejected unless all ports from PORT_SPECS have been
    allocated. An already (maybe) running instance is stopped first.
    """
    # assure ports are allocated
    for key, _ in Httpd.PORT_SPECS.items():
        assert self.ports[key] is not None
    if self._maybe_running:
        self.stop()
    self._write_config()
    # marker in the error log so runs can be told apart
    with open(self._error_log, 'a') as fd:
        fd.write('start of server\n')
    # NOTE(review): this second write to a file literally named 'xxx'
    # looks like leftover debugging — confirm and remove if so.
    with open(os.path.join(self._apache_dir, 'xxx'), 'a') as fd:
        fd.write('start of server\n')
    r = self._cmd_httpd('start')
    # any stderr output is treated as a failed start as well
    if r.exit_code != 0 or len(r.stderr):
        log.error(f'failed to start httpd: {r}')
        self.stop()
        return False
    # remember the config the running server was started with
    self._loaded_extra_configs = copy.deepcopy(self._extra_configs)
    self._loaded_proxy_auth = self._proxy_auth_basic
    return self.wait_live(timeout=timedelta(seconds=Env.SERVER_TIMEOUT))
def stop(self):
    """Stop httpd and wait until it no longer answers requests."""
    r = self._cmd_httpd('stop')
    # a stopped server no longer reflects any loaded configuration
    self._loaded_extra_configs = None
    self._loaded_proxy_auth = None
    if r.exit_code != 0:
        log.fatal(f'stopping httpd failed: {r}')
        return False
    return self.wait_dead(timeout=timedelta(seconds=Env.SERVER_TIMEOUT))
def reload(self):
    """Rewrite the config and gracefully reload the running server.

    Returns True once the reloaded server answers requests. On a failed
    reload the previously loaded config snapshot is left untouched.
    (The original carried a duplicated, unreachable second error check
    and two `None` assignments that were immediately overwritten; both
    removed.)
    """
    self._write_config()
    r = self._cmd_httpd("graceful")
    if r.exit_code != 0:
        log.error(f'failed to reload httpd: {r}')
        return False
    # remember the config the server is now running with
    self._loaded_extra_configs = copy.deepcopy(self._extra_configs)
    self._loaded_proxy_auth = self._proxy_auth_basic
    return self.wait_live(timeout=timedelta(seconds=Env.SERVER_TIMEOUT))
def reload_if_config_changed(self):
    """Reload only when the wanted config differs from the loaded one."""
    unchanged = (self._maybe_running
                 and self._loaded_extra_configs == self._extra_configs
                 and self._loaded_proxy_auth == self._proxy_auth_basic
                 and self._loaded_domain1_cred_name == self._domain1_cred_name)
    if unchanged:
        return True
    return self.reload()
def wait_dead(self, timeout: timedelta):
    """Poll until the server stops answering; True once it is down."""
    client = CurlClient(env=self.env, run_dir=self._tmp_dir)
    probe_url = f'http://{self.env.domain1}:{self.ports["http"]}/'
    deadline = datetime.now() + timeout
    while datetime.now() < deadline:
        if client.http_get(url=probe_url).exit_code != 0:
            self._maybe_running = False
            return True
        time.sleep(.1)
    log.debug(f"Server still responding after {timeout}")
    return False
def wait_live(self, timeout: timedelta):
    """Poll until the server answers a request; True once it is live."""
    client = CurlClient(env=self.env, run_dir=self._tmp_dir,
                        timeout=timeout.total_seconds())
    probe_url = f'http://{self.env.domain1}:{self.ports["http"]}/'
    deadline = datetime.now() + timeout
    while datetime.now() < deadline:
        if client.http_get(url=probe_url).exit_code == 0:
            self._maybe_running = True
            return True
        time.sleep(.1)
    log.error(f"Server still not responding after {timeout}")
    return False
def _rmf(self, path):
if os.path.exists(path):
return os.remove(path)
def _mkpath(self, path):
if not os.path.exists(path):
return os.makedirs(path)
def _write_config(self):
    """Write the complete httpd test configuration.

    Emits module loads, a base server section and a vhost matrix over
    the allocated ports: domain1/domain2 plain, TLS and TLS-tcp-only
    variants, a brotli-compressing alias for domain1, an expired-cert
    host and HTTP(S) forward proxies. Also generates htdocs content,
    password files and a minimal mime.types.
    """
    domain1 = self.env.domain1
    domain1brotli = self.env.domain1brotli
    creds1 = self.env.get_credentials(self._domain1_cred_name)
    assert creds1 # convince pytype this isn't None
    self._loaded_domain1_cred_name = self._domain1_cred_name
    domain2 = self.env.domain2
    creds2 = self.env.get_credentials(domain2)
    assert creds2 # convince pytype this isn't None
    exp_domain = self.env.expired_domain
    exp_creds = self.env.get_credentials(exp_domain)
    assert exp_creds # convince pytype this isn't None
    proxy_domain = self.env.proxy_domain
    proxy_creds = self.env.get_credentials(proxy_domain)
    assert proxy_creds # convince pytype this isn't None
    self._mkpath(self._conf_dir)
    self._mkpath(self._docs_dir)
    self._mkpath(self._logs_dir)
    self._mkpath(self._tmp_dir)
    self._mkpath(os.path.join(self._docs_dir, 'two'))
    # served JSON documents identifying the answering vhost
    with open(os.path.join(self._docs_dir, 'data.json'), 'w') as fd:
        data = {
            'server': f'{domain1}',
        }
        fd.write(JSONEncoder().encode(data))
    with open(os.path.join(self._docs_dir, 'two/data.json'), 'w') as fd:
        data = {
            'server': f'{domain2}',
        }
        fd.write(JSONEncoder().encode(data))
    if self._proxy_auth_basic:
        with open(self._basic_passwords, 'w') as fd:
            fd.write('proxy:$apr1$FQfeInbs$WQZbODJlVg60j0ogEIlTW/\n')
    if self._auth_digest:
        with open(self._digest_passwords, 'w') as fd:
            fd.write('test:restricted area:57123e269fd73d71ae0656594e938e2f\n')
        self._mkpath(os.path.join(self.docs_dir, 'restricted/digest'))
        with open(os.path.join(self.docs_dir, 'restricted/digest/data.json'), 'w') as fd:
            fd.write('{"area":"digest"}\n')
    with open(self._conf_file, 'w') as fd:
        for m in self.MODULES:
            if os.path.exists(os.path.join(self._mods_dir, f'mod_{m}.so')):
                fd.write(f'LoadModule {m}_module "{self._mods_dir}/mod_{m}.so"\n')
        if Httpd.MOD_CURLTEST is not None:
            fd.write(f'LoadModule curltest_module "{Httpd.MOD_CURLTEST}"\n')
        conf = [ # base server config
            f'ServerRoot "{self._apache_dir}"',
            'DefaultRuntimeDir logs',
            'PidFile httpd.pid',
            f'ServerName {self.env.tld}',
            f'ErrorLog {self._error_log}',
            f'LogLevel {self._get_log_level()}',
            'StartServers 4',
            'ReadBufferSize 16000',
            'KeepAliveTimeout 30', # CI may exceed the default of 5 sec
            'H2MinWorkers 16',
            'H2MaxWorkers 256',
            # fix: the directive was missing its closing double-quote
            f'TypesConfig "{self._conf_dir}/mime.types"',
            'SSLSessionCache "shmcb:ssl_gcache_data(32000)"',
            'AddEncoding x-gzip .gz .tgz .gzip',
            'AddHandler type-map .var',
        ]
        conf.extend([f'Listen {port}' for _, port in self.ports.items()])
        if 'base' in self._extra_configs:
            conf.extend(self._extra_configs['base'])
        conf.extend([ # plain http host for domain1
            f'<VirtualHost *:{self.ports["http"]}>',
            f' ServerName {domain1}',
            ' ServerAlias localhost',
            f' DocumentRoot "{self._docs_dir}"',
            ' Protocols h2c http/1.1',
            ' H2Direct on',
        ])
        conf.extend(self._curltest_conf(domain1))
        conf.extend([
            '</VirtualHost>',
            '',
        ])
        conf.extend([ # https host for domain1, h1 + h2
            f'<VirtualHost *:{self.ports["https"]}>',
            f' ServerName {domain1}',
            ' ServerAlias localhost',
            ' Protocols h2 http/1.1',
            ' SSLEngine on',
            f' SSLCertificateFile {creds1.cert_file}',
            f' SSLCertificateKeyFile {creds1.pkey_file}',
            f' DocumentRoot "{self._docs_dir}"',
        ])
        conf.extend(self._curltest_conf(domain1))
        if domain1 in self._extra_configs:
            conf.extend(self._extra_configs[domain1])
        conf.extend([
            '</VirtualHost>',
            '',
        ])
        conf.extend([ # https host for domain1, h1 + h2, tcp only
            f'<VirtualHost *:{self.ports["https-tcp-only"]}>',
            f' ServerName {domain1}',
            ' ServerAlias localhost',
            ' Protocols h2 http/1.1',
            ' SSLEngine on',
            f' SSLCertificateFile {creds1.cert_file}',
            f' SSLCertificateKeyFile {creds1.pkey_file}',
            f' DocumentRoot "{self._docs_dir}"',
        ])
        conf.extend(self._curltest_conf(domain1))
        if domain1 in self._extra_configs:
            conf.extend(self._extra_configs[domain1])
        conf.extend([
            '</VirtualHost>',
            '',
        ])
        # Alternate to domain1 with BROTLI compression
        conf.extend([ # https host for domain1, h1 + h2
            f'<VirtualHost *:{self.ports["https"]}>',
            f' ServerName {domain1brotli}',
            ' Protocols h2 http/1.1',
            ' SSLEngine on',
            f' SSLCertificateFile {creds1.cert_file}',
            f' SSLCertificateKeyFile {creds1.pkey_file}',
            f' DocumentRoot "{self._docs_dir}"',
            ' SetOutputFilter BROTLI_COMPRESS',
        ])
        conf.extend(self._curltest_conf(domain1))
        if domain1 in self._extra_configs:
            conf.extend(self._extra_configs[domain1])
        conf.extend([
            '</VirtualHost>',
            '',
        ])
        conf.extend([ # plain http host for domain2
            f'<VirtualHost *:{self.ports["http"]}>',
            f' ServerName {domain2}',
            ' ServerAlias localhost',
            f' DocumentRoot "{self._docs_dir}"',
            ' Protocols h2c http/1.1',
        ])
        conf.extend(self._curltest_conf(domain2))
        conf.extend([
            '</VirtualHost>',
            '',
        ])
        self._mkpath(os.path.join(self._docs_dir, 'two'))
        conf.extend([ # https host for domain2, no h2
            f'<VirtualHost *:{self.ports["https"]}>',
            f' ServerName {domain2}',
            ' Protocols http/1.1',
            ' SSLEngine on',
            f' SSLCertificateFile {creds2.cert_file}',
            f' SSLCertificateKeyFile {creds2.pkey_file}',
            f' DocumentRoot "{self._docs_dir}/two"',
        ])
        conf.extend(self._curltest_conf(domain2))
        if domain2 in self._extra_configs:
            conf.extend(self._extra_configs[domain2])
        conf.extend([
            '</VirtualHost>',
            '',
        ])
        conf.extend([ # https host for domain2, no h2, tcp only
            f'<VirtualHost *:{self.ports["https-tcp-only"]}>',
            f' ServerName {domain2}',
            ' Protocols http/1.1',
            ' SSLEngine on',
            f' SSLCertificateFile {creds2.cert_file}',
            f' SSLCertificateKeyFile {creds2.pkey_file}',
            f' DocumentRoot "{self._docs_dir}/two"',
        ])
        conf.extend(self._curltest_conf(domain2))
        if domain2 in self._extra_configs:
            conf.extend(self._extra_configs[domain2])
        conf.extend([
            '</VirtualHost>',
            '',
        ])
        self._mkpath(os.path.join(self._docs_dir, 'expired'))
        conf.extend([ # https host for expired domain
            f'<VirtualHost *:{self.ports["https"]}>',
            f' ServerName {exp_domain}',
            ' Protocols h2 http/1.1',
            ' SSLEngine on',
            f' SSLCertificateFile {exp_creds.cert_file}',
            f' SSLCertificateKeyFile {exp_creds.pkey_file}',
            f' DocumentRoot "{self._docs_dir}/expired"',
        ])
        conf.extend(self._curltest_conf(exp_domain))
        if exp_domain in self._extra_configs:
            conf.extend(self._extra_configs[exp_domain])
        conf.extend([
            '</VirtualHost>',
            '',
        ])
        conf.extend([ # http forward proxy
            f'<VirtualHost *:{self.ports["proxy"]}>',
            f' ServerName {proxy_domain}',
            ' Protocols h2c http/1.1',
            ' ProxyRequests On',
            ' H2ProxyRequests On',
            ' ProxyVia On',
            f' AllowCONNECT {self.ports["http"]} {self.ports["https"]}',
        ])
        conf.extend(self._get_proxy_conf())
        conf.extend([
            '</VirtualHost>',
            '',
        ])
        conf.extend([ # https forward proxy
            f'<VirtualHost *:{self.ports["proxys"]}>',
            f' ServerName {proxy_domain}',
            ' Protocols h2 http/1.1',
            ' SSLEngine on',
            f' SSLCertificateFile {proxy_creds.cert_file}',
            f' SSLCertificateKeyFile {proxy_creds.pkey_file}',
            ' ProxyRequests On',
            ' H2ProxyRequests On',
            ' ProxyVia On',
            f' AllowCONNECT {self.ports["http"]} {self.ports["https"]}',
        ])
        conf.extend(self._get_proxy_conf())
        conf.extend([
            '</VirtualHost>',
            '',
        ])
        fd.write("\n".join(conf))
    with open(os.path.join(self._conf_dir, 'mime.types'), 'w') as fd:
        fd.write("\n".join([
            'text/plain txt',
            'text/html html',
            'application/json json',
            'application/x-gzip gzip',
            'application/x-gzip gz',
            ''
        ]))
def _get_proxy_conf(self):
if self._proxy_auth_basic:
return [
' <Proxy "*">',
' AuthType Basic',
' AuthName "Restricted Proxy"',
' AuthBasicProvider file',
f' AuthUserFile "{self._basic_passwords}"',
' Require user proxy',
' </Proxy>',
]
else:
return [
' <Proxy "*">',
' Require ip 127.0.0.1',
' </Proxy>',
]
def _get_log_level(self):
if self.env.verbose > 3:
return 'trace2'
if self.env.verbose > 2:
return 'trace1'
if self.env.verbose > 1:
return 'debug'
return 'info'
    def _curltest_conf(self, servername) -> List[str]:
        """Return per-vhost Apache config lines for the curl test handlers.

        Emits the mod_curltest handler locations and redirect rules only
        when the module was successfully built (Httpd.MOD_CURLTEST set by
        _init_curltest). Appends a Digest-auth protected directory when
        self._auth_digest is enabled. `servername` is used as the
        AuthDigestDomain of that protected area.
        """
        lines = []
        if Httpd.MOD_CURLTEST is not None:
            lines.extend([
                # redirects exercised by the redirect-following tests
                ' Redirect 302 /data.json.302 /data.json',
                ' Redirect 301 /curltest/echo301 /curltest/echo',
                ' Redirect 302 /curltest/echo302 /curltest/echo',
                ' Redirect 303 /curltest/echo303 /curltest/echo',
                ' Redirect 307 /curltest/echo307 /curltest/echo',
                ' <Location /curltest/sslinfo>',
                # StdEnvVars exposes SSL_* vars the sslinfo handler reports
                ' SSLOptions StdEnvVars',
                ' SetHandler curltest-sslinfo',
                ' </Location>',
                ' <Location /curltest/echo>',
                ' SetHandler curltest-echo',
                ' </Location>',
                ' <Location /curltest/put>',
                ' SetHandler curltest-put',
                ' </Location>',
                ' <Location /curltest/tweak>',
                ' SetHandler curltest-tweak',
                ' </Location>',
                ' Redirect 302 /tweak /curltest/tweak',
                ' <Location /curltest/1_1>',
                ' SetHandler curltest-1_1-required',
                ' </Location>',
                ' <Location /curltest/shutdown_unclean>',
                ' SetHandler curltest-tweak',
                # force HTTP/1.0 framing so the connection close is "unclean"
                ' SetEnv force-response-1.0 1',
                ' </Location>',
                ' SetEnvIf Request_URI "/shutdown_unclean" ssl-unclean=1',
                ' RewriteEngine on',
                # redirect variants for PUT upload-resume tests
                ' RewriteRule "^/curltest/put-redir-301$" "/curltest/put" [R=301]',
                ' RewriteRule "^/curltest/put-redir-302$" "/curltest/put" [R=302]',
                ' RewriteRule "^/curltest/put-redir-307$" "/curltest/put" [R=307]',
                ' RewriteRule "^/curltest/put-redir-308$" "/curltest/put" [R=308]',
            ])
        if self._auth_digest:
            lines.extend([
                f' <Directory {self.docs_dir}/restricted/digest>',
                ' AuthType Digest',
                ' AuthName "restricted area"',
                f' AuthDigestDomain "https://{servername}"',
                ' AuthBasicProvider file',
                f' AuthUserFile "{self._digest_passwords}"',
                ' Require valid-user',
                ' </Directory>',
            ])
        return lines
    def _init_curltest(self):
        """Build mod_curltest from source via apxs, once per test run.

        Copies mod_curltest.c next to the generated output (only when
        missing or stale by mtime), compiles it with the environment's
        apxs, and records the resulting .so path in Httpd.MOD_CURLTEST.

        Raises:
            Exception: when the apxs compilation fails (stderr included).
        """
        if Httpd.MOD_CURLTEST is not None:
            # already built in this process, nothing to do
            return
        local_dir = os.path.dirname(inspect.getfile(Httpd))
        out_dir = os.path.join(self.env.gen_dir, 'mod_curltest')
        in_source = os.path.join(local_dir, 'mod_curltest/mod_curltest.c')
        out_source = os.path.join(out_dir, 'mod_curltest.c')
        if not os.path.exists(out_dir):
            os.mkdir(out_dir)
        # refresh the copied source when the original changed
        if not os.path.exists(out_source) or \
                os.stat(in_source).st_mtime > os.stat(out_source).st_mtime:
            shutil.copy(in_source, out_source)
        p = subprocess.run([
            self.env.apxs, '-c', out_source
        ], capture_output=True, cwd=out_dir)
        rv = p.returncode
        if rv != 0:
            log.error(f"compiling mod_curltest failed: {p.stderr}")
            raise Exception(f"compiling mod_curltest failed: {p.stderr}")
        # apxs/libtool places the shared object under .libs/
        Httpd.MOD_CURLTEST = os.path.join(out_dir, '.libs/mod_curltest.so')
@@ -0,0 +1,881 @@
/***************************************************************************
* _ _ ____ _
* Project ___| | | | _ \| |
* / __| | | | |_) | |
* | (__| |_| | _ <| |___
* \___|\___/|_| \_\_____|
*
* Copyright (C) Daniel Stenberg, <daniel@haxx.se>, et al.
*
* This software is licensed as described in the file COPYING, which
* you should have received as part of this distribution. The terms
* are also available at https://curl.se/docs/copyright.html.
*
* You may opt to use, copy, modify, merge, publish, distribute and/or sell
* copies of the Software, and permit persons to whom the Software is
* furnished to do so, under the terms of the COPYING file.
*
* This software is distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY
* KIND, either express or implied.
*
* SPDX-License-Identifier: curl
*
***************************************************************************/
#include <assert.h>
#include <errno.h>

#include <apr_optional.h>
#include <apr_optional_hooks.h>
#include <apr_strings.h>
#include <apr_cstr.h>
#include <apr_time.h>
#include <apr_want.h>

#include <httpd.h>
#include <http_protocol.h>
#include <http_request.h>
#include <http_log.h>
/* Forward declarations: hook installer and the request handlers
 * registered by this test module. */
static void curltest_hooks(apr_pool_t *pool);
static int curltest_echo_handler(request_rec *r);
static int curltest_put_handler(request_rec *r);
static int curltest_tweak_handler(request_rec *r);
static int curltest_1_1_required(request_rec *r);
static int curltest_sslinfo_handler(request_rec *r);
/* Module registration. The module keeps no per-dir or per-server
 * configuration; it only installs hooks via curltest_hooks(). */
AP_DECLARE_MODULE(curltest) =
{
  STANDARD20_MODULE_STUFF,
  NULL, /* func to create per dir config */
  NULL, /* func to merge per dir config */
  NULL, /* func to create per server config */
  NULL, /* func to merge per server config */
  NULL, /* command handlers */
  curltest_hooks,
#ifdef AP_MODULE_FLAG_NONE
  AP_MODULE_FLAG_ALWAYS_MERGE
#endif
};
/* post_config hook. Apache runs post_config twice (a config "dry run"
 * followed by the real startup); a marker in the process pool userdata
 * distinguishes the two so real initialization happens only once. */
static int curltest_post_config(apr_pool_t *p, apr_pool_t *plog,
                                apr_pool_t *ptemp, server_rec *s)
{
  void *data = NULL;
  const char *key = "mod_curltest_init_counter";

  (void)plog;(void)ptemp;

  apr_pool_userdata_get(&data, key, s->process->pool);
  if(!data) {
    /* dry run */
    apr_pool_userdata_set((const void *)1, key,
                          apr_pool_cleanup_null, s->process->pool);
    return APR_SUCCESS;
  }
  /* mess with the overall server here */
  return APR_SUCCESS;
}
/* Install all hooks of this module: the one-time post_config hook and
 * one content handler per "curltest-*" SetHandler name. */
static void curltest_hooks(apr_pool_t *pool)
{
  ap_log_perror(APLOG_MARK, APLOG_TRACE1, 0, pool, "installing hooks");

  /* Run once after configuration is set, but before mpm children initialize.
   */
  ap_hook_post_config(curltest_post_config, NULL, NULL, APR_HOOK_MIDDLE);

  /* curl test handlers */
  ap_hook_handler(curltest_echo_handler, NULL, NULL, APR_HOOK_MIDDLE);
  ap_hook_handler(curltest_put_handler, NULL, NULL, APR_HOOK_MIDDLE);
  ap_hook_handler(curltest_tweak_handler, NULL, NULL, APR_HOOK_MIDDLE);
  ap_hook_handler(curltest_1_1_required, NULL, NULL, APR_HOOK_MIDDLE);
  ap_hook_handler(curltest_sslinfo_handler, NULL, NULL, APR_HOOK_MIDDLE);
}
/* Unit helpers (in seconds) for duration_parse() below. */
#define SECS_PER_HOUR (60*60)
#define SECS_PER_DAY (24*SECS_PER_HOUR)
/* Parse a duration string such as "2", "500ms", "3m", "1h" or "2d"
 * into an apr_interval_time_t (microseconds).
 *
 * ptimeout: receives the parsed duration on success.
 * value:    the string to parse; digits optionally followed by a unit
 *           (s, ms, m[i]..., h, d, us).
 * def_unit: unit assumed when `value` has no suffix; "s" when NULL.
 *
 * Returns APR_SUCCESS, APR_EINVAL for a non-numeric value, an errno
 * value on integer overflow, or APR_EGENERAL for an unknown unit.
 */
static apr_status_t duration_parse(apr_interval_time_t *ptimeout,
                                   const char *value, const char *def_unit)
{
  char *endp;
  apr_int64_t n;

  /* apr_strtoi64() signals range errors via errno but does not clear a
   * pre-existing value; reset it so a stale errno from an earlier call
   * cannot be mistaken for a parse failure. */
  errno = 0;
  n = apr_strtoi64(value, &endp, 10);
  if(errno) {
    return errno;
  }
  if(!endp || !*endp) {
    /* no unit suffix present, fall back to the default unit */
    if(!def_unit)
      def_unit = "s";
  }
  else if(endp == value) {
    /* no digits at all */
    return APR_EINVAL;
  }
  else {
    /* the suffix after the digits is the unit */
    def_unit = endp;
  }
  switch(*def_unit) {
  case 'D':
  case 'd':
    *ptimeout = apr_time_from_sec(n * SECS_PER_DAY);
    break;
  case 's':
  case 'S':
    *ptimeout = (apr_interval_time_t) apr_time_from_sec(n);
    break;
  case 'h':
  case 'H':
    /* Time is in hours */
    *ptimeout = (apr_interval_time_t) apr_time_from_sec(n * SECS_PER_HOUR);
    break;
  case 'm':
  case 'M':
    switch(*(++def_unit)) {
    /* Time is in milliseconds */
    case 's':
    case 'S':
      *ptimeout = (apr_interval_time_t) n * 1000;
      break;
    /* Time is in minutes */
    case 'i':
    case 'I':
      *ptimeout = (apr_interval_time_t) apr_time_from_sec(n * 60);
      break;
    default:
      return APR_EGENERAL;
    }
    break;
  case 'u':
  case 'U':
    switch(*(++def_unit)) {
    /* Time is in microseconds */
    case 's':
    case 'S':
      *ptimeout = (apr_interval_time_t) n;
      break;
    default:
      return APR_EGENERAL;
    }
    break;
  default:
    return APR_EGENERAL;
  }
  return APR_SUCCESS;
}
/* Translate the query-parameter words "timeout" and "reset" into the
 * corresponding APR status code. Returns 1 and sets *pstatus when the
 * word is recognized, 0 otherwise (leaving *pstatus untouched). */
static int status_from_str(const char *s, apr_status_t *pstatus)
{
  if(strcmp(s, "timeout") == 0) {
    *pstatus = APR_TIMEUP;
    return 1;
  }
  if(strcmp(s, "reset") == 0) {
    *pstatus = APR_ECONNRESET;
    return 1;
  }
  return 0;
}
/* Handler for "curltest-echo": copies the request body back into the
 * response. Query parameters let tests inject failures:
 *   die_after=<n>     announce n+1 bytes but abort after reading n
 *   just_die          send no response at all, close the connection
 *   die_after_100     abort right after the 100-continue phase
 *   read_delay=<dur>  sleep before reading the request body
 */
static int curltest_echo_handler(request_rec *r)
{
  conn_rec *c = r->connection;
  apr_bucket_brigade *bb;
  apr_bucket *b;
  apr_status_t rv;
  char buffer[8192];
  const char *ct;
  apr_off_t die_after_len = -1, total_read_len = 0;
  apr_time_t read_delay = 0;
  int just_die = 0, die_after_100 = 0;
  long l;

  if(strcmp(r->handler, "curltest-echo")) {
    return DECLINED;
  }
  if(r->method_number != M_GET && r->method_number != M_POST) {
    return DECLINED;
  }

  ap_log_rerror(APLOG_MARK, APLOG_TRACE1, 0, r, "echo_handler: processing");
  /* parse the recognized query parameters; unknown ones are ignored */
  if(r->args) {
    apr_array_header_t *args = NULL;
    int i;
    args = apr_cstr_split(r->args, "&", 1, r->pool);
    for(i = 0; i < args->nelts; ++i) {
      char *s, *val, *arg = APR_ARRAY_IDX(args, i, char *);
      s = strchr(arg, '=');
      if(s) {
        *s = '\0';
        val = s + 1;
        if(!strcmp("die_after", arg)) {
          die_after_len = (apr_off_t)apr_atoi64(val);
          continue;
        }
        else if(!strcmp("just_die", arg)) {
          just_die = 1;
          continue;
        }
        else if(!strcmp("die_after_100", arg)) {
          die_after_100 = 1;
          continue;
        }
        else if(!strcmp("read_delay", arg)) {
          rv = duration_parse(&read_delay, val, "s");
          if(APR_SUCCESS == rv) {
            continue;
          }
        }
      }
    }
  }

  if(just_die) {
    ap_log_rerror(APLOG_MARK, APLOG_INFO, 0, r,
                  "echo_handler: dying right away");
    /* Generate no HTTP response at all. */
    ap_remove_output_filter_byhandle(r->output_filters, "HTTP_HEADER");
    r->connection->keepalive = AP_CONN_CLOSE;
    return AP_FILTER_ERROR;
  }

  r->status = 200;
  if(die_after_len >= 0) {
    /* announce one byte more than will be sent, so the client sees a
     * premature close */
    r->clength = die_after_len + 1;
    r->chunked = 0;
    apr_table_set(r->headers_out, "Content-Length",
                  apr_ltoa(r->pool, (long)r->clength));
  }
  else {
    r->clength = -1;
    r->chunked = 1;
    apr_table_unset(r->headers_out, "Content-Length");
  }
  /* Discourage content-encodings */
  apr_table_unset(r->headers_out, "Content-Encoding");
  apr_table_setn(r->subprocess_env, "no-brotli", "1");
  apr_table_setn(r->subprocess_env, "no-gzip", "1");
  /* echo the request's content-type back, defaulting to octet-stream */
  ct = apr_table_get(r->headers_in, "content-type");
  ap_set_content_type(r, ct ? ct : "application/octet-stream");
  /* reflect a TE request header so tests can verify it arrived */
  if(apr_table_get(r->headers_in, "TE"))
    apr_table_setn(r->headers_out, "Request-TE",
                   apr_table_get(r->headers_in, "TE"));
  if(read_delay) {
    ap_log_rerror(APLOG_MARK, APLOG_TRACE1, 0, r,
                  "put_handler: read_delay");
    apr_sleep(read_delay);
  }

  bb = apr_brigade_create(r->pool, c->bucket_alloc);
  /* copy any request body into the response */
  rv = ap_setup_client_block(r, REQUEST_CHUNKED_DECHUNK);
  if(rv)
    goto cleanup;
  if(die_after_100) {
    ap_log_rerror(APLOG_MARK, APLOG_INFO, 0, r,
                  "echo_handler: dying after 100-continue");
    /* Generate no HTTP response at all. */
    ap_remove_output_filter_byhandle(r->output_filters, "HTTP_HEADER");
    r->connection->keepalive = AP_CONN_CLOSE;
    return AP_FILTER_ERROR;
  }
  if(ap_should_client_block(r)) {
    while(0 < (l = ap_get_client_block(r, &buffer[0], sizeof(buffer)))) {
      total_read_len += l;
      if(die_after_len >= 0 && total_read_len >= die_after_len) {
        ap_log_rerror(APLOG_MARK, APLOG_INFO, 0, r,
                      "echo_handler: dying after %ld bytes as requested",
                      (long)total_read_len);
        ap_pass_brigade(r->output_filters, bb);
        ap_remove_output_filter_byhandle(r->output_filters, "HTTP_HEADER");
        r->connection->keepalive = AP_CONN_CLOSE;
        return DONE;
      }
      ap_log_rerror(APLOG_MARK, APLOG_TRACE1, 0, r,
                    "echo_handler: copying %ld bytes from request body", l);
      rv = apr_brigade_write(bb, NULL, NULL, buffer, l);
      if(APR_SUCCESS != rv)
        goto cleanup;
      /* pass each chunk on immediately, keeping memory use bounded */
      rv = ap_pass_brigade(r->output_filters, bb);
      if(APR_SUCCESS != rv)
        goto cleanup;
      ap_log_rerror(APLOG_MARK, APLOG_TRACE1, 0, r,
                    "echo_handler: passed %ld bytes from request body", l);
    }
  }
  /* we are done */
  b = apr_bucket_eos_create(c->bucket_alloc);
  APR_BRIGADE_INSERT_TAIL(bb, b);
  ap_log_rerror(APLOG_MARK, APLOG_TRACE1, 0, r, "echo_handler: request read");
  /* announce a response trailer when the request carried trailers */
  if(r->trailers_in && !apr_is_empty_table(r->trailers_in)) {
    ap_log_rerror(APLOG_MARK, APLOG_TRACE2, 0, r,
                  "echo_handler: seeing incoming trailers");
    apr_table_setn(r->trailers_out, "h2test-trailers-in",
                   apr_itoa(r->pool, 1));
  }

  rv = ap_pass_brigade(r->output_filters, bb);

cleanup:
  if(rv == APR_SUCCESS ||
     r->status != HTTP_OK ||
     c->aborted) {
    ap_log_rerror(APLOG_MARK, APLOG_TRACE1, rv, r, "echo_handler: done");
    return OK;
  }
  else {
    /* no way to know what type of error occurred */
    ap_log_rerror(APLOG_MARK, APLOG_TRACE1, rv, r, "echo_handler failed");
    return AP_FILTER_ERROR;
  }
  return DECLINED;
}
/* Handler for "curltest-tweak": produces a configurable response for
 * error-injection tests. Query parameters control the response status,
 * body size/chunking, artificial delays, injected errors, oversized
 * headers ("x-hd", "x-hd1") and connection close behavior. Unknown
 * parameters answer 400. DELETE yields a bodyless 204. */
static int curltest_tweak_handler(request_rec *r)
{
  conn_rec *c = r->connection;
  apr_bucket_brigade *bb;
  apr_bucket *b;
  apr_status_t rv;
  char buffer[16*1024];
  int i, chunks = 3, error_bucket = 1;
  size_t chunk_size = sizeof(buffer);
  const char *request_id = "none";
  apr_time_t delay = 0, chunk_delay = 0, close_delay = 0;
  apr_array_header_t *args = NULL;
  int http_status = 200;
  apr_status_t error = APR_SUCCESS, body_error = APR_SUCCESS;
  int close_conn = 0, with_cl = 0;
  int x_hd_len = 0, x_hd1_len = 0;

  if(strcmp(r->handler, "curltest-tweak")) {
    return DECLINED;
  }
  if(r->method_number == M_DELETE) {
    /* DELETE gets a 204 without body */
    http_status = 204;
    chunks = 0;
  }
  else if(r->method_number != M_GET && r->method_number != M_POST) {
    return DECLINED;
  }

  if(r->args) {
    args = apr_cstr_split(r->args, "&", 1, r->pool);
    for(i = 0; i < args->nelts; ++i) {
      char *s, *val, *arg = APR_ARRAY_IDX(args, i, char *);
      s = strchr(arg, '=');
      if(s) {
        *s = '\0';
        val = s + 1;
        if(!strcmp("status", arg)) {
          http_status = (int)apr_atoi64(val);
          if(http_status > 0) {
            continue;
          }
        }
        else if(!strcmp("chunks", arg)) {
          chunks = (int)apr_atoi64(val);
          if(chunks >= 0) {
            continue;
          }
        }
        else if(!strcmp("chunk_size", arg)) {
          chunk_size = (int)apr_atoi64(val);
          if(chunk_size >= 0) {
            if(chunk_size > sizeof(buffer)) {
              /* cannot serve chunks larger than the stack buffer */
              ap_log_rerror(APLOG_MARK, APLOG_ERR, 0, r,
                            "chunk_size %zu too large", chunk_size);
              ap_die(HTTP_BAD_REQUEST, r);
              return OK;
            }
            continue;
          }
        }
        else if(!strcmp("id", arg)) {
          /* just an id for repeated requests with curl's url globbing */
          request_id = val;
          continue;
        }
        else if(!strcmp("error", arg)) {
          /* fail before any response data is sent */
          if(status_from_str(val, &error)) {
            continue;
          }
        }
        else if(!strcmp("error_bucket", arg)) {
          error_bucket = (int)apr_atoi64(val);
          if(error_bucket >= 0) {
            continue;
          }
        }
        else if(!strcmp("body_error", arg)) {
          /* fail after the first body chunk was sent */
          if(status_from_str(val, &body_error)) {
            continue;
          }
        }
        else if(!strcmp("delay", arg)) {
          rv = duration_parse(&delay, val, "s");
          if(APR_SUCCESS == rv) {
            continue;
          }
        }
        else if(!strcmp("chunk_delay", arg)) {
          rv = duration_parse(&chunk_delay, val, "s");
          if(APR_SUCCESS == rv) {
            continue;
          }
        }
        else if(!strcmp("close_delay", arg)) {
          rv = duration_parse(&close_delay, val, "s");
          if(APR_SUCCESS == rv) {
            continue;
          }
        }
        else if(!strcmp("x-hd", arg)) {
          /* total bytes of filler X-Header-N response headers */
          x_hd_len = (int)apr_atoi64(val);
          continue;
        }
        else if(!strcmp("x-hd1", arg)) {
          /* length of one single huge X-Mega-Header */
          x_hd1_len = (int)apr_atoi64(val);
          continue;
        }
      }
      else if(!strcmp("close", arg)) {
        /* we are asked to close the connection */
        close_conn = 1;
        continue;
      }
      else if(!strcmp("with_cl", arg)) {
        with_cl = 1;
        continue;
      }
      ap_log_rerror(APLOG_MARK, APLOG_ERR, 0, r, "query parameter not "
                    "understood: '%s' in %s",
                    arg, r->args);
      ap_die(HTTP_BAD_REQUEST, r);
      return OK;
    }
  }

  ap_log_rerror(APLOG_MARK, APLOG_TRACE1, 0, r, "error_handler: processing "
                "request, %s", r->args? r->args : "(no args)");
  r->status = http_status;
  /* with_cl sends a Content-Length, otherwise chunked (HTTP/1.1+ only) */
  r->clength = with_cl ? (chunks * chunk_size) : -1;
  r->chunked = (r->proto_num >= HTTP_VERSION(1, 1)) && !with_cl;
  apr_table_setn(r->headers_out, "request-id", request_id);
  if(r->clength >= 0) {
    apr_table_set(r->headers_out, "Content-Length",
                  apr_ltoa(r->pool, (long)r->clength));
  }
  else
    apr_table_unset(r->headers_out, "Content-Length");
  /* Discourage content-encodings */
  apr_table_unset(r->headers_out, "Content-Encoding");
  if(x_hd_len > 0) {
    /* emit x_hd_len bytes of header data split into 16k X-Header-N
     * headers, plus a shorter final one for the remainder */
    int i, hd_len = (16 * 1024);
    int n = (x_hd_len / hd_len);
    char *hd_val = apr_palloc(r->pool, x_hd_len);
    memset(hd_val, 'X', hd_len);
    hd_val[hd_len - 1] = 0;
    for(i = 0; i < n; ++i) {
      apr_table_setn(r->headers_out,
                     apr_psprintf(r->pool, "X-Header-%d", i), hd_val);
    }
    if(x_hd_len % hd_len) {
      hd_val[(x_hd_len % hd_len)] = 0;
      apr_table_setn(r->headers_out,
                     apr_psprintf(r->pool, "X-Header-%d", i), hd_val);
    }
  }
  if(x_hd1_len > 0) {
    char *hd_val = apr_palloc(r->pool, x_hd1_len);
    memset(hd_val, 'Y', x_hd1_len);
    hd_val[x_hd1_len - 1] = 0;
    apr_table_setn(r->headers_out, "X-Mega-Header", hd_val);
  }
  apr_table_setn(r->subprocess_env, "no-brotli", "1");
  apr_table_setn(r->subprocess_env, "no-gzip", "1");
  ap_set_content_type(r, "application/octet-stream");
  bb = apr_brigade_create(r->pool, c->bucket_alloc);

  if(delay) {
    apr_sleep(delay);
  }
  if(error != APR_SUCCESS) {
    /* injected pre-response error, mapped to an HTTP status */
    return ap_map_http_request_error(error, HTTP_BAD_REQUEST);
  }
  /* flush response */
  b = apr_bucket_flush_create(c->bucket_alloc);
  APR_BRIGADE_INSERT_TAIL(bb, b);
  rv = ap_pass_brigade(r->output_filters, bb);
  if(APR_SUCCESS != rv)
    goto cleanup;

  memset(buffer, 'X', sizeof(buffer));
  for(i = 0; i < chunks; ++i) {
    if(chunk_delay) {
      apr_sleep(chunk_delay);
    }
    rv = apr_brigade_write(bb, NULL, NULL, buffer, chunk_size);
    if(APR_SUCCESS != rv)
      goto cleanup;
    rv = ap_pass_brigade(r->output_filters, bb);
    if(APR_SUCCESS != rv)
      goto cleanup;
    ap_log_rerror(APLOG_MARK, APLOG_TRACE1, 0, r,
                  "error_handler: passed %lu bytes as response body",
                  (unsigned long)chunk_size);
    if(body_error != APR_SUCCESS) {
      /* injected mid-body error */
      rv = body_error;
      goto cleanup;
    }
  }
  /* we are done */
  b = apr_bucket_eos_create(c->bucket_alloc);
  APR_BRIGADE_INSERT_TAIL(bb, b);
  rv = ap_pass_brigade(r->output_filters, bb);
  apr_brigade_cleanup(bb);
  ap_log_rerror(APLOG_MARK, APLOG_TRACE1, rv, r,
                "error_handler: response passed");

cleanup:
  if(close_conn) {
    if(close_delay) {
      /* flush pending data, then linger before closing */
      b = apr_bucket_flush_create(c->bucket_alloc);
      APR_BRIGADE_INSERT_TAIL(bb, b);
      rv = ap_pass_brigade(r->output_filters, bb);
      apr_brigade_cleanup(bb);
      apr_sleep(close_delay);
    }
    r->connection->keepalive = AP_CONN_CLOSE;
  }
  ap_log_rerror(APLOG_MARK, APLOG_TRACE1, rv, r,
                "error_handler: request cleanup, r->status=%d, aborted=%d, "
                "close=%d", r->status, c->aborted, close_conn);
  if(rv == APR_SUCCESS) {
    return OK;
  }
  if(error_bucket) {
    /* surface the failure to the output chain as an error bucket */
    http_status = ap_map_http_request_error(rv, HTTP_BAD_REQUEST);
    b = ap_bucket_error_create(http_status, NULL, r->pool, c->bucket_alloc);
    ap_log_rerror(APLOG_MARK, APLOG_TRACE1, rv, r,
                  "error_handler: passing error bucket, status=%d",
                  http_status);
    APR_BRIGADE_INSERT_TAIL(bb, b);
    ap_pass_brigade(r->output_filters, bb);
  }
  return AP_FILTER_ERROR;
}
/* Handler for "curltest-put": consumes a PUT request body (discarding
 * the data) and responds with the number of bytes received, also set in
 * a "Received-Length" header. Query parameters: id, read_delay,
 * chunk_delay and max_upload (reply 413 when the body exceeds it). */
static int curltest_put_handler(request_rec *r)
{
  conn_rec *c = r->connection;
  apr_bucket_brigade *bb;
  apr_bucket *b;
  apr_status_t rv;
  char buffer[128*1024];
  const char *ct;
  apr_off_t rbody_len = 0;
  apr_off_t rbody_max_len = -1;
  const char *s_rbody_len;
  const char *request_id = "none";
  apr_time_t read_delay = 0, chunk_delay = 0;
  apr_array_header_t *args = NULL;
  long l;
  int i;

  if(strcmp(r->handler, "curltest-put")) {
    return DECLINED;
  }
  if(r->method_number != M_PUT) {
    return DECLINED;
  }

  if(r->args) {
    args = apr_cstr_split(r->args, "&", 1, r->pool);
    for(i = 0; i < args->nelts; ++i) {
      char *s, *val, *arg = APR_ARRAY_IDX(args, i, char *);
      s = strchr(arg, '=');
      if(s) {
        *s = '\0';
        val = s + 1;
        if(!strcmp("id", arg)) {
          /* just an id for repeated requests with curl's url globbing */
          request_id = val;
          continue;
        }
        else if(!strcmp("read_delay", arg)) {
          rv = duration_parse(&read_delay, val, "s");
          if(APR_SUCCESS == rv) {
            continue;
          }
        }
        else if(!strcmp("chunk_delay", arg)) {
          rv = duration_parse(&chunk_delay, val, "s");
          if(APR_SUCCESS == rv) {
            continue;
          }
        }
        else if(!strcmp("max_upload", arg)) {
          rbody_max_len = (apr_off_t)apr_atoi64(val);
          continue;
        }
      }
      /* anything not recognized above is a client error */
      ap_log_rerror(APLOG_MARK, APLOG_ERR, 0, r, "query parameter not "
                    "understood: '%s' in %s",
                    arg, r->args);
      ap_die(HTTP_BAD_REQUEST, r);
      return OK;
    }
  }

  ap_log_rerror(APLOG_MARK, APLOG_TRACE1, 0, r, "put_handler: processing");
  r->status = 200;
  r->clength = -1;
  r->chunked = 1;
  apr_table_unset(r->headers_out, "Content-Length");
  /* Discourage content-encodings */
  apr_table_unset(r->headers_out, "Content-Encoding");
  apr_table_setn(r->headers_out, "request-id", request_id);
  apr_table_setn(r->subprocess_env, "no-brotli", "1");
  apr_table_setn(r->subprocess_env, "no-gzip", "1");
  ct = apr_table_get(r->headers_in, "content-type");
  ap_set_content_type(r, ct ? ct : "text/plain");
  if(read_delay) {
    /* delay before touching the request body at all */
    ap_log_rerror(APLOG_MARK, APLOG_TRACE1, 0, r,
                  "put_handler: read_delay");
    apr_sleep(read_delay);
  }

  bb = apr_brigade_create(r->pool, c->bucket_alloc);
  /* copy any request body into the response */
  rv = ap_setup_client_block(r, REQUEST_CHUNKED_DECHUNK);
  if(rv)
    goto cleanup;
  if(ap_should_client_block(r)) {
    while(0 < (l = ap_get_client_block(r, &buffer[0], sizeof(buffer)))) {
      ap_log_rerror(APLOG_MARK, APLOG_TRACE1, 0, r,
                    "put_handler: read %ld bytes from request body", l);
      if(chunk_delay) {
        apr_sleep(chunk_delay);
      }
      rbody_len += l;
      if((rbody_max_len > 0) && (rbody_len > rbody_max_len)) {
        /* upload exceeds the configured limit: payload too large */
        r->status = 413;
        break;
      }
    }
  }
  /* we are done */
  s_rbody_len = apr_psprintf(r->pool, "%"APR_OFF_T_FMT, rbody_len);
  apr_table_setn(r->headers_out, "Received-Length", s_rbody_len);
  rv = apr_brigade_puts(bb, NULL, NULL, s_rbody_len);
  if(APR_SUCCESS != rv)
    goto cleanup;
  b = apr_bucket_eos_create(c->bucket_alloc);
  APR_BRIGADE_INSERT_TAIL(bb, b);
  ap_log_rerror(APLOG_MARK, APLOG_TRACE1, 0, r, "put_handler: request read");
  rv = ap_pass_brigade(r->output_filters, bb);
  if(r->status == 413) {
    /* give the 413 time to reach the client before the connection
     * may get torn down */
    apr_sleep(apr_time_from_sec(1));
  }

cleanup:
  if(rv == APR_SUCCESS ||
     r->status != HTTP_OK ||
     c->aborted) {
    ap_log_rerror(APLOG_MARK, APLOG_TRACE1, rv, r, "put_handler: done");
    return OK;
  }
  else {
    /* no way to know what type of error occurred */
    ap_log_rerror(APLOG_MARK, APLOG_TRACE1, rv, r, "put_handler failed");
    return AP_FILTER_ERROR;
  }
  return DECLINED;
}
/* Handler for "curltest-1_1-required": answers 403 to any request made
 * with HTTP/2 or later, and a small chunked "well done!" body to
 * HTTP/1.x requests. Used to test protocol-downgrade behavior. */
static int curltest_1_1_required(request_rec *r)
{
  conn_rec *c = r->connection;
  apr_bucket_brigade *bb;
  apr_bucket *b;
  apr_status_t rv;
  const char *ct;

  if(strcmp(r->handler, "curltest-1_1-required")) {
    return DECLINED;
  }

  if(HTTP_VERSION_MAJOR(r->proto_num) > 1) {
    /* HTTP/2+ is rejected outright */
    apr_table_setn(r->notes, "ssl-renegotiate-forbidden", "1");
    ap_die(HTTP_FORBIDDEN, r);
    return OK;
  }

  ap_log_rerror(APLOG_MARK, APLOG_TRACE1, 0, r, "1_1_handler: processing");
  r->status = 200;
  r->clength = -1;
  r->chunked = 1;
  apr_table_unset(r->headers_out, "Content-Length");
  /* Discourage content-encodings */
  apr_table_unset(r->headers_out, "Content-Encoding");
  apr_table_setn(r->subprocess_env, "no-brotli", "1");
  apr_table_setn(r->subprocess_env, "no-gzip", "1");
  ct = apr_table_get(r->headers_in, "content-type");
  ap_set_content_type(r, ct ? ct : "text/plain");

  bb = apr_brigade_create(r->pool, c->bucket_alloc);
  /* flush response */
  b = apr_bucket_flush_create(c->bucket_alloc);
  APR_BRIGADE_INSERT_TAIL(bb, b);
  rv = ap_pass_brigade(r->output_filters, bb);
  if(APR_SUCCESS != rv)
    goto cleanup;

  /* we are done */
  rv = apr_brigade_printf(bb, NULL, NULL, "well done!");
  if(APR_SUCCESS != rv)
    goto cleanup;
  b = apr_bucket_eos_create(c->bucket_alloc);
  APR_BRIGADE_INSERT_TAIL(bb, b);
  ap_log_rerror(APLOG_MARK, APLOG_TRACE1, 0, r, "1_1_handler: request read");
  rv = ap_pass_brigade(r->output_filters, bb);

cleanup:
  if(rv == APR_SUCCESS ||
     r->status != HTTP_OK ||
     c->aborted) {
    ap_log_rerror(APLOG_MARK, APLOG_TRACE1, rv, r, "1_1_handler: done");
    return OK;
  }
  else {
    /* no way to know what type of error occurred */
    ap_log_rerror(APLOG_MARK, APLOG_TRACE1, rv, r, "1_1_handler failed");
    return AP_FILTER_ERROR;
  }
  return DECLINED;
}
/* Append `,\n "<name>": "<value>"` to the brigade when the subprocess
 * environment variable `name` is set; do nothing otherwise. Returns
 * the apr_brigade_printf() status, or 0 when the variable is unset. */
static int brigade_env_var(request_rec *r, apr_bucket_brigade *bb,
                           const char *name)
{
  const char *value = apr_table_get(r->subprocess_env, name);
  if(!value)
    return 0;
  return apr_brigade_printf(bb, NULL, NULL, ",\n \"%s\": \"%s\"",
                            name, value);
}
/* Handler for "curltest-sslinfo": responds with a JSON object of the
 * request's SSL_* subprocess environment variables (requires
 * `SSLOptions StdEnvVars` in the location config). Query parameters:
 *   id      echoed back in a "request-id" response header
 *   close   close the connection after the response
 * Fix: the log messages previously said "1_1_handler" (copy/paste from
 * curltest_1_1_required), misattributing these log lines; they now
 * correctly say "sslinfo". */
static int curltest_sslinfo_handler(request_rec *r)
{
  conn_rec *c = r->connection;
  apr_bucket_brigade *bb;
  apr_bucket *b;
  apr_status_t rv;
  const char *request_id = NULL;
  int close_conn = 0;
  int i;

  if(strcmp(r->handler, "curltest-sslinfo")) {
    return DECLINED;
  }
  if(r->method_number != M_GET) {
    return DECLINED;
  }

  if(r->args) {
    apr_array_header_t *args = apr_cstr_split(r->args, "&", 1, r->pool);
    for(i = 0; i < args->nelts; ++i) {
      char *s, *val, *arg = APR_ARRAY_IDX(args, i, char *);
      s = strchr(arg, '=');
      if(s) {
        *s = '\0';
        val = s + 1;
        if(!strcmp("id", arg)) {
          /* just an id for repeated requests with curl's url globbing */
          request_id = val;
          continue;
        }
      }
      else if(!strcmp("close", arg)) {
        /* we are asked to close the connection */
        close_conn = 1;
        continue;
      }
      ap_log_rerror(APLOG_MARK, APLOG_ERR, 0, r, "query parameter not "
                    "understood: '%s' in %s",
                    arg, r->args);
      ap_die(HTTP_BAD_REQUEST, r);
      return OK;
    }
  }

  ap_log_rerror(APLOG_MARK, APLOG_TRACE1, 0, r, "sslinfo: processing");
  r->status = 200;
  r->clength = -1;
  r->chunked = 1;
  apr_table_unset(r->headers_out, "Content-Length");
  /* Discourage content-encodings */
  apr_table_unset(r->headers_out, "Content-Encoding");
  if(request_id)
    apr_table_setn(r->headers_out, "request-id", request_id);
  apr_table_setn(r->subprocess_env, "no-brotli", "1");
  apr_table_setn(r->subprocess_env, "no-gzip", "1");
  ap_set_content_type(r, "application/json");

  bb = apr_brigade_create(r->pool, c->bucket_alloc);
  /* build the JSON body out of the SSL_* env vars mod_ssl exported */
  apr_brigade_puts(bb, NULL, NULL, "{\n \"Name\": \"SSL-Information\"");
  brigade_env_var(r, bb, "HTTPS");
  brigade_env_var(r, bb, "SSL_PROTOCOL");
  brigade_env_var(r, bb, "SSL_CIPHER");
  brigade_env_var(r, bb, "SSL_SESSION_ID");
  brigade_env_var(r, bb, "SSL_SESSION_RESUMED");
  brigade_env_var(r, bb, "SSL_SRP_USER");
  brigade_env_var(r, bb, "SSL_SRP_USERINFO");
  brigade_env_var(r, bb, "SSL_TLS_SNI");
  apr_brigade_puts(bb, NULL, NULL, "}\n");

  /* flush response */
  b = apr_bucket_flush_create(c->bucket_alloc);
  APR_BRIGADE_INSERT_TAIL(bb, b);
  rv = ap_pass_brigade(r->output_filters, bb);
  if(APR_SUCCESS != rv)
    goto cleanup;

  /* we are done */
  b = apr_bucket_eos_create(c->bucket_alloc);
  APR_BRIGADE_INSERT_TAIL(bb, b);
  ap_log_rerror(APLOG_MARK, APLOG_TRACE1, 0, r, "sslinfo: request read");
  rv = ap_pass_brigade(r->output_filters, bb);

cleanup:
  if(close_conn)
    r->connection->keepalive = AP_CONN_CLOSE;
  if(rv == APR_SUCCESS ||
     r->status != HTTP_OK ||
     c->aborted) {
    ap_log_rerror(APLOG_MARK, APLOG_TRACE1, rv, r, "sslinfo: done");
    return OK;
  }
  else {
    /* no way to know what type of error occurred */
    ap_log_rerror(APLOG_MARK, APLOG_TRACE1, rv, r, "sslinfo failed");
    return AP_FILTER_ERROR;
  }
  return DECLINED;
}
+346
View File
@@ -0,0 +1,346 @@
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
#***************************************************************************
# _ _ ____ _
# Project ___| | | | _ \| |
# / __| | | | |_) | |
# | (__| |_| | _ <| |___
# \___|\___/|_| \_\_____|
#
# Copyright (C) Daniel Stenberg, <daniel@haxx.se>, et al.
#
# This software is licensed as described in the file COPYING, which
# you should have received as part of this distribution. The terms
# are also available at https://curl.se/docs/copyright.html.
#
# You may opt to use, copy, modify, merge, publish, distribute and/or sell
# copies of the Software, and permit persons to whom the Software is
# furnished to do so, under the terms of the COPYING file.
#
# This software is distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY
# KIND, either express or implied.
#
# SPDX-License-Identifier: curl
#
###########################################################################
#
import logging
import os
import signal
import socket
import subprocess
import time
from typing import Optional, Dict
from datetime import datetime, timedelta
from .env import Env, NghttpxUtil
from .curl import CurlClient
from .ports import alloc_ports_and_do
log = logging.getLogger(__name__)
class Nghttpx:
    """Base controller for an nghttpx process used by the test suite.

    Manages the server's run directory (config, pid file, logs) below
    `env.gen_dir/<name>`, starts/stops/reloads the process and probes it
    with curl until it is live or dead. Subclasses implement `start()`.
    """

    def __init__(self, env: Env, name: str, domain: str, cred_name: str):
        self.env = env
        self._name = name
        self._domain = domain
        self._port = 0          # frontend port; 0 until allocated/started
        self._https_port = 0    # set by subclasses fronting TLS over TCP
        self._cmd = env.nghttpx
        self._run_dir = os.path.join(env.gen_dir, name)
        self._pid_file = os.path.join(self._run_dir, 'nghttpx.pid')
        self._conf_file = os.path.join(self._run_dir, 'nghttpx.conf')
        self._error_log = os.path.join(self._run_dir, 'nghttpx.log')
        self._stderr = os.path.join(self._run_dir, 'nghttpx.stderr')
        self._tmp_dir = os.path.join(self._run_dir, 'tmp')
        self._process: Optional[subprocess.Popen] = None
        self._cred_name = self._def_cred_name = cred_name
        # credentials the currently running process was started with
        self._loaded_cred_name = ''
        self._version = NghttpxUtil.version(self._cmd)

    def supports_h3(self):
        """True when the installed nghttpx version supports HTTP/3."""
        return NghttpxUtil.version_with_h3(self._version)

    def set_cred_name(self, name: str):
        """Select the credentials to use on the next (re)start."""
        self._cred_name = name

    def reset_config(self):
        """Revert any config overrides back to the constructor defaults."""
        self._cred_name = self._def_cred_name

    def reload_if_config_changed(self):
        """Reload the server only when the desired credentials changed."""
        if self._process and self._port > 0 and \
                self._loaded_cred_name == self._cred_name:
            return True
        return self.reload()

    @property
    def https_port(self):
        return self._https_port

    def exists(self):
        """True when the nghttpx executable is configured and present."""
        return self._cmd and os.path.exists(self._cmd)

    def clear_logs(self):
        self._rmf(self._error_log)
        self._rmf(self._stderr)

    def is_running(self):
        """True when a started process has not exited."""
        if self._process:
            self._process.poll()
            return self._process.returncode is None
        return False

    def start_if_needed(self):
        if not self.is_running():
            return self.start()
        return True

    def initial_start(self):
        """Prepare the run directory and config for the first start."""
        self._rmf(self._pid_file)
        self._rmf(self._error_log)
        self._mkpath(self._run_dir)
        self._write_config()

    def start(self, wait_live=True):
        # implemented by subclasses with their specific command line
        pass

    def stop(self, wait_dead=True):
        """Terminate the process; optionally wait until it stops responding."""
        self._mkpath(self._tmp_dir)
        if self._process:
            self._process.terminate()
            self._process.wait(timeout=2)
            self._process = None
            return not wait_dead or self.wait_dead(timeout=timedelta(seconds=5))
        return True

    def restart(self):
        self.stop()
        return self.start()

    def reload(self, timeout: timedelta = timedelta(seconds=Env.SERVER_TIMEOUT)):
        """Graceful reload: start a new instance, SIGQUIT the old one.

        Returns True when the new instance answers requests within
        `timeout`; False when there was no running process or the new
        instance failed to start.
        """
        if self._process:
            running = self._process
            self._process = None
            # SIGQUIT asks nghttpx to shut down gracefully
            os.kill(running.pid, signal.SIGQUIT)
            end_wait = datetime.now() + timedelta(seconds=5)
            if not self.start(wait_live=False):
                # could not start the replacement, keep the old process
                self._process = running
                return False
            while datetime.now() < end_wait:
                try:
                    log.debug(f'waiting for nghttpx({running.pid}) to exit.')
                    running.wait(1)
                    log.debug(f'nghttpx({running.pid}) terminated -> {running.returncode}')
                    running = None
                    break
                except subprocess.TimeoutExpired:
                    log.warning(f'nghttpx({running.pid}), not shut down yet.')
                    os.kill(running.pid, signal.SIGQUIT)
            if datetime.now() >= end_wait:
                # graceful shutdown failed, use force
                log.error(f'nghttpx({running.pid}), terminate forcefully.')
                os.kill(running.pid, signal.SIGKILL)
                running.terminate()
                running.wait(1)
            return self.wait_live(timeout=timeout)
        return False

    def wait_dead(self, timeout: timedelta):
        """Probe with curl until the server stops answering or `timeout`."""
        curl = CurlClient(env=self.env, run_dir=self._tmp_dir)
        try_until = datetime.now() + timeout
        while datetime.now() < try_until:
            if self._https_port > 0:
                check_url = f'https://{self._domain}:{self._port}/'
                r = curl.http_get(url=check_url, extra_args=[
                    '--trace', 'curl.trace', '--trace-time',
                    '--connect-timeout', '1'
                ])
            else:
                # no TCP/TLS frontend: probe the QUIC/h3 frontend instead
                check_url = f'https://{self._domain}:{self._port}/'
                r = curl.http_get(url=check_url, extra_args=[
                    '--trace', 'curl.trace', '--trace-time',
                    '--http3-only', '--connect-timeout', '1'
                ])
            if r.exit_code != 0:
                return True
            log.debug(f'waiting for nghttpx to stop responding: {r}')
            time.sleep(.1)
        log.debug(f"Server still responding after {timeout}")
        return False

    def wait_live(self, timeout: timedelta):
        """Probe with curl until the server answers or `timeout` expires."""
        curl = CurlClient(env=self.env, run_dir=self._tmp_dir)
        try_until = datetime.now() + timeout
        while datetime.now() < try_until:
            if self._https_port > 0:
                check_url = f'https://{self._domain}:{self._port}/'
                r = curl.http_get(url=check_url, extra_args=[
                    '--trace', 'curl.trace', '--trace-time',
                    '--connect-timeout', '1'
                ])
            else:
                # no TCP/TLS frontend: probe the QUIC/h3 frontend instead
                check_url = f'https://{self._domain}:{self._port}/'
                r = curl.http_get(url=check_url, extra_args=[
                    '--http3-only', '--trace', 'curl.trace', '--trace-time',
                    '--connect-timeout', '1'
                ])
            if r.exit_code == 0:
                return True
            time.sleep(.1)
        log.error(f"Server still not responding after {timeout}")
        return False

    def _rmf(self, path):
        # remove a file if present
        if os.path.exists(path):
            return os.remove(path)

    def _mkpath(self, path):
        # create a directory tree if missing
        if not os.path.exists(path):
            return os.makedirs(path)

    def _write_config(self):
        # the servers are fully configured via command line arguments;
        # the config file exists only because --conf points at it
        with open(self._conf_file, 'w') as fd:
            fd.write('# nghttpx test config')
            fd.write("\n".join([
                '# do we need something here?'
            ]))
class NghttpxQuic(Nghttpx):
    """nghttpx fronting the test Apache with HTTPS (h2) and HTTP/3."""

    # named ports (and socket types) to allocate for this server
    PORT_SPECS = {
        'nghttpx_https': socket.SOCK_STREAM,
    }

    def __init__(self, env: Env):
        super().__init__(env=env, name='nghttpx-quic',
                         domain=env.domain1, cred_name=env.domain1)
        self._https_port = env.https_port

    def initial_start(self):
        super().initial_start()

        def startup(ports: Dict[str, int]) -> bool:
            # invoked by the allocator with a candidate set of ports;
            # returns True only when the server comes up on them
            self._port = ports['nghttpx_https']
            if self.start():
                self.env.update_ports(ports)
                return True
            self.stop()
            self._port = 0
            return False

        return alloc_ports_and_do(NghttpxQuic.PORT_SPECS, startup,
                                  self.env.gen_root, max_tries=3)

    def start(self, wait_live=True):
        """Launch nghttpx with a TLS frontend and, when supported, a
        QUIC frontend; backends are the test Apache's h2/h1 ports."""
        self._mkpath(self._tmp_dir)
        if self._process:
            self.stop()
        creds = self.env.get_credentials(self._cred_name)
        assert creds  # convince pytype this isn't None
        self._loaded_cred_name = self._cred_name
        args = [self._cmd, f'--frontend=*,{self._port};tls']
        if self.supports_h3():
            args.extend([
                f'--frontend=*,{self.env.h3_port};quic',
                '--frontend-quic-early-data',
            ])
        args.extend([
            f'--backend=127.0.0.1,{self.env.https_port};{self._domain};sni={self._domain};proto=h2;tls',
            f'--backend=127.0.0.1,{self.env.http_port}',
            '--log-level=ERROR',
            f'--pid-file={self._pid_file}',
            f'--errorlog-file={self._error_log}',
            f'--conf={self._conf_file}',
            f'--cacert={self.env.ca.cert_file}',
            creds.pkey_file,
            creds.cert_file,
            '--frontend-http3-window-size=1M',
            '--frontend-http3-max-window-size=10M',
            '--frontend-http3-connection-window-size=10M',
            '--frontend-http3-max-connection-window-size=100M',
            # f'--frontend-quic-debug-log',
        ])
        ngerr = open(self._stderr, 'a')
        self._process = subprocess.Popen(args=args, stderr=ngerr)
        if self._process.returncode is not None:
            # process died immediately
            return False
        return not wait_live or self.wait_live(timeout=timedelta(seconds=Env.SERVER_TIMEOUT))
class NghttpxFwd(Nghttpx):
    """nghttpx running in HTTP/2 forward-proxy mode for the test suite."""

    def __init__(self, env: Env):
        super().__init__(env=env, name='nghttpx-fwd',
                         domain=env.proxy_domain,
                         cred_name=env.proxy_domain)

    def initial_start(self):
        """Allocate the proxy port and start the server, retrying on failure."""
        super().initial_start()

        def startup(ports: Dict[str, int]) -> bool:
            # Adopt the allocated port; publish it only on a successful start.
            self._port = ports['h2proxys']
            if self.start():
                self.env.update_ports(ports)
                return True
            self.stop()
            self._port = 0
            return False

        return alloc_ports_and_do({'h2proxys': socket.SOCK_STREAM},
                                  startup, self.env.gen_root, max_tries=3)

    def start(self, wait_live=True):
        """Launch the proxy; return True when it is live (or wait_live is False)."""
        assert self._port > 0
        self._mkpath(self._tmp_dir)
        if self._process:
            self.stop()
        creds = self.env.get_credentials(self._cred_name)
        assert creds  # convince pytype this isn't None
        self._loaded_cred_name = self._cred_name
        args = [
            self._cmd,
            '--http2-proxy',
            f'--frontend=*,{self._port}',
            f'--backend=127.0.0.1,{self.env.proxy_port}',
            '--log-level=ERROR',
            f'--pid-file={self._pid_file}',
            f'--errorlog-file={self._error_log}',
            f'--conf={self._conf_file}',
            f'--cacert={self.env.ca.cert_file}',
            creds.pkey_file,
            creds.cert_file,
        ]
        # Context manager avoids leaking the log fd in the parent process
        # (the original never closed it); the child keeps its own duplicate.
        with open(self._stderr, 'a') as ngerr:
            self._process = subprocess.Popen(args=args, stderr=ngerr)
        if self._process.returncode is not None:
            return False
        return not wait_live or self.wait_live(timeout=timedelta(seconds=Env.SERVER_TIMEOUT))

    def wait_dead(self, timeout: timedelta):
        """Poll with curl until the proxy stops answering; True if it died in time."""
        curl = CurlClient(env=self.env, run_dir=self._tmp_dir)
        try_until = datetime.now() + timeout
        while datetime.now() < try_until:
            check_url = f'https://{self.env.proxy_domain}:{self._port}/'
            r = curl.http_get(url=check_url)
            if r.exit_code != 0:
                return True
            log.debug(f'waiting for nghttpx-fwd to stop responding: {r}')
            time.sleep(.1)
        log.debug(f"Server still responding after {timeout}")
        return False

    def wait_live(self, timeout: timedelta):
        """Poll with curl until the proxy answers; True if it came up in time."""
        curl = CurlClient(env=self.env, run_dir=self._tmp_dir)
        try_until = datetime.now() + timeout
        while datetime.now() < try_until:
            check_url = f'https://{self.env.proxy_domain}:{self._port}/'
            r = curl.http_get(url=check_url, extra_args=[
                '--trace', 'curl.trace', '--trace-time'
            ])
            if r.exit_code == 0:
                return True
            time.sleep(.1)
        log.error(f"Server still not responding after {timeout}")
        return False
# --------------------------------------------------------------------------
# testenv/ports.py
# --------------------------------------------------------------------------
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
#***************************************************************************
# _ _ ____ _
# Project ___| | | | _ \| |
# / __| | | | |_) | |
# | (__| |_| | _ <| |___
# \___|\___/|_| \_\_____|
#
# Copyright (C) Daniel Stenberg, <daniel@haxx.se>, et al.
#
# This software is licensed as described in the file COPYING, which
# you should have received as part of this distribution. The terms
# are also available at https://curl.se/docs/copyright.html.
#
# You may opt to use, copy, modify, merge, publish, distribute and/or sell
# copies of the Software, and permit persons to whom the Software is
# furnished to do so, under the terms of the COPYING file.
#
# This software is distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY
# KIND, either express or implied.
#
# SPDX-License-Identifier: curl
#
###########################################################################
#
import logging
import os
import socket
from collections.abc import Callable
from typing import Dict
from filelock import FileLock
log = logging.getLogger(__name__)
def alloc_port_set(port_specs: Dict[str, int]) -> Dict[str, int]:
    """Pick one free local port per entry in *port_specs*.

    *port_specs* maps a port name to a socket type (e.g. SOCK_STREAM).
    Each probe socket is bound to 127.0.0.1 with port 0 so the OS assigns
    a free port; all sockets stay open until every port has been chosen so
    the same port cannot be handed out twice, then they are all closed.

    NOTE: the ports are free at return time but not reserved — a racing
    process may still grab one before the caller binds it (callers retry).
    """
    socks = []
    try:
        ports = {}
        for name, ptype in port_specs.items():
            s = socket.socket(type=ptype)
            # register before bind() so a bind failure cannot leak the socket
            socks.append(s)
            s.bind(('127.0.0.1', 0))
            ports[name] = s.getsockname()[1]
        return ports
    finally:
        # Always close the probe sockets, even when allocation fails halfway
        # (the original `except Exception as e: raise e` leaked them).
        for s in socks:
            s.close()
def alloc_ports_and_do(port_spec: Dict[str, int],
                       do_func: Callable[[Dict[str, int]], bool],
                       gen_dir, max_tries=1) -> bool:
    """Allocate a port set and hand it to *do_func*, retrying up to *max_tries*.

    A file lock stored under *gen_dir* serializes port allocation across
    concurrently running test processes. Returns True as soon as *do_func*
    succeeds with one of the allocated sets, False once all tries are spent.
    """
    lock_path = os.path.join(gen_dir, 'ports.lock')
    with FileLock(lock_path):
        remaining = max_tries
        while remaining > 0:
            remaining -= 1
            candidate = alloc_port_set(port_spec)
            if do_func(candidate):
                return True
        return False
# --------------------------------------------------------------------------
# testenv/vsftpd.py
# --------------------------------------------------------------------------
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
#***************************************************************************
# _ _ ____ _
# Project ___| | | | _ \| |
# / __| | | | |_) | |
# | (__| |_| | _ <| |___
# \___|\___/|_| \_\_____|
#
# Copyright (C) Daniel Stenberg, <daniel@haxx.se>, et al.
#
# This software is licensed as described in the file COPYING, which
# you should have received as part of this distribution. The terms
# are also available at https://curl.se/docs/copyright.html.
#
# You may opt to use, copy, modify, merge, publish, distribute and/or sell
# copies of the Software, and permit persons to whom the Software is
# furnished to do so, under the terms of the COPYING file.
#
# This software is distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY
# KIND, either express or implied.
#
# SPDX-License-Identifier: curl
#
###########################################################################
#
import logging
import os
import re
import socket
import subprocess
import time
from datetime import datetime, timedelta
from typing import List, Dict
from .curl import CurlClient, ExecResult
from .env import Env
from .ports import alloc_ports_and_do
log = logging.getLogger(__name__)
class VsFTPD:
    """Manage a vsftpd test server instance, plain FTP or FTPS.

    Writes the daemon's configuration file, starts and stops the process,
    and polls it with curl until it is reachable (or gone).
    """

    def __init__(self, env: Env, with_ssl=False, ssl_implicit=False):
        self.env = env
        self._cmd = env.vsftpd
        self._port = 0
        self._with_ssl = with_ssl
        # implicit TLS only applies when SSL is enabled at all
        self._ssl_implicit = ssl_implicit and with_ssl
        self._scheme = 'ftps' if self._ssl_implicit else 'ftp'
        if self._with_ssl:
            self.name = 'vsftpds'
            self._port_skey = 'ftps'
            self._port_specs = {
                'ftps': socket.SOCK_STREAM,
            }
        else:
            self.name = 'vsftpd'
            self._port_skey = 'ftp'
            self._port_specs = {
                'ftp': socket.SOCK_STREAM,
            }
        self._vsftpd_dir = os.path.join(env.gen_dir, self.name)
        self._run_dir = os.path.join(self._vsftpd_dir, 'run')
        self._docs_dir = os.path.join(self._vsftpd_dir, 'docs')
        self._tmp_dir = os.path.join(self._vsftpd_dir, 'tmp')
        self._conf_file = os.path.join(self._vsftpd_dir, 'test.conf')
        self._pid_file = os.path.join(self._vsftpd_dir, 'vsftpd.pid')
        self._error_log = os.path.join(self._vsftpd_dir, 'vsftpd.log')
        self._process = None
        self.clear_logs()

    @property
    def domain(self):
        """Domain name the server answers for."""
        return self.env.ftp_domain

    @property
    def docs_dir(self):
        """Directory served as the anonymous FTP root."""
        return self._docs_dir

    @property
    def port(self) -> int:
        """Listening port, 0 until allocated by initial_start()."""
        return self._port

    def clear_logs(self):
        """Remove the server's log file, if present."""
        self._rmf(self._error_log)

    def exists(self):
        """True when the vsftpd binary is available."""
        return os.path.exists(self._cmd)

    def is_running(self):
        """True when the spawned process is still alive."""
        if self._process:
            self._process.poll()
            return self._process.returncode is None
        return False

    def start_if_needed(self):
        """Start the server unless it is already running."""
        if not self.is_running():
            return self.start()
        return True

    def stop(self, wait_dead=True):
        """Terminate the process; optionally wait until it stops answering."""
        self._mkpath(self._tmp_dir)
        if self._process:
            self._process.terminate()
            self._process.wait(timeout=2)
            self._process = None
            return not wait_dead or self.wait_dead(timeout=timedelta(seconds=5))
        return True

    def restart(self):
        self.stop()
        return self.start()

    def initial_start(self):
        """Allocate a listening port and start the server, retrying on failure."""
        def startup(ports: Dict[str, int]) -> bool:
            # Adopt the allocated port; publish it only on a successful start.
            self._port = ports[self._port_skey]
            if self.start():
                self.env.update_ports(ports)
                return True
            self.stop()
            self._port = 0
            return False

        return alloc_ports_and_do(self._port_specs, startup,
                                  self.env.gen_root, max_tries=3)

    def start(self, wait_live=True):
        """Launch vsftpd; return True when it is live (or wait_live is False)."""
        assert self._port > 0
        self._mkpath(self._tmp_dir)
        if self._process:
            self.stop()
        self._write_config()
        args = [
            self._cmd,
            f'{self._conf_file}',
        ]
        # Open the log via a context manager so the parent process does not
        # leak the fd (the original never closed it); the child keeps its
        # own duplicate for writing.
        with open(self._error_log, 'a') as procerr:
            self._process = subprocess.Popen(args=args, stderr=procerr)
        if self._process.returncode is not None:
            return False
        return not wait_live or self.wait_live(timeout=timedelta(seconds=Env.SERVER_TIMEOUT))

    def wait_dead(self, timeout: timedelta):
        """Poll with curl until the server stops answering; True if it died in time."""
        curl = CurlClient(env=self.env, run_dir=self._tmp_dir)
        try_until = datetime.now() + timeout
        while datetime.now() < try_until:
            check_url = f'{self._scheme}://{self.domain}:{self.port}/'
            r = curl.ftp_get(urls=[check_url], extra_args=['-v'])
            if r.exit_code != 0:
                return True
            log.debug(f'waiting for vsftpd to stop responding: {r}')
            time.sleep(.1)
        log.debug(f"Server still responding after {timeout}")
        return False

    def wait_live(self, timeout: timedelta):
        """Poll with curl until the server answers; True if it came up in time."""
        curl = CurlClient(env=self.env, run_dir=self._tmp_dir)
        try_until = datetime.now() + timeout
        while datetime.now() < try_until:
            check_url = f'{self._scheme}://{self.domain}:{self.port}/'
            r = curl.ftp_get(urls=[check_url], extra_args=[
                '--trace', 'curl-start.trace', '--trace-time'
            ])
            if r.exit_code == 0:
                return True
            time.sleep(.1)
        log.error(f"Server still not responding after {timeout}")
        return False

    def _rmf(self, path):
        """Remove file `path` if it exists."""
        if os.path.exists(path):
            return os.remove(path)

    def _mkpath(self, path):
        """Create directory `path` (with parents) if missing."""
        if not os.path.exists(path):
            return os.makedirs(path)

    def _write_config(self):
        """Write the vsftpd config, adding SSL settings when enabled."""
        self._mkpath(self._docs_dir)
        self._mkpath(self._tmp_dir)
        conf = [  # base server config
            'listen=YES',
            'run_as_launching_user=YES',
            '#listen_address=127.0.0.1',
            f'listen_port={self.port}',
            'local_enable=NO',
            'anonymous_enable=YES',
            f'anon_root={self._docs_dir}',
            'dirmessage_enable=YES',
            'write_enable=YES',
            'anon_upload_enable=YES',
            'log_ftp_protocol=YES',
            'xferlog_enable=YES',
            'xferlog_std_format=NO',
            f'vsftpd_log_file={self._error_log}',
            '\n',  # blank line separating the base config from what follows
        ]
        if self._with_ssl:
            creds = self.env.get_credentials(self.domain)
            assert creds  # convince pytype this isn't None
            conf.extend([
                'ssl_enable=YES',
                'debug_ssl=YES',
                'allow_anon_ssl=YES',
                f'rsa_cert_file={creds.cert_file}',
                f'rsa_private_key_file={creds.pkey_file}',
                # require_ssl_reuse=YES means ctrl and data connection need to use the same session
                'require_ssl_reuse=NO',
            ])
        if self._ssl_implicit:
            conf.extend([
                'implicit_ssl=YES',
            ])
        with open(self._conf_file, 'w') as fd:
            fd.write("\n".join(conf))

    def get_data_ports(self, r: ExecResult) -> List[int]:
        """Extract data-connection ports from a curl trace in *r*."""
        return [int(m.group(1)) for line in r.trace_lines if
                (m := re.match(r'.*Established 2nd connection to .* \(\S+ port (\d+)\)', line))]
# --------------------------------------------------------------------------
# websocket echo server script
# --------------------------------------------------------------------------
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
#***************************************************************************
# _ _ ____ _
# Project ___| | | | _ \| |
# / __| | | | |_) | |
# | (__| |_| | _ <| |___
# \___|\___/|_| \_\_____|
#
# Copyright (C) Daniel Stenberg, <daniel@haxx.se>, et al.
#
# This software is licensed as described in the file COPYING, which
# you should have received as part of this distribution. The terms
# are also available at https://curl.se/docs/copyright.html.
#
# You may opt to use, copy, modify, merge, publish, distribute and/or sell
# copies of the Software, and permit persons to whom the Software is
# furnished to do so, under the terms of the COPYING file.
#
# This software is distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY
# KIND, either express or implied.
#
# SPDX-License-Identifier: curl
#
###########################################################################
#
import argparse
import asyncio
import logging
from websockets import server
from websockets.exceptions import ConnectionClosedError
async def echo(websocket):
    """Send every message received on *websocket* straight back to the sender.

    A connection closed mid-stream (ConnectionClosedError) ends the loop
    quietly; it is not treated as an error.
    """
    try:
        async for msg in websocket:
            await websocket.send(msg)
    except ConnectionClosedError:
        pass
async def run_server(port):
    """Serve the echo handler on localhost:*port* until cancelled."""
    ws_server = server.serve(echo, "localhost", port)
    async with ws_server:
        await asyncio.Future()  # never resolves: keep serving forever
def main():
    """Parse command-line options and run the websocket echo server."""
    # prog was 'scorecard' — a copy-paste from another test script; this
    # tool is the websocket echo server, as its own description says.
    parser = argparse.ArgumentParser(prog='ws-echo-server', description="""
        Run a websocket echo server.
        """)
    parser.add_argument("--port", type=int,
                        default=9876, help="port to listen on")
    args = parser.parse_args()
    logging.basicConfig(
        format="%(asctime)s %(message)s",
        level=logging.DEBUG,
    )
    asyncio.run(run_server(args.port))
# Allow running this module directly as a script.
if __name__ == "__main__":
    main()