Skip to content

Commit

Permalink
Merge pull request #103 from kuefmz/fix_issue_head_request
Browse files Browse the repository at this point in the history
In case of HEAD request do not return body
  • Loading branch information
JJ-Author authored Oct 21, 2024
2 parents 5177a89 + 8a632fb commit 31eaec9
Show file tree
Hide file tree
Showing 3 changed files with 23 additions and 36 deletions.
6 changes: 2 additions & 4 deletions ontologytimemachine/custom_proxy.py
Original file line number Diff line number Diff line change
Expand Up @@ -37,7 +37,7 @@ def __init__(self, *args, **kwargs):
logger.info(f"Init - Object ID: {id(self)}")
super().__init__(*args, **kwargs)
self.config = config
logger.info(self.config)
logger.info(f"Config: {self.config}")

def before_upstream_connection(self, request: HttpParser) -> HttpParser | None:
# self.client.config = None
Expand Down Expand Up @@ -67,7 +67,7 @@ def before_upstream_connection(self, request: HttpParser) -> HttpParser | None:
and self.config.clientConfigViaProxyAuth
== ClientConfigViaProxyAuth.OPTIONAL
):
logger.info("Auth configuration is optional, not procided.")
logger.info("Auth configuration is optional, not provided.")
if config_from_auth and not hasattr(self.client, "config"):
self.client.config = config_from_auth
logger.info(f"New config: {config_from_auth}")
Expand Down Expand Up @@ -96,8 +96,6 @@ def before_upstream_connection(self, request: HttpParser) -> HttpParser | None:
logger.info("CONNECT request was blocked due to the configuration")
return None

# # If only ontology mode, return None in all other cases
logger.info(f"Config: {config}")
response = get_response_from_request(wrapped_request, config)
if response:
self.queue_response(response)
Expand Down
31 changes: 21 additions & 10 deletions ontologytimemachine/utils/proxy_logic.py
Original file line number Diff line number Diff line change
Expand Up @@ -145,12 +145,19 @@ def is_archivo_ontology_request(wrapped_request):
return False


def request_ontology(url, headers, disableRemovingRedirects=False, timeout=5):
def request_ontology(
wrapped_request, url, headers, disableRemovingRedirects=False, timeout=5
):
allow_redirects = not disableRemovingRedirects
try:
response = requests.get(
url=url, headers=headers, allow_redirects=allow_redirects, timeout=5
)
if wrapped_request.is_head_request():
response = requests.head(
url=url, headers=headers, allow_redirects=allow_redirects, timeout=5
)
else:
response = requests.get(
url=url, headers=headers, allow_redirects=allow_redirects, timeout=5
)
logger.info("Successfully fetched ontology")
return response
except Exception as e:
Expand All @@ -176,7 +183,7 @@ def proxy_logic(wrapped_request, config):

if config.ontoVersion == OntoVersion.ORIGINAL:
ontology, _, _ = wrapped_request.get_request_url_host_path()
response = fetch_original(ontology, headers, config)
response = fetch_original(wrapped_request, ontology, headers, config)
elif config.ontoVersion == OntoVersion.ORIGINAL_FAILOVER_LIVE_LATEST:
response = fetch_failover(
wrapped_request, headers, config.disableRemovingRedirects
Expand All @@ -193,16 +200,20 @@ def proxy_logic(wrapped_request, config):


# Fetch from the original source, no matter what
def fetch_original(wrapped_request, ontology, headers, disableRemovingRedirects):
    """Fetch the ontology straight from its original URL (no failover, no archive).

    Args:
        wrapped_request: Incoming request wrapper; forwarded so the downstream
            fetch can mirror the client's HTTP method (HEAD requests are
            re-issued as HEAD rather than GET — see ``request_ontology``).
        ontology: URL of the ontology to retrieve.
        headers: HTTP headers to forward with the outgoing request.
        disableRemovingRedirects: Truthy value disables following redirects
            in ``request_ontology``.
            NOTE(review): ``proxy_logic`` passes its whole ``config`` object
            as this argument; a truthy config silently disables redirect
            following — confirm whether ``config.disableRemovingRedirects``
            was intended instead.

    Returns:
        Whatever ``request_ontology`` returns for this URL (a
        ``requests.Response`` on success).
    """
    logger.info(f"Fetching original ontology from URL: {ontology}")
    return request_ontology(
        wrapped_request, ontology, headers, disableRemovingRedirects
    )


# Failover mode
def fetch_failover(wrapped_request, headers, disableRemovingRedirects):
ontology, _, _ = wrapped_request.get_request_url_host_path()
logger.info(f"Fetching original ontology with failover from URL: {ontology}")
original_response = request_ontology(ontology, headers, disableRemovingRedirects)
original_response = request_ontology(
wrapped_request, ontology, headers, disableRemovingRedirects
)
if original_response.status_code in passthrough_status_codes:
requested_mimetypes_with_priority = parse_accept_header_with_priority(
headers["Accept"]
Expand Down Expand Up @@ -237,7 +248,7 @@ def fetch_latest_archived(wrapped_request, headers):
ontology, _, _ = wrapped_request.get_request_url_host_path()
dbpedia_url = f"{archivo_api}?o={ontology}&f={format}"
logger.info(f"Fetching from DBpedia Archivo API: {dbpedia_url}")
return request_ontology(dbpedia_url, headers)
return request_ontology(wrapped_request, dbpedia_url, headers)


def fetch_timestamp_archived(wrapped_request, headers, config):
Expand All @@ -251,7 +262,7 @@ def fetch_timestamp_archived(wrapped_request, headers, config):
ontology, _, _ = wrapped_request.get_request_url_host_path()
dbpedia_url = f"{archivo_api}?o={ontology}&f={format}&v={config.timestamp}"
logger.info(f"Fetching from DBpedia Archivo API: {dbpedia_url}")
return request_ontology(dbpedia_url, headers)
return request_ontology(wrapped_request, dbpedia_url, headers)


def fetch_dependency_manifest(ontology, headers, manifest):
Expand Down
22 changes: 0 additions & 22 deletions tests/test_proxy_logic.py

This file was deleted.

0 comments on commit 31eaec9

Please sign in to comment.