Skip to content
New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

🐛 Source Amazon Seller Partner: Fix check for Vendor accounts #35331

Merged
merged 7 commits
Feb 19, 2024
Merged
Show file tree
Hide file tree
Changes from 5 commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
Original file line number Diff line number Diff line change
Expand Up @@ -15,7 +15,7 @@ data:
connectorSubtype: api
connectorType: source
definitionId: e55879a8-0ef8-4557-abcf-ab34c53ec460
dockerImageTag: 3.4.0
dockerImageTag: 3.5.0
dockerRepository: airbyte/source-amazon-seller-partner
documentationUrl: https://docs.airbyte.com/integrations/sources/amazon-seller-partner
githubIssueLabel: source-amazon-seller-partner
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -3,6 +3,7 @@
#


import traceback
from os import getenv
from typing import Any, List, Mapping, Optional, Tuple

Expand Down Expand Up @@ -115,10 +116,11 @@ def check_connection(self, logger: AirbyteLogger, config: Mapping[str, Any]) ->

if config.get("account_type", "Seller") == "Seller":
stream_to_check = Orders(**stream_kwargs)
next(stream_to_check.read_records(sync_mode=SyncMode.full_refresh))
else:
stream_to_check = VendorSalesReports(**stream_kwargs)

next(stream_to_check.read_records(sync_mode=SyncMode.full_refresh))
stream_to_check = VendorOrders(**stream_kwargs)
Copy link
Collaborator

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

Should we add a notice in the documentation? (e.g. "If you want to pass the check with a vendor or sales account, you'll need to have access to certain APIs.")

Copy link
Contributor Author

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

Fixed

stream_slices = list(stream_to_check.stream_slices(sync_mode=SyncMode.full_refresh))
next(stream_to_check.read_records(sync_mode=SyncMode.full_refresh, stream_slice=stream_slices[0]))

return True, None
except Exception as e:
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -6,6 +6,7 @@
import csv
import gzip
import json
import logging
import os
import time
from abc import ABC, abstractmethod
Expand Down Expand Up @@ -282,6 +283,15 @@ def _retrieve_report(self, report_id: str) -> Mapping[str, Any]:

return report_payload

def _retrieve_report_result(self, report_document_id: str) -> requests.Response:
    """Fetch the report result document identified by *report_document_id*.

    Builds an authenticated prepared request against the report-document
    endpoint and dispatches it through the stream's request machinery,
    returning the raw HTTP response.
    """
    auth_headers = self.authenticator.get_auth_header()
    prepared_request = self._create_prepared_request(
        path=self.path(document_id=report_document_id),
        headers={**self.request_headers(), **auth_headers},
        params=self.request_params(),
    )
    return self._send_request(prepared_request, {})

@default_backoff_handler(factor=5, max_tries=5)
def download_and_decompress_report_document(self, payload: dict) -> str:
"""
Expand Down Expand Up @@ -381,23 +391,29 @@ def read_records(
if processing_status == ReportProcessingStatus.DONE:
# retrieve and decrypt the report document
document_id = report_payload["reportDocumentId"]
request_headers = self.request_headers()
request = self._create_prepared_request(
path=self.path(document_id=document_id),
headers=dict(request_headers, **self.authenticator.get_auth_header()),
params=self.request_params(),
)
response = self._send_request(request, {})
response = self._retrieve_report_result(document_id)

for record in self.parse_response(response, stream_state, stream_slice):
if report_end_date:
record["dataEndTime"] = report_end_date.strftime(DATE_FORMAT)
yield record
elif processing_status == ReportProcessingStatus.FATAL:
# retrieve and decrypt the report document
try:
document_id = report_payload["reportDocumentId"]
response = self._retrieve_report_result(document_id)

document = self.download_and_decompress_report_document(response.json())
error_response = json.loads(document)
except Exception as e:
logging.error(f"Failed to retrieve the report result document for stream '{self.name}'. Exception: {e}")
error_response = "Failed to retrieve the report result document."

raise AirbyteTracedException(
internal_message=(
f"Failed to retrieve the report '{self.name}' for period "
f"{stream_slice['dataStartTime']}-{stream_slice['dataEndTime']} "
"due to Amazon Seller Partner platform issues. This will be read during the next sync."
f"{stream_slice['dataStartTime']}-{stream_slice['dataEndTime']}. "
f"This will be read during the next sync. Error: {error_response}"
)
)
elif processing_status == ReportProcessingStatus.CANCELLED:
Expand Down
Loading
Loading