Skip to content

Commit

Permalink
Merge pull request #73 from ssenart/develop
Browse files Browse the repository at this point in the history
[#72] Remove the warning message "UserWarning: Boolean Series key will be reindexed to match DataFrame index. df = pd.concat([df[(df["count"] >= 7)], df.tail(1)[df["count"] < 7]])"
  • Loading branch information
ssenart authored Oct 9, 2024
2 parents 6b0babf + 53e34fc commit 29f9e1a
Show file tree
Hide file tree
Showing 3 changed files with 13 additions and 8 deletions.
5 changes: 5 additions & 0 deletions CHANGELOG.md
Original file line number Diff line number Diff line change
Expand Up @@ -4,6 +4,11 @@ All notable changes to this project will be documented in this file.
The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/),
and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0.html).

## [1.2.4](https://github.com/ssenart/PyGazpar/compare/1.2.3...1.2.4) - 2024-10-09

### Fixed
- [#72](https://github.com/ssenart/PyGazpar/issues/72): Remove the warning message "UserWarning: Boolean Series key will be reindexed to match DataFrame index. df = pd.concat([df[(df["count"] >= 7)], df.tail(1)[df["count"] < 7]])".

## [1.2.3](https://github.com/ssenart/PyGazpar/compare/1.2.1...1.2.3) - 2024-10-05

### Added
Expand Down
14 changes: 7 additions & 7 deletions pygazpar/datasource.py
Original file line number Diff line number Diff line change
Expand Up @@ -94,14 +94,14 @@ def _login(self, username: str, password: str) -> str:

params = json.loads(AUTH_TOKEN_PARAMS.format(session_token))

response = session.get(AUTH_TOKEN_URL, params=params, allow_redirects=True, cookies=jar)
response = session.get(AUTH_TOKEN_URL, params=params, allow_redirects=True, cookies=jar) # type: ignore

if response.status_code != 200:
raise Exception(f"An error occurred while getting the auth token. Status code: {response.status_code} - {response.text}")

auth_token = session.cookies.get("auth_token", domain="monespace.grdf.fr")

return auth_token
return auth_token # type: ignore

@abstractmethod
def _loadFromSession(self, auth_token: str, pceIdentifier: str, startDate: date, endDate: date, frequencies: Optional[List[Frequency]] = None) -> MeterReadingsByFrequency:
Expand Down Expand Up @@ -211,7 +211,7 @@ def __downloadFile(self, session: Session, url: str, path: str):

response = session.get(url)

if "text/html" in response.headers.get("Content-Type"):
if "text/html" in response.headers.get("Content-Type"): # type: ignore
raise Exception("An error occurred while loading data. Please check your credentials.")

if response.status_code != 200:
Expand Down Expand Up @@ -297,7 +297,7 @@ def _loadFromSession(self, auth_token: str, pceIdentifier: str, startDate: date,
try:
response = session.get(downloadUrl)

if "text/html" in response.headers.get("Content-Type"):
if "text/html" in response.headers.get("Content-Type"): # type: ignore
raise Exception("An error occurred while loading data. Please check your credentials.")

if response.status_code != 200:
Expand Down Expand Up @@ -466,7 +466,7 @@ def computeWeekly(daily: List[Dict[str, Any]]) -> List[Dict[str, Any]]:
df = df.sort_values(by=['first_day_of_week'])

# Select rows where we have a full week (7 days) except for the current week.
df = pd.concat([df[(df["count"] >= 7)], df.tail(1)[df["count"] < 7]])
df = pd.concat([df[(df["count"] >= 7)], df.tail(1)[df.tail(1)["count"] < 7]])

# Select target columns.
df = df[["time_period", "start_index_m3", "end_index_m3", "volume_m3", "energy_kwh", "timestamp"]]
Expand Down Expand Up @@ -494,7 +494,7 @@ def computeMonthly(daily: List[Dict[str, Any]]) -> List[Dict[str, Any]]:
df = df.sort_values(by=['first_day_of_month'])

# Select rows where we have a full month (more than 27 days) except for the current month.
df = pd.concat([df[(df["count"] >= 28)], df.tail(1)[df["count"] < 28]])
df = pd.concat([df[(df["count"] >= 28)], df.tail(1)[df.tail(1)["count"] < 28]])

# Rename columns for their target names.
df = df.rename(columns={"month_year": "time_period"})
Expand Down Expand Up @@ -525,7 +525,7 @@ def computeYearly(daily: List[Dict[str, Any]]) -> List[Dict[str, Any]]:
df = df.sort_values(by=['year'])

# Select rows where we have almost a full year (more than 360) except for the current year.
df = pd.concat([df[(df["count"] >= 360)], df.tail(1)[df["count"] < 360]])
df = pd.concat([df[(df["count"] >= 360)], df.tail(1)[df.tail(1)["count"] < 360]])

# Rename columns for their target names.
df = df.rename(columns={"year": "time_period"})
Expand Down
2 changes: 1 addition & 1 deletion pygazpar/excelparser.py
Original file line number Diff line number Diff line change
Expand Up @@ -33,7 +33,7 @@ def parse(dataFilename: str, dataReadingFrequency: Frequency) -> List[Dict[str,

worksheet = workbook.active

res = parseByFrequency[dataReadingFrequency](worksheet)
res = parseByFrequency[dataReadingFrequency](worksheet) # type: ignore

workbook.close()

Expand Down

0 comments on commit 29f9e1a

Please sign in to comment.