
Commit 741c727
Authored Nov 25, 2023

Setup pre-commit (#124)

* Setup pre-commit hooks for black, flake8, isort

1 parent 2747f0d, commit 741c727

12 files changed: +633 -415 lines

.flake8 (new file, +3)

@@ -0,0 +1,3 @@
+[flake8]
+max-line-length = 88
+extend-ignore = E203, E704

.pre-commit-config.yaml (new file, +49)

@@ -0,0 +1,49 @@
+ci:
+  autoupdate_schedule: monthly
+  autofix_prs: false
+
+repos:
+  - repo: https://github.com/pre-commit/pre-commit-hooks
+    rev: v4.4.0
+    hooks:
+      - id: trailing-whitespace
+      - id: end-of-file-fixer
+      - id: check-docstring-first
+      - id: check-json
+      - id: check-yaml
+      - id: double-quote-string-fixer
+      - id: debug-statements
+      - id: mixed-line-ending
+
+  - repo: https://github.com/asottile/pyupgrade
+    rev: v3.3.1
+    hooks:
+      - id: pyupgrade
+        args:
+          - '--py38-plus'
+
+  - repo: https://github.com/psf/black
+    rev: 23.3.0
+    hooks:
+      - id: black
+      - id: black-jupyter
+
+  - repo: https://github.com/keewis/blackdoc
+    rev: v0.3.8
+    hooks:
+      - id: blackdoc
+
+  - repo: https://github.com/PyCQA/flake8
+    rev: 6.0.0
+    hooks:
+      - id: flake8
+
+  - repo: https://github.com/PyCQA/isort
+    rev: 5.12.0
+    hooks:
+      - id: isort
+
+  - repo: https://github.com/pre-commit/mirrors-prettier
+    rev: v3.0.0-alpha.6
+    hooks:
+      - id: prettier
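Once this configuration lands, contributors enable the hooks locally with `pip install pre-commit` followed by `pre-commit install`; from then on the configured checks run against staged files at every `git commit`, and `pre-commit run --all-files` checks the whole tree. As a minimal sketch (assuming `pre-commit` is installed in the active environment), the same full-repository check can also be driven from Python:

# Minimal sketch: invoke the pre-commit CLI from Python.
# Equivalent to running `pre-commit run --all-files` in a shell.
import subprocess

result = subprocess.run(
    ['pre-commit', 'run', '--all-files'],  # run every hook on the whole tree
    capture_output=True,
    text=True,
)
print(result.stdout)  # hook-by-hook pass/fail report
print('all hooks passed' if result.returncode == 0 else 'some hooks failed')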

.prettierrc.toml (new file, +2)

@@ -0,0 +1,2 @@
+semi = false
+singleQuote = true

dataretrieval/__init__.py (+3 -3)

@@ -1,5 +1,5 @@
-from importlib.metadata import version
-from importlib.metadata import PackageNotFoundError
+from importlib.metadata import PackageNotFoundError, version
+
 from dataretrieval.nadp import *
 from dataretrieval.nwis import *
 from dataretrieval.streamstats import *
@@ -10,4 +10,4 @@
 try:
     __version__ = version('dataretrieval')
 except PackageNotFoundError:
-    __version__ = "version-unknown"
+    __version__ = 'version-unknown'

dataretrieval/codes/states.py (+51 -51)

@@ -1,54 +1,54 @@
 """List of 2-digit state codes with commented full names."""
 state_codes = [
-    'al', # Alabama
-    'ak', # Alaska
-    'az', # Arizona
-    'ar', # Arkansas
-    'ca', # California
-    'co', # Colorado
-    'ct', # Connecticut
-    'de', # Delaware
-    'dc', # District of Columbia
-    'fl', # Florida
-    'ga', # Georgia
-    'hi', # Hawaii
-    'id', # Idaho
-    'il', # Illinois
-    'in', # Indiana
-    'ia', # Iowa
-    'ks', # Kansas
-    'ky', # Kentucky
-    'la', # Louisiana
-    'me', # Maine
-    'md', # Maryland
-    'ma', # Massachusetts
-    'mi', # Michigan
-    'mn', # Minnesota
-    'ms', # Mississippi
-    'mo', # Missouri
-    'mt', # Montana
-    'ne', # Nebraska
-    'nv', # Nevada
-    'nh', # New Hampshire
-    'nj', # New Jersey
-    'nm', # New Mexico
-    'ny', # New York
-    'nc', # North Carolina
-    'nd', # North Dakota
-    'oh', # Ohio
-    'ok', # Oklahoma
-    'or', # Oregon
-    'pa', # Pennsylvania
-    'ri', # Rhode Island
-    'sc', # South Carolina
-    'sd', # South Dakota
-    'tn', # Tennessee
-    'tx', # Texas
-    'ut', # Utah
-    'vt', # Vermont
-    'va', # Virginia
-    'wa', # Washington
-    'wv', # West Virginia
-    'wi', # Wisconsin
-    'wy', # Wyoming
+    'al',  # Alabama
+    'ak',  # Alaska
+    'az',  # Arizona
+    'ar',  # Arkansas
+    'ca',  # California
+    'co',  # Colorado
+    'ct',  # Connecticut
+    'de',  # Delaware
+    'dc',  # District of Columbia
+    'fl',  # Florida
+    'ga',  # Georgia
+    'hi',  # Hawaii
+    'id',  # Idaho
+    'il',  # Illinois
+    'in',  # Indiana
+    'ia',  # Iowa
+    'ks',  # Kansas
+    'ky',  # Kentucky
+    'la',  # Louisiana
+    'me',  # Maine
+    'md',  # Maryland
+    'ma',  # Massachusetts
+    'mi',  # Michigan
+    'mn',  # Minnesota
+    'ms',  # Mississippi
+    'mo',  # Missouri
+    'mt',  # Montana
+    'ne',  # Nebraska
+    'nv',  # Nevada
+    'nh',  # New Hampshire
+    'nj',  # New Jersey
+    'nm',  # New Mexico
+    'ny',  # New York
+    'nc',  # North Carolina
+    'nd',  # North Dakota
+    'oh',  # Ohio
+    'ok',  # Oklahoma
+    'or',  # Oregon
+    'pa',  # Pennsylvania
+    'ri',  # Rhode Island
+    'sc',  # South Carolina
+    'sd',  # South Dakota
+    'tn',  # Tennessee
+    'tx',  # Texas
+    'ut',  # Utah
+    'vt',  # Vermont
+    'va',  # Virginia
+    'wa',  # Washington
+    'wv',  # West Virginia
+    'wi',  # Wisconsin
+    'wy',  # Wyoming
 ]

dataretrieval/nadp.py (+35 -22)

@@ -1,6 +1,7 @@
 """
-Tools for retrieving data from the National Atmospheric Deposition Program (NADP) including
-the National Trends Network (NTN), the Mercury Deposition Network (MDN).
+Tools for retrieving data from the National Atmospheric Deposition Program
+(NADP) including the National Trends Network (NTN), the Mercury Deposition
+Network (MDN).
 
 National Trends Network
 -----------------------
@@ -28,32 +29,43 @@
 
 """
 
-import requests
-import zipfile
 import io
 import os
 import re
+import zipfile
 from os.path import basename
 
+import requests
 
 NADP_URL = 'https://nadp.slh.wisc.edu'
 NADP_MAP_EXT = 'filelib/maps'
 
-NTN_CONC_PARAMS = ['pH', 'So4', 'NO3', 'NH4', 'Ca',
-                   'Mg', 'K', 'Na', 'Cl', 'Br']
-NTN_DEP_PARAMS = ['H', 'So4', 'NO3', 'NH4', 'Ca', 'Mg',
-                  'K', 'Na', 'Cl', 'Br', 'N', 'SPlusN']
+NTN_CONC_PARAMS = ['pH', 'So4', 'NO3', 'NH4', 'Ca', 'Mg', 'K', 'Na', 'Cl', 'Br']
+NTN_DEP_PARAMS = [
+    'H',
+    'So4',
+    'NO3',
+    'NH4',
+    'Ca',
+    'Mg',
+    'K',
+    'Na',
+    'Cl',
+    'Br',
+    'N',
+    'SPlusN',
+]
 
 NTN_MEAS_TYPE = ['conc', 'dep', 'precip']  # concentration or deposition
 
 
 class NADP_ZipFile(zipfile.ZipFile):
-    """Extend zipfile.ZipFile for working on data from NADP
-    """
+    """Extend zipfile.ZipFile for working on data from NADP"""
+
     def tif_name(self):
         """Get the name of the tif file in the zip file."""
         filenames = self.namelist()
-        r = re.compile(".*tif$")
+        r = re.compile('.*tif$')
         tif_list = list(filter(r.match, filenames))
         return tif_list[0]
 
@@ -93,23 +105,23 @@ def get_annual_MDN_map(measurement_type, year, path):
 
     >>> # get map of mercury concentration in 2010 and extract it to a path
     >>> data_path = dataretrieval.nadp.get_annual_MDN_map(
-    ...     measurement_type='conc', year='2010', path='somepath')
+    ...     measurement_type='conc', year='2010', path='somepath'
+    ... )
 
     """
-    url = '{}/{}/MDN/grids/'.format(NADP_URL, NADP_MAP_EXT)
+    url = f'{NADP_URL}/{NADP_MAP_EXT}/MDN/grids/'
 
-    filename = 'Hg_{}_{}.zip'.format(measurement_type, year)
+    filename = f'Hg_{measurement_type}_{year}.zip'
 
     z = get_zip(url, filename)
 
     if path:
         z.extractall(path)
 
-    return '{}{}{}'.format(path, os.sep, basename(filename))
+    return f'{path}{os.sep}{basename(filename)}'
 
 
-def get_annual_NTN_map(measurement_type, measurement=None, year=None,
-                       path="."):
+def get_annual_NTN_map(measurement_type, measurement=None, year=None, path='.'):
     """Download a NTN map from NDAP.
 
     This function looks for a zip file containing gridded information at:
@@ -146,22 +158,23 @@ def get_annual_NTN_map(measurement_type, measurement=None, year=None,
 
     >>> # get a map of precipitation in 2015 and extract it to a path
     >>> data_path = dataretrieval.nadp.get_annual_NTN_map(
-    ...     measurement_type='Precip', year='2015', path='somepath')
+    ...     measurement_type='Precip', year='2015', path='somepath'
+    ... )
 
     """
-    url = '{}/{}/NTN/grids/{}/'.format(NADP_URL, NADP_MAP_EXT, year)
+    url = f'{NADP_URL}/{NADP_MAP_EXT}/NTN/grids/{year}/'
 
-    filename = '{}_{}.zip'.format(measurement_type, year)
+    filename = f'{measurement_type}_{year}.zip'
 
     if measurement:
-        filename = '{}_{}'.format(measurement, filename)
+        filename = f'{measurement}_{filename}'
 
     z = get_zip(url, filename)
 
     if path:
         z.extractall(path)
 
-    return '{}{}{}'.format(path, os.sep, basename(filename))
+    return f'{path}{os.sep}{basename(filename)}'
 
 
 def get_zip(url, filename):
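The nadp.py hunks above are dominated by two mechanical rewrites: pyupgrade (run with `--py38-plus` per the new config) converts `str.format()` calls to f-strings, and black reflows long literals. A small illustrative check, reusing the module's own constants, that the conversion is behavior-preserving:

# Illustrative only: the old and new URL-building styles from this diff
# produce identical strings; constants copied from dataretrieval/nadp.py.
NADP_URL = 'https://nadp.slh.wisc.edu'
NADP_MAP_EXT = 'filelib/maps'

old_style = '{}/{}/MDN/grids/'.format(NADP_URL, NADP_MAP_EXT)
new_style = f'{NADP_URL}/{NADP_MAP_EXT}/MDN/grids/'

assert old_style == new_style  # the refactor changes style, not behavior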

dataretrieval/nwis.py (+355 -247)

Large diffs are not rendered by default.

dataretrieval/streamstats.py (+27 -11)

@@ -6,6 +6,7 @@
 """
 
 import json
+
 import requests
 
 
@@ -35,12 +36,12 @@ def download_workspace(workspaceID, format=''):
 
     r.raise_for_status()
     return r
-    #data = r.raw.read()
+    # data = r.raw.read()
 
-    #with open(filepath, 'wb') as f:
+    # with open(filepath, 'wb') as f:
     #    f.write(data)
 
-    #return
+    # return
 
 
 def get_sample_watershed():
@@ -60,10 +61,18 @@ def get_sample_watershed():
     return get_watershed('NY', -74.524, 43.939)
 
 
-def get_watershed(rcode, xlocation, ylocation, crs=4326,
-                  includeparameters=True, includeflowtypes=False,
-                  includefeatures=True, simplify=True, format='geojson'):
-    """ Get watershed object based on location
+def get_watershed(
+    rcode,
+    xlocation,
+    ylocation,
+    crs=4326,
+    includeparameters=True,
+    includeflowtypes=False,
+    includefeatures=True,
+    simplify=True,
+    format='geojson',
+):
+    """Get watershed object based on location
 
     **Streamstats documentation:**
     Returns a watershed object. The request configuration will determine the
@@ -104,10 +113,16 @@ def get_watershed(rcode, xlocation, ylocation, crs=4326,
     from the streamstats JSON object.
 
     """
-    payload = {'rcode': rcode, 'xlocation': xlocation, 'ylocation': ylocation,
-               'crs': crs, 'includeparameters': includeparameters,
-               'includeflowtypes': includeflowtypes,
-               'includefeatures': includefeatures, 'simplify': simplify}
+    payload = {
+        'rcode': rcode,
+        'xlocation': xlocation,
+        'ylocation': ylocation,
+        'crs': crs,
+        'includeparameters': includeparameters,
+        'includeflowtypes': includeflowtypes,
+        'includefeatures': includefeatures,
+        'simplify': simplify,
+    }
     url = 'https://streamstats.usgs.gov/streamstatsservices/watershed.geojson'
 
     r = requests.get(url, params=payload)
@@ -131,6 +146,7 @@ def get_watershed(rcode, xlocation, ylocation, crs=4326,
 
 class Watershed:
     """Class to extract information from the streamstats JSON object."""
+
     @classmethod
     def from_streamstats_json(cls, streamstats_json):
         """Method that creates a Watershed object from a streamstats JSON."""

dataretrieval/utils.py (+55 -46)

@@ -2,11 +2,14 @@
 Useful utilities for data munging.
 """
 import warnings
+
 import pandas as pd
 import requests
+
 import dataretrieval
 from dataretrieval.codes import tz
 
+
 def to_str(listlike, delimiter=','):
     """Translates list-like objects into strings.
 
@@ -74,23 +77,25 @@ def format_datetime(df, date_field, time_field, tz_field):
     # create a datetime index from the columns in qwdata response
     df[tz_field] = df[tz_field].map(tz)
 
-    df['datetime'] = pd.to_datetime(df[date_field] + ' ' +
-                                    df[time_field] + ' ' +
-                                    df[tz_field],
-                                    format='ISO8601',
-                                    utc=True)
+    df['datetime'] = pd.to_datetime(
+        df[date_field] + ' ' + df[time_field] + ' ' + df[tz_field],
+        format='ISO8601',
+        utc=True,
+    )
 
     # if there are any incomplete dates, warn the user
-    if any(pd.isna(df['datetime'])):
-        count = sum(pd.isna(df['datetime']) == True)
+    if df['datetime'].isna().any():
+        count = df['datetime'].isna().sum()
         warnings.warn(
-            f'Warning: {count} incomplete dates found, ' +
-            'consider setting datetime_index to False.', UserWarning)
+            f'Warning: {count} incomplete dates found, '
+            + 'consider setting datetime_index to False.',
+            UserWarning,
+        )
 
     return df
 
 
-#This function may be deprecated once pandas.update support joins besides left.
+# This function may be deprecated once pandas.update support joins besides left.
 def update_merge(left, right, na_only=False, on=None, **kwargs):
     """Performs a combination update and merge.
 
@@ -113,30 +118,30 @@ def update_merge(left, right, na_only=False, on=None, **kwargs):
     add na_only parameter support
 
     """
-    #df = left.merge(right, how='outer',
+    # df = left.merge(right, how='outer',
    #                left_index=True, right_index=True)
     df = left.merge(right, how='outer', on=on, **kwargs)
 
-
     # check for column overlap and resolve update
     for column in df.columns:
-        #if duplicated column, use the value from right
+        # if duplicated column, use the value from right
         if column[-2:] == '_x':
-            name = column[:-2] # find column name
+            name = column[:-2]  # find column name
 
             if na_only:
-                df[name] = df[name+'_x'].fillna(df[name+'_y'])
+                df[name] = df[name + '_x'].fillna(df[name + '_y'])
 
             else:
-                df[name] = df[name+'_x'].update(df[name+'_y'])
+                df[name] = df[name + '_x'].update(df[name + '_y'])
 
             df.drop([name + '_x', name + '_y'], axis=1, inplace=True)
 
     return df
 
+
 class BaseMetadata:
     """Base class for metadata.
-
+
     Attributes
     ----------
     url : str
@@ -145,9 +150,9 @@ class BaseMetadata:
         Response elapsed time
     header: requests.structures.CaseInsensitiveDict
         Response headers
-
+
     """
-
+
     def __init__(self, response) -> None:
         """Generates a standard set of metadata informed by the response.
 
@@ -168,30 +173,29 @@ def __init__(self, response) -> None:
         self.query_time = response.elapsed
         self.header = response.headers
         self.comment = None
-
+
         # # not sure what statistic_info is
         # self.statistic_info = None
-
+
        # # disclaimer seems to be only part of importWaterML1
        # self.disclaimer = None
-
+
    # These properties are to be set by `nwis` or `wqp`-specific metadata classes.
    @property
    def site_info(self):
        raise NotImplementedError(
-            "site_info must be implemented by utils.BaseMetadata children"
+            'site_info must be implemented by utils.BaseMetadata children'
        )
-
+
    @property
    def variable_info(self):
        raise NotImplementedError(
-            "variable_info must be implemented by utils.BaseMetadata children"
+            'variable_info must be implemented by utils.BaseMetadata children'
        )
 
-
    def __repr__(self) -> str:
-        return f"{type(self).__name__}(url={self.url})"
-
+        return f'{type(self).__name__}(url={self.url})'
+
 
 def query(url, payload, delimiter=',', ssl_check=True):
     """Send a query.
 
@@ -219,37 +223,40 @@ def query(url, payload, delimiter=',', ssl_check=True):
 
     for key, value in payload.items():
         payload[key] = to_str(value, delimiter)
-    #for index in range(len(payload)):
+    # for index in range(len(payload)):
     #    key, value = payload[index]
     #    payload[index] = (key, to_str(value))
 
     # define the user agent for the query
-    user_agent = {
-        'user-agent': f"python-dataretrieval/{dataretrieval.__version__}"}
+    user_agent = {'user-agent': f'python-dataretrieval/{dataretrieval.__version__}'}
 
-    response = requests.get(url, params=payload,
-                            headers=user_agent, verify=ssl_check)
+    response = requests.get(url, params=payload, headers=user_agent, verify=ssl_check)
 
     if response.status_code == 400:
-        raise ValueError("Bad Request, check that your parameters are correct. URL: {}".format(response.url))
+        raise ValueError(
+            f'Bad Request, check that your parameters are correct. URL: {response.url}'
+        )
     elif response.status_code == 404:
         raise ValueError(
-            "Page Not Found Error. May be the result of an empty query. " +
-            f"URL: {response.url}")
+            'Page Not Found Error. May be the result of an empty query. '
+            + f'URL: {response.url}'
+        )
     elif response.status_code == 414:
         _reason = response.reason
         _example = """
-        split_list = np.array_split(site_list, n)  # n is number of chunks to divide query into \n
+        # n is the number of chunks to divide the query into \n
+        split_list = np.array_split(site_list, n)
         data_list = []  # list to store chunk results in \n
         # loop through chunks and make requests \n
         for site_list in split_list: \n
-            data = nwis.get_record(sites=site_list, service='dv', start=start, end=end) \n
+            data = nwis.get_record(sites=site_list, service='dv', \n
+                                   start=start, end=end) \n
            data_list.append(data)  # append results to list"""
         raise ValueError(
-            "Request URL too long. Modify your query to use fewer sites. " +
-            f"API response reason: {_reason}. Pseudo-code example of how to " +
-            f"split your query: \n {_example}"
-        )
+            'Request URL too long. Modify your query to use fewer sites. '
+            + f'API response reason: {_reason}. Pseudo-code example of how to '
+            + f'split your query: \n {_example}'
+        )
 
     if response.text.startswith('No sites/data'):
         raise NoSitesError(response.url)
 
@@ -258,11 +265,13 @@ def query(url, payload, delimiter=',', ssl_check=True):
 
 
 class NoSitesError(Exception):
-    """Custom error class used when selection criteria returns no sites/data.
-    """
+    """Custom error class used when selection criteria returns no sites/data."""
+
     def __init__(self, url):
         self.url = url
 
     def __str__(self):
-        return "No sites/data found using the selection criteria specified in url: {}".format(self.url)
-
+        return (
+            'No sites/data found using the selection criteria specified in url: '
+            '{url}'
+        ).format(url=self.url)
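The reworked 414 handler above embeds pseudo-code for splitting an over-long site list into smaller queries. A runnable sketch of that advice, with hypothetical site numbers and dates, might look like:

# A runnable version of the pseudo-code embedded in the 414 error message.
# Site numbers and date range below are placeholder examples.
import numpy as np
import pandas as pd

from dataretrieval import nwis

site_list = ['01646500', '01647000', '01638500', '01578310']
n = 2  # number of chunks to divide the query into

data_list = []
for chunk in np.array_split(site_list, n):
    # each request now carries a shorter URL
    data = nwis.get_record(
        sites=list(chunk), service='dv', start='2020-01-01', end='2020-12-31'
    )
    data_list.append(data)

df = pd.concat(data_list)  # recombine the chunked results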

dataretrieval/waterwatch.py (+18 -9)

@@ -1,18 +1,21 @@
 from typing import Dict, List, Union
 
-import requests
 import pandas as pd
+import requests
 
-ResponseFormat = "json"  # json, xml
+ResponseFormat = 'json'  # json, xml
 
 # WaterWatch won't receive any new features but it will continue to operate.
-waterwatch_url = "https://waterwatch.usgs.gov/webservices/"
+waterwatch_url = 'https://waterwatch.usgs.gov/webservices/'
 
 
 def _read_json(data: Dict) -> pd.DataFrame:
     return pd.DataFrame(data).T
 
-def get_flood_stage(sites: List[str] = None, fmt: str= "DF") -> Union[pd.DataFrame, Dict]:
+
+def get_flood_stage(
+    sites: List[str] = None, fmt: str = 'DF'
+) -> Union[pd.DataFrame, Dict]:
     """
     Retrieves flood stages for a list of station numbers.
 
@@ -37,7 +40,10 @@ def get_flood_stage(sites: List[str] = None, fmt: str= "DF") -> Union[pd.DataFra
     >> stations = ["07144100", "07144101"]
     >> res = get_flood_stage(stations, fmt="dict")  # dictionary output
     >> print(res)
-    {'07144100': {'action_stage': '20', 'flood_stage': '22', 'moderate_flood_stage': '25', 'major_flood_stage': '26'},
+    {'07144100': {'action_stage': '20',
+                  'flood_stage': '22',
+                  'moderate_flood_stage': '25',
+                  'major_flood_stage': '26'},
      '07144101': None}
     >> print(get_flood_stage(stations))
     >> print(res)
@@ -47,13 +53,16 @@ def get_flood_stage(sites: List[str] = None, fmt: str= "DF") -> Union[pd.DataFra
     50057000  16  20  24  30
 
     """
-    res = requests.get(waterwatch_url + 'floodstage', params={"format": ResponseFormat})
+    res = requests.get(waterwatch_url + 'floodstage', params={'format': ResponseFormat})
 
     if res.ok:
         json_res = res.json()
-        stages = {site['site_no']: {k: v for k, v in site.items() if k != 'site_no'} for site in json_res['sites']}
+        stages = {
+            site['site_no']: {k: v for k, v in site.items() if k != 'site_no'}
+            for site in json_res['sites']
+        }
     else:
-        raise requests.RequestException(f"[{res.status_code}] - {res.reason}")
+        raise requests.RequestException(f'[{res.status_code}] - {res.reason}')
 
     if not sites:
         stations_stages = stages
@@ -65,7 +74,7 @@ def get_flood_stage(sites: List[str] = None, fmt: str= "DF") -> Union[pd.DataFra
         except KeyError:
             stations_stages[site] = None
 
-    if fmt == "dict":
+    if fmt == 'dict':
         return stations_stages
     else:
         return _read_json(stations_stages)

dataretrieval/wqp.py (+29 -26)

@@ -8,10 +8,12 @@
 - implement other services like Organization, Activity, etc.
 
 """
-import pandas as pd
-from io import StringIO
-from .utils import query, BaseMetadata
 import warnings
+from io import StringIO
+
+import pandas as pd
+
+from .utils import BaseMetadata, query
 
 
 def get_results(ssl_check=True, **kwargs):
@@ -73,16 +75,15 @@ def get_results(ssl_check=True, **kwargs):
 
     >>> # Get results within a radial distance of a point
     >>> df, md = dataretrieval.wqp.get_results(
-    ...     lat='44.2', long='-88.9', within='0.5')
+    ...     lat='44.2', long='-88.9', within='0.5'
+    ... )
 
     >>> # Get results within a bounding box
-    >>> df, md = dataretrieval.wqp.get_results(
-    ...     bBox='-92.8,44.2,-88.9,46.0')
+    >>> df, md = dataretrieval.wqp.get_results(bBox='-92.8,44.2,-88.9,46.0')
 
     """
     kwargs = _alter_kwargs(kwargs)
-    response = query(wqp_url('Result'), kwargs, delimiter=';',
-                     ssl_check=ssl_check)
+    response = query(wqp_url('Result'), kwargs, delimiter=';', ssl_check=ssl_check)
 
     df = pd.read_csv(StringIO(response.text), delimiter=',')
     return df, WQP_Metadata(response)
@@ -112,7 +113,8 @@ def what_sites(ssl_check=True, **kwargs):
 
     >>> # Get sites within a radial distance of a point
     >>> df, md = dataretrieval.wqp.what_sites(
-    ...     lat='44.2', long='-88.9', within='2.5')
+    ...     lat='44.2', long='-88.9', within='2.5'
+    ... )
 
     """
     kwargs = _alter_kwargs(kwargs)
@@ -222,8 +224,8 @@ def what_activities(ssl_check=True, **kwargs):
     >>> # Get activities within Washington D.C.
     >>> # during a specific time period
     >>> df, md = dataretrieval.wqp.what_activities(
-    ...     statecode='US:11', startDateLo='12-30-2019',
-    ...     startDateHi='01-01-2020')
+    ...     statecode='US:11', startDateLo='12-30-2019', startDateHi='01-01-2020'
+    ... )
 
     """
     kwargs = _alter_kwargs(kwargs)
@@ -262,8 +264,11 @@ def what_detection_limits(ssl_check=True, **kwargs):
     >>> # Get detection limits for Nitrite measurements in Rhode Island
     >>> # between specific dates
     >>> df, md = dataretrieval.wqp.what_detection_limits(
-    ...     statecode='US:44', characteristicName='Nitrite',
-    ...     startDateLo='01-01-2021', startDateHi='02-20-2021')
+    ...     statecode='US:44',
+    ...     characteristicName='Nitrite',
+    ...     startDateLo='01-01-2021',
+    ...     startDateHi='02-20-2021',
+    ... )
 
     """
     kwargs = _alter_kwargs(kwargs)
@@ -299,8 +304,7 @@ def what_habitat_metrics(ssl_check=True, **kwargs):
     .. code::
 
     >>> # Get habitat metrics for a state (Rhode Island in this case)
-    >>> df, md = dataretrieval.wqp.what_habitat_metrics(
-    ...     statecode='US:44')
+    >>> df, md = dataretrieval.wqp.what_habitat_metrics(statecode='US:44')
 
     """
     kwargs = _alter_kwargs(kwargs)
@@ -338,8 +342,8 @@ def what_project_weights(ssl_check=True, **kwargs):
     >>> # Get project weights for a state (North Dakota in this case)
     >>> # within a set time period
     >>> df, md = dataretrieval.wqp.what_project_weights(
-    ...     statecode='US:38', startDateLo='01-01-2006',
-    ...     startDateHi='01-01-2009')
+    ...     statecode='US:38', startDateLo='01-01-2006', startDateHi='01-01-2009'
+    ... )
 
     """
     kwargs = _alter_kwargs(kwargs)
@@ -377,8 +381,8 @@ def what_activity_metrics(ssl_check=True, **kwargs):
     >>> # Get activity metrics for a state (North Dakota in this case)
     >>> # within a set time period
     >>> df, md = dataretrieval.wqp.what_activity_metrics(
-    ...     statecode='US:38', startDateLo='07-01-2006',
-    ...     startDateHi='12-01-2006')
+    ...     statecode='US:38', startDateLo='07-01-2006', startDateHi='12-01-2006'
+    ... )
 
     """
     kwargs = _alter_kwargs(kwargs)
@@ -392,15 +396,14 @@ def what_activity_metrics(ssl_check=True, **kwargs):
 
 
 def wqp_url(service):
-    """Construct the WQP URL for a given service.
-    """
+    """Construct the WQP URL for a given service."""
     base_url = 'https://www.waterqualitydata.us/data/'
-    return '{}{}/Search?'.format(base_url, service)
+    return f'{base_url}{service}/Search?'
 
 
 class WQP_Metadata(BaseMetadata):
     """Metadata class for WQP service, derived from BaseMetadata.
-
+
     Attributes
     ----------
     url : str
@@ -418,12 +421,12 @@ class WQP_Metadata(BaseMetadata):
     def __init__(self, response, **parameters) -> None:
         """Generates a standard set of metadata informed by the response with specific
         metadata for WQP data.
-
+
         Parameters
         ----------
         response: Response
             Response object from requests module
-
+
         parameters: unpacked dictionary
             Unpacked dictionary of the parameters supplied in the request
 
@@ -433,7 +436,7 @@ def __init__(self, response, **parameters) -> None:
         A ``dataretrieval`` custom :obj:`dataretrieval.wqp.WQP_Metadata` object.
 
         """
-
+
         super().__init__(response)
 
         self._parameters = parameters

pyproject.toml (+6)

@@ -41,3 +41,9 @@ repository = "https://github.com/DOI-USGS/dataretrieval-python.git"
 
 [tool.setuptools_scm]
 write_to = "dataretrieval/_version.py"
+
+[tool.isort]
+profile = "black"
+
+[tool.black]
+skip-string-normalization = true
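The `[tool.isort] profile = "black"` setting explains the import reshuffling throughout this commit: imports are split into stdlib, third-party, and local groups, separated by blank lines and alphabetized within each group, while `skip-string-normalization` keeps black from rewriting the single-quoted strings that the `double-quote-string-fixer` hook enforces. As an illustration, the layout isort now produces (mirroring the wqp.py hunk above):

# Import layout enforced by isort's black profile, as seen in wqp.py:
import warnings          # 1) standard library
from io import StringIO

import pandas as pd      # 2) third-party packages

from dataretrieval.utils import BaseMetadata, query  # 3) first-party/local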
