Skip to content

Commit

Permalink
format with black
Browse files Browse the repository at this point in the history
  • Loading branch information
sheppard committed Jun 6, 2023
1 parent f2d437b commit 3f8f2bb
Show file tree
Hide file tree
Showing 21 changed files with 323 additions and 267 deletions.
90 changes: 41 additions & 49 deletions itertable/__init__.py
Original file line number Diff line number Diff line change
Expand Up @@ -32,7 +32,7 @@
load_url,
load_string,
guess_type,
flattened
flattened,
)

try:
Expand All @@ -42,51 +42,42 @@


# Public API of the itertable package: the canonical list of names exported
# by ``from itertable import *``.
__all__ = (
    "BaseIter",
    "BaseLoader",
    "FileLoader",
    "Zipper",
    "ZipFileLoader",
    "StringLoader",
    "NetLoader",
    "ZipNetLoader",
    "CsvParser",
    "JsonParser",
    "XmlParser",
    "ExcelParser",
    "OldExcelParser",
    "BaseMapper",
    "DictMapper",
    "TupleMapper",
    "TimeSeriesMapper",
    "make_date_mapper",
    "make_iter",
    "load_file",
    "load_url",
    "load_string",
    "guess_type",
    "flattened",
    "VERSION",
    "CsvFileIter",
    "CsvNetIter",
    "CsvStringIter",
    "JsonFileIter",
    "JsonNetIter",
    "JsonStringIter",
    "XmlFileIter",
    "XmlNetIter",
    "XmlStringIter",
    "OldExcelFileIter",
    "ExcelFileIter",
)

# Some useful pre-mixed classes
Expand All @@ -109,10 +100,11 @@

# GIS support is optional (the .gis module has extra dependencies), so only
# export the GIS iterator classes when the import succeeds.
try:
    from .gis import GisIter, ShapeIter, WktIter

    __all__ += (
        "GisIter",
        "ShapeIter",
        "WktIter",
    )
except ImportError:
    pass
2 changes: 1 addition & 1 deletion itertable/__main__.py
Original file line number Diff line number Diff line change
@@ -1,5 +1,5 @@
from .commands import cat


# Entry point for ``python -m itertable``: delegates to the ``cat`` command.
if __name__ == "__main__":
    cat()
25 changes: 13 additions & 12 deletions itertable/base.py
Original file line number Diff line number Diff line change
Expand Up @@ -22,13 +22,13 @@ def refresh(self):
if self.parsed:
return

if getattr(self, 'empty_file', False):
if getattr(self, "empty_file", False):
self.data = []
else:
self.parse()
if hasattr(self, 'file'):
if hasattr(self, "file"):
f = self.file
if hasattr(f, 'close') and not getattr(f, 'closed', False):
if hasattr(f, "close") and not getattr(f, "closed", False):
f.close()

self.parsed = True
Expand All @@ -47,13 +47,13 @@ def parse(self):
pass

def dump(self, file=None):
    """Write the string representation of the parsed data to *file*.

    Defaults to this iter's own ``self.file`` when *file* is not given.
    """
    if file is None:
        file = self.file
    file.write(str(self.data))

def save(self):
    """Persist the current data by dumping it back to this iter's file."""
    self.dump(self.file)

field_names = None
Expand All @@ -65,12 +65,12 @@ def get_field_names(self):
if self.field_names is not None:
# Support specifying field_names as string (like namedtuple does)
if isinstance(self.field_names, str):
return self.field_names.replace(',', ' ').split()
return self.field_names.replace(",", " ").split()
else:
return self.field_names

# If no defined field names, try to retrieve from data
if not getattr(self, 'data', None):
if not getattr(self, "data", None):
return None

if self._auto_field_names:
Expand Down Expand Up @@ -110,7 +110,7 @@ def compute_index(self, recompute=False):
if key_field is None:
return None

if getattr(self, '_index_cache', None) is not None and not recompute:
if getattr(self, "_index_cache", None) is not None and not recompute:
return self._index_cache

index = {}
Expand Down Expand Up @@ -202,10 +202,10 @@ def copy(self, other, save=True):

def get_no_pickle(self):
    """Return the combined list of attribute names excluded from pickling.

    Concatenates the per-mixin exclusion lists (base, loader, mapper,
    parser) so ``__getstate__`` can drop them all in one pass.
    """
    return (
        self.no_pickle
        + self.no_pickle_loader
        + self.no_pickle_mapper
        + self.no_pickle_parser
    )

def __getstate__(self):
Expand All @@ -222,6 +222,7 @@ def item_dict(self, item):

def as_dataframe(self):
from pandas import DataFrame

key = self.get_key_field()
if key:
data = [self.item_dict(row) for row in self.values()]
Expand Down
16 changes: 8 additions & 8 deletions itertable/commands.py
Original file line number Diff line number Diff line change
Expand Up @@ -6,9 +6,9 @@


@click.command()
@click.argument('source')
@click.argument('source_options', required=False)
@click.option('--format', '-f', default='csv', help='Output format')
@click.argument("source")
@click.argument("source_options", required=False)
@click.option("--format", "-f", default="csv", help="Output format")
def cat(source, source_options, format):
"""
Display contents of a file or IterTable class. SOURCE can be either a
Expand All @@ -31,8 +31,8 @@ def cat(source, source_options, format):
# Parse option string
options = {}
if source_options:
for opt in source_options.split(','):
key, val = opt.split('=')
for opt in source_options.split(","):
key, val = opt.split("=")
if val.isdigit():
val = int(val)
options[key] = val
Expand All @@ -42,13 +42,13 @@ def cat(source, source_options, format):
input = load_file(source, options=options)
except IterException as e:
raise click.ClickException(str(e))
elif 'http' in source and '://' in source:
elif "http" in source and "://" in source:
try:
input = load_url(source, options=options)
except IterException as e:
raise click.ClickException(str(e))
else:
parts = source.split('.')
parts = source.split(".")
class_name = parts[-1]
module_name = ".".join(parts[:-1])
try:
Expand All @@ -69,5 +69,5 @@ def cat(source, source_options, format):
output.save()
result = output.string
if output.binary:
result = result.decode('utf-8')
result = result.decode("utf-8")
print(result)
7 changes: 5 additions & 2 deletions itertable/exceptions.py
Original file line number Diff line number Diff line change
Expand Up @@ -23,13 +23,13 @@ def __str__(self):
if self.args and self.args[0]:
text = self.args[0]
has_html = False
for tag in '<html', '<body', '<div':
for tag in "<html", "<body", "<div":
if tag in text or tag.upper() in text:
has_html = True
if has_html and BeautifulSoup:
html = BeautifulSoup(text).body
if html:
text = html.get_text('\n')
text = html.get_text("\n")
return text
elif self.code is not None:
return "%s Error" % self.code
Expand All @@ -38,14 +38,17 @@ def __str__(self):

class ParseFailed(IterException):
    """Error parsing data!"""
    # Docstring alone is a valid class body; the redundant ``pass`` is removed.


class MappingFailed(IterException):
    """Error processing data!"""
    # Docstring alone is a valid class body; the redundant ``pass`` is removed.


class NoData(IterException):
    """No data returned!"""
    # Docstring alone is a valid class body; the redundant ``pass`` is removed.
10 changes: 5 additions & 5 deletions itertable/gis/__init__.py
Original file line number Diff line number Diff line change
Expand Up @@ -6,18 +6,18 @@ class MetaSyncIter(BaseIter):
"""
Custom sync() to handle transferring Fiona metadata (except for driver)
"""

def sync(self, other, save=True):
    """Copy this iter's Fiona metadata onto *other*, then delegate to sync().

    The target's own ``driver`` entry is preserved so that syncing across
    formats keeps the destination's output driver.
    """
    driver = other.meta.get("driver", None)
    other.meta = self.meta.copy()
    if driver:
        other.meta["driver"] = driver
    super(MetaSyncIter, self).sync(other, save)

def get_field_names(self):
    """Derive field names from Fiona metadata when none are set explicitly.

    ``id`` and ``geometry`` are implicit in every Fiona record; the rest
    come from the schema's property mapping.
    """
    if self.field_names is None and self.meta is not None:
        return ["id", "geometry"] + list(
            self.meta["schema"]["properties"].keys()
        )
    return super(MetaSyncIter, self).get_field_names()

Expand Down
Loading

0 comments on commit 3f8f2bb

Please sign in to comment.