ruff: enable RUF ruleset

Dima Gerasimov 2024-08-28 00:00:54 +01:00 committed by karlicoss
parent 664c40e3e8
commit b594377a59
15 changed files with 31 additions and 27 deletions

@@ -81,7 +81,7 @@ def entries() -> Iterable[Entry]:
         cmds = [base] # rely on default
     else:
         # otherwise, 'merge' them
-        cmds = [base + ['--logfile', f] for f in inps]
+        cmds = [[*base, '--logfile', f] for f in inps]
     import ijson.backends.yajl2_cffi as ijson  # type: ignore
     from subprocess import Popen, PIPE
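
Most of the rewrites in this commit are ruff's RUF005 (collection-literal-concatenation), which prefers iterable unpacking over building a new list with +; the same rewrite recurs in several files below. A minimal sketch of the before/after shapes (values here are invented, not from the commit):

    base = ['some-tool', '--format=json']  # hypothetical base command
    inps = ['a.log', 'b.log']

    # before: `+` allocates an intermediate list for every element of inps
    cmds_old = [base + ['--logfile', f] for f in inps]

    # after (RUF005): unpacking spells out the new list in one literal
    cmds_new = [[*base, '--logfile', f] for f in inps]

    assert cmds_old == cmds_new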

@@ -104,7 +104,7 @@ def measurements() -> Iterable[Res[Measurement]]:
                 f'SELECT "{path.name}" as name, Time, Temperature, Humidity, Pressure, Dewpoint FROM data ORDER BY log_index'
             )
             oldfmt = True
-            db_dts = list(db.execute('SELECT last_download FROM info'))[0][0]
+            [(db_dts,)] = db.execute('SELECT last_download FROM info')
             if db_dts == 'N/A':
                 # ??? happens for 20180923-20180928
                 continue
@@ -137,7 +137,7 @@ def measurements() -> Iterable[Res[Measurement]]:
             processed_tables |= set(log_tables)
             # todo use later?
-            frequencies = [list(db.execute(f'SELECT interval from {t.replace("_log", "_meta")}'))[0][0] for t in log_tables]
+            frequencies = [list(db.execute(f'SELECT interval from {t.replace("_log", "_meta")}'))[0][0] for t in log_tables]  # noqa: RUF015
             # todo could just filter out the older datapoints?? dunno.
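
Both changes above deal with pulling a single value out of a query result. RUF015 flags list(...)[0], which materializes every row just to take the first; destructuring, as in the db_dts change, avoids the allocation and additionally asserts the shape of the result. A sketch against a throwaway in-memory database (table and value invented for illustration):

    import sqlite3

    db = sqlite3.connect(':memory:')
    db.execute('CREATE TABLE info (last_download TEXT)')
    db.execute("INSERT INTO info VALUES ('2018-09-23')")

    # before: builds a full list of rows only to index the first one (RUF015)
    db_dts_old = list(db.execute('SELECT last_download FROM info'))[0][0]

    # after: unpacking demands exactly one row with one column,
    # raising ValueError otherwise -- stricter and allocation-free
    [(db_dts,)] = db.execute('SELECT last_download FROM info')

    assert db_dts == db_dts_old == '2018-09-23'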

@@ -392,7 +392,7 @@ def module_install(*, user: bool, module: Sequence[str], parallel: bool=False, b
         # I think it only helps for pypi artifacts (not git!),
         # and only if they weren't cached
         for r in requirements:
-            cmds.append(pre_cmd + [r])
+            cmds.append([*pre_cmd, r])
     else:
         if parallel:
             warning('parallel install is not supported on this platform, installing sequentially...')

@@ -153,7 +153,7 @@ def test_sort_res_by() -> None:
         Exc('last'),
     ]
-    results2 = sort_res_by(ress + [0], lambda x: int(x))
+    results2 = sort_res_by([*ress, 0], lambda x: int(x))
     assert results2 == [Exc('last'), 0] + results[:-1]
     assert sort_res_by(['caba', 'a', 'aba', 'daba'], key=lambda x: len(x)) == ['a', 'aba', 'caba', 'daba']
@@ -166,7 +166,7 @@ def test_sort_res_by() -> None:
 def set_error_datetime(e: Exception, dt: Optional[datetime]) -> None:
     if dt is None:
         return
-    e.args = e.args + (dt,)
+    e.args = (*e.args, dt)
     # todo not sure if should return new exception?
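
The same unpacking idiom applies to tuples: (*old, extra) replaces old + (extra,). In isolation, with invented values:

    from datetime import datetime

    e = ValueError('something failed')
    dt = datetime(2024, 8, 28)

    # before: e.args = e.args + (dt,)
    # after (RUF005): build the extended tuple with unpacking
    e.args = (*e.args, dt)

    assert e.args == ('something failed', dt)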

@@ -61,7 +61,7 @@ def import_source(
         warnings.warn(f"""If you don't want to use this module, to hide this message, add '{module_name}' to your core config disabled_modules in your config, like:
 class core:
-    disabled_modules = [{repr(module_name)}]
+    disabled_modules = [{module_name!r}]
 """)
         # try to check if this is a config error or based on dependencies not being installed
         if isinstance(err, (ImportError, AttributeError)):
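
RUF010 (explicit f-string type conversion) prefers the !r conversion flag to calling repr() inside the braces; the result is byte-for-byte identical. A tiny check (module name invented):

    module_name = 'my.polar'

    old = f'disabled_modules = [{repr(module_name)}]'
    new = f'disabled_modules = [{module_name!r}]'

    assert old == new == "disabled_modules = ['my.polar']"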

@@ -67,21 +67,21 @@ def match_structure(
         export_dir
         ├── exp_2020
         │   ├── channel_data
         │   │   ├── data1
         │   │   └── data2
         │   ├── index.json
         │   ├── messages
         │   │   └── messages.csv
         │   ├── profile
         │   └── settings.json
         └── exp_2021
             ├── channel_data
             │   ├── data1
             │   └── data2
             ├── index.json
             ├── messages
             │   └── messages.csv
             ├── profile
             └── settings.json

@@ -21,7 +21,7 @@ def user_forced() -> Sequence[str]:
 def _abbr_to_timezone_map() -> Dict[str, pytz.BaseTzInfo]:
     # also force UTC to always correspond to utc
     # this makes more sense than Zulu it ends up by default
-    timezones = pytz.all_timezones + ['UTC'] + list(user_forced())
+    timezones = [*pytz.all_timezones, 'UTC', *user_forced()]
     res: Dict[str, pytz.BaseTzInfo] = {}
     for tzname in timezones:

@@ -74,7 +74,7 @@ def _discover_path_importables(pkg_pth: Path, pkg_name: str) -> Iterable[HPIModule]:
             continue
         rel_pt = pkg_dir_path.relative_to(pkg_pth)
-        pkg_pref = '.'.join((pkg_name, ) + rel_pt.parts)
+        pkg_pref = '.'.join((pkg_name, *rel_pt.parts))
         yield from _walk_packages(
             (str(pkg_dir_path), ), prefix=f'{pkg_pref}.',

@@ -26,7 +26,7 @@ class Helper:
         assert actual == expected, (key, actual, expected)
     def zoom(self, key: str) -> 'Helper':
-        return self.manager.helper(item=self.item.pop(key), path=self.path + (key,))
+        return self.manager.helper(item=self.item.pop(key), path=(*self.path, key))
 def is_empty(x) -> bool:

@@ -122,7 +122,7 @@ class TakeoutHTMLParser(HTMLParser):
     # JamiexxVEVO
     # Jun 21, 2018, 5:48:34 AM
     # Products:
     #  YouTube
     def handle_data(self, data):
         if self.state == State.OUTSIDE:
             if data[:-1].strip() in ("Watched", "Visited"):

@@ -70,7 +70,7 @@ def locations_to_gpx(locations: Iterable[LocationProtocol], buffer: TextIO) -> I
             )
         except AttributeError:
             yield TypeError(
-                f"Expected a Location or Location-like object, got {type(location)} {repr(location)}"
+                f"Expected a Location or Location-like object, got {type(location)} {location!r}"
             )
             continue
         gpx_segment.points.append(point)

@@ -209,7 +209,7 @@ def print_all() -> None:
         if isinstance(p, Exception):
             print('ERROR!', p)
         else:
-            print(f"{str(p.dt):25} {p.path} {p.geo}")
+            print(f"{p.dt!s:25} {p.path} {p.geo}")
 # todo cachew -- improve AttributeError: type object 'tuple' has no attribute '__annotations__' -- improve errors?
 # todo cachew -- invalidate if function code changed?
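
Here the conversion flag is more than cosmetic. If the str() call were simply dropped, the :25 spec would be handed to datetime.__format__, which treats it as a strftime pattern; !s converts through str() first, so :25 pads to 25 columns as intended. Sketched with an invented timestamp:

    from datetime import datetime

    dt = datetime(2018, 6, 21, 5, 48, 34)

    # the spec alone goes to strftime, which returns it literally:
    assert f'{dt:25}' == '25'

    # !s stringifies first, then ':25' pads the 19-char result to 25 columns:
    assert f'{dt!s:25}' == f'{str(dt):25}'
    assert len(f'{dt!s:25}') == 25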

@@ -27,7 +27,7 @@ class polar(user_config):
     '''
     Polar config is optional, you only need it if you want to specify custom 'polar_dir'
     '''
-    polar_dir: PathIsh = Path('~/.polar').expanduser()
+    polar_dir: PathIsh = Path('~/.polar').expanduser()  # noqa: RUF009
     defensive: bool = True # pass False if you want it to fail faster on errors (useful for debugging)
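
RUF009 fires because Path('~/.polar').expanduser() executes once, when the class body is evaluated, so the default captures the environment at import time and is shared by every instance. For a user config that is exactly the intent, hence the noqa. The textbook fix, for cases where per-instance evaluation matters, is a default_factory (a sketch, not from the repo):

    from dataclasses import dataclass, field
    from pathlib import Path

    @dataclass
    class Config:
        # evaluated lazily, once per instance, instead of at class definition:
        polar_dir: Path = field(default_factory=lambda: Path('~/.polar').expanduser())

    print(Config().polar_dir)  # e.g. /home/user/.polar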

@@ -155,7 +155,7 @@ _SELECT_OWN_TWEETS = '_SELECT_OWN_TWEETS'
 def get_own_user_id(conn) -> str:
     # unclear what's the reliable way to query it, so we use multiple different ones and arbitrate
     # NOTE: 'SELECT DISTINCT ev_owner_id FROM lists' doesn't work, might include lists from other people?
-    res = set()
+    res: Set[str] = set()
     for q in [
         'SELECT DISTINCT list_mapping_user_id FROM list_mapping',
         'SELECT DISTINCT owner_id FROM cursors',
@@ -164,7 +164,8 @@ def get_own_user_id(conn) -> str:
         for (r,) in conn.execute(q):
             res.add(r)
     assert len(res) == 1, res
-    return str(list(res)[0])
+    [r] = res
+    return r
 # NOTE:
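
The [r] = res form unpacks a one-element set directly: no throwaway list as in list(res)[0], and it raises ValueError unless res holds exactly one element, which makes the preceding assert nearly redundant. In isolation (value invented):

    res = {'12345'}

    # before: return str(list(res)[0])  -- builds a list just to index it (RUF015)
    [r] = res  # ValueError if res doesn't contain exactly one element
    assert r == '12345'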

@@ -6,6 +6,7 @@ lint.extend-select = [
     "C4",   # flake8-comprehensions -- unnecessary list/map/dict calls
     "UP",   # detect deprecated python stdlib stuff
     "FBT",  # detect use of boolean arguments
+    "RUF",  # various ruff-specific rules
 ]
 
 lint.ignore = [
@@ -38,4 +39,6 @@ lint.ignore = [
     "UP006",  # use type instead of Type
     "UP007",  # use X | Y instead of Union
     ###
+    "RUF100",  # unused noqa -- handle later
+    "RUF012",  # mutable class attrs should be annotated with ClassVar... ugh pretty annoying for user configs
 ]
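
Of the two new ignores, RUF012 is the consequential one: it wants mutable class attributes annotated with ClassVar, but plain class attributes are the interface of HPI-style user configs, so the annotation would be noise on every config. What the rule asks for, sketched (names invented):

    from typing import ClassVar, List

    class core:
        # what RUF012 would require -- explicitly class-level, shared state:
        disabled_modules: ClassVar[List[str]] = ['my.polar']

        # the plain user-config style this repo keeps (hence the ignore):
        # disabled_modules = ['my.polar']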