ruff: enable FBT rules to detect boolean argument use without kwargs
parent 118c2d4484
commit 664c40e3e8

19 changed files with 50 additions and 39 deletions
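For context: ruff's FBT ("flake8-boolean-trap") rules flag boolean arguments that can be passed positionally, since a bare True/False at a call site doesn't say what it toggles. The fix applied throughout this commit is to make such flags keyword-only by inserting a bare `*` into the signature (or, where the signature can't change, to suppress the rule with `# noqa`). A minimal sketch of the pattern; the `fetch` function below is illustrative and not part of this repo:

# Before: FBT001/FBT002 flag the boolean parameter, and call sites read as fetch("x", True).
def fetch(name: str, verbose: bool = False) -> str:
    return f"{name} (verbose={verbose})"

# After: the bare '*' makes the flag keyword-only, so callers must write fetch("x", verbose=True).
def fetch(name: str, *, verbose: bool = False) -> str:
    return f"{name} (verbose={verbose})"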
@@ -7,7 +7,7 @@ class Combine:
         self.modules = modules

     @cdf
-    def dataframe(self, with_temperature: bool=True) -> DataFrameT:
+    def dataframe(self, *, with_temperature: bool=True) -> DataFrameT:
         import pandas as pd
         # todo include 'source'?
         df = pd.concat([m.dataframe() for m in self.modules])

@@ -438,7 +438,7 @@ def _ui_getchar_pick(choices: Sequence[str], prompt: str = 'Select from: ') -> i
     return result_map[ch]


-def _locate_functions_or_prompt(qualified_names: List[str], prompt: bool = True) -> Iterable[Callable[..., Any]]:
+def _locate_functions_or_prompt(qualified_names: List[str], *, prompt: bool = True) -> Iterable[Callable[..., Any]]:
     from .query import QueryException, locate_qualified_function
     from .stats import is_data_provider

@@ -588,7 +588,7 @@ def query_hpi_functions(

 @click.group()
 @click.option("--debug", is_flag=True, default=False, help="Show debug logs")
-def main(debug: bool) -> None:
+def main(*, debug: bool) -> None:
     '''
     Human Programming Interface

@@ -637,7 +637,7 @@ def _module_autocomplete(ctx: click.Context, args: Sequence[str], incomplete: st
 @click.option('-q', '--quick', is_flag=True, help='Only run partial checks (first 100 items)')
 @click.option('-S', '--skip-config-check', 'skip_conf', is_flag=True, help='Skip configuration check')
 @click.argument('MODULE', nargs=-1, required=False, shell_complete=_module_autocomplete)
-def doctor_cmd(verbose: bool, list_all: bool, quick: bool, skip_conf: bool, module: Sequence[str]) -> None:
+def doctor_cmd(*, verbose: bool, list_all: bool, quick: bool, skip_conf: bool, module: Sequence[str]) -> None:
     '''
     Run various checks

@@ -671,7 +671,7 @@ def config_create_cmd() -> None:

 @main.command(name='modules', short_help='list available modules')
 @click.option('--all', 'list_all', is_flag=True, help='List all modules, including disabled')
-def module_cmd(list_all: bool) -> None:
+def module_cmd(*, list_all: bool) -> None:
     '''List available modules'''
     list_modules(list_all=list_all)

@@ -684,7 +684,7 @@ def module_grp() -> None:

 @module_grp.command(name='requires', short_help='print module reqs')
 @click.argument('MODULES', shell_complete=_module_autocomplete, nargs=-1, required=True)
-def module_requires_cmd(modules: Sequence[str]) -> None:
+def module_requires_cmd(*, modules: Sequence[str]) -> None:
     '''
     Print MODULES requirements

@@ -701,7 +701,7 @@ def module_requires_cmd(modules: Sequence[str]) -> None:
     is_flag=True,
     help='Bypass PEP 668 and install dependencies into the system-wide python package directory.')
 @click.argument('MODULES', shell_complete=_module_autocomplete, nargs=-1, required=True)
-def module_install_cmd(user: bool, parallel: bool, break_system_packages: bool, modules: Sequence[str]) -> None:
+def module_install_cmd(*, user: bool, parallel: bool, break_system_packages: bool, modules: Sequence[str]) -> None:
     '''
     Install dependencies for modules using pip

@@ -782,6 +782,7 @@ def module_install_cmd(user: bool, parallel: bool, break_system_packages: bool,
     help='ignore any errors returned as objects from the functions')
 @click.argument('FUNCTION_NAME', nargs=-1, required=True, shell_complete=_module_autocomplete)
 def query_cmd(
+    *,
     function_name: Sequence[str],
     output: str,
     stream: bool,

@@ -28,6 +28,7 @@ DEFAULT_GLOB = '*'
 def get_files(
     pp: Paths,
     glob: str=DEFAULT_GLOB,
+    *,
     sort: bool=True,
     guess_compression: bool=True,
 ) -> Tuple[Path, ...]:

@@ -96,6 +96,7 @@ class DenyList:
     def filter(
         self,
         itr: Iterator[T],
+        *,
         invert: bool = False,
     ) -> Iterator[T]:
         denyf = functools.partial(self._allow, deny_map=self.load())

@@ -103,7 +104,7 @@ class DenyList:
             return filter(lambda x: not denyf(x), itr)
         return filter(denyf, itr)

-    def deny(self, key: str, value: Any, write: bool = False) -> None:
+    def deny(self, key: str, value: Any, *, write: bool = False) -> None:
         '''
         add a key/value pair to the denylist
         '''

@@ -111,7 +112,7 @@ class DenyList:
         self._load()
         self._deny_raw({key: self._stringify_value(value)}, write=write)

-    def _deny_raw(self, data: Dict[str, Any], write: bool = False) -> None:
+    def _deny_raw(self, data: Dict[str, Any], *, write: bool = False) -> None:
         self._deny_raw_list.append(data)
         if write:
             self.write()

@@ -135,7 +135,7 @@ def main() -> None:
 @main.command(name='populate', short_help='populate influxdb')
 @click.option('--reset', is_flag=True, help='Reset Influx measurements before inserting', show_default=True)
 @click.argument('FUNCTION_NAME', type=str, required=True)
-def populate(function_name: str, reset: bool) -> None:
+def populate(*, function_name: str, reset: bool) -> None:
     from .__main__ import _locate_functions_or_prompt
     [provider] = list(_locate_functions_or_prompt([function_name]))
     # todo could have a non-interactive version which populates from all data sources for the provider?

@@ -131,7 +131,7 @@ class UnconsumedError(Exception):

 # TODO think about error policy later...
 @contextmanager
-def wrap(j, throw=True) -> Iterator[Zoomable]:
+def wrap(j, *, throw=True) -> Iterator[Zoomable]:
     w, children = _wrap(j)

     yield w

@@ -132,6 +132,7 @@ def attribute_func(obj: T, where: Where, default: Optional[U] = None) -> Optiona

 def _generate_order_by_func(
     obj_res: Res[T],
+    *,
     key: Optional[str] = None,
     where_function: Optional[Where] = None,
     default: Optional[U] = None,

@@ -274,6 +275,7 @@ def _wrap_unsorted(itr: Iterator[ET], orderfunc: OrderFunc) -> Tuple[Iterator[Un
 # the second being items for which orderfunc returned a non-none value
 def _handle_unsorted(
     itr: Iterator[ET],
+    *,
     orderfunc: OrderFunc,
     drop_unsorted: bool,
     wrap_unsorted: bool

@@ -503,7 +505,12 @@ Will attempt to call iter() on the value""")
     # note: can't just attach sort unsortable values in the same iterable as the
     # other items because they don't have any lookups for order_key or functions
     # to handle items in the order_by_lookup dictionary
-    unsortable, itr = _handle_unsorted(itr, order_by_chosen, drop_unsorted, wrap_unsorted)
+    unsortable, itr = _handle_unsorted(
+        itr,
+        orderfunc=order_by_chosen,
+        drop_unsorted=drop_unsorted,
+        wrap_unsorted=wrap_unsorted,
+    )

     # run the sort, with the computed order by function
     itr = iter(sorted(itr, key=order_by_chosen, reverse=reverse))  # type: ignore[arg-type]

@@ -30,7 +30,7 @@ Stats = Dict[str, Any]


 class StatsFun(Protocol):
-    def __call__(self, quick: bool = False) -> Stats: ...
+    def __call__(self, *, quick: bool = False) -> Stats: ...


 # global state that turns on/off quick stats

@@ -176,7 +176,7 @@ def guess_stats(module: ModuleType) -> Optional[StatsFun]:
     if len(providers) == 0:
         return None

-    def auto_stats(quick: bool = False) -> Stats:
+    def auto_stats(*, quick: bool = False) -> Stats:
         res = {}
         for k, v in providers.items():
             res.update(stat(v, quick=quick, name=k))

@@ -355,7 +355,7 @@ def _stat_item(item):
     return _guess_datetime(item)


-def _stat_iterable(it: Iterable[Any], quick: bool = False) -> Stats:
+def _stat_iterable(it: Iterable[Any], *, quick: bool = False) -> Stats:
     from more_itertools import first, ilen, take

     # todo not sure if there is something in more_itertools to compute this?

@@ -12,7 +12,7 @@ from .logging import make_logger
 logger = make_logger(__name__, level="info")


-def _structure_exists(base_dir: Path, paths: Sequence[str], partial: bool = False) -> bool:
+def _structure_exists(base_dir: Path, paths: Sequence[str], *, partial: bool = False) -> bool:
     """
     Helper function for match_structure to check if
     all subpaths exist at some base directory

@@ -47,5 +47,5 @@ class DummyExecutor(Executor):

         return f

-    def shutdown(self, wait: bool = True, **kwargs) -> None:
+    def shutdown(self, wait: bool = True, **kwargs) -> None:  # noqa: FBT001,FBT002
         self._shutdown = True

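Note on the hunk above: `DummyExecutor.shutdown` keeps its positional boolean and silences the rules with `# noqa: FBT001,FBT002` instead, presumably because it overrides `concurrent.futures.Executor.shutdown`, whose stdlib signature takes `wait` positionally, so making it keyword-only here would break call compatibility. A quick way to confirm the base signature (illustrative snippet, not part of the diff):

import inspect
from concurrent.futures import Executor

# On Python 3.9+ this prints: (self, wait=True, *, cancel_futures=False)
print(inspect.signature(Executor.shutdown))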
@@ -44,7 +44,7 @@ def workouts() -> Iterable[Res[Workout]]:
 from .core.pandas import check_dataframe, DataFrameT

 @check_dataframe
-def dataframe(defensive: bool=True) -> DataFrameT:
+def dataframe(*, defensive: bool=True) -> DataFrameT:
     def it():
         for w in workouts():
             if isinstance(w, Exception):

@@ -91,7 +91,7 @@ def _cachew_depends_on() -> List[str]:

 # ResultsType is a Union of all of the models in google_takeout_parser
 @mcachew(depends_on=_cachew_depends_on, logger=logger, force_file=True)
-def events(disable_takeout_cache: bool = DISABLE_TAKEOUT_CACHE) -> CacheResults:
+def events(disable_takeout_cache: bool = DISABLE_TAKEOUT_CACHE) -> CacheResults:  # noqa: FBT001
     error_policy = config.error_policy
     count = 0
     emitted = GoogleEventSet()

@@ -174,7 +174,7 @@ def hhmm(time: datetime):
 # return fromstart / tick


-def plot_one(sleep: SleepEntry, fig, axes, xlims=None, showtext=True):
+def plot_one(sleep: SleepEntry, fig, axes, xlims=None, *, showtext=True):
     import matplotlib.dates as mdates  # type: ignore[import-not-found]

     span = sleep.completed - sleep.created

@@ -24,7 +24,7 @@ def fallback_estimators() -> Iterator[LocationEstimator]:
     yield _home_estimate


-def estimate_location(dt: DateExact, first_match: bool=False, under_accuracy: Optional[int] = None) -> FallbackLocation:
+def estimate_location(dt: DateExact, *, first_match: bool=False, under_accuracy: Optional[int] = None) -> FallbackLocation:
     loc = estimate_from(dt, estimators=list(fallback_estimators()), first_match=first_match, under_accuracy=under_accuracy)
     # should never happen if the user has home configured
     if loc is None:

@@ -18,7 +18,7 @@ class FallbackLocation(LocationProtocol):
     elevation: Optional[float] = None
     datasource: Optional[str] = None  # which module provided this, useful for debugging

-    def to_location(self, end: bool = False) -> Location:
+    def to_location(self, *, end: bool = False) -> Location:
         '''
         by default the start date is used for the location
         If end is True, the start date + duration is used

@@ -18,7 +18,7 @@ def getzone(dt: datetime) -> str:


 @pytest.mark.parametrize('fast', [False, True])
-def test_iter_tzs(fast: bool, config) -> None:
+def test_iter_tzs(*, fast: bool, config) -> None:
     # TODO hmm.. maybe need to make sure we start with empty config?
     config.time.tz.via_location.fast = fast

@@ -94,7 +94,7 @@ logger = make_logger(__name__)


 @lru_cache(None)
-def _timezone_finder(fast: bool) -> Any:
+def _timezone_finder(*, fast: bool) -> Any:
     if fast:
         # less precise, but faster
         from timezonefinder import TimezoneFinderL as Finder

@@ -304,7 +304,7 @@ def localize(dt: datetime) -> datetime_aware:
     return tz.localize(dt)


-def stats(quick: bool = False) -> Stats:
+def stats(*, quick: bool = False) -> Stats:
     if quick:
         prev, config.sort_locations = config.sort_locations, False
     res = {'first': next(_iter_local_dates())}

@@ -58,7 +58,7 @@ def _parse_one(p: Path) -> Iterator[Res[Competition]]:
     h.pop_if_primitive('version', 'id')

     h = h.zoom('result')
-    h.check('success', True)
+    h.check('success', expected=True)
     h.check('status', 200)
     h.pop_if_primitive('metadata')

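The hunk above is the call-site counterpart of the same rule family: FBT003 flags a bare boolean passed positionally in a call, and naming the argument (`expected=True`) makes the assertion read unambiguously. A small illustrative sketch; this `check` helper is a stand-in, not the repo's implementation:

# Illustrative only -- a stand-in for the repo's check() helper.
def check(key: str, expected: object) -> None:
    print(f"checking that {key!r} == {expected!r}")

check('success', True)           # FBT003: bare boolean positional in a call
check('success', expected=True)  # same call, but the intent is explicit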
@@ -5,6 +5,7 @@ lint.extend-select = [
     "E",    # pycodestyle -- default, but extend just in case
     "C4",   # flake8-comprehensions -- unnecessary list/map/dict calls
     "UP",   # detect deprecated python stdlib stuff
+    "FBT",  # detect use of boolean arguments
 ]

 lint.ignore = [