docs: fix typos

found via `codespell -L copie,datas,pres,fo,tooks,noo,ue,ket,frop`
Kian-Meng Ang 2023-03-26 17:10:33 +08:00 committed by karlicoss
parent 919c84fb5a
commit d2ef23fcb4
50 changed files with 69 additions and 69 deletions


@@ -143,7 +143,7 @@ def config_ok() -> bool:
else:
info(f'import order: {paths}')
-# first try doing as much as possible without actually imporing my.config
+# first try doing as much as possible without actually importing my.config
from .preinit import get_mycfg_dir
cfg_path = get_mycfg_dir()
# alternative is importing my.config and then getting cfg_path from its __file__/__path__
@@ -267,7 +267,7 @@ def modules_check(*, verbose: bool, list_all: bool, quick: bool, for_modules: Li
# todo more specific command?
error(f'{click.style("FAIL", fg="red")}: {m:<50} loading failed{vw}')
# check that this is an import error in particular, not because
-# of a ModuleNotFoundError because some dependency wasnt installed
+# of a ModuleNotFoundError because some dependency wasn't installed
if isinstance(e, (ImportError, AttributeError)):
warn_my_config_import_error(e)
if verbose:
@@ -441,7 +441,7 @@ def _locate_functions_or_prompt(qualified_names: List[str], prompt: bool = True)
from .query import locate_qualified_function, QueryException
from .stats import is_data_provider
-# if not connected to a terminal, cant prompt
+# if not connected to a terminal, can't prompt
if not sys.stdout.isatty():
prompt = False
@@ -471,7 +471,7 @@ def _locate_functions_or_prompt(qualified_names: List[str], prompt: bool = True)
else:
choices = [f.__name__ for f in data_providers]
if prompt is False:
-# theres more than one possible data provider in this module,
+# there's more than one possible data provider in this module,
# STDOUT is not a TTY, can't prompt
eprint("During fallback, more than one possible data provider, can't prompt since STDOUT is not a TTY")
eprint("Specify one of:")
@@ -576,7 +576,7 @@ def main(debug: bool) -> None:
# acts as a contextmanager of sorts - any subcommand will then run
# in something like /tmp/hpi_temp_dir
# to avoid importing relative modules by accident during development
-# maybe can be removed later if theres more test coverage/confidence that nothing
+# maybe can be removed later if there's more test coverage/confidence that nothing
# would happen?
# use a particular directory instead of a random one, since


@@ -433,7 +433,7 @@ def warn_if_empty(f):
QUICK_STATS = False
-# incase user wants to use the stats functions/quick option
+# in case user wants to use the stats functions/quick option
# elsewhere -- can use this decorator instead of editing
# the global state directly
@contextmanager
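
The comment above describes flipping the module-level flag through a context manager instead of mutating the global directly; a minimal sketch of that pattern, with a made-up helper name (the real helper may differ):

from contextlib import contextmanager

QUICK_STATS = False

@contextmanager
def quick_stats_enabled():  # hypothetical name, for illustration only
    global QUICK_STATS
    prev = QUICK_STATS
    QUICK_STATS = True
    try:
        yield
    finally:
        QUICK_STATS = prev  # restore whatever the flag was before entering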


@@ -127,7 +127,7 @@ else:
TypedDict = Dict
-# bisect_left doesnt have a 'key' parameter (which we use)
+# bisect_left doesn't have a 'key' parameter (which we use)
# till python3.10
if sys.version_info[:2] <= (3, 9):
from typing import List, TypeVar, Any, Optional, Callable
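
Regarding the bisect_left comment above: Python 3.10 added the key parameter to the stdlib function, while older versions have to search over precomputed keys instead. A small sketch of both forms:

import bisect

data = [(1, 'a'), (3, 'b'), (7, 'c')]

# Python 3.10+: pass key directly
idx = bisect.bisect_left(data, 5, key=lambda t: t[0])

# pre-3.10 fallback: bisect over a separate list of keys
keys = [t[0] for t in data]
idx = bisect.bisect_left(keys, 5)  # both give index 2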


@@ -1,5 +1,5 @@
"""
-A helper module for defining denylists for sources programatically
+A helper module for defining denylists for sources programmatically
(in lamens terms, this lets you remove some output from a module you don't want)
For docs, see doc/DENYLIST.md


@@ -119,7 +119,7 @@ def _extract_requirements(a: ast.Module) -> Requires:
elif isinstance(c, ast.Str):
deps.append(c.s)
else:
-raise RuntimeError(f"Expecting string contants only in {REQUIRES} declaration")
+raise RuntimeError(f"Expecting string constants only in {REQUIRES} declaration")
return tuple(deps)
return None


@@ -1,7 +1,7 @@
'''
A hook to insert user's config directory into Python's search path.
-Ideally that would be in __init__.py (so it's executed without having to import explicityly)
+Ideally that would be in __init__.py (so it's executed without having to import explicitly)
But, with namespace packages, we can't have __init__.py in the parent subpackage
(see http://python-notes.curiousefficiency.org/en/latest/python_concepts/import_traps.html#the-init-py-trap)
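
For context, the hook boils down to prepending the user's config directory to sys.path so that my.config becomes importable; a rough sketch of the idea (the ~/.config/my location is an assumption here, not taken from the diff):

import sys
from pathlib import Path

cfg_dir = Path('~/.config/my').expanduser()  # assumed default location, may be overridden elsewhere
if str(cfg_dir) not in sys.path:
    sys.path.insert(0, str(cfg_dir))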


@@ -46,7 +46,7 @@ def _zstd_open(path: Path, *args, **kwargs) -> IO:
# TODO use the 'dependent type' trick for return type?
def kopen(path: PathIsh, *args, mode: str='rt', **kwargs) -> IO:
# just in case, but I think this shouldn't be necessary anymore
-# since when we cann .read_text, encoding is passed already
+# since when we call .read_text, encoding is passed already
if mode in {'r', 'rt'}:
encoding = kwargs.get('encoding', 'utf8')
else:


@@ -145,7 +145,7 @@ class CollapseDebugHandler(logging.StreamHandler):
import os
columns, _ = os.get_terminal_size(0)
# ugh. the columns thing is meh. dunno I guess ultimately need curses for that
-# TODO also would be cool to have a terminal post-processor? kinda like tail but aware of logging keyworkds (INFO/DEBUG/etc)
+# TODO also would be cool to have a terminal post-processor? kinda like tail but aware of logging keywords (INFO/DEBUG/etc)
self.stream.write(msg + ' ' * max(0, columns - len(msg)) + ('' if cur else '\n'))
self.flush()
except:


@@ -74,7 +74,7 @@ No 'error' column detected. You probably forgot to handle errors defensively, wh
from typing import Any, Callable, TypeVar
FuncT = TypeVar('FuncT', bound=Callable[..., DataFrameT])
-# TODO ugh. typing this is a mess... shoul I use mypy_extensions.VarArg/KwArgs?? or what??
+# TODO ugh. typing this is a mess... should I use mypy_extensions.VarArg/KwArgs?? or what??
from decorator import decorator
@decorator
def check_dataframe(f: FuncT, error_col_policy: ErrorColPolicy='add_if_missing', *args, **kwargs) -> DataFrameT:
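
As a reminder of how the decorator library is used above: the decorated "caller" receives the wrapped function as its first argument, and the result preserves the original signature. A tiny standalone sketch (trace/add are made up for illustration):

from decorator import decorator

@decorator
def trace(func, *args, **kwargs):
    print(f'calling {func.__name__}')  # runs before every call to the wrapped function
    return func(*args, **kwargs)

@trace
def add(a: int, b: int) -> int:
    return a + b

add(1, 2)  # prints 'calling add', returns 3; inspect.signature(add) is still (a: int, b: int) -> int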


@@ -26,7 +26,7 @@ ET = Res[T]
U = TypeVar("U")
# In a perfect world, the return value from a OrderFunc would just be U,
# not Optional[U]. However, since this has to deal with so many edge
-# cases, theres a possibility that the functions generated by
+# cases, there's a possibility that the functions generated by
# _generate_order_by_func can't find an attribute
OrderFunc = Callable[[ET], Optional[U]]
Where = Callable[[ET], bool]
@@ -54,7 +54,7 @@ def locate_function(module_name: str, function_name: str) -> Callable[[], Iterab
for (fname, func) in inspect.getmembers(mod, inspect.isfunction):
if fname == function_name:
return func
-# incase the function is defined dynamically,
+# in case the function is defined dynamically,
# like with a globals().setdefault(...) or a module-level __getattr__ function
func = getattr(mod, function_name, None)
if func is not None and callable(func):
@@ -244,7 +244,7 @@ def _drop_unsorted(itr: Iterator[ET], orderfunc: OrderFunc) -> Iterator[ET]:
# try getting the first value from the iterator
-# similar to my.core.common.warn_if_empty? this doesnt go through the whole iterator though
+# similar to my.core.common.warn_if_empty? this doesn't go through the whole iterator though
def _peek_iter(itr: Iterator[ET]) -> Tuple[Optional[ET], Iterator[ET]]:
itr = more_itertools.peekable(itr)
try:
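
As an aside on the peekable call above: more_itertools.peekable lets you look at the first value without consuming it, which is what the "doesn't go through the whole iterator" comment is getting at. A quick sketch:

import more_itertools

it = iter([1, 2, 3])
p = more_itertools.peekable(it)
first = p.peek(None)   # 1; returns the default (None) instead if the iterator is empty
assert list(p) == [1, 2, 3]  # peeking did not consume anything
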
@@ -290,7 +290,7 @@ def _handle_unsorted(
return iter([]), itr
-# handles creating an order_value functon, using a lookup for
+# handles creating an order_value function, using a lookup for
# different types. ***This consumes the iterator***, so
# you should definitely itertoolts.tee it beforehand
# as to not exhaust the values
@@ -374,7 +374,7 @@ def select(
by allowing you to provide custom predicates (functions) which can sort
by a function, an attribute, dict key, or by the attributes values.
-Since this supports mixed types, theres always a possibility
+Since this supports mixed types, there's always a possibility
of KeyErrors or AttributeErrors while trying to find some value to order by,
so this provides multiple mechanisms to deal with that


@@ -220,7 +220,7 @@ def _create_range_filter(
# inclusivity here? Is [after, before) currently,
# items are included on the lower bound but not the
# upper bound
-# typically used for datetimes so doesnt have to
+# typically used for datetimes so doesn't have to
# be exact in that case
def generated_predicate(obj: Any) -> bool:
ov: Any = attr_func(obj)
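
The [after, before) convention described above, spelled out as a tiny standalone check (hypothetical helper, not part of the diff):

def in_half_open_range(value, after, before):
    # lower bound included, upper bound excluded
    return after <= value < before

assert in_half_open_range(5, 5, 10)       # equal to the lower bound: included
assert not in_half_open_range(10, 5, 10)  # equal to the upper bound: excluded
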
@@ -294,7 +294,7 @@ def select_range(
# some operations to do before ordering/filtering
if drop_exceptions or raise_exceptions or where is not None:
-# doesnt wrap unsortable items, because we pass no order related kwargs
+# doesn't wrap unsortable items, because we pass no order related kwargs
itr = select(itr, where=where, drop_exceptions=drop_exceptions, raise_exceptions=raise_exceptions)
order_by_chosen: Optional[OrderFunc] = None
@@ -356,7 +356,7 @@ Specify a type or a key to order the value by""")
#
# this select is also run if the user didn't specify anything to
# order by, and is just returning the data in the same order as
-# as the srouce iterable
+# as the source iterable
# i.e. none of the range-related filtering code ran, this is just a select
itr = select(itr,
order_by=order_by_chosen,
@@ -483,7 +483,7 @@ def test_parse_range() -> None:
assert res2 == RangeTuple(after=start_date.timestamp(), before=end_date.timestamp(), within=None)
-# cant specify all three
+# can't specify all three
with pytest.raises(QueryException, match=r"Cannot specify 'after', 'before' and 'within'"):
dt_parse_range(unparsed_range=RangeTuple(str(start_date), str(end_date.timestamp()), "7d"))


@@ -96,7 +96,7 @@ def _dumps_factory(**kwargs) -> Callable[[Any], str]:
# is rust-based and compiling on rarer architectures may not work
# out of the box
#
-# unlike the builtin JSON modue which serializes NamedTuples as lists
+# unlike the builtin JSON module which serializes NamedTuples as lists
# (even if you provide a default function), simplejson correctly
# serializes namedtuples to dictionaries
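
To illustrate the difference described above (a quick sketch, assuming simplejson is installed):

import json
import simplejson
from typing import NamedTuple

class Point(NamedTuple):
    x: int
    y: int

json.dumps(Point(1, 2))        # '[1, 2]' -- the builtin module treats it as a plain tuple
simplejson.dumps(Point(1, 2))  # '{"x": 1, "y": 2}' -- namedtuple_as_object=True is the default
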
@@ -157,7 +157,7 @@ def dumps(
def test_serialize_fallback() -> None:
import json as jsn # dont cause possible conflicts with module code
-# cant use a namedtuple here, since the default json.dump serializer
+# can't use a namedtuple here, since the default json.dump serializer
# serializes namedtuples as tuples, which become arrays
# just test with an array of mixed objects
X = [5, datetime.timedelta(seconds=5.0)]
@@ -216,7 +216,7 @@ def test_default_serializer() -> None:
def _serialize_with_default(o: Any) -> Any:
if isinstance(o, Unserializable):
return {"x": o.x, "y": o.y}
-raise TypeError("Couldnt serialize")
+raise TypeError("Couldn't serialize")
# this serializes both Unserializable, which is a custom type otherwise
# not handled, and timedelta, which is handled by the '_default_encode'
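
For context on the fallback chain being tested above, this is roughly how a user-supplied default hooks into json.dumps (names here are illustrative, not the module's own):

import json
import datetime

def fallback(o):
    if isinstance(o, datetime.timedelta):
        return o.total_seconds()
    raise TypeError(f"Couldn't serialize {o!r}")

json.dumps({'delta': datetime.timedelta(seconds=5)}, default=fallback)  # '{"delta": 5.0}'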


@@ -94,7 +94,7 @@ def sqlite_copy_and_open(db: PathIsh) -> sqlite3.Connection:
# NOTE hmm, so this kinda works
# V = TypeVar('V', bound=Tuple[Any, ...])
-# def select(cols: V, rest: str, *, db: sqlite3.Connetion) -> Iterator[V]:
+# def select(cols: V, rest: str, *, db: sqlite3.Connection) -> Iterator[V]:
# but sadly when we pass columns (Tuple[str, ...]), it seems to bind this type to V?
# and then the return type ends up as Iterator[Tuple[str, ...]], which isn't desirable :(
# a bit annoying to have this copy-pasting, but hopefully not a big issue


@@ -35,7 +35,7 @@ def is_data_provider(fun: Any) -> bool:
1. returns iterable or something like that
2. takes no arguments? (otherwise not callable by stats anyway?)
3. doesn't start with an underscore (those are probably helper functions?)
-4. functions isnt the 'inputs' function (or ends with '_inputs')
+4. functions isn't the 'inputs' function (or ends with '_inputs')
"""
# todo maybe for 2 allow default arguments? not sure
# one example which could benefit is my.pdfs


@@ -246,7 +246,7 @@ def stats():
sys.path = orig_path
# shouldn't crash at least
assert res is None # good as far as discovery is concerned
-assert xx.read_text() == 'some precious data' # make sure module wasn't evauluated
+assert xx.read_text() == 'some precious data' # make sure module wasn't evaluated
### tests end