From d2ef23fcb4a8d9938621fc39b04d853b7c5d2d78 Mon Sep 17 00:00:00 2001 From: Kian-Meng Ang Date: Sun, 26 Mar 2023 17:10:33 +0800 Subject: [PATCH] docs: fix typos found via `codespell -L copie,datas,pres,fo,tooks,noo,ue,ket,frop` --- CHANGELOG.md | 2 +- doc/DESIGN.org | 2 +- doc/MODULES.org | 2 +- doc/MODULE_DESIGN.org | 4 ++-- doc/SETUP.org | 2 +- misc/check-twitter.sh | 6 +++--- my/arbtt.py | 2 +- my/body/exercise/cross_trainer.py | 2 +- my/coding/commits.py | 2 +- my/core/__main__.py | 10 +++++----- my/core/common.py | 2 +- my/core/compat.py | 2 +- my/core/denylist.py | 2 +- my/core/discovery_pure.py | 2 +- my/core/init.py | 2 +- my/core/kompress.py | 2 +- my/core/logging.py | 2 +- my/core/pandas.py | 2 +- my/core/query.py | 10 +++++----- my/core/query_range.py | 8 ++++---- my/core/serialize.py | 6 +++--- my/core/sqlite.py | 2 +- my/core/stats.py | 2 +- my/core/util.py | 2 +- my/demo.py | 2 +- my/emfit/__init__.py | 2 +- my/fbmessenger/android.py | 2 +- my/github/ghexport.py | 2 +- my/hackernews/dogsheep.py | 2 +- my/instagram/android.py | 2 +- my/jawbone/__init__.py | 2 +- my/location/fallback/common.py | 2 +- my/location/fallback/via_ip.py | 2 +- my/location/google.py | 2 +- my/pdfs.py | 2 +- my/photos/main.py | 2 +- my/photos/utils.py | 4 ++-- my/reddit/rexport.py | 2 +- my/taplog.py | 2 +- my/time/tz/via_location.py | 6 +++--- my/tinder/android.py | 2 +- my/youtube/takeout.py | 2 +- tests/bluemaestro.py | 2 +- tests/config.py | 2 +- tests/core/test_denylist.py | 2 +- tests/core/test_kompress.py | 2 +- tests/demo.py | 2 +- tests/extra/polar.py | 2 +- tests/pdfs.py | 2 +- tests/tz.py | 2 +- 50 files changed, 69 insertions(+), 69 deletions(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index edaaf02..3dd19df 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -17,7 +17,7 @@ General/my.core changes: - 746c3da0cadcba3b179688783186d8a0bd0999c5 core.pandas: allow specifying schema; add tests - 5313984d8fea2b6eef6726b7b346c1f4316acd01 add `tmp_config` context manager for 
test & adhoc patching - df9a7f7390aee6c69f1abf1c8d1fc7659ebb957c core.pandas: add check for 'error' column + add empty one by default -- e81dddddf083ffd81aa7e2b715bd34f59949479c proprely resolve class properties in make_config + add test +- e81dddddf083ffd81aa7e2b715bd34f59949479c properly resolve class properties in make_config + add test Modules: - some innitial work on filling **InfluxDB** with HPI data diff --git a/doc/DESIGN.org b/doc/DESIGN.org index b8d40f9..81137d2 100644 --- a/doc/DESIGN.org +++ b/doc/DESIGN.org @@ -4,7 +4,7 @@ note: this doc is in progress - interoperable - # note: this link doesnt work in org, but does for the github preview + # note: this link doesn't work in org, but does for the github preview This is the main motivation and [[file:../README.org#why][why]] I created HPI in the first place. Ideally it should be possible to hook into anything you can imagine -- regardless the database/programming language/etc. diff --git a/doc/MODULES.org b/doc/MODULES.org index 9e2dbcf..9f48024 100644 --- a/doc/MODULES.org +++ b/doc/MODULES.org @@ -190,7 +190,7 @@ For an extensive/complex example, you can check out ~@seanbreckenridge~'s [[http fast: bool = True # sort locations by date - # incase multiple sources provide them out of order + # in case multiple sources provide them out of order sort_locations: bool = True # if the accuracy for the location is more than 5km (this diff --git a/doc/MODULE_DESIGN.org b/doc/MODULE_DESIGN.org index 691dd1c..d57f8fb 100644 --- a/doc/MODULE_DESIGN.org +++ b/doc/MODULE_DESIGN.org @@ -113,7 +113,7 @@ Not all HPI Modules are currently at that level of complexity -- some are simple A related concern is how to structure namespace packages to allow users to easily extend them, and how this conflicts with single file modules (Keep reading below for more information on namespace packages/extension) If a module is converted from a single file module to a namespace with multiple files, it seems this is a breaking change, 
see [[https://github.com/karlicoss/HPI/issues/89][#89]] for an example of this. The current workaround is to leave it a regular python package with an =__init__.py= for some amount of time and send a deprecation warning, and then eventually remove the =__init__.py= file to convert it into a namespace package. For an example, see the [[https://github.com/karlicoss/HPI/blob/8422c6e420f5e274bd1da91710663be6429c666c/my/reddit/__init__.py][reddit init file]]. -Its quite a pain to have to convert a file from a single file module to a namespace module, so if theres *any* possibility that you might convert it to a namespace package, might as well just start it off as one, to avoid the pain down the road. As an example, say you were creating something to parse ~zsh~ history. Instead of creating ~my/zsh.py~, it would be better to create ~my/zsh/parser.py~. That lets users override the file using editable/namespace packages, and it also means in the future its much more trivial to extend it to something like: +Its quite a pain to have to convert a file from a single file module to a namespace module, so if there's *any* possibility that you might convert it to a namespace package, might as well just start it off as one, to avoid the pain down the road. As an example, say you were creating something to parse ~zsh~ history. Instead of creating ~my/zsh.py~, it would be better to create ~my/zsh/parser.py~. That lets users override the file using editable/namespace packages, and it also means in the future its much more trivial to extend it to something like: #+begin_src my/zsh @@ -161,7 +161,7 @@ There's no requirement to follow this entire structure when you start off, the e Note: this section covers some of the complexities and benefits with this being a namespace package and/or editable install, so it assumes some familiarity with python/imports -HPI is installed as a namespace package, which allows an additional way to add your own modules. 
For the details on namespace packges, see [[https://www.python.org/dev/peps/pep-0420/][PEP420]], or the [[https://packaging.python.org/guides/packaging-namespace-packages][packaging docs for a summary]], but for our use case, a sufficient description might be: Namespace packages let you split a package across multiple directories on disk. +HPI is installed as a namespace package, which allows an additional way to add your own modules. For the details on namespace packages, see [[https://www.python.org/dev/peps/pep-0420/][PEP420]], or the [[https://packaging.python.org/guides/packaging-namespace-packages][packaging docs for a summary]], but for our use case, a sufficient description might be: Namespace packages let you split a package across multiple directories on disk. Without adding a bulky/boilerplate-y plugin framework to HPI, as that increases the barrier to entry, [[https://packaging.python.org/guides/creating-and-discovering-plugins/#using-namespace-packages][namespace packages offers an alternative]] with little downsides. diff --git a/doc/SETUP.org b/doc/SETUP.org index aff5158..6605f66 100644 --- a/doc/SETUP.org +++ b/doc/SETUP.org @@ -452,7 +452,7 @@ connect the data with other apps and libraries! See more in [[file:../README.org::#how-do-you-use-it]["How do you use it?"]] section. -Also check out [[https://beepb00p.xyz/myinfra.html#hpi][my personal infrastructure map]] to see wher I'm using HPI. +Also check out [[https://beepb00p.xyz/myinfra.html#hpi][my personal infrastructure map]] to see where I'm using HPI. * Adding/modifying modules # TODO link to 'overlays' documentation? diff --git a/misc/check-twitter.sh b/misc/check-twitter.sh index 318ff71..1552673 100755 --- a/misc/check-twitter.sh +++ b/misc/check-twitter.sh @@ -21,7 +21,7 @@ check '2011-05-12 Thu 17:51.*set ><' # this would probs be from twint or something? 
check '2013-06-01 Sat 18:48.* Iterable[Entry]: if len(inps) == 0: cmds = [base] # rely on default else: - # otherise, 'merge' them + # otherwise, 'merge' them cmds = [base + ['--logfile', f] for f in inps] import ijson.backends.yajl2_cffi as ijson # type: ignore diff --git a/my/body/exercise/cross_trainer.py b/my/body/exercise/cross_trainer.py index 58c32b2..b25985c 100644 --- a/my/body/exercise/cross_trainer.py +++ b/my/body/exercise/cross_trainer.py @@ -146,7 +146,7 @@ def dataframe() -> DataFrameT: # todo careful about 'how'? we need it to preserve the errors # maybe pd.merge is better suited for this?? df = edf.join(mdf, how='outer', rsuffix='_manual') - # todo reindex? so we dont' have Nan leftovers + # todo reindex? so we don't have Nan leftovers # todo set date anyway? maybe just squeeze into the index?? noendo = df['error'] == NO_ENDOMONDO diff --git a/my/coding/commits.py b/my/coding/commits.py index 5b15db1..7786055 100644 --- a/my/coding/commits.py +++ b/my/coding/commits.py @@ -59,7 +59,7 @@ class Commit: committed_dt: datetime authored_dt: datetime message: str - repo: str # TODO put canonical name here straightaway?? + repo: str # TODO put canonical name here straight away?? sha: str ref: Optional[str] = None # TODO filter so they are authored by me diff --git a/my/core/__main__.py b/my/core/__main__.py index 76db469..05f5a2c 100644 --- a/my/core/__main__.py +++ b/my/core/__main__.py @@ -143,7 +143,7 @@ def config_ok() -> bool: else: info(f'import order: {paths}') - # first try doing as much as possible without actually imporing my.config + # first try doing as much as possible without actually importing my.config from .preinit import get_mycfg_dir cfg_path = get_mycfg_dir() # alternative is importing my.config and then getting cfg_path from its __file__/__path__ @@ -267,7 +267,7 @@ def modules_check(*, verbose: bool, list_all: bool, quick: bool, for_modules: Li # todo more specific command? 
error(f'{click.style("FAIL", fg="red")}: {m:<50} loading failed{vw}') # check that this is an import error in particular, not because - # of a ModuleNotFoundError because some dependency wasnt installed + # of a ModuleNotFoundError because some dependency wasn't installed if isinstance(e, (ImportError, AttributeError)): warn_my_config_import_error(e) if verbose: @@ -441,7 +441,7 @@ def _locate_functions_or_prompt(qualified_names: List[str], prompt: bool = True) from .query import locate_qualified_function, QueryException from .stats import is_data_provider - # if not connected to a terminal, cant prompt + # if not connected to a terminal, can't prompt if not sys.stdout.isatty(): prompt = False @@ -471,7 +471,7 @@ def _locate_functions_or_prompt(qualified_names: List[str], prompt: bool = True) else: choices = [f.__name__ for f in data_providers] if prompt is False: - # theres more than one possible data provider in this module, + # there's more than one possible data provider in this module, # STDOUT is not a TTY, can't prompt eprint("During fallback, more than one possible data provider, can't prompt since STDOUT is not a TTY") eprint("Specify one of:") @@ -576,7 +576,7 @@ def main(debug: bool) -> None: # acts as a contextmanager of sorts - any subcommand will then run # in something like /tmp/hpi_temp_dir # to avoid importing relative modules by accident during development - # maybe can be removed later if theres more test coverage/confidence that nothing + # maybe can be removed later if there's more test coverage/confidence that nothing # would happen? 
# use a particular directory instead of a random one, since diff --git a/my/core/common.py b/my/core/common.py index 6ad8146..7adfd7a 100644 --- a/my/core/common.py +++ b/my/core/common.py @@ -433,7 +433,7 @@ def warn_if_empty(f): QUICK_STATS = False -# incase user wants to use the stats functions/quick option +# in case user wants to use the stats functions/quick option # elsewhere -- can use this decorator instead of editing # the global state directly @contextmanager diff --git a/my/core/compat.py b/my/core/compat.py index dcf97cc..8bdb401 100644 --- a/my/core/compat.py +++ b/my/core/compat.py @@ -127,7 +127,7 @@ else: TypedDict = Dict -# bisect_left doesnt have a 'key' parameter (which we use) +# bisect_left doesn't have a 'key' parameter (which we use) # till python3.10 if sys.version_info[:2] <= (3, 9): from typing import List, TypeVar, Any, Optional, Callable diff --git a/my/core/denylist.py b/my/core/denylist.py index fcf3e2b..8c18e06 100644 --- a/my/core/denylist.py +++ b/my/core/denylist.py @@ -1,5 +1,5 @@ """ -A helper module for defining denylists for sources programatically +A helper module for defining denylists for sources programmatically (in lamens terms, this lets you remove some output from a module you don't want) For docs, see doc/DENYLIST.md diff --git a/my/core/discovery_pure.py b/my/core/discovery_pure.py index 5c9dbed..c88ef1c 100644 --- a/my/core/discovery_pure.py +++ b/my/core/discovery_pure.py @@ -119,7 +119,7 @@ def _extract_requirements(a: ast.Module) -> Requires: elif isinstance(c, ast.Str): deps.append(c.s) else: - raise RuntimeError(f"Expecting string contants only in {REQUIRES} declaration") + raise RuntimeError(f"Expecting string constants only in {REQUIRES} declaration") return tuple(deps) return None diff --git a/my/core/init.py b/my/core/init.py index 9e1fc4d..2e47e87 100644 --- a/my/core/init.py +++ b/my/core/init.py @@ -1,7 +1,7 @@ ''' A hook to insert user's config directory into Python's search path. 
-Ideally that would be in __init__.py (so it's executed without having to import explicityly) +Ideally that would be in __init__.py (so it's executed without having to import explicitly) But, with namespace packages, we can't have __init__.py in the parent subpackage (see http://python-notes.curiousefficiency.org/en/latest/python_concepts/import_traps.html#the-init-py-trap) diff --git a/my/core/kompress.py b/my/core/kompress.py index 0274e6c..a44b9d1 100644 --- a/my/core/kompress.py +++ b/my/core/kompress.py @@ -46,7 +46,7 @@ def _zstd_open(path: Path, *args, **kwargs) -> IO: # TODO use the 'dependent type' trick for return type? def kopen(path: PathIsh, *args, mode: str='rt', **kwargs) -> IO: # just in case, but I think this shouldn't be necessary anymore - # since when we cann .read_text, encoding is passed already + # since when we call .read_text, encoding is passed already if mode in {'r', 'rt'}: encoding = kwargs.get('encoding', 'utf8') else: diff --git a/my/core/logging.py b/my/core/logging.py index 6cfa12b..a948dd8 100644 --- a/my/core/logging.py +++ b/my/core/logging.py @@ -145,7 +145,7 @@ class CollapseDebugHandler(logging.StreamHandler): import os columns, _ = os.get_terminal_size(0) # ugh. the columns thing is meh. dunno I guess ultimately need curses for that - # TODO also would be cool to have a terminal post-processor? kinda like tail but aware of logging keyworkds (INFO/DEBUG/etc) + # TODO also would be cool to have a terminal post-processor? kinda like tail but aware of logging keywords (INFO/DEBUG/etc) self.stream.write(msg + ' ' * max(0, columns - len(msg)) + ('' if cur else '\n')) self.flush() except: diff --git a/my/core/pandas.py b/my/core/pandas.py index 370c119..8ccacd2 100644 --- a/my/core/pandas.py +++ b/my/core/pandas.py @@ -74,7 +74,7 @@ No 'error' column detected. You probably forgot to handle errors defensively, wh from typing import Any, Callable, TypeVar FuncT = TypeVar('FuncT', bound=Callable[..., DataFrameT]) -# TODO ugh. 
typing this is a mess... shoul I use mypy_extensions.VarArg/KwArgs?? or what?? +# TODO ugh. typing this is a mess... should I use mypy_extensions.VarArg/KwArgs?? or what?? from decorator import decorator @decorator def check_dataframe(f: FuncT, error_col_policy: ErrorColPolicy='add_if_missing', *args, **kwargs) -> DataFrameT: diff --git a/my/core/query.py b/my/core/query.py index 43574d0..ed29649 100644 --- a/my/core/query.py +++ b/my/core/query.py @@ -26,7 +26,7 @@ ET = Res[T] U = TypeVar("U") # In a perfect world, the return value from a OrderFunc would just be U, # not Optional[U]. However, since this has to deal with so many edge -# cases, theres a possibility that the functions generated by +# cases, there's a possibility that the functions generated by # _generate_order_by_func can't find an attribute OrderFunc = Callable[[ET], Optional[U]] Where = Callable[[ET], bool] @@ -54,7 +54,7 @@ def locate_function(module_name: str, function_name: str) -> Callable[[], Iterab for (fname, func) in inspect.getmembers(mod, inspect.isfunction): if fname == function_name: return func - # incase the function is defined dynamically, + # in case the function is defined dynamically, # like with a globals().setdefault(...) or a module-level __getattr__ function func = getattr(mod, function_name, None) if func is not None and callable(func): @@ -244,7 +244,7 @@ def _drop_unsorted(itr: Iterator[ET], orderfunc: OrderFunc) -> Iterator[ET]: # try getting the first value from the iterator -# similar to my.core.common.warn_if_empty? this doesnt go through the whole iterator though +# similar to my.core.common.warn_if_empty? 
this doesn't go through the whole iterator though def _peek_iter(itr: Iterator[ET]) -> Tuple[Optional[ET], Iterator[ET]]: itr = more_itertools.peekable(itr) try: @@ -290,7 +290,7 @@ def _handle_unsorted( return iter([]), itr -# handles creating an order_value functon, using a lookup for +# handles creating an order_value function, using a lookup for # different types. ***This consumes the iterator***, so # you should definitely itertoolts.tee it beforehand # as to not exhaust the values @@ -374,7 +374,7 @@ def select( by allowing you to provide custom predicates (functions) which can sort by a function, an attribute, dict key, or by the attributes values. - Since this supports mixed types, theres always a possibility + Since this supports mixed types, there's always a possibility of KeyErrors or AttributeErrors while trying to find some value to order by, so this provides multiple mechanisms to deal with that diff --git a/my/core/query_range.py b/my/core/query_range.py index ea625e5..179e4ea 100644 --- a/my/core/query_range.py +++ b/my/core/query_range.py @@ -220,7 +220,7 @@ def _create_range_filter( # inclusivity here? 
Is [after, before) currently, # items are included on the lower bound but not the # upper bound - # typically used for datetimes so doesnt have to + # typically used for datetimes so doesn't have to # be exact in that case def generated_predicate(obj: Any) -> bool: ov: Any = attr_func(obj) @@ -294,7 +294,7 @@ def select_range( # some operations to do before ordering/filtering if drop_exceptions or raise_exceptions or where is not None: - # doesnt wrap unsortable items, because we pass no order related kwargs + # doesn't wrap unsortable items, because we pass no order related kwargs itr = select(itr, where=where, drop_exceptions=drop_exceptions, raise_exceptions=raise_exceptions) order_by_chosen: Optional[OrderFunc] = None @@ -356,7 +356,7 @@ Specify a type or a key to order the value by""") # # this select is also run if the user didn't specify anything to # order by, and is just returning the data in the same order as - # as the srouce iterable + # as the source iterable # i.e. none of the range-related filtering code ran, this is just a select itr = select(itr, order_by=order_by_chosen, @@ -483,7 +483,7 @@ def test_parse_range() -> None: assert res2 == RangeTuple(after=start_date.timestamp(), before=end_date.timestamp(), within=None) - # cant specify all three + # can't specify all three with pytest.raises(QueryException, match=r"Cannot specify 'after', 'before' and 'within'"): dt_parse_range(unparsed_range=RangeTuple(str(start_date), str(end_date.timestamp()), "7d")) diff --git a/my/core/serialize.py b/my/core/serialize.py index c0cbae9..ca68fef 100644 --- a/my/core/serialize.py +++ b/my/core/serialize.py @@ -96,7 +96,7 @@ def _dumps_factory(**kwargs) -> Callable[[Any], str]: # is rust-based and compiling on rarer architectures may not work # out of the box # - # unlike the builtin JSON modue which serializes NamedTuples as lists + # unlike the builtin JSON module which serializes NamedTuples as lists # (even if you provide a default function), simplejson 
correctly # serializes namedtuples to dictionaries @@ -157,7 +157,7 @@ def dumps( def test_serialize_fallback() -> None: import json as jsn # dont cause possible conflicts with module code - # cant use a namedtuple here, since the default json.dump serializer + # can't use a namedtuple here, since the default json.dump serializer # serializes namedtuples as tuples, which become arrays # just test with an array of mixed objects X = [5, datetime.timedelta(seconds=5.0)] @@ -216,7 +216,7 @@ def test_default_serializer() -> None: def _serialize_with_default(o: Any) -> Any: if isinstance(o, Unserializable): return {"x": o.x, "y": o.y} - raise TypeError("Couldnt serialize") + raise TypeError("Couldn't serialize") # this serializes both Unserializable, which is a custom type otherwise # not handled, and timedelta, which is handled by the '_default_encode' diff --git a/my/core/sqlite.py b/my/core/sqlite.py index 7c02940..80dbc3f 100644 --- a/my/core/sqlite.py +++ b/my/core/sqlite.py @@ -94,7 +94,7 @@ def sqlite_copy_and_open(db: PathIsh) -> sqlite3.Connection: # NOTE hmm, so this kinda works # V = TypeVar('V', bound=Tuple[Any, ...]) -# def select(cols: V, rest: str, *, db: sqlite3.Connetion) -> Iterator[V]: +# def select(cols: V, rest: str, *, db: sqlite3.Connection) -> Iterator[V]: # but sadly when we pass columns (Tuple[str, ...]), it seems to bind this type to V? # and then the return type ends up as Iterator[Tuple[str, ...]], which isn't desirable :( # a bit annoying to have this copy-pasting, but hopefully not a big issue diff --git a/my/core/stats.py b/my/core/stats.py index ba32be7..8923996 100644 --- a/my/core/stats.py +++ b/my/core/stats.py @@ -35,7 +35,7 @@ def is_data_provider(fun: Any) -> bool: 1. returns iterable or something like that 2. takes no arguments? (otherwise not callable by stats anyway?) 3. doesn't start with an underscore (those are probably helper functions?) - 4. functions isnt the 'inputs' function (or ends with '_inputs') + 4. 
functions isn't the 'inputs' function (or ends with '_inputs') """ # todo maybe for 2 allow default arguments? not sure # one example which could benefit is my.pdfs diff --git a/my/core/util.py b/my/core/util.py index 64bf6fe..f12b578 100644 --- a/my/core/util.py +++ b/my/core/util.py @@ -246,7 +246,7 @@ def stats(): sys.path = orig_path # shouldn't crash at least assert res is None # good as far as discovery is concerned - assert xx.read_text() == 'some precious data' # make sure module wasn't evauluated + assert xx.read_text() == 'some precious data' # make sure module wasn't evaluated ### tests end diff --git a/my/demo.py b/my/demo.py index 3a9d1b3..1023795 100644 --- a/my/demo.py +++ b/my/demo.py @@ -46,7 +46,7 @@ from .core import Json, get_files @dataclass class Item: ''' - Some completely arbirary artificial stuff, just for testing + Some completely arbitrary artificial stuff, just for testing ''' username: str raw: Json diff --git a/my/emfit/__init__.py b/my/emfit/__init__.py index a081416..0a1eb73 100644 --- a/my/emfit/__init__.py +++ b/my/emfit/__init__.py @@ -38,7 +38,7 @@ def datas() -> Iterable[Res[Emfit]]: import dataclasses # data from emfit is coming in UTC. There is no way (I think?) to know the 'real' timezone, and local times matter more for sleep analysis - # TODO actully this is wrong?? check this.. + # TODO actually this is wrong?? check this.. emfit_tz = config.timezone for x in dal.sleeps(config.export_path): diff --git a/my/fbmessenger/android.py b/my/fbmessenger/android.py index 616a6af..38551b4 100644 --- a/my/fbmessenger/android.py +++ b/my/fbmessenger/android.py @@ -177,7 +177,7 @@ def messages() -> Iterator[Res[Message]]: reply_to_id = x.reply_to_id # hmm, reply_to be missing due to the synthetic nature of export, so have to be defensive reply_to = None if reply_to_id is None else msgs.get(reply_to_id) - # also would be interesting to merge together entities rather than resuling messages from different sources.. 
+ # also would be interesting to merge together entities rather than resulting messages from different sources.. # then the merging thing could be moved to common? try: sender = senders[x.sender_id] diff --git a/my/github/ghexport.py b/my/github/ghexport.py index c9ba7ea..67042fc 100644 --- a/my/github/ghexport.py +++ b/my/github/ghexport.py @@ -128,7 +128,7 @@ def _get_summary(e) -> Tuple[str, Optional[Link], Optional[EventId], Optional[Bo rt = pl['ref_type'] ref = pl['ref'] if what == 'created': - # FIXME should handle delection?... + # FIXME should handle deletion?... eid = EventIds.repo_created(dts=dts, name=rname, ref_type=rt, ref=ref) mref = '' if ref is None else ' ' + ref # todo link to branch? only contains weird API link though diff --git a/my/hackernews/dogsheep.py b/my/hackernews/dogsheep.py index 462cbc0..aac0b1a 100644 --- a/my/hackernews/dogsheep.py +++ b/my/hackernews/dogsheep.py @@ -58,7 +58,7 @@ def items() -> Iterator[Res[Item]]: type=r['type'], created=datetime.fromtimestamp(r['time']), title=r['title'], - # todo hmm maybe a method to stip off html tags would be nice + # todo hmm maybe a method to strip off html tags would be nice text_html=r['text'], url=r['url'], ) diff --git a/my/instagram/android.py b/my/instagram/android.py index 8e62363..48e8021 100644 --- a/my/instagram/android.py +++ b/my/instagram/android.py @@ -71,7 +71,7 @@ class _Message(_BaseMessage): @dataclass(unsafe_hash=True) class Message(_BaseMessage): user: User - # TODO could also extract Thread objec? not sure if useful + # TODO could also extract Thread object? 
not sure if useful # reply_to: Optional[Message] diff --git a/my/jawbone/__init__.py b/my/jawbone/__init__.py index 89f104a..9f53abe 100644 --- a/my/jawbone/__init__.py +++ b/my/jawbone/__init__.py @@ -242,7 +242,7 @@ def plot_one(sleep: SleepEntry, fig: Figure, axes: Axes, xlims=None, showtext=Tr def predicate(sleep: SleepEntry): """ - Filter for comparing similar sleep sesssions + Filter for comparing similar sleep sessions """ start = sleep.created.time() end = sleep.completed.time() diff --git a/my/location/fallback/common.py b/my/location/fallback/common.py index fa1d4c5..fd508c6 100644 --- a/my/location/fallback/common.py +++ b/my/location/fallback/common.py @@ -64,7 +64,7 @@ class FallbackLocation(LocationProtocol): ) -# a location estimator can return multiple fallbacks, incase there are +# a location estimator can return multiple fallbacks, in case there are # differing accuracies/to allow for possible matches to be computed # iteratively LocationEstimator = Callable[[DateExact], Iterator[FallbackLocation]] diff --git a/my/location/fallback/via_ip.py b/my/location/fallback/via_ip.py index 1da2315..303074f 100644 --- a/my/location/fallback/via_ip.py +++ b/my/location/fallback/via_ip.py @@ -50,7 +50,7 @@ def fallback_locations() -> Iterator[FallbackLocation]: ) -# for compatibility with my.location.via_ip, this shouldnt be used by other modules +# for compatibility with my.location.via_ip, this shouldn't be used by other modules def locations() -> Iterator[Location]: medium("locations is deprecated, should use fallback_locations or estimate_location") yield from map(FallbackLocation.to_location, fallback_locations()) diff --git a/my/location/google.py b/my/location/google.py index 21ba3ed..fdddd92 100644 --- a/my/location/google.py +++ b/my/location/google.py @@ -82,7 +82,7 @@ def _iter_via_grep(fo) -> Iterable[TsLatLon]: # todo could also use pool? not sure if that would really be faster... -# earch thread could process 100K at once? 
+# search thread could process 100K at once? # would need to find out a way to know when to stop? process in some sort of sqrt progression?? diff --git a/my/pdfs.py b/my/pdfs.py index 1314f0e..5355d8a 100644 --- a/my/pdfs.py +++ b/my/pdfs.py @@ -79,7 +79,7 @@ class Annotation(NamedTuple): def _as_annotation(*, raw: pdfannots.Annotation, path: str) -> Annotation: d = vars(raw) pos = raw.pos - # make mypy happy (pos alwasy present for Annotation https://github.com/0xabu/pdfannots/blob/dbdfefa158971e1746fae2da139918e9f59439ea/pdfannots/types.py#L302) + # make mypy happy (pos always present for Annotation https://github.com/0xabu/pdfannots/blob/dbdfefa158971e1746fae2da139918e9f59439ea/pdfannots/types.py#L302) assert pos is not None d['page'] = pos.page.pageno return Annotation( diff --git a/my/photos/main.py b/my/photos/main.py index 69e5a46..c491ac1 100644 --- a/my/photos/main.py +++ b/my/photos/main.py @@ -43,7 +43,7 @@ class Photo(NamedTuple): if self.path.startswith(bp): return self.path[len(bp):] else: - raise RuntimeError(f'Weird path {self.path}, cant match against anything') + raise RuntimeError(f"Weird path {self.path}, can't match against anything") @property def name(self) -> str: diff --git a/my/photos/utils.py b/my/photos/utils.py index 15d7659..8c16dc5 100644 --- a/my/photos/utils.py +++ b/my/photos/utils.py @@ -48,7 +48,7 @@ def _get_exif_data(image) -> Exif: def to_degree(value) -> float: """Helper function to convert the GPS coordinates - stored in the EXIF to degress in float format""" + stored in the EXIF to degrees in float format""" (d, m, s) = value return d + (m / 60.0) + (s / 3600.0) @@ -65,7 +65,7 @@ from datetime import datetime from typing import Optional # TODO surely there is a library that does it?? -# TODO this belogs to a private overlay or something +# TODO this belongs to a private overlay or something # basically have a function that patches up dates after the files were yielded.. 
_DT_REGEX = re.compile(r'\D(\d{8})\D*(\d{6})\D') def dt_from_path(p: Path) -> Optional[datetime]: diff --git a/my/reddit/rexport.py b/my/reddit/rexport.py index 0924e55..a8ce651 100644 --- a/my/reddit/rexport.py +++ b/my/reddit/rexport.py @@ -197,7 +197,7 @@ def _get_events(backups: Sequence[Path], parallel: bool=True) -> Iterator[Event] # eh. I guess just take max and it will always be correct? assert not first yield Event( - dt=bdt, # TODO average wit ps.save_dt? + dt=bdt, # TODO average with ps.save_dt? text="unfavorited", kind=ps, eid=f'unf-{ps.sid}', diff --git a/my/taplog.py b/my/taplog.py index 6353c14..51eeb72 100644 --- a/my/taplog.py +++ b/my/taplog.py @@ -39,7 +39,7 @@ class Entry(NamedTuple): def timestamp(self) -> datetime: ts = self.row['timestamp'] # already with timezone apparently - # TODO not sure if should stil localize though? it only kept tz offset, not real tz + # TODO not sure if should still localize though? it only kept tz offset, not real tz return datetime.fromisoformat(ts) # TODO also has gps info! 
diff --git a/my/time/tz/via_location.py b/my/time/tz/via_location.py index e111a4a..7716be0 100644 --- a/my/time/tz/via_location.py +++ b/my/time/tz/via_location.py @@ -35,7 +35,7 @@ class config(user_config): fast: bool = True # sort locations by date - # incase multiple sources provide them out of order + # in case multiple sources provide them out of order sort_locations: bool = True # if the accuracy for the location is more than 5km, don't use @@ -94,7 +94,7 @@ def _locations() -> Iterator[Tuple[LatLon, datetime]]: except Exception as e: from my.core.warnings import high - logger.exception("Could not setup via_location using my.location.all provider, falling back to legacy google implemetation", exc_info=e) + logger.exception("Could not setup via_location using my.location.all provider, falling back to legacy google implementation", exc_info=e) high("Setup my.google.takeout.parser, then my.location.all for better google takeout/location data") import my.location.google @@ -134,7 +134,7 @@ def _find_tz_for_locs(finder: Any, locs: Iterable[Tuple[LatLon, datetime]]) -> I def _iter_local_dates() -> Iterator[DayWithZone]: finder = _timezone_finder(fast=config.fast) # rely on the default #pdt = None - # TODO: warnings doesnt actually warn? + # TODO: warnings doesn't actually warn? # warnings = [] locs: Iterable[Tuple[LatLon, datetime]] diff --git a/my/tinder/android.py b/my/tinder/android.py index 18b59d8..a820947 100644 --- a/my/tinder/android.py +++ b/my/tinder/android.py @@ -102,7 +102,7 @@ def _handle_db(db: sqlite3.Connection) -> Iterator[Res[_Entity]]: try: yield _parse_person(row) except Exception as e: - # todo attach error contex? + # todo attach error context? 
yield e for row in db.execute('SELECT * FROM match'): diff --git a/my/youtube/takeout.py b/my/youtube/takeout.py index 3d284b6..a3a2dda 100644 --- a/my/youtube/takeout.py +++ b/my/youtube/takeout.py @@ -68,7 +68,7 @@ def watched() -> Iterable[Res[Watched]]: continue if title.startswith('Subscribed to') and url.startswith('https://www.youtube.com/channel/'): - # todo might be interesting to process somwhere? + # todo might be interesting to process somewhere? continue # all titles contain it, so pointless to include 'Watched ' diff --git a/tests/bluemaestro.py b/tests/bluemaestro.py index 283bd77..c932d73 100644 --- a/tests/bluemaestro.py +++ b/tests/bluemaestro.py @@ -32,7 +32,7 @@ def test() -> None: assert len(tp) == 1 # should be unique - # 2.5 K + 4 K datapoints, somwhat overlapping + # 2.5 K + 4 K datapoints, somewhat overlapping assert len(res2020) < 6000 diff --git a/tests/config.py b/tests/config.py index 49138c3..cef3787 100644 --- a/tests/config.py +++ b/tests/config.py @@ -8,7 +8,7 @@ def test_dynamic_configuration(notes: Path) -> None: from my.core.cfg import tmp_config with tmp_config() as C: C.orgmode = NS(paths=[notes]) - # TODO ugh. this belongs to tz provider or global config or someting + # TODO ugh. 
this belongs to tz provider or global config or something C.weight = NS(default_timezone=pytz.timezone('Europe/London')) from my.body.weight import from_orgmode diff --git a/tests/core/test_denylist.py b/tests/core/test_denylist.py index d6f4c49..4e55a1f 100644 --- a/tests/core/test_denylist.py +++ b/tests/core/test_denylist.py @@ -72,7 +72,7 @@ def test_denylist(tmp_path: Path) -> None: d.deny(key="dt", value=datetime(2020, 2, 1)) # test internal behavior, _deny_raw_list should have been updated, - # but _deny_map doesnt get updated by a call to .deny + # but _deny_map doesn't get updated by a call to .deny # # if we change this just update the test, is just here to ensure # this is the behaviour diff --git a/tests/core/test_kompress.py b/tests/core/test_kompress.py index 97539cb..0e7d71b 100644 --- a/tests/core/test_kompress.py +++ b/tests/core/test_kompress.py @@ -98,7 +98,7 @@ def test_zippath() -> None: ], rpaths - # TODO hmm this doesn't work atm, wheras Path does + # TODO hmm this doesn't work atm, whereas Path does # not sure if it should be defensive or something... # ZipPath('doesnotexist') # same for this one diff --git a/tests/demo.py b/tests/demo.py index 436bc63..6ac937c 100644 --- a/tests/demo.py +++ b/tests/demo.py @@ -19,7 +19,7 @@ def test_dynamic_config_1(tmp_path: Path) -> None: assert item1.username == 'user' -# exactly the same test, but using a different config, to test out the behavious w.r.t. import order +# exactly the same test, but using a different config, to test out the behaviour w.r.t. import order def test_dynamic_config_2(tmp_path: Path) -> None: # doesn't work without it! 
# because the config from test_dybamic_config_1 is cached in my.demo.demo diff --git a/tests/extra/polar.py b/tests/extra/polar.py index 0fddcf3..1091f2a 100644 --- a/tests/extra/polar.py +++ b/tests/extra/polar.py @@ -38,7 +38,7 @@ PARAMS = [ def prepare(request): dotpolar = request.param class user_config: - if dotpolar != '': # defaul + if dotpolar != '': # default polar_dir = Path(ROOT / dotpolar) defensive = False diff --git a/tests/pdfs.py b/tests/pdfs.py index ae6318d..343a209 100644 --- a/tests/pdfs.py +++ b/tests/pdfs.py @@ -8,7 +8,7 @@ from .common import testdata def test_module(with_config) -> None: - # TODO crap. if module is imported too early (on the top level, it makes it super hard to overrride config) + # TODO crap. if module is imported too early (on the top level, it makes it super hard to override config) # need to at least detect it... from my.pdfs import annotations, annotated_pdfs diff --git a/tests/tz.py b/tests/tz.py index 8f80800..f2498a2 100644 --- a/tests/tz.py +++ b/tests/tz.py @@ -52,7 +52,7 @@ def test_tz() -> None: tz = LTZ._get_tz(datetime.min) assert tz is not None else: - # seems this fails because windows doesnt support same date ranges + # seems this fails because windows doesn't support same date ranges # https://stackoverflow.com/a/41400321/ with pytest.raises(OSError): LTZ._get_tz(datetime.min)