merge all rss

commit 420d444633
parent 35b6d6ff97

4 changed files with 26 additions and 10 deletions
@@ -8,5 +8,3 @@ class Subscription(NamedTuple):
     id: str
     subscribed: bool=True
-
-
 
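For context, the record being trimmed here is a NamedTuple. A minimal sketch of its presumable shape after this commit, with the title/url fields inferred from the parse_file hunks below rather than taken from this diff:

    from typing import NamedTuple

    class Subscription(NamedTuple):
        title: str                # feed title, e.g. 'xkcd'
        url: str                  # website url, used as the merge key in my/rss.py
        id: str                   # provider-specific feed id
        subscribed: bool = True   # flipped to False for feeds absent from the latest export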
@@ -12,7 +12,6 @@ from dateutil.parser import isoparse
 @listify
 def parse_file(f: Path):
     raw = json.loads(f.read_text())
-    print(raw)
     for r in raw:
         yield Subscription(
             # TODO created_at?
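parse_file is a generator, so the @listify decorator presumably materializes it into a list for callers. The helper itself isn't part of this diff; a common implementation (an assumption, not necessarily this repo's exact code) looks like:

    from functools import wraps

    def listify(fn):
        # wrap a generator function so each call returns a plain list
        @wraps(fn)
        def wrapper(*args, **kwargs):
            return list(fn(*args, **kwargs))
        return wrapper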
@@ -6,7 +6,7 @@ import json
 from pathlib import Path
 from typing import Dict, List
 from datetime import datetime
-from dateutil.parser import isoparse
+import pytz
 
 
 @listify
@@ -14,12 +14,13 @@ def parse_file(f: Path):
     raw = json.loads(f.read_text())
     for r in raw:
         # err, some even don't have website..
-        website = r.get('website')
+        rid = r['id']
+        website = r.get('website', rid) # meh
         yield Subscription(
             # TODO created_at?
             title=r['title'],
             url=website,
-            id=r['id'],
+            id=rid,
         )
 
 
 def get_states() -> Dict[datetime, List[Subscription]]:
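Binding rid first lets the feed id double as a stand-in url when the export has no website, so the url-keyed merge in my/rss.py still sees every feed. dict.get with a default makes the fallback a one-liner; a hypothetical export entry to illustrate:

    r = {'id': 'feed/http://example.com/rss', 'title': 'example'}   # made-up entry, no 'website'
    rid = r['id']
    print(r.get('website', rid))    # missing 'website' key -> prints the id fallback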
@@ -27,6 +28,7 @@ def get_states() -> Dict[datetime, List[Subscription]]:
     for f in sorted(Path(paths.feedly.export_dir).glob('*.json')):
         dts = f.stem.split('_')[-1]
         dt = datetime.strptime(dts, '%Y%m%d%H%M%S')
+        dt = pytz.utc.localize(dt)
         subs = parse_file(f)
         res[dt] = subs
     return res
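datetime.strptime returns a naive datetime; pytz.utc.localize attaches UTC without shifting the wall-clock value, which keeps the export timestamps comparable across providers. A quick illustration (the timestamp value is made up, the format string is the one from the hunk above):

    from datetime import datetime
    import pytz

    dt = datetime.strptime('20190624220735', '%Y%m%d%H%M%S')
    print(dt.tzinfo)             # None -- naive
    aware = pytz.utc.localize(dt)
    print(aware.isoformat())     # 2019-06-24T22:07:35+00:00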
my/rss.py (25 changed lines)

@@ -1,12 +1,29 @@
+from itertools import chain
+from typing import List
+
+from ._rss import Subscription
+
 from . import feedbin
 from . import feedly
-from ._rss import Subscription
 
 # TODO google reader?
 
-def get_history():
+def get_all_subscriptions() -> List[Subscription]:
     """
-    It's useful to keep track of websites you unsubscribed from too,
+    Keeps track of everything I ever subscribed to. It's useful to keep track of unsubscribed too
     so you don't try to subscribe again (or at least take into account why you unsubscribed before)
     """
-    pass
+    states = {}
+    states.update(feedly.get_states())
+    states.update(feedbin.get_states())
+    by_url = {}
+    for d, feeds in sorted(states.items()):
+        for f in feeds:
+            if f.url not in by_url:
+                by_url[f.url] = f
+    res = []
+    last = {x.url: x for x in max(states.items())[1]}
+    for u, x in sorted(by_url.items()):
+        present = u in last
+        res.append(x._replace(subscribed=present))
+    return res
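The merge keeps the first Subscription ever seen for each url, then max(states.items())[1] picks the feed list belonging to the latest snapshot, so anything missing from it gets subscribed=False (the chain import added at the top appears unused as of this commit). A hedged usage sketch, assuming the module lives at my.rss as in the diff:

    from my.rss import get_all_subscriptions

    subs = get_all_subscriptions()
    active = [s for s in subs if s.subscribed]
    gone   = [s for s in subs if not s.subscribed]
    print(f'{len(active)} active, {len(gone)} unsubscribed over time')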