#!/usr/bin/env python3
|
|
import requests
|
|
import time
|
|
import json
|
|
import pint
|
|
import random
|
|
from typing import Optional
|
|
from mastodon import Mastodon
|
|
|
|
# Shared pint unit registry; used by get_a_food_action() for quantity
# conversions.
ureg = pint.UnitRegistry()

# Upper bounds for randomly generated food amounts, each expressed in the
# milli- base unit its dimension is generated in (converted to a random
# unit later).
max_food_weight = 100000 # in mg
max_food_length = 1000 # in mm
max_food_volume = 2000 # in ml

# Toot template, filled with (action, noun, noun).
meme = "*finishes {}* wow. that was truly the {} of {}."
|
|
|
|
# One-time client registration -- already performed, kept for reference:
#
# Mastodon.create_app(
#     'minecraftofbots',
#     api_base_url = 'https://botsin.space',
#     to_file = 'pytooter_clientcred.secret'
# )

# Read the account password (first line of an untracked file). A context
# manager closes the handle promptly; the previous
# open(...).readlines()[0] form leaked it.
with open('./bot_password', 'r') as password_file:
    bot_password = password_file.readline().strip('\n')

# Log in once with the password to (re)write the user credential file...
mastodon = Mastodon(
    client_id = 'pytooter_clientcred.secret',
    api_base_url = 'https://botsin.space'
)
mastodon.log_in(
    'sose-iwnl+mcofbots@protonmail.com',
    bot_password,
    to_file = 'pytooter_usercred.secret'
)

# ...then rebind the client to the saved access token for actual tooting.
mastodon = Mastodon(
    access_token = 'pytooter_usercred.secret',
    api_base_url = 'https://botsin.space'
)
|
|
|
|
def sleep(seconds):
    """Sleep for `seconds` seconds, one second at a time, so a
    KeyboardInterrupt is noticed within a second instead of only after the
    full (possibly 12-hour) wait. Exits the whole process on Ctrl-C.
    """
    for _ in range(seconds):
        try:
            time.sleep(1)
        except KeyboardInterrupt:
            print("killed")
            # exit() does not return; the original's trailing `break`
            # after it was unreachable and has been removed.
            exit()
|
|
|
|
def get_more_words():
    """Fetch up to 500 uncountable-noun titles from Wiktionary, append them
    to ./good_words, and record the API continuation token in
    ./continue_file so the next call resumes where this one stopped.

    NOTE(review): an identical definition appears later in this file and
    shadows this one; one of the two copies should be deleted.

    Returns None on request failure (the original returned False here,
    inconsistently with its duplicate).
    """
    # 500 at a time so random.choice over the local file gives some
    # semblance of randomness.
    max_words: str = "500"

    with open("./continue_file", "r") as continue_file:
        continue_point = continue_file.read()

    # Build the query once; the continuation token is simply appended when
    # present (the original duplicated the whole URL in an if/else).
    url: str = ("https://en.wiktionary.org/w/api.php?action=query"
                + "&format=json"
                + "&generator=categorymembers"
                + "&gcmnamespace=0"
                + "&gcmtitle=Category:English_uncountable_nouns"
                + "&gcmsort=timestamp"
                + "&gcmlimit=" + max_words)
    if continue_point.strip() != "":
        url += "&gcmcontinue=" + continue_point

    response: requests.Response = requests.get(url)
    if not response.ok:
        print("request failed with status code {}: {}"
              .format(response.status_code, response.reason))
        return None
    response_json: dict = response.json()

    # On the category's final page the "continue" key is absent; fall back
    # to an empty token so the next call starts over instead of raising
    # KeyError.
    new_continue_point: str = (response_json.get("continue", {})
                               .get("gcmcontinue", ""))
    words: list = [page["title"]
                   for page in response_json["query"]["pages"].values()]

    with open("./good_words", "a") as good_words_file:
        for word in words:
            good_words_file.write(word + "\n")
        print("wrote new words to file")

    with open("./continue_file", "w") as continue_file:
        continue_file.write(new_continue_point)
        print("set to continue from " + new_continue_point)
|
|
|
|
def get_a_food_action():
    """Build an "eating a <amount> <food item>" phrase.

    Reads the next food item from ConceptNet (paged one edge per call via
    ./food_item_offset) and picks a random line from ./units. Each units
    line appears to be "<m|v|l> <unit-name>", the first character selecting
    mass/volume/length -- TODO confirm against the units file.

    Returns the phrase, or None when the HTTP request fails or the chosen
    units line has an unknown prefix.
    """
    # "r" instead of the original "r+": the offset file is only read here
    # (it is rewritten separately below).
    with open("./food_item_offset", "r") as food_item_offset_file:
        food_item_offset: str = food_item_offset_file.read().strip()

    food_item_url: str = ("https://api.conceptnet.io/query"
                          + "?end=/c/en/food"
                          + "&rel=/r/IsA"
                          + "&offset=" + food_item_offset
                          + "&limit=1")

    print("getting " + food_item_url)
    response: requests.Response = requests.get(food_item_url)

    if not response.ok:
        print("request failed with status code {}: {}"
              .format(response.status_code, response.reason))
        return None
    food_item: str = response.json()["edges"][0]["start"]["label"]

    with open("./units", "r") as units_file:
        unit: str = random.choice(units_file.readlines())

    # Generate the amount in the matching milli- base unit, then convert
    # it to the chosen unit below.
    if unit[0] == "m":
        amount = random.randrange(1, max_food_weight) * ureg.milligram
    elif unit[0] == "v":
        amount = random.randrange(1, max_food_volume) * ureg.milliliter
    elif unit[0] == "l":
        amount = random.randrange(1, max_food_length) * ureg.millimeter
    else:
        # The original fell through here with `amount` unbound and raised
        # NameError; treat a malformed units line as a failed action.
        print("unknown unit prefix in line: " + unit.strip())
        return None

    # Second whitespace-separated field is the pint unit name.
    amount = amount.to(ureg(unit.strip().split()[1]))
    amount = ureg.Quantity(round(amount.m, 2), amount.u)

    # Advance to the next ConceptNet edge for the following call.
    with open("./food_item_offset", "w") as food_item_offset_file:
        food_item_offset_file.write(str(int(food_item_offset) + 1))

    # pint spells multi-word units with underscores; toots want spaces.
    return "eating a " + str(amount).replace("_", " ") + " " + food_item
|
|
|
|
def get_an_action():
    """Fetch the next "person CapableOf X" phrase from ConceptNet and
    conjugate it via the lt-nlgservice API.

    ./action_offset pages through ConceptNet one edge per call; it is
    advanced whenever an action was consumed (conjugated, or found
    unconjugatable), but not when an HTTP request fails, so failed
    requests are retried with the same action.

    Returns the conjugated action with its leading pronoun removed, or
    None on failure.
    """
    with open("./action_offset", "r") as action_offset_file:
        action_offset: str = action_offset_file.read().strip()

    def bump_offset():
        # The action at the current offset has been consumed; move on.
        # (The original duplicated this write in two places.)
        with open("./action_offset", "w") as action_offset_file:
            action_offset_file.write(str(int(action_offset) + 1))

    action_url: str = ("https://api.conceptnet.io/query?start=/c/en/person"
                       + "&rel=/r/CapableOf"
                       + "&offset=" + action_offset
                       + "&limit=1")

    print("getting " + action_url)
    response: requests.Response = requests.get(action_url)

    if not response.ok:
        print("request failed with status code {}: {}"
              .format(response.status_code, response.reason))
        return None
    unconjugated_action: str = response.json()["edges"][0]["end"]["label"]

    conjugation_url: str = ("https://lt-nlgservice.herokuapp.com"
                            + "/rest/english/conjugate"
                            + "?verb=" + unconjugated_action)
    print("getting " + conjugation_url)
    response = requests.get(conjugation_url)

    if not response.ok:
        print("request failed with status code {}: {}"
              .format(response.status_code, response.reason))
        return None

    conjugation: dict = response.json()  # parse once, not per access
    if conjugation["result"] != "OK":
        print("couldn't conjugate " + unconjugated_action)
        bump_offset()
        return None

    # Indicative table index 5, first form -- presumably the tense wanted
    # after "*finishes ...*"; TODO confirm against the lt-nlgservice docs.
    conjugated_action: str = (
        conjugation["conjugation_tables"]["indicative"][5]["forms"][0][1])
    # Drop the leading pronoun (e.g. "he eats" -> "eats").
    conjugated_action = " ".join(conjugated_action.split()[1:])

    bump_offset()
    return conjugated_action
|
|
|
|
def get_more_words():
    """Fetch up to 500 uncountable-noun titles from Wiktionary, append them
    to ./good_words, and record the API continuation token in
    ./continue_file so the next call resumes where this one stopped.

    NOTE(review): an identical definition appears earlier in this file;
    this later copy is the effective binding -- delete one of the two.

    Returns None on request failure.
    """
    # 500 at a time so random.choice over the local file gives some
    # semblance of randomness.
    max_words: str = "500"

    with open("./continue_file", "r") as continue_file:
        continue_point = continue_file.read()

    # Build the query once; the continuation token is simply appended when
    # present (the original duplicated the whole URL in an if/else).
    url: str = ("https://en.wiktionary.org/w/api.php?action=query"
                + "&format=json"
                + "&generator=categorymembers"
                + "&gcmnamespace=0"
                + "&gcmtitle=Category:English_uncountable_nouns"
                + "&gcmsort=timestamp"
                + "&gcmlimit=" + max_words)
    if continue_point.strip() != "":
        url += "&gcmcontinue=" + continue_point

    response: requests.Response = requests.get(url)
    if not response.ok:
        print("request failed with status code {}: {}"
              .format(response.status_code, response.reason))
        return None
    response_json: dict = response.json()

    # On the category's final page the "continue" key is absent; fall back
    # to an empty token so the next call starts over instead of raising
    # KeyError.
    new_continue_point: str = (response_json.get("continue", {})
                               .get("gcmcontinue", ""))
    words: list = [page["title"]
                   for page in response_json["query"]["pages"].values()]

    with open("./good_words", "a") as good_words_file:
        for word in words:
            good_words_file.write(word + "\n")
        print("wrote new words to file")

    with open("./continue_file", "w") as continue_file:
        continue_file.write(new_continue_point)
        print("set to continue from " + new_continue_point)
|
|
|
|
def get_a_noun():
    """Pick a random word from ./good_words, remove it from the pool, and
    return it stripped of its trailing newline.

    When this pick empties the pool, refill it via get_more_words().
    Assumes ./good_words is non-empty -- random.choice raises IndexError
    on an empty pool.
    """
    with open("./good_words", "r") as good_words_file:
        words: list = good_words_file.readlines()
    word: str = random.choice(words)
    words.remove(word)  # clearer than words.pop(words.index(word))

    with open("./good_words", "w") as good_words_file:
        good_words_file.writelines(words)

    # Archive the used word (still newline-terminated); nothing in this
    # file reads ./bad_words back.
    with open("./bad_words", "a") as bad_words_file:
        bad_words_file.write(word)

    if not words:
        print("out of words, getting more words...")
        get_more_words()

    word = word.strip()
    print("chose " + word)
    return word
|
|
|
|
while True:
    print("forming another toot...")
    action: Optional[str]
    # Coin flip between a generic "person CapableOf" action and a food
    # action; food toots are lowercased, matching the original behaviour.
    if random.randint(0, 1) == 0:
        action = get_an_action()
        lowercase = False
    else:
        action = get_a_food_action()
        lowercase = True

    if action is None:
        # The original retried immediately here, hammering the remote APIs
        # in a tight loop on persistent failure; back off briefly instead.
        print("action failed, continuing...")
        sleep(60)
        continue

    toot: str = meme.format(action, get_a_noun(), get_a_noun())
    if lowercase:
        toot = toot.lower()
    mastodon.toot(toot)
    print("tooted " + toot)

    sleep(43200)  # toot twice a day
|