2019-11-27 01:46:50 +00:00
|
|
|
require "pstore"
|
|
|
|
|
2019-09-22 02:30:03 +00:00
|
|
|
module Pigeon
|
|
|
|
class Storage
|
2020-04-16 14:05:10 +00:00
|
|
|
# Filesystem path of the PStore database file this Storage was
# initialized with (defaults to PIGEON_DB_PATH; see #initialize).
attr_reader :path
|
|
|
|
|
|
|
|
# Open (or lazily create) the backing PStore database and make
# sure all namespaces exist.
#
# @param path [String] location of the PStore file
#   (defaults to PIGEON_DB_PATH).
def initialize(path: PIGEON_DB_PATH)
  @path = path
  # ultra_safe: ask PStore for its most conservative (atomic)
  # file-update behavior on commit.
  store.ultra_safe = true
  # NOTE(review): `store` constructs its PStore from the
  # PIGEON_DB_PATH constant, not from @path — a custom `path:`
  # is recorded (and used by #reset / #on_disk?) but not by the
  # store itself. Confirm this is intended.
  bootstrap
end
|
|
|
|
|
2020-04-16 14:05:10 +00:00
|
|
|
# Destroy the on-disk database file (when one exists) and
# rebuild the empty namespace structure from scratch.
def reset
  if on_disk?
    File.delete(path)
  end
  bootstrap
end
|
|
|
|
|
2020-04-11 14:42:43 +00:00
|
|
|
# Record +identity+ (a peer multihash) in the peer set.
# Returns the identity that was added.
def add_peer(identity)
  write do
    store[PEER_NS].add(identity)
  end
  identity
end
|
|
|
|
|
2020-04-11 14:42:43 +00:00
|
|
|
# Remove +identity+ from the peer set (no-op when absent).
# Returns the identity.
def remove_peer(identity)
  write do
    store[PEER_NS].delete(identity)
  end
  identity
end
|
|
|
|
|
2020-04-11 14:42:43 +00:00
|
|
|
# Drop +identity+ from the peer set and add it to the block
# set (consulted by insert_message to refuse messages).
# Returns the identity.
def block_peer(identity)
  remove_peer(identity)
  write do
    store[BLCK_NS].add(identity)
  end
  identity
end
|
2019-12-05 04:11:04 +00:00
|
|
|
|
2020-04-11 14:42:43 +00:00
|
|
|
# All known peer identities, as an Array.
def all_peers
  read do
    store[PEER_NS].to_a
  end
end
|
|
|
|
|
2020-04-11 14:42:43 +00:00
|
|
|
# All blocked identities, as an Array.
def all_blocks
  read do
    store[BLCK_NS].to_a
  end
end
|
|
|
|
|
2020-04-26 14:18:30 +00:00
|
|
|
# Fetch one value from the configuration namespace.
# Returns nil when +key+ is unset.
def _get_config(key)
  read do
    store[CONF_NS][key]
  end
end
|
|
|
|
|
2020-04-26 14:18:30 +00:00
|
|
|
# Store +value+ under +key+ in the configuration namespace.
def _add_config(key, value)
  write do
    # fetch (vs []) raises KeyError if CONF_NS was never
    # bootstrapped — fail loudly rather than writing into nil.
    a = store.fetch(CONF_NS)
    # Guard: refuse to persist objects that carry a live DB
    # handle in @db — presumably guarding against saving Draft
    # objects that reference this store; see message text.
    raise "FIX SAVED DRAFTS" if value.instance_variable_get(:@db)
    a[key] = value
  end
end
|
|
|
|
|
2020-04-23 12:21:00 +00:00
|
|
|
# Persist a binary blob to disk and return its multihash
# (BLOB_SIGIL + base32 SHA256 digest + BLOB_FOOTER).
# Raises when the blob exceeds BLOB_BYTE_LIMIT bytes.
def add_blob(data)
  byte_count = data.bytesize
  if byte_count > BLOB_BYTE_LIMIT
    raise "Blob size limit is #{BLOB_BYTE_LIMIT} bytes. Got #{byte_count}"
  end

  digest = Digest::SHA256.digest(data)
  encoded = Helpers.b32_encode(digest)
  multihash = "#{BLOB_SIGIL}#{encoded}#{BLOB_FOOTER}"
  write_to_disk(multihash, data)
  multihash
end
|
|
|
|
|
2020-03-14 02:59:13 +00:00
|
|
|
# Read a blob off disk by its multihash.
#
# @param blob_multihash [String] blob identifier
# @return [String, nil] the blob's bytes, or nil when not on disk
def get_blob(blob_multihash)
  # File.join flattens the Array returned by hash2file_path, so
  # the previous intermediate File.join was redundant; this now
  # mirrors have_blob? exactly.
  blob_path = File.join(PIGEON_BLOB_PATH, Helpers.hash2file_path(blob_multihash))
  File.read(blob_path) if File.file?(blob_path)
end
|
|
|
|
|
2020-04-11 14:42:43 +00:00
|
|
|
# Number of messages stored for +mhash+ (an author multihash).
# Returns 0 — not nil — when the author has no counted messages.
def get_message_count_for(mhash)
  read { store[COUNT_INDEX_NS][mhash] || 0 }
end
|
|
|
|
|
2020-04-23 12:21:00 +00:00
|
|
|
# When +author+ is given, walk the author/depth index and collect
# that author's messages in depth order. When +author+ is falsy,
# return the keys of every stored message.
def all_messages(author)
  # NOTE(review): this branch uses the literal "messages" key,
  # unlike the MESG_NS constant used elsewhere — presumably the
  # same value; confirm.
  return read { store["messages"].keys } unless author

  collected = []
  cursor = ""
  depth = -1
  # TODO: This loop may become unresponsive.
  until cursor.nil? || (depth > 99_999)
    depth += 1
    cursor = get_message_by_depth(author, depth)
    collected.push(cursor) if cursor
  end
  collected
end
|
|
|
|
|
2020-04-11 14:42:43 +00:00
|
|
|
# Look up the message an author published at a given depth.
# Index shape: Map<"author_multihash.depth", message multihash>.
def get_message_by_depth(multihash, depth)
  index_key = "#{multihash}.#{depth}"
  read { store[MESSAGE_BY_DEPTH_NS][index_key] }
end
|
|
|
|
|
2020-04-11 14:42:43 +00:00
|
|
|
# Fetch a stored message by multihash.
# Raises KeyError when the message is unknown (fetch semantics).
def read_message(multihash)
  read do
    store[MESG_NS].fetch(multihash)
  end
end
|
|
|
|
|
2020-04-18 14:59:56 +00:00
|
|
|
# Add +msg+ to the store unless it is already present or its
# author has been blocked. Returns +msg+ in every case.
def insert_message(msg)
  write do
    # Idempotent: an already-stored message is returned as-is.
    # NOTE(review): `return` exits insert_message from inside the
    # PStore transaction block (a non-local exit) — confirm the
    # transaction commits rather than aborts in this path.
    return msg if store[MESG_NS].fetch(msg.multihash, false)

    # Refuse messages from blocked authors (see block_peer).
    if store[BLCK_NS].member?(msg.author.multihash)
      warn("Blocked peer: #{msg.author.multihash}")
      return msg
    end

    # Persist the message and refresh the derived indices while
    # the write transaction is still open.
    insert_and_update_index(msg)
    msg
  end
end
|
|
|
|
|
2020-04-26 13:51:14 +00:00
|
|
|
# The stored message for +multihash+, or false when absent.
# (Truthy/falsy result — callers use it as a predicate.)
def have_message?(multihash)
  read do
    store[MESG_NS].fetch(multihash, false)
  end
end
|
|
|
|
|
|
|
|
# True when +multihash+ is on the block list.
def peer_blocked?(multihash)
  read do
    store[BLCK_NS].member?(multihash)
  end
end
|
|
|
|
|
2020-04-24 11:45:59 +00:00
|
|
|
# True when the blob identified by +multihash+ exists on disk.
def have_blob?(multihash)
  # Local renamed from `path` to avoid shadowing the attr_reader.
  blob_path = File.join(PIGEON_BLOB_PATH, Helpers.hash2file_path(multihash))
  File.file?(blob_path)
end
|
|
|
|
|
2020-04-11 14:42:43 +00:00
|
|
|
private
|
|
|
|
|
2020-04-22 13:21:55 +00:00
|
|
|
# Delegate blob persistence to Helpers, rooted at PIGEON_BLOB_PATH.
def write_to_disk(multihash, blob_data)
  Helpers.write_to_disk(PIGEON_BLOB_PATH, multihash, blob_data)
end
|
|
|
|
|
2020-03-14 03:34:29 +00:00
|
|
|
# Ensure every namespace exists in the PStore database and the
# blob directory exists on disk. Returns the store handle.
def bootstrap
  write do
    # Sets for membership-style namespaces; each gets its own
    # fresh default (||= preserves existing data).
    [BLCK_NS, PEER_NS].each { |ns| store[ns] ||= Set.new }
    # Hashes for key/value-style namespaces.
    [CONF_NS, COUNT_INDEX_NS, MESG_NS, MESSAGE_BY_DEPTH_NS].each do |ns|
      store[ns] ||= {}
    end
  end
  Helpers.mkdir_p(PIGEON_BLOB_PATH)
  store
end
|
|
|
|
|
2019-11-27 01:46:50 +00:00
|
|
|
# Lazily build (and memoize) the PStore handle backing this object.
#
# Fix: use the +path+ this Storage was initialized with instead of
# the hard-coded PIGEON_DB_PATH constant. Previously a custom
# `path:` passed to #initialize was recorded (and used by #reset
# and #on_disk?) but silently ignored here, so the store always
# wrote to the default location. Default behavior is unchanged
# because +path+ defaults to PIGEON_DB_PATH.
def store
  @store ||= PStore.new(path)
end
|
|
|
|
|
2020-03-11 13:43:54 +00:00
|
|
|
# Store +message+ and refresh the two derived indices.
# Must be called with a write transaction already open — it
# touches `store` directly, without the read/write wrappers
# (see the caller, insert_message).
def insert_and_update_index(message)
  pub_key = message.author.multihash

  # STEP 1: Update MESG_NS, the main storage spot.
  store[MESG_NS][message.multihash] = message

  # STEP 2: Update the "message by author and depth" index
  # this index is used to find a person's nth
  # message
  # SECURITY AUDIT: How can we be certain the message is
  # not lying about its depth?
  key = [pub_key, message.depth].join(".")
  store[MESSAGE_BY_DEPTH_NS][key] = message.multihash

  # STEP 3: Keep the per-author message count in sync.
  store[COUNT_INDEX_NS][pub_key] ||= 0
  store[COUNT_INDEX_NS][pub_key] += 1
end
|
2020-03-29 15:26:41 +00:00
|
|
|
|
|
|
|
# Run the given block inside a PStore transaction.
# Pass true to open the transaction read-only.
def transaction(is_read_only, &blk)
  store.transaction(is_read_only, &blk)
end
|
|
|
|
|
2020-04-25 15:11:25 +00:00
|
|
|
# Open a read-write transaction around the given block.
def write
  transaction(false) { yield }
end
|
|
|
|
|
|
|
|
# Open a read-only transaction around the given block.
def read
  transaction(true) { yield }
end
|
|
|
|
|
|
|
|
# True when the database file already exists at +path+.
def on_disk?
  File.file?(path)
end
|
2019-09-22 02:30:03 +00:00
|
|
|
end
|
|
|
|
end
|