Add blobs + peer messages to bundle

Netscape Navigator 2020-04-22 08:21:55 -05:00
parent 4f38f12a1a
commit a3c4d86025
5 changed files with 46 additions and 37 deletions

.gitignore

@@ -5,3 +5,4 @@ doc/
*scratchpad*
pigeon.bundle
*.gem
bundle/


@@ -89,16 +89,19 @@ TODO
- [X] Add log count to `pigeon-cli status`
- [X] Delete `Draft#put` entirely.
- [X] Check block list before ingesting bundles.
- [ ] Need a way of importing / exporting a feed's blobs. (see "Bundle Brainstorming" below)
- [X] Need a way of importing / exporting a feed's blobs. (see "Bundle Brainstorming" below)
- [X] Need a way of adding peers' messages / gossip to bundles. (see "Bundle Brainstorming" below)
- [ ] Update README.md / tutorial.rb (user manual for `Pigeon::Database`).
- [ ] Make the switch to LevelDB, RocksDB, [UNQLite](https://unqlite.org/features.html) or similar (currently using Ruby PStore).
- [ ] Need a way of adding peers' messages / gossip to bundles. (see "Bundle Brainstorming" below)
- [ ] Update spec document CLI usage examples to reflect API changes in 2020.
- [ ] Publish to RubyGems
# Optimizations
- [ ] Add parsers and validators for all CLI inputs
- [ ] Make the switch to LevelDB, RocksDB, [UNQLite](https://unqlite.org/features.html) or similar (currently using Ruby PStore).
- [ ] Convert literals to constants, remove unused locals, reduce duplication.
- [ ] Reduce whole darn repo into single module to aid portability. `::Helpers` module is OK.
- [ ] Update the bundles.md document once `bundle consume` works.
- [ ] 100% documentation
- [ ] Update spec document CLI usage examples to reflect API changes in 2020.
- [ ] Publish to RubyGems
- [ ] Performance benchmarks (Do this second to last!)
- [ ] Performance tuning (Do this last!)
@@ -128,7 +131,7 @@ Here's how we will support that:
* All messages are expected to be sorted by depth
* Messages from multiple authors may be included in a single bundle, but the messages must appear in the correct order with regard to the `depth` field.
3. Blobs are stored in a very specific hierarchy to maintain FAT compatibility:
* `blobs/sha256/AAAAAAAA/BBBBBBBB/CCCCCCCC/DDDDDDDD/EEEEEEEE/FFFFFFFF/G.HHH`
* `blobs/bundle/7Z2CSZK/MB1RE5G/6SKXRZ6/3ZGCNP8/VVEM3K0/XFMYKET/RDQSM5W.BSG`
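To make that layout concrete, here is a small standalone sketch (illustration only, not code from this commit) that mirrors the 7-character slicing used by the new `Helpers.hash2file_path` further down; the `blob_path` name and the example hash are hypothetical:

# Illustration: split a 52-character base32 blob hash (sigil "&" stripped)
# into six 7-character directories plus a "NNNNNNN.XXX" file name.
def blob_path(multihash, base = "bundle")
  b32 = multihash.sub("&", "")
  dirs = (0...6).map { |i| b32[i * 7, 7] }
  file = "#{b32[42, 7]}.#{b32[49, 3]}"
  File.join(base, *dirs, file)
end

blob_path("&7Z2CSZKMB1RE5G6SKXRZ63ZGCNP8VVEM3K0XFMYKETRDQSM5WBSG")
# => "bundle/7Z2CSZK/MB1RE5G/6SKXRZ6/3ZGCNP8/VVEM3K0/XFMYKET/RDQSM5W.BSG"

Every segment stays within FAT's 8.3 short-name limit, which is presumably what keeps the hierarchy FAT-compatible.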
Additional notes:


@@ -210,15 +210,32 @@ module Pigeon
Dir.mkdir(path) unless Dir.exists?(path)
end
def self.split_file_path(b32_hash)
def self.write_to_disk(base_path, mhash, data)
p = Helpers.hash2file_path(mhash)
file_name = p.pop
Helpers.mkdir_p(base_path)
dir = p.reduce(base_path) do |accum, item|
path = File.join(accum, item)
Helpers.mkdir_p(path)
path
end
full_path = File.join(dir, file_name)
unless File.file?(full_path)
File.write(full_path, data)
end
end
def self.hash2file_path(mhash)
mhash = mhash.sub("&", "")
[
b32_hash[0],
b32_hash[1...9],
b32_hash[9...17],
b32_hash[17...25],
b32_hash[25...33],
b32_hash[33...41],
[b32_hash[41...49], ".", b32_hash[49...52]].join(""),
mhash[0...7],
mhash[7...14],
mhash[14...21],
mhash[21...28],
mhash[28...35],
mhash[35...42],
[mhash[42...49], ".", mhash[49...52]].join(""),
]
end
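For orientation, a hedged usage sketch of the extracted helper, based only on the signature above and the call sites elsewhere in this commit; the `Pigeon::` nesting of `Helpers`, `Database`, and `PIGEON_BLOB_PATH` is assumed, and `db` / `mhash` are illustrative:

data = db.get_blob(mhash)  # db is assumed to be a Pigeon::Database

# Bundle export: mirror the blob beneath the "bundle" base path.
Pigeon::Helpers.write_to_disk("bundle", mhash, data)

# Local blob store: the database's private write_to_disk now delegates
# here with PIGEON_BLOB_PATH as the base (see the final hunk below).
Pigeon::Helpers.write_to_disk(Pigeon::PIGEON_BLOB_PATH, mhash, data)

In both cases the helper creates the nested directories and skips the `File.write` when the target file already exists, so repeated calls for the same blob are harmless.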


@@ -106,15 +106,14 @@ module Pigeon
content = messages
.map { |message| message.render }
.join(BUNDLE_MESSAGE_SEPARATOR)
# MKdir
Helpers.mkdir_p("bundle")
# Get blobs for _all_ peers
blobs = messages.map(&:collect_blobs).flatten.uniq
# binding.pry if blobs.any?
# Write bundle to dir
# Link blobs to dir
File.write(file_path, content + CR)
messages
.map(&:collect_blobs)
.flatten
.uniq
.map { |mhash| ["bundle", mhash, get_blob(mhash)] }
.map { |arg| Helpers.write_to_disk(*arg) }
end
def ingest_bundle(file_path = DEFAULT_BUNDLE_PATH)
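For reference, a hedged sketch of what the blob-export code in this hunk leaves on disk when the bundled messages reference a single blob (the message log itself is written to `file_path`, whose default is not shown here):

bundle/
  7Z2CSZK/
    MB1RE5G/
      6SKXRZ6/
        3ZGCNP8/
          VVEM3K0/
            XFMYKET/
              RDQSM5W.BSG   # one file per unique blob referenced by the bundled messages

Blobs are de-duplicated before writing, so a blob shared by several peers' messages appears only once in the bundle.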


@@ -59,12 +59,12 @@ module Pigeon
raw_digest = Digest::SHA256.digest(data)
b32_hash = Helpers.b32_encode(raw_digest)
multihash = [BLOB_SIGIL, b32_hash, BLOB_FOOTER].join("")
write_to_disk(b32_hash, data)
write_to_disk(multihash, data)
multihash
end
def get_blob(blob_multihash)
path = File.join(Helpers.split_file_path(blob_multihash[1..52]))
path = File.join(Helpers.hash2file_path(blob_multihash))
path = File.join(PIGEON_BLOB_PATH, path)
if File.file?(path)
File.read(path)
@@ -132,23 +132,12 @@ module Pigeon
private
def write_to_disk(b32_hash, data)
p = Helpers.split_file_path(b32_hash)
file_name = p.pop
dir = p.reduce(PIGEON_BLOB_PATH) do |accum, item|
path = File.join(accum, item)
Helpers.mkdir_p(path)
path
end
full_path = File.join(dir, file_name)
unless File.file?(full_path)
File.write(full_path, data)
end
def write_to_disk(mhash, data)
Helpers.write_to_disk(PIGEON_BLOB_PATH, mhash, data)
end
def bootstrap
write do
# TODO: Why is there a depth and count index??
store[BLCK_NS] ||= Set.new
store[CONF_NS] ||= {}
store[COUNT_INDEX_NS] ||= {}