feat: custom gossip implementation (#181)

* .

* rename global to app_state

* refactor event tracker

* gossip

* .

* .
Author: reya
Date: 2025-10-10 17:36:38 +07:00
Committed by: GitHub
Parent: b7693444e6
Commit: 68a8ec7a69
34 changed files with 1020 additions and 913 deletions


@@ -0,0 +1,17 @@
[package]
name = "app_state"
version.workspace = true
edition.workspace = true
publish.workspace = true
[dependencies]
nostr-sdk.workspace = true
nostr-lmdb.workspace = true
dirs.workspace = true
smol.workspace = true
flume.workspace = true
log.workspace = true
anyhow.workspace = true
whoami = "1.5.2"
rustls = "0.23.23"


@@ -0,0 +1,61 @@
pub const APP_NAME: &str = "Coop";
pub const APP_ID: &str = "su.reya.coop";
pub const APP_PUBKEY: &str = "dW50cnVzdGVkIGNvbW1lbnQ6IG1pbmlzaWduIHB1YmxpYyBrZXk6IDc4MkNFRkQ2RkVGQURGNzUKUldSMTMvcisxdThzZUZraHc4Vno3NVNJek81VkJFUEV3MkJweGFxQXhpekdSU1JIekpqMG4yemMK";
pub const APP_UPDATER_ENDPOINT: &str = "https://coop-updater.reya.su/";
pub const KEYRING_URL: &str = "Coop Safe Storage";
pub const ACCOUNT_IDENTIFIER: &str = "coop:user";
pub const SETTINGS_IDENTIFIER: &str = "coop:settings";
/// Bootstrap Relays.
pub const BOOTSTRAP_RELAYS: [&str; 5] = [
"wss://relay.damus.io",
"wss://relay.primal.net",
"wss://relay.nos.social",
"wss://user.kindpag.es",
"wss://purplepag.es",
];
/// Search Relays.
pub const SEARCH_RELAYS: [&str; 1] = ["wss://relay.nostr.band"];
/// NIP-65 relays. Used for new accounts
pub const NIP65_RELAYS: [&str; 4] = [
"wss://relay.damus.io",
"wss://relay.primal.net",
"wss://relay.nostr.net",
"wss://nos.lol",
];
/// Messaging relays. Used for new accounts
pub const NIP17_RELAYS: [&str; 2] = ["wss://nip17.com", "wss://auth.nostr1.com"];
/// Default relay for Nostr Connect
pub const NOSTR_CONNECT_RELAY: &str = "wss://relay.nsec.app";
/// Default retry count for fetching NIP-17 relays
pub const RELAY_RETRY: u64 = 2;
/// Default retry count for sending messages
pub const SEND_RETRY: u64 = 10;
/// Default timeout (in seconds) for Nostr Connect
pub const NOSTR_CONNECT_TIMEOUT: u64 = 200;
/// Default timeout (in seconds) for Nostr Connect (Bunker)
pub const BUNKER_TIMEOUT: u64 = 30;
/// Maximum number of metadata requests grouped into a single batch.
pub const METADATA_BATCH_LIMIT: usize = 100;
/// Maximum timeout (in milliseconds) for grouping metadata requests.
pub const METADATA_BATCH_TIMEOUT: u64 = 300;
/// Default width of the sidebar.
pub const DEFAULT_SIDEBAR_WIDTH: f32 = 240.;
/// Image Resize Service
pub const IMAGE_RESIZE_SERVICE: &str = "https://wsrv.nl";
/// Default NIP96 Media Server.
pub const NIP96_SERVER: &str = "https://nostrmedia.com";


@@ -0,0 +1,44 @@
use std::sync::OnceLock;
use std::time::Duration;
use nostr_lmdb::NostrLMDB;
use nostr_sdk::prelude::*;
use paths::nostr_file;
use crate::state::AppState;
pub mod constants;
pub mod paths;
pub mod state;
static APP_STATE: OnceLock<AppState> = OnceLock::new();
static NOSTR_CLIENT: OnceLock<Client> = OnceLock::new();
/// Returns the global application state, initializing it on first access.
pub fn app_state() -> &'static AppState {
APP_STATE.get_or_init(AppState::new)
}
/// Returns the global Nostr client, initializing it on first access.
pub fn nostr_client() -> &'static Client {
NOSTR_CLIENT.get_or_init(|| {
// rustls uses the `aws_lc_rs` provider by default
// This only errors if the default provider has already
// been installed. We can ignore this `Result`.
rustls::crypto::aws_lc_rs::default_provider()
.install_default()
.ok();
let lmdb = NostrLMDB::open(nostr_file()).expect("Database is NOT initialized");
let opts = ClientOptions::new()
.gossip(false)
.automatic_authentication(false)
.verify_subscriptions(false)
.sleep_when_idle(SleepWhenIdle::Enabled {
timeout: Duration::from_secs(600),
});
ClientBuilder::default().database(lmdb).opts(opts).build()
})
}
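
Both accessors return 'static references and initialize lazily on first call, so any part of the application can reach the shared client and state without passing handles around. A minimal usage sketch, assuming the crate is consumed under the app_state name and that the log crate is initialized by the caller:

use std::sync::atomic::Ordering;

use app_state::{app_state, nostr_client};
use nostr_sdk::prelude::*;

async fn log_startup_info() -> Result<(), anyhow::Error> {
    // First call opens the LMDB database and builds the client; later calls reuse it.
    let client = nostr_client();
    let cached_profiles = client
        .database()
        .count(Filter::new().kind(Kind::Metadata))
        .await?;
    log::info!("cached profile events: {cached_profiles}");

    // The application state is a lazily initialized static as well.
    let state = app_state();
    let first_run = state.is_first_run.load(Ordering::Relaxed);
    log::info!("first run: {first_run}");

    Ok(())
}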


@@ -0,0 +1,64 @@
use std::path::PathBuf;
use std::sync::OnceLock;
/// Returns the path to the user's home directory.
pub fn home_dir() -> &'static PathBuf {
static HOME_DIR: OnceLock<PathBuf> = OnceLock::new();
HOME_DIR.get_or_init(|| dirs::home_dir().expect("failed to determine home directory"))
}
/// Returns the path to the configuration directory used by Coop.
pub fn config_dir() -> &'static PathBuf {
static CONFIG_DIR: OnceLock<PathBuf> = OnceLock::new();
CONFIG_DIR.get_or_init(|| {
if cfg!(target_os = "windows") {
return dirs::config_dir()
.expect("failed to determine RoamingAppData directory")
.join("Coop");
}
if cfg!(any(target_os = "linux", target_os = "freebsd")) {
return if let Ok(flatpak_xdg_config) = std::env::var("FLATPAK_XDG_CONFIG_HOME") {
flatpak_xdg_config.into()
} else {
dirs::config_dir().expect("failed to determine XDG_CONFIG_HOME directory")
}
.join("coop");
}
home_dir().join(".config").join("coop")
})
}
/// Returns the path to the support directory used by Coop.
pub fn support_dir() -> &'static PathBuf {
static SUPPORT_DIR: OnceLock<PathBuf> = OnceLock::new();
SUPPORT_DIR.get_or_init(|| {
if cfg!(target_os = "macos") {
return home_dir().join("Library/Application Support/Coop");
}
if cfg!(any(target_os = "linux", target_os = "freebsd")) {
return if let Ok(flatpak_xdg_data) = std::env::var("FLATPAK_XDG_DATA_HOME") {
flatpak_xdg_data.into()
} else {
dirs::data_local_dir().expect("failed to determine XDG_DATA_HOME directory")
}
.join("coop");
}
if cfg!(target_os = "windows") {
return dirs::data_local_dir()
.expect("failed to determine LocalAppData directory")
.join("coop");
}
config_dir().clone()
})
}
/// Returns the path to the `nostr` file.
pub fn nostr_file() -> &'static PathBuf {
static NOSTR_FILE: OnceLock<PathBuf> = OnceLock::new();
NOSTR_FILE.get_or_init(|| support_dir().join("nostr-db"))
}


@@ -0,0 +1,217 @@
use std::collections::{HashMap, HashSet};
use std::time::Duration;
use anyhow::{anyhow, Error};
use nostr_sdk::prelude::*;
use crate::constants::BOOTSTRAP_RELAYS;
use crate::state::SignalKind;
use crate::{app_state, nostr_client};
#[derive(Debug, Clone, Default)]
pub struct Gossip {
pub nip17: HashMap<PublicKey, HashSet<RelayUrl>>,
pub nip65: HashMap<PublicKey, HashSet<(RelayUrl, Option<RelayMetadata>)>>,
}
impl Gossip {
pub fn insert(&mut self, event: &Event) {
match event.kind {
Kind::InboxRelays => {
let urls: Vec<RelayUrl> = nip17::extract_relay_list(event).cloned().collect();
if !urls.is_empty() {
self.nip17.entry(event.pubkey).or_default().extend(urls);
}
}
Kind::RelayList => {
let urls: Vec<(RelayUrl, Option<RelayMetadata>)> = nip65::extract_relay_list(event)
.map(|(url, metadata)| (url.to_owned(), metadata.to_owned()))
.collect();
if !urls.is_empty() {
self.nip65.entry(event.pubkey).or_default().extend(urls);
}
}
_ => {}
}
}
pub fn write_relays(&self, public_key: &PublicKey) -> Vec<&RelayUrl> {
self.nip65
.get(public_key)
.map(|relays| {
relays
.iter()
                    .filter(|(_, metadata)| metadata.as_ref() != Some(&RelayMetadata::Read))
.map(|(url, _)| url)
.take(3)
.collect()
})
.unwrap_or_default()
}
pub fn read_relays(&self, public_key: &PublicKey) -> Vec<&RelayUrl> {
self.nip65
.get(public_key)
.map(|relays| {
relays
.iter()
                    .filter(|(_, metadata)| metadata.as_ref() != Some(&RelayMetadata::Write))
.map(|(url, _)| url)
.take(3)
.collect()
})
.unwrap_or_default()
}
pub fn messaging_relays(&self, public_key: &PublicKey) -> Vec<&RelayUrl> {
self.nip17
.get(public_key)
.map(|relays| relays.iter().collect())
.unwrap_or_default()
}
pub async fn get_nip65(&mut self, public_key: PublicKey) -> Result<(), Error> {
let client = nostr_client();
let timeout = Duration::from_secs(5);
let opts = SubscribeAutoCloseOptions::default().exit_policy(ReqExitPolicy::ExitOnEOSE);
let filter = Filter::new()
.kind(Kind::RelayList)
.author(public_key)
.limit(1);
// Subscribe to events from the bootstrapping relays
client
.subscribe_to(BOOTSTRAP_RELAYS, filter.clone(), Some(opts))
.await?;
// Verify the received data after a timeout
smol::spawn(async move {
smol::Timer::after(timeout).await;
if client.database().count(filter).await.unwrap_or(0) < 1 {
app_state()
.signal
.send(SignalKind::GossipRelaysNotFound)
.await;
}
})
.detach();
Ok(())
}
pub async fn get_nip17(&mut self, public_key: PublicKey) -> Result<(), Error> {
let client = nostr_client();
let timeout = Duration::from_secs(5);
let opts = SubscribeAutoCloseOptions::default().exit_policy(ReqExitPolicy::ExitOnEOSE);
let filter = Filter::new()
.kind(Kind::InboxRelays)
.author(public_key)
.limit(1);
let urls = self.write_relays(&public_key);
        // Ensure the user has at least one write relay
if urls.is_empty() {
return Err(anyhow!("NIP-17 relays are empty"));
}
// Ensure connection to relays
for url in urls.iter().cloned() {
client.add_relay(url).await?;
client.connect_relay(url).await?;
}
        // Subscribe to events from the user's write relays
client
.subscribe_to(urls, filter.clone(), Some(opts))
.await?;
// Verify the received data after a timeout
smol::spawn(async move {
smol::Timer::after(timeout).await;
if client.database().count(filter).await.unwrap_or(0) < 1 {
app_state()
.signal
.send(SignalKind::MessagingRelaysNotFound)
.await;
}
})
.detach();
Ok(())
}
pub async fn subscribe(&mut self, public_key: PublicKey, kind: Kind) -> Result<(), Error> {
let client = nostr_client();
let opts = SubscribeAutoCloseOptions::default().exit_policy(ReqExitPolicy::ExitOnEOSE);
let filter = Filter::new().author(public_key).kind(kind).limit(1);
let urls = self.write_relays(&public_key);
        // Ensure the user has at least one write relay
if urls.is_empty() {
return Err(anyhow!("NIP-65 relays are empty"));
}
// Ensure connection to relays
for url in urls.iter().cloned() {
client.add_relay(url).await?;
client.connect_relay(url).await?;
}
        // Subscribe to the filter on the user's write relays
client.subscribe_to(urls, filter, Some(opts)).await?;
Ok(())
}
pub async fn bulk_subscribe(&mut self, public_keys: HashSet<PublicKey>) -> Result<(), Error> {
if public_keys.is_empty() {
return Err(anyhow!("You need at least one public key"));
}
let client = nostr_client();
let opts = SubscribeAutoCloseOptions::default().exit_policy(ReqExitPolicy::ExitOnEOSE);
let kinds = vec![Kind::Metadata, Kind::ContactList, Kind::RelayList];
let limit = public_keys.len() * kinds.len() + 20;
let filter = Filter::new().authors(public_keys).kinds(kinds).limit(limit);
let urls = BOOTSTRAP_RELAYS;
        // Subscribe to the filter on the bootstrap relays
client.subscribe_to(urls, filter, Some(opts)).await?;
Ok(())
}
/// Monitor all gift wrap events in the messaging relays for a given public key
pub async fn monitor_inbox(&mut self, public_key: PublicKey) -> Result<(), Error> {
let client = nostr_client();
let id = SubscriptionId::new("inbox");
let filter = Filter::new().kind(Kind::GiftWrap).pubkey(public_key);
let urls = self.messaging_relays(&public_key);
        // Ensure the user has at least one messaging relay
if urls.is_empty() {
return Err(anyhow!("Messaging relays are empty"));
}
// Ensure connection to relays
for url in urls.iter().cloned() {
client.add_relay(url).await?;
client.connect_relay(url).await?;
}
        // Subscribe to the filter on the user's messaging relays
client.subscribe_with_id_to(urls, id, filter, None).await?;
Ok(())
}
}
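
A sketch of how the Gossip table can be consumed, assuming relay list events are routed in from the relay pool; the fallback from NIP-17 messaging relays to NIP-65 read relays is illustrative only and not something this commit implements:

use app_state::app_state;
use app_state::state::gossip::Gossip;
use nostr_sdk::prelude::*;

/// Index kind 10002 (RelayList) and 10050 (InboxRelays) events as they arrive.
async fn on_relay_list_event(event: &Event) {
    let mut gossip = app_state().gossip.write().await;
    gossip.insert(event);
}

/// Resolve which relays to publish a direct message to for a given peer.
fn relays_for_dm(gossip: &Gossip, peer: &PublicKey) -> Vec<RelayUrl> {
    // Prefer the peer's NIP-17 messaging relays.
    let nip17: Vec<RelayUrl> = gossip.messaging_relays(peer).into_iter().cloned().collect();
    if !nip17.is_empty() {
        return nip17;
    }
    // Illustrative fallback: use the peer's NIP-65 read relays instead.
    gossip.read_relays(peer).into_iter().cloned().collect()
}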


@@ -0,0 +1,516 @@
use std::borrow::Cow;
use std::collections::{HashMap, HashSet};
use std::sync::atomic::{AtomicBool, Ordering};
use std::time::Duration;
use anyhow::{anyhow, Error};
use flume::{Receiver, Sender};
use nostr_sdk::prelude::*;
use smol::lock::RwLock;
use crate::constants::{
BOOTSTRAP_RELAYS, METADATA_BATCH_LIMIT, METADATA_BATCH_TIMEOUT, SEARCH_RELAYS,
};
use crate::nostr_client;
use crate::paths::support_dir;
use crate::state::gossip::Gossip;
pub mod gossip;
#[derive(Debug, Clone, PartialEq, Eq, PartialOrd, Ord)]
pub struct AuthRequest {
pub url: RelayUrl,
pub challenge: String,
pub sending: bool,
}
impl AuthRequest {
pub fn new(challenge: impl Into<String>, url: RelayUrl) -> Self {
Self {
challenge: challenge.into(),
sending: false,
url,
}
}
}
#[derive(Debug, Clone, Default, PartialEq, Eq, PartialOrd, Ord)]
pub enum UnwrappingStatus {
#[default]
Initialized,
Processing,
Complete,
}
/// Signals sent through the global event channel to notify UI
#[derive(Debug)]
pub enum SignalKind {
/// A signal to notify UI that the client's signer has been set
SignerSet(PublicKey),
/// A signal to notify UI that the client's signer has been unset
SignerUnset,
/// A signal to notify UI that the relay requires authentication
Auth(AuthRequest),
/// A signal to notify UI that the browser proxy service is down
ProxyDown,
/// A signal to notify UI that a new profile has been received
NewProfile(Profile),
/// A signal to notify UI that a new gift wrap event has been received
NewMessage((EventId, Event)),
    /// A signal to notify UI that no messaging relays were found for the current user
MessagingRelaysNotFound,
    /// A signal to notify UI that no gossip relays were found for the current user
GossipRelaysNotFound,
/// A signal to notify UI that gift wrap status has changed
GiftWrapStatus(UnwrappingStatus),
}
#[derive(Debug)]
pub struct Signal {
rx: Receiver<SignalKind>,
tx: Sender<SignalKind>,
}
impl Default for Signal {
fn default() -> Self {
Self::new()
}
}
impl Signal {
pub fn new() -> Self {
let (tx, rx) = flume::bounded::<SignalKind>(2048);
Self { rx, tx }
}
pub fn receiver(&self) -> &Receiver<SignalKind> {
&self.rx
}
pub async fn send(&self, kind: SignalKind) {
if let Err(e) = self.tx.send_async(kind).await {
log::error!("Failed to send signal: {e}");
}
}
}
#[derive(Debug)]
pub struct Ingester {
rx: Receiver<PublicKey>,
tx: Sender<PublicKey>,
}
impl Default for Ingester {
fn default() -> Self {
Self::new()
}
}
impl Ingester {
pub fn new() -> Self {
let (tx, rx) = flume::bounded::<PublicKey>(1024);
Self { rx, tx }
}
pub fn receiver(&self) -> &Receiver<PublicKey> {
&self.rx
}
pub async fn send(&self, public_key: PublicKey) {
if let Err(e) = self.tx.send_async(public_key).await {
log::error!("Failed to send public key: {e}");
}
}
}
#[derive(Debug, Clone, Default)]
pub struct EventTracker {
/// Tracking events that have been resent by Coop in the current session
pub resent_ids: Vec<Output<EventId>>,
/// Temporarily store events that need to be resent later
pub resend_queue: HashMap<EventId, RelayUrl>,
/// Tracking events sent by Coop in the current session
pub sent_ids: HashSet<EventId>,
/// Tracking events seen on which relays in the current session
pub seen_on_relays: HashMap<EventId, HashSet<RelayUrl>>,
}
impl EventTracker {
pub fn resent_ids(&self) -> &Vec<Output<EventId>> {
&self.resent_ids
}
pub fn resend_queue(&self) -> &HashMap<EventId, RelayUrl> {
&self.resend_queue
}
pub fn sent_ids(&self) -> &HashSet<EventId> {
&self.sent_ids
}
pub fn seen_on_relays(&self) -> &HashMap<EventId, HashSet<RelayUrl>> {
&self.seen_on_relays
}
}
/// A simple store for all state that is shared across the application.
#[derive(Debug)]
pub struct AppState {
/// The timestamp when the application was initialized.
pub initialized_at: Timestamp,
/// Whether this is the first run of the application.
pub is_first_run: AtomicBool,
/// Whether gift wrap processing is in progress.
pub gift_wrap_processing: AtomicBool,
/// Subscription ID for listening to gift wrap events from relays.
pub gift_wrap_sub_id: SubscriptionId,
/// Auto-close options for relay subscriptions
pub auto_close_opts: Option<SubscribeAutoCloseOptions>,
/// NIP-65: https://github.com/nostr-protocol/nips/blob/master/65.md
pub gossip: RwLock<Gossip>,
/// Tracks activity related to Nostr events
pub event_tracker: RwLock<EventTracker>,
/// Signal channel for communication between Nostr and GPUI
pub signal: Signal,
/// Ingester channel for processing public keys
pub ingester: Ingester,
}
impl Default for AppState {
fn default() -> Self {
Self::new()
}
}
impl AppState {
pub fn new() -> Self {
let first_run = Self::first_run();
let initialized_at = Timestamp::now();
let opts = SubscribeAutoCloseOptions::default().exit_policy(ReqExitPolicy::ExitOnEOSE);
let signal = Signal::default();
let ingester = Ingester::default();
Self {
initialized_at,
signal,
ingester,
is_first_run: AtomicBool::new(first_run),
gift_wrap_sub_id: SubscriptionId::new("inbox"),
gift_wrap_processing: AtomicBool::new(false),
auto_close_opts: Some(opts),
gossip: RwLock::new(Gossip::default()),
event_tracker: RwLock::new(EventTracker::default()),
}
}
pub async fn handle_notifications(&self) -> Result<(), Error> {
let client = nostr_client();
// Get all bootstrapping relays
let mut urls = vec![];
urls.extend(BOOTSTRAP_RELAYS);
urls.extend(SEARCH_RELAYS);
        // Add each relay to the relay pool
for url in urls.into_iter() {
client.add_relay(url).await?;
}
// Establish connection to relays
client.connect().await;
let mut processed_events: HashSet<EventId> = HashSet::new();
let mut challenges: HashSet<Cow<'_, str>> = HashSet::new();
let mut notifications = client.notifications();
while let Ok(notification) = notifications.recv().await {
let RelayPoolNotification::Message { message, relay_url } = notification else {
continue;
};
match message {
RelayMessage::Event { event, .. } => {
// Keep track of which relays have seen this event
{
let mut event_tracker = self.event_tracker.write().await;
event_tracker
.seen_on_relays
.entry(event.id)
.or_default()
.insert(relay_url);
}
// Skip events that have already been processed
if !processed_events.insert(event.id) {
continue;
}
match event.kind {
Kind::RelayList => {
let mut gossip = self.gossip.write().await;
let is_self_authored = Self::is_self_authored(&event).await;
// Update NIP-65 relays for event's public key
gossip.insert(&event);
// Get events if relay list belongs to current user
if is_self_authored {
// Fetch user's metadata event
gossip.subscribe(event.pubkey, Kind::Metadata).await.ok();
// Fetch user's contact list event
gossip.subscribe(event.pubkey, Kind::ContactList).await.ok();
// Fetch user's messaging relays event
gossip.get_nip17(event.pubkey).await.ok();
}
}
Kind::InboxRelays => {
let mut gossip = self.gossip.write().await;
let is_self_authored = Self::is_self_authored(&event).await;
// Update NIP-17 relays for event's public key
gossip.insert(&event);
// Subscribe to gift wrap events if messaging relays belong to the current user
if is_self_authored {
if let Err(e) = gossip.monitor_inbox(event.pubkey).await {
log::error!("Error: {e}");
self.signal.send(SignalKind::MessagingRelaysNotFound).await;
}
}
}
Kind::ContactList => {
let is_self_authored = Self::is_self_authored(&event).await;
if is_self_authored {
let mut gossip = self.gossip.write().await;
let public_keys: HashSet<PublicKey> =
event.tags.public_keys().copied().collect();
gossip.bulk_subscribe(public_keys).await.ok();
}
}
Kind::Metadata => {
let metadata = Metadata::from_json(&event.content).unwrap_or_default();
let profile = Profile::new(event.pubkey, metadata);
self.signal.send(SignalKind::NewProfile(profile)).await;
}
Kind::GiftWrap => {
self.extract_rumor(&event).await;
}
_ => {}
}
}
RelayMessage::EndOfStoredEvents(subscription_id) => {
if *subscription_id == self.gift_wrap_sub_id {
self.signal
.send(SignalKind::GiftWrapStatus(UnwrappingStatus::Processing))
.await;
}
}
RelayMessage::Auth { challenge } => {
if challenges.insert(challenge.clone()) {
                        // Send a signal to the UI to handle the auth request
self.signal
.send(SignalKind::Auth(AuthRequest::new(challenge, relay_url)))
.await;
}
}
RelayMessage::Ok {
event_id, message, ..
} => {
let msg = MachineReadablePrefix::parse(&message);
let mut event_tracker = self.event_tracker.write().await;
// Keep track of events sent by Coop
event_tracker.sent_ids.insert(event_id);
                    // Keep track of events that need to be resent after auth
if let Some(MachineReadablePrefix::AuthRequired) = msg {
event_tracker.resend_queue.insert(event_id, relay_url);
}
}
_ => {}
}
}
Ok(())
}
pub async fn handle_metadata_batching(&self) {
let timeout = Duration::from_millis(METADATA_BATCH_TIMEOUT);
let mut processed_pubkeys: HashSet<PublicKey> = HashSet::new();
let mut batch: HashSet<PublicKey> = HashSet::new();
/// Internal events for the metadata batching system
enum BatchEvent {
PublicKey(PublicKey),
Timeout,
Closed,
}
loop {
let futs = smol::future::or(
async move {
if let Ok(public_key) = self.ingester.receiver().recv_async().await {
BatchEvent::PublicKey(public_key)
} else {
BatchEvent::Closed
}
},
async move {
smol::Timer::after(timeout).await;
BatchEvent::Timeout
},
);
match futs.await {
BatchEvent::PublicKey(public_key) => {
// Prevent duplicate keys from being processed
if processed_pubkeys.insert(public_key) {
batch.insert(public_key);
}
// Process the batch if it's full
if batch.len() >= METADATA_BATCH_LIMIT {
let mut gossip = self.gossip.write().await;
gossip.bulk_subscribe(std::mem::take(&mut batch)).await.ok();
}
}
BatchEvent::Timeout => {
let mut gossip = self.gossip.write().await;
gossip.bulk_subscribe(std::mem::take(&mut batch)).await.ok();
}
BatchEvent::Closed => {
let mut gossip = self.gossip.write().await;
gossip.bulk_subscribe(std::mem::take(&mut batch)).await.ok();
// Exit the current loop
break;
}
}
}
}
async fn is_self_authored(event: &Event) -> bool {
let client = nostr_client();
let Ok(signer) = client.signer().await else {
return false;
};
let Ok(public_key) = signer.get_public_key().await else {
return false;
};
public_key == event.pubkey
}
    /// Stores an unwrapped event in the local database with a reference to the original
async fn set_rumor(&self, id: EventId, rumor: &Event) -> Result<(), Error> {
let client = nostr_client();
// Save unwrapped event
client.database().save_event(rumor).await?;
// Create a reference event pointing to the unwrapped event
let event = EventBuilder::new(Kind::ApplicationSpecificData, "")
.tags(vec![Tag::identifier(id), Tag::event(rumor.id)])
.sign(&Keys::generate())
.await?;
// Save reference event
client.database().save_event(&event).await?;
Ok(())
}
    /// Retrieves a previously unwrapped event from the local database
async fn get_rumor(&self, id: EventId) -> Result<Event, Error> {
let client = nostr_client();
let filter = Filter::new()
.kind(Kind::ApplicationSpecificData)
.identifier(id)
.limit(1);
if let Some(event) = client.database().query(filter).await?.first_owned() {
            let Some(target_id) = event.tags.event_ids().next() else {
                return Err(anyhow!("Reference event has no event id tag."));
            };
if let Some(event) = client.database().event_by_id(target_id).await? {
Ok(event)
} else {
Err(anyhow!("Event not found."))
}
} else {
Err(anyhow!("Event is not cached yet."))
}
}
    /// Unwraps a gift-wrapped event and processes its contents.
async fn extract_rumor(&self, gift_wrap: &Event) {
let client = nostr_client();
let mut rumor: Option<Event> = None;
if let Ok(event) = self.get_rumor(gift_wrap.id).await {
rumor = Some(event);
} else if let Ok(unwrapped) = client.unwrap_gift_wrap(gift_wrap).await {
            // Sign the unwrapped event with randomly generated keys
if let Ok(event) = unwrapped.rumor.sign_with_keys(&Keys::generate()) {
// Save this event to the database for future use.
if let Err(e) = self.set_rumor(gift_wrap.id, &event).await {
log::warn!("Failed to cache unwrapped event: {e}")
}
rumor = Some(event);
}
}
if let Some(event) = rumor {
// Send all pubkeys to the metadata batch to sync data
for public_key in event.tags.public_keys().copied() {
self.ingester.send(public_key).await;
}
match event.created_at >= self.initialized_at {
// New message: send a signal to notify the UI
true => {
self.signal
.send(SignalKind::NewMessage((gift_wrap.id, event)))
.await;
}
// Old message: Coop is probably processing the user's messages during initial load
false => {
self.gift_wrap_processing.store(true, Ordering::Release);
}
}
}
}
fn first_run() -> bool {
let flag = support_dir().join(".first_run");
!flag.exists() && std::fs::write(&flag, "").is_ok()
}
}
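
On the UI side the intended wiring appears to be: run the notification and batching loops in the background and drain the signal channel. A sketch under the assumption that a smol executor and the log crate are available on that side:

use app_state::app_state;
use app_state::state::SignalKind;

fn spawn_background_loops() {
    let state = app_state();

    // Relay pool notifications: gossip bookkeeping, gift wrap unwrapping, auth and OK tracking.
    smol::spawn(async move {
        if let Err(e) = state.handle_notifications().await {
            log::error!("notification loop stopped: {e}");
        }
    })
    .detach();

    // Metadata batching: groups pubkeys from the ingester into bulk subscriptions.
    smol::spawn(state.handle_metadata_batching()).detach();

    // Signal channel: forward state changes to the UI layer.
    smol::spawn(async move {
        while let Ok(signal) = state.signal.receiver().recv_async().await {
            match signal {
                SignalKind::NewMessage((gift_wrap_id, event)) => {
                    log::info!("new message {gift_wrap_id}: kind {:?}", event.kind);
                }
                SignalKind::Auth(request) => {
                    log::info!("relay {} requires auth: {}", request.url, request.challenge);
                }
                other => log::debug!("signal: {other:?}"),
            }
        }
    })
    .detach();
}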