first commit

This commit is contained in:
Michael Dong
2026-02-05 11:24:40 +08:00
commit a98e12f286
144 changed files with 26459 additions and 0 deletions

1
backend_rust/.gitignore vendored Normal file
View File

@@ -0,0 +1 @@
/target

4444
backend_rust/Cargo.lock generated Normal file

File diff suppressed because it is too large Load Diff

34
backend_rust/Cargo.toml Normal file
View File

@@ -0,0 +1,34 @@
[workspace]
members = [".", "migration"]
resolver = "2"
[package]
name = "backend_rust"
version = "0.1.0"
edition = "2024"
[dependencies]
tokio = { version = "1.49.0", features = ["full"] }
actix-web = "4"
serde = { version = "1", features = ["derive"] }
serde_json = "1"
chrono = { version = "0", features = ["serde"] }
sea-orm = { version = "2.0.0-rc", features = ["runtime-tokio-rustls", "sqlx-postgres", "macros", "with-chrono", "with-uuid", "with-json"] }
tracing = "0.1"
tracing-subscriber = "0.3"
anyhow = "1"
thiserror = "1"
bcrypt = "0.17"
jsonwebtoken = "9"
uuid = { version = "1", features = ["v4", "serde"] }
validator = { version = "0.20", features = ["derive"] }
actix-web-httpauth = "0.8"
rand = "0.9"
reqwest = { version = "0.12", features = ["json"] }
urlencoding = "2"
actix-cors = "0.7.1"
actix-files = "0.6"
actix-multipart = "0.7"
futures-util = "0.3"
sea-orm-migration = "2.0.0-rc"
migration = { path = "./migration" }

46
backend_rust/Dockerfile Normal file
View File

@@ -0,0 +1,46 @@
# Build stage
FROM rust:1.85-alpine AS builder
RUN apk add --no-cache musl-dev pkgconfig openssl-dev
WORKDIR /app
# Copy manifests
COPY Cargo.toml Cargo.lock ./
COPY migration/Cargo.toml ./migration/
# Create dummy files to build dependencies
RUN mkdir src && echo "fn main() {}" > src/main.rs
RUN mkdir -p migration/src && echo "fn main() {}" > migration/src/main.rs && echo "" > migration/src/lib.rs
# Build dependencies only
RUN cargo build --release
# Remove dummy files
RUN rm -rf src migration/src
# Copy actual source code
COPY src ./src
COPY migration/src ./migration/src
# Build the actual application
RUN touch src/main.rs migration/src/main.rs migration/src/lib.rs
RUN cargo build --release
# Runtime stage
FROM alpine:3.21
RUN apk add --no-cache ca-certificates libgcc
WORKDIR /app
# Copy the binary from builder
COPY --from=builder /app/target/release/backend_rust /app/backend_rust
COPY --from=builder /app/target/release/migration /app/migration
# Create uploads directory
RUN mkdir -p /app/uploads/avatars
EXPOSE 4000
CMD ["/app/backend_rust"]
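
The two-pass `cargo build` above is a dependency-caching trick: the dummy `main.rs` pass compiles only the dependency graph, so later source-only changes rebuild just the application layer. A rough usage sketch (the image tag is arbitrary, and the container still needs its database connection and other environment configured):

```sh
# Build the image, using backend_rust/ as the build context
docker build -t backend_rust ./backend_rust

# Run the API, publishing the port declared by EXPOSE 4000
docker run --rm -p 4000:4000 backend_rust
```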

0
backend_rust/README.md Normal file
View File

22
backend_rust/migration/Cargo.toml Normal file
View File

@@ -0,0 +1,22 @@
[package]
edition = "2024"
name = "migration"
publish = false
version = "0.1.0"
[lib]
name = "migration"
path = "src/lib.rs"
[dependencies]
tokio = { version = "1", features = ["macros", "rt", "rt-multi-thread"] }
[dependencies.sea-orm-migration]
features = [
# Enable at least one `ASYNC_RUNTIME` and `DATABASE_DRIVER` feature if you want to run migration via CLI.
# View the list of supported features at https://www.sea-ql.org/SeaORM/docs/install-and-config/database-and-async-runtime.
# e.g.
"runtime-tokio-rustls",
"sqlx-postgres",
]
version = "~2.0.0-rc"

47
backend_rust/migration/README.md Normal file
View File

@@ -0,0 +1,47 @@
# Running Migrator CLI
- Generate a new migration file
```sh
cargo run -- generate MIGRATION_NAME
```
- Apply all pending migrations
```sh
cargo run
```
```sh
cargo run -- up
```
- Apply first 10 pending migrations
```sh
cargo run -- up -n 10
```
- Rollback last applied migrations
```sh
cargo run -- down
```
- Rollback last 10 applied migrations
```sh
cargo run -- down -n 10
```
- Drop all tables from the database, then reapply all migrations
```sh
cargo run -- fresh
```
- Rollback all applied migrations, then reapply all migrations
```sh
cargo run -- refresh
```
- Rollback all applied migrations
```sh
cargo run -- reset
```
- Check the status of all migrations
```sh
cargo run -- status
```
- Generate entity from sea-orm-cli
```sh
sea-orm-cli generate entity --database-url postgres://dyc:dycdyc89@192.168.150.142/notify --output-dir ./src/entity --entity-format dense
```
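
The migrator CLI picks up the connection string from the `DATABASE_URL` environment variable (it can also be passed with `-u`), so a typical invocation from the `migration/` directory looks roughly like this; the URL below is a placeholder:

```sh
# Placeholder credentials; point DATABASE_URL at your own Postgres instance
export DATABASE_URL=postgres://user:password@localhost:5432/notify
cargo run -- status
cargo run -- up
```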

42
backend_rust/migration/src/lib.rs Normal file
View File

@@ -0,0 +1,42 @@
pub use sea_orm_migration::prelude::*;
mod m20220101_000001_create_user;
mod m20220101_000002_create_enums;
mod m20220101_000003_create_invite;
mod m20220101_000004_create_recurrence_rule;
mod m20220101_000005_create_todo;
mod m20220101_000006_create_reminder_task;
mod m20220101_000007_create_reminder_task_recipient;
mod m20220101_000008_create_reminder_offset;
mod m20220101_000009_create_notification;
mod m20220101_000010_create_delivery_log;
mod m20260128_000011_modify_todo;
mod m20260129_000012_add_bark_params;
mod m20260129_000013_add_notification_offset_id;
mod m20260129_000014_convert_timestamps_to_timestamptz;
mod m20260129_000015_add_user_invite_id;
pub struct Migrator;
#[async_trait::async_trait]
impl MigratorTrait for Migrator {
fn migrations() -> Vec<Box<dyn MigrationTrait>> {
vec![
Box::new(m20220101_000001_create_user::Migration),
Box::new(m20220101_000002_create_enums::Migration),
Box::new(m20220101_000003_create_invite::Migration),
Box::new(m20220101_000004_create_recurrence_rule::Migration),
Box::new(m20220101_000005_create_todo::Migration),
Box::new(m20220101_000006_create_reminder_task::Migration),
Box::new(m20220101_000007_create_reminder_task_recipient::Migration),
Box::new(m20220101_000008_create_reminder_offset::Migration),
Box::new(m20220101_000009_create_notification::Migration),
Box::new(m20220101_000010_create_delivery_log::Migration),
Box::new(m20260128_000011_modify_todo::Migration),
Box::new(m20260129_000012_add_bark_params::Migration),
Box::new(m20260129_000013_add_notification_offset_id::Migration),
Box::new(m20260129_000014_convert_timestamps_to_timestamptz::Migration),
Box::new(m20260129_000015_add_user_invite_id::Migration),
]
}
}

74
backend_rust/migration/src/m20220101_000001_create_user.rs Normal file
View File

@@ -0,0 +1,74 @@
use sea_orm_migration::prelude::*;
#[derive(DeriveMigrationName)]
pub struct Migration;
#[async_trait::async_trait]
impl MigrationTrait for Migration {
async fn up(&self, manager: &SchemaManager) -> Result<(), DbErr> {
manager
.create_table(
Table::create()
.table(User::Table)
.if_not_exists()
.col(ColumnDef::new(User::Id).uuid().primary_key().not_null())
.col(
ColumnDef::new(User::Username)
.string()
.unique_key()
.not_null(),
)
.col(ColumnDef::new(User::PasswordHash).string().not_null())
.col(ColumnDef::new(User::Avatar).string().null())
.col(
ColumnDef::new(User::Timezone)
.string()
.not_null()
.default("Asia/Shanghai"),
)
.col(ColumnDef::new(User::BarkUrl).string().null())
.col(
ColumnDef::new(User::InappEnabled)
.boolean()
.not_null()
.default(true),
)
.col(
ColumnDef::new(User::BarkEnabled)
.boolean()
.not_null()
.default(false),
)
.col(
ColumnDef::new(User::CreatedAt)
.timestamp()
.not_null()
.extra("DEFAULT NOW()"),
)
.col(ColumnDef::new(User::UpdatedAt).timestamp().not_null())
.to_owned(),
)
.await
}
async fn down(&self, manager: &SchemaManager) -> Result<(), DbErr> {
manager
.drop_table(Table::drop().table(User::Table).to_owned())
.await
}
}
#[derive(DeriveIden)]
enum User {
Table,
Id,
Username,
PasswordHash,
Avatar,
Timezone,
BarkUrl,
InappEnabled,
BarkEnabled,
CreatedAt,
UpdatedAt,
}

137
backend_rust/migration/src/m20220101_000002_create_enums.rs Normal file
View File

@@ -0,0 +1,137 @@
use sea_orm_migration::prelude::*;
use sea_query::extension::postgres::Type;
#[derive(DeriveMigrationName)]
pub struct Migration;
#[async_trait::async_trait]
impl MigrationTrait for Migration {
async fn up(&self, manager: &SchemaManager) -> Result<(), DbErr> {
// Create RecurrenceType enum
manager
.create_type(
Type::create()
.as_enum(RecurrenceType::Type)
.values([
RecurrenceType::Hourly,
RecurrenceType::Daily,
RecurrenceType::Weekly,
RecurrenceType::Monthly,
RecurrenceType::Yearly,
])
.to_owned(),
)
.await?;
// Create TargetType enum
manager
.create_type(
Type::create()
.as_enum(TargetType::Type)
.values([TargetType::Todo, TargetType::ReminderTask])
.to_owned(),
)
.await?;
// Create ChannelType enum
manager
.create_type(
Type::create()
.as_enum(ChannelType::Type)
.values([ChannelType::Inapp, ChannelType::Bark])
.to_owned(),
)
.await?;
// Create NotificationStatus enum
manager
.create_type(
Type::create()
.as_enum(NotificationStatus::Type)
.values([
NotificationStatus::Pending,
NotificationStatus::Queued,
NotificationStatus::Sent,
NotificationStatus::Failed,
])
.to_owned(),
)
.await?;
Ok(())
}
async fn down(&self, manager: &SchemaManager) -> Result<(), DbErr> {
manager
.drop_type(Type::drop().name(NotificationStatus::Type).to_owned())
.await?;
manager
.drop_type(Type::drop().name(ChannelType::Type).to_owned())
.await?;
manager
.drop_type(Type::drop().name(TargetType::Type).to_owned())
.await?;
manager
.drop_type(Type::drop().name(RecurrenceType::Type).to_owned())
.await?;
Ok(())
}
}
// RecurrenceType enum
#[derive(DeriveIden)]
pub enum RecurrenceType {
#[sea_orm(iden = "recurrence_type")]
Type,
#[sea_orm(iden = "hourly")]
Hourly,
#[sea_orm(iden = "daily")]
Daily,
#[sea_orm(iden = "weekly")]
Weekly,
#[sea_orm(iden = "monthly")]
Monthly,
#[sea_orm(iden = "yearly")]
Yearly,
}
// TargetType enum
#[derive(DeriveIden)]
pub enum TargetType {
#[sea_orm(iden = "target_type")]
Type,
#[sea_orm(iden = "todo")]
Todo,
#[sea_orm(iden = "reminder_task")]
ReminderTask,
}
// ChannelType enum
#[derive(DeriveIden)]
pub enum ChannelType {
#[sea_orm(iden = "channel_type")]
Type,
#[sea_orm(iden = "inapp")]
Inapp,
#[sea_orm(iden = "bark")]
Bark,
}
// NotificationStatus enum
#[derive(DeriveIden)]
pub enum NotificationStatus {
#[sea_orm(iden = "notification_status")]
Type,
#[sea_orm(iden = "pending")]
Pending,
#[sea_orm(iden = "queued")]
Queued,
#[sea_orm(iden = "sent")]
Sent,
#[sea_orm(iden = "failed")]
Failed,
}

102
backend_rust/migration/src/m20220101_000003_create_invite.rs Normal file
View File

@@ -0,0 +1,102 @@
use sea_orm_migration::prelude::*;
#[derive(DeriveMigrationName)]
pub struct Migration;
#[async_trait::async_trait]
impl MigrationTrait for Migration {
async fn up(&self, manager: &SchemaManager) -> Result<(), DbErr> {
manager
.create_table(
Table::create()
.table(Invite::Table)
.if_not_exists()
.col(ColumnDef::new(Invite::Id).uuid().primary_key().not_null())
.col(
ColumnDef::new(Invite::Code)
.string()
.unique_key()
.not_null(),
)
.col(ColumnDef::new(Invite::CreatorId).uuid().not_null())
.col(
ColumnDef::new(Invite::MaxUses)
.integer()
.not_null()
.default(5),
)
.col(
ColumnDef::new(Invite::UsedCount)
.integer()
.not_null()
.default(0),
)
.col(ColumnDef::new(Invite::ExpiresAt).timestamp().not_null())
.col(ColumnDef::new(Invite::RevokedAt).timestamp().null())
.col(
ColumnDef::new(Invite::CreatedAt)
.timestamp()
.not_null()
.extra("DEFAULT NOW()"),
)
.foreign_key(
ForeignKey::create()
.name("FK_invite_creator")
.from(Invite::Table, Invite::CreatorId)
.to(User::Table, User::Id)
.on_delete(ForeignKeyAction::Cascade)
.on_update(ForeignKeyAction::Cascade),
)
.to_owned(),
)
.await?;
// Create indexes
manager
.create_index(
Index::create()
.name("IDX_invite_creator_id")
.table(Invite::Table)
.col(Invite::CreatorId)
.to_owned(),
)
.await?;
manager
.create_index(
Index::create()
.name("IDX_invite_expires_at")
.table(Invite::Table)
.col(Invite::ExpiresAt)
.to_owned(),
)
.await?;
Ok(())
}
async fn down(&self, manager: &SchemaManager) -> Result<(), DbErr> {
manager
.drop_table(Table::drop().table(Invite::Table).to_owned())
.await
}
}
#[derive(DeriveIden)]
enum Invite {
Table,
Id,
Code,
CreatorId,
MaxUses,
UsedCount,
ExpiresAt,
RevokedAt,
CreatedAt,
}
#[derive(DeriveIden)]
enum User {
Table,
Id,
}

75
backend_rust/migration/src/m20220101_000004_create_recurrence_rule.rs Normal file
View File

@@ -0,0 +1,75 @@
use sea_orm_migration::prelude::*;
use crate::m20220101_000002_create_enums::RecurrenceType;
#[derive(DeriveMigrationName)]
pub struct Migration;
#[async_trait::async_trait]
impl MigrationTrait for Migration {
async fn up(&self, manager: &SchemaManager) -> Result<(), DbErr> {
manager
.create_table(
Table::create()
.table(RecurrenceRule::Table)
.if_not_exists()
.col(
ColumnDef::new(RecurrenceRule::Id)
.uuid()
.primary_key()
.not_null(),
)
.col(
ColumnDef::new(RecurrenceRule::Type)
.custom(RecurrenceType::Type)
.not_null(),
)
.col(
ColumnDef::new(RecurrenceRule::Interval)
.integer()
.not_null()
.default(1),
)
.col(ColumnDef::new(RecurrenceRule::ByWeekday).integer().null())
.col(ColumnDef::new(RecurrenceRule::ByMonthday).integer().null())
.col(
ColumnDef::new(RecurrenceRule::Timezone)
.string()
.not_null()
.default("Asia/Shanghai"),
)
.col(
ColumnDef::new(RecurrenceRule::CreatedAt)
.timestamp()
.not_null()
.extra("DEFAULT NOW()"),
)
.col(
ColumnDef::new(RecurrenceRule::UpdatedAt)
.timestamp()
.not_null(),
)
.to_owned(),
)
.await
}
async fn down(&self, manager: &SchemaManager) -> Result<(), DbErr> {
manager
.drop_table(Table::drop().table(RecurrenceRule::Table).to_owned())
.await
}
}
#[derive(DeriveIden)]
enum RecurrenceRule {
Table,
Id,
Type,
Interval,
ByWeekday,
ByMonthday,
Timezone,
CreatedAt,
UpdatedAt,
}

102
backend_rust/migration/src/m20220101_000005_create_todo.rs Normal file
View File

@@ -0,0 +1,102 @@
use sea_orm_migration::prelude::*;
#[derive(DeriveMigrationName)]
pub struct Migration;
#[async_trait::async_trait]
impl MigrationTrait for Migration {
async fn up(&self, manager: &SchemaManager) -> Result<(), DbErr> {
manager
.create_table(
Table::create()
.table(Todo::Table)
.if_not_exists()
.col(ColumnDef::new(Todo::Id).uuid().primary_key().not_null())
.col(ColumnDef::new(Todo::OwnerId).uuid().not_null())
.col(ColumnDef::new(Todo::Title).string().not_null())
.col(ColumnDef::new(Todo::Description).string().null())
.col(ColumnDef::new(Todo::DueAt).timestamp().not_null())
.col(ColumnDef::new(Todo::RecurrenceRuleId).uuid().null())
.col(
ColumnDef::new(Todo::CreatedAt)
.timestamp()
.not_null()
.extra("DEFAULT NOW()"),
)
.col(ColumnDef::new(Todo::UpdatedAt).timestamp().not_null())
.foreign_key(
ForeignKey::create()
.name("FK_todo_owner")
.from(Todo::Table, Todo::OwnerId)
.to(User::Table, User::Id)
.on_delete(ForeignKeyAction::Cascade)
.on_update(ForeignKeyAction::Cascade),
)
.foreign_key(
ForeignKey::create()
.name("FK_todo_recurrence_rule")
.from(Todo::Table, Todo::RecurrenceRuleId)
.to(RecurrenceRule::Table, RecurrenceRule::Id)
.on_delete(ForeignKeyAction::SetNull)
.on_update(ForeignKeyAction::Cascade),
)
.to_owned(),
)
.await?;
// Create indexes
manager
.create_index(
Index::create()
.name("IDX_todo_owner_due")
.table(Todo::Table)
.col(Todo::OwnerId)
.col(Todo::DueAt)
.to_owned(),
)
.await?;
manager
.create_index(
Index::create()
.name("IDX_todo_recurrence_rule_id")
.table(Todo::Table)
.col(Todo::RecurrenceRuleId)
.to_owned(),
)
.await?;
Ok(())
}
async fn down(&self, manager: &SchemaManager) -> Result<(), DbErr> {
manager
.drop_table(Table::drop().table(Todo::Table).to_owned())
.await
}
}
#[derive(DeriveIden)]
enum Todo {
Table,
Id,
OwnerId,
Title,
Description,
DueAt,
RecurrenceRuleId,
CreatedAt,
UpdatedAt,
}
#[derive(DeriveIden)]
enum User {
Table,
Id,
}
#[derive(DeriveIden)]
enum RecurrenceRule {
Table,
Id,
}

111
backend_rust/migration/src/m20220101_000006_create_reminder_task.rs Normal file
View File

@@ -0,0 +1,111 @@
use sea_orm_migration::prelude::*;
#[derive(DeriveMigrationName)]
pub struct Migration;
#[async_trait::async_trait]
impl MigrationTrait for Migration {
async fn up(&self, manager: &SchemaManager) -> Result<(), DbErr> {
manager
.create_table(
Table::create()
.table(ReminderTask::Table)
.if_not_exists()
.col(
ColumnDef::new(ReminderTask::Id)
.uuid()
.primary_key()
.not_null(),
)
.col(ColumnDef::new(ReminderTask::CreatorId).uuid().not_null())
.col(ColumnDef::new(ReminderTask::Title).string().not_null())
.col(ColumnDef::new(ReminderTask::Description).string().null())
.col(ColumnDef::new(ReminderTask::DueAt).timestamp().not_null())
.col(ColumnDef::new(ReminderTask::RecurrenceRuleId).uuid().null())
.col(
ColumnDef::new(ReminderTask::CreatedAt)
.timestamp()
.not_null()
.extra("DEFAULT NOW()"),
)
.col(
ColumnDef::new(ReminderTask::UpdatedAt)
.timestamp()
.not_null(),
)
.foreign_key(
ForeignKey::create()
.name("FK_reminder_task_creator")
.from(ReminderTask::Table, ReminderTask::CreatorId)
.to(User::Table, User::Id)
.on_delete(ForeignKeyAction::Cascade)
.on_update(ForeignKeyAction::Cascade),
)
.foreign_key(
ForeignKey::create()
.name("FK_reminder_task_recurrence_rule")
.from(ReminderTask::Table, ReminderTask::RecurrenceRuleId)
.to(RecurrenceRule::Table, RecurrenceRule::Id)
.on_delete(ForeignKeyAction::SetNull)
.on_update(ForeignKeyAction::Cascade),
)
.to_owned(),
)
.await?;
// Create indexes
manager
.create_index(
Index::create()
.name("IDX_reminder_task_creator_due")
.table(ReminderTask::Table)
.col(ReminderTask::CreatorId)
.col(ReminderTask::DueAt)
.to_owned(),
)
.await?;
manager
.create_index(
Index::create()
.name("IDX_reminder_task_recurrence_rule_id")
.table(ReminderTask::Table)
.col(ReminderTask::RecurrenceRuleId)
.to_owned(),
)
.await?;
Ok(())
}
async fn down(&self, manager: &SchemaManager) -> Result<(), DbErr> {
manager
.drop_table(Table::drop().table(ReminderTask::Table).to_owned())
.await
}
}
#[derive(DeriveIden)]
enum ReminderTask {
Table,
Id,
CreatorId,
Title,
Description,
DueAt,
RecurrenceRuleId,
CreatedAt,
UpdatedAt,
}
#[derive(DeriveIden)]
enum User {
Table,
Id,
}
#[derive(DeriveIden)]
enum RecurrenceRule {
Table,
Id,
}

87
backend_rust/migration/src/m20220101_000007_create_reminder_task_recipient.rs Normal file
View File

@@ -0,0 +1,87 @@
use sea_orm_migration::prelude::*;
#[derive(DeriveMigrationName)]
pub struct Migration;
#[async_trait::async_trait]
impl MigrationTrait for Migration {
async fn up(&self, manager: &SchemaManager) -> Result<(), DbErr> {
manager
.create_table(
Table::create()
.table(ReminderTaskRecipient::Table)
.if_not_exists()
.col(
ColumnDef::new(ReminderTaskRecipient::TaskId)
.uuid()
.not_null(),
)
.col(
ColumnDef::new(ReminderTaskRecipient::UserId)
.uuid()
.not_null(),
)
.primary_key(
Index::create()
.col(ReminderTaskRecipient::TaskId)
.col(ReminderTaskRecipient::UserId),
)
.foreign_key(
ForeignKey::create()
.name("FK_reminder_task_recipient_task")
.from(ReminderTaskRecipient::Table, ReminderTaskRecipient::TaskId)
.to(ReminderTask::Table, ReminderTask::Id)
.on_delete(ForeignKeyAction::Cascade)
.on_update(ForeignKeyAction::Cascade),
)
.foreign_key(
ForeignKey::create()
.name("FK_reminder_task_recipient_user")
.from(ReminderTaskRecipient::Table, ReminderTaskRecipient::UserId)
.to(User::Table, User::Id)
.on_delete(ForeignKeyAction::Cascade)
.on_update(ForeignKeyAction::Cascade),
)
.to_owned(),
)
.await?;
// Create index on user_id for reverse lookups
manager
.create_index(
Index::create()
.name("IDX_reminder_task_recipient_user_id")
.table(ReminderTaskRecipient::Table)
.col(ReminderTaskRecipient::UserId)
.to_owned(),
)
.await?;
Ok(())
}
async fn down(&self, manager: &SchemaManager) -> Result<(), DbErr> {
manager
.drop_table(Table::drop().table(ReminderTaskRecipient::Table).to_owned())
.await
}
}
#[derive(DeriveIden)]
enum ReminderTaskRecipient {
Table,
TaskId,
UserId,
}
#[derive(DeriveIden)]
enum ReminderTask {
Table,
Id,
}
#[derive(DeriveIden)]
enum User {
Table,
Id,
}

87
backend_rust/migration/src/m20220101_000008_create_reminder_offset.rs Normal file
View File

@@ -0,0 +1,87 @@
use sea_orm_migration::prelude::*;
use crate::m20220101_000002_create_enums::TargetType;
#[derive(DeriveMigrationName)]
pub struct Migration;
#[async_trait::async_trait]
impl MigrationTrait for Migration {
async fn up(&self, manager: &SchemaManager) -> Result<(), DbErr> {
manager
.create_table(
Table::create()
.table(ReminderOffset::Table)
.if_not_exists()
.col(
ColumnDef::new(ReminderOffset::Id)
.uuid()
.primary_key()
.not_null(),
)
.col(
ColumnDef::new(ReminderOffset::TargetType)
.custom(TargetType::Type)
.not_null(),
)
.col(ColumnDef::new(ReminderOffset::TargetId).uuid().not_null())
.col(
ColumnDef::new(ReminderOffset::OffsetMinutes)
.integer()
.not_null(),
)
.col(
ColumnDef::new(ReminderOffset::ChannelInapp)
.boolean()
.not_null()
.default(true),
)
.col(
ColumnDef::new(ReminderOffset::ChannelBark)
.boolean()
.not_null()
.default(false),
)
.col(
ColumnDef::new(ReminderOffset::CreatedAt)
.timestamp()
.not_null()
.extra("DEFAULT NOW()"),
)
.to_owned(),
)
.await?;
// Create index for polymorphic lookup
manager
.create_index(
Index::create()
.name("IDX_reminder_offset_target")
.table(ReminderOffset::Table)
.col(ReminderOffset::TargetType)
.col(ReminderOffset::TargetId)
.to_owned(),
)
.await?;
Ok(())
}
async fn down(&self, manager: &SchemaManager) -> Result<(), DbErr> {
manager
.drop_table(Table::drop().table(ReminderOffset::Table).to_owned())
.await
}
}
#[derive(DeriveIden)]
enum ReminderOffset {
Table,
Id,
TargetType,
TargetId,
OffsetMinutes,
ChannelInapp,
ChannelBark,
CreatedAt,
}

141
backend_rust/migration/src/m20220101_000009_create_notification.rs Normal file
View File

@@ -0,0 +1,141 @@
use sea_orm_migration::prelude::*;
use crate::m20220101_000002_create_enums::{ChannelType, NotificationStatus, TargetType};
#[derive(DeriveMigrationName)]
pub struct Migration;
#[async_trait::async_trait]
impl MigrationTrait for Migration {
async fn up(&self, manager: &SchemaManager) -> Result<(), DbErr> {
manager
.create_table(
Table::create()
.table(Notification::Table)
.if_not_exists()
.col(
ColumnDef::new(Notification::Id)
.uuid()
.primary_key()
.not_null(),
)
.col(ColumnDef::new(Notification::RecipientId).uuid().not_null())
.col(
ColumnDef::new(Notification::TargetType)
.custom(TargetType::Type)
.not_null(),
)
.col(ColumnDef::new(Notification::TargetId).uuid().not_null())
.col(
ColumnDef::new(Notification::TriggerAt)
.timestamp()
.not_null(),
)
.col(
ColumnDef::new(Notification::Channel)
.custom(ChannelType::Type)
.not_null(),
)
.col(
ColumnDef::new(Notification::Status)
.custom(NotificationStatus::Type)
.not_null()
.default("pending"),
)
.col(ColumnDef::new(Notification::LockedAt).timestamp().null())
.col(ColumnDef::new(Notification::SentAt).timestamp().null())
.col(ColumnDef::new(Notification::ReadAt).timestamp().null())
.col(
ColumnDef::new(Notification::CreatedAt)
.timestamp()
.not_null()
.extra("DEFAULT NOW()"),
)
.col(
ColumnDef::new(Notification::UpdatedAt)
.timestamp()
.not_null(),
)
.foreign_key(
ForeignKey::create()
.name("FK_notification_recipient")
.from(Notification::Table, Notification::RecipientId)
.to(User::Table, User::Id)
.on_delete(ForeignKeyAction::Cascade)
.on_update(ForeignKeyAction::Cascade),
)
.to_owned(),
)
.await?;
// Create unique constraint
manager
.create_index(
Index::create()
.name("UQ_notification_recipient_target_trigger_channel")
.table(Notification::Table)
.col(Notification::RecipientId)
.col(Notification::TargetType)
.col(Notification::TargetId)
.col(Notification::TriggerAt)
.col(Notification::Channel)
.unique()
.to_owned(),
)
.await?;
// Create indexes
manager
.create_index(
Index::create()
.name("IDX_notification_status_trigger")
.table(Notification::Table)
.col(Notification::Status)
.col(Notification::TriggerAt)
.to_owned(),
)
.await?;
manager
.create_index(
Index::create()
.name("IDX_notification_recipient_read")
.table(Notification::Table)
.col(Notification::RecipientId)
.col(Notification::ReadAt)
.to_owned(),
)
.await?;
Ok(())
}
async fn down(&self, manager: &SchemaManager) -> Result<(), DbErr> {
manager
.drop_table(Table::drop().table(Notification::Table).to_owned())
.await
}
}
#[derive(DeriveIden)]
enum Notification {
Table,
Id,
RecipientId,
TargetType,
TargetId,
TriggerAt,
Channel,
Status,
LockedAt,
SentAt,
ReadAt,
CreatedAt,
UpdatedAt,
}
#[derive(DeriveIden)]
enum User {
Table,
Id,
}

94
backend_rust/migration/src/m20220101_000010_create_delivery_log.rs Normal file
View File

@@ -0,0 +1,94 @@
use sea_orm_migration::prelude::*;
use crate::m20220101_000002_create_enums::{ChannelType, NotificationStatus};
#[derive(DeriveMigrationName)]
pub struct Migration;
#[async_trait::async_trait]
impl MigrationTrait for Migration {
async fn up(&self, manager: &SchemaManager) -> Result<(), DbErr> {
manager
.create_table(
Table::create()
.table(DeliveryLog::Table)
.if_not_exists()
.col(
ColumnDef::new(DeliveryLog::Id)
.uuid()
.primary_key()
.not_null(),
)
.col(
ColumnDef::new(DeliveryLog::NotificationId)
.uuid()
.not_null(),
)
.col(ColumnDef::new(DeliveryLog::AttemptNo).integer().not_null())
.col(
ColumnDef::new(DeliveryLog::Channel)
.custom(ChannelType::Type)
.not_null(),
)
.col(
ColumnDef::new(DeliveryLog::Status)
.custom(NotificationStatus::Type)
.not_null(),
)
.col(ColumnDef::new(DeliveryLog::ResponseMeta).json_binary().null())
.col(
ColumnDef::new(DeliveryLog::CreatedAt)
.timestamp()
.not_null()
.extra("DEFAULT NOW()"),
)
.foreign_key(
ForeignKey::create()
.name("FK_delivery_log_notification")
.from(DeliveryLog::Table, DeliveryLog::NotificationId)
.to(Notification::Table, Notification::Id)
.on_delete(ForeignKeyAction::Cascade)
.on_update(ForeignKeyAction::Cascade),
)
.to_owned(),
)
.await?;
// Create index
manager
.create_index(
Index::create()
.name("IDX_delivery_log_notification_id")
.table(DeliveryLog::Table)
.col(DeliveryLog::NotificationId)
.to_owned(),
)
.await?;
Ok(())
}
async fn down(&self, manager: &SchemaManager) -> Result<(), DbErr> {
manager
.drop_table(Table::drop().table(DeliveryLog::Table).to_owned())
.await
}
}
#[derive(DeriveIden)]
enum DeliveryLog {
Table,
Id,
NotificationId,
AttemptNo,
Channel,
Status,
ResponseMeta,
CreatedAt,
}
#[derive(DeriveIden)]
enum Notification {
Table,
Id,
}

38
backend_rust/migration/src/m20260128_000011_modify_todo.rs Normal file
View File

@@ -0,0 +1,38 @@
use sea_orm_migration::prelude::*;
#[derive(DeriveMigrationName)]
pub struct Migration;
#[async_trait::async_trait]
impl MigrationTrait for Migration {
async fn up(&self, manager: &SchemaManager) -> Result<(), DbErr> {
manager
.alter_table(
Table::alter()
.table(Todo::Table)
.add_column(ColumnDef::new(Todo::CheckInAt).timestamp().null())
.add_column(
ColumnDef::new(Todo::CheckInCount)
.integer()
.not_null()
.default(0),
)
.add_column(
ColumnDef::new(Todo::IsCheckedIn)
.boolean()
.not_null()
.default(false),
)
.to_owned(),
)
.await?;
Ok(())
}
}
#[derive(DeriveIden)]
enum Todo {
Table,
CheckInAt,
CheckInCount,
IsCheckedIn,
}

49
backend_rust/migration/src/m20260129_000012_add_bark_params.rs Normal file
View File

@@ -0,0 +1,49 @@
use sea_orm_migration::prelude::*;
#[derive(DeriveMigrationName)]
pub struct Migration;
#[async_trait::async_trait]
impl MigrationTrait for Migration {
async fn up(&self, manager: &SchemaManager) -> Result<(), DbErr> {
manager
.alter_table(
Table::alter()
.table(ReminderOffset::Table)
.add_column(ColumnDef::new(ReminderOffset::BarkTitle).string().null())
.add_column(ColumnDef::new(ReminderOffset::BarkSubtitle).string().null())
.add_column(ColumnDef::new(ReminderOffset::BarkBodyMarkdown).text().null())
.add_column(ColumnDef::new(ReminderOffset::BarkLevel).string().null())
.add_column(ColumnDef::new(ReminderOffset::BarkIcon).string().null())
.to_owned(),
)
.await?;
Ok(())
}
async fn down(&self, manager: &SchemaManager) -> Result<(), DbErr> {
manager
.alter_table(
Table::alter()
.table(ReminderOffset::Table)
.drop_column(ReminderOffset::BarkTitle)
.drop_column(ReminderOffset::BarkSubtitle)
.drop_column(ReminderOffset::BarkBodyMarkdown)
.drop_column(ReminderOffset::BarkLevel)
.drop_column(ReminderOffset::BarkIcon)
.to_owned(),
)
.await?;
Ok(())
}
}
#[derive(DeriveIden)]
enum ReminderOffset {
Table,
BarkTitle,
BarkSubtitle,
BarkBodyMarkdown,
BarkLevel,
BarkIcon,
}

37
backend_rust/migration/src/m20260129_000013_add_notification_offset_id.rs Normal file
View File

@@ -0,0 +1,37 @@
use sea_orm_migration::prelude::*;
#[derive(DeriveMigrationName)]
pub struct Migration;
#[async_trait::async_trait]
impl MigrationTrait for Migration {
async fn up(&self, manager: &SchemaManager) -> Result<(), DbErr> {
manager
.alter_table(
Table::alter()
.table(Notification::Table)
.add_column(ColumnDef::new(Notification::OffsetId).uuid().null())
.to_owned(),
)
.await?;
Ok(())
}
async fn down(&self, manager: &SchemaManager) -> Result<(), DbErr> {
manager
.alter_table(
Table::alter()
.table(Notification::Table)
.drop_column(Notification::OffsetId)
.to_owned(),
)
.await?;
Ok(())
}
}
#[derive(DeriveIden)]
enum Notification {
Table,
OffsetId,
}

143
backend_rust/migration/src/m20260129_000014_convert_timestamps_to_timestamptz.rs Normal file
View File

@@ -0,0 +1,143 @@
use sea_orm_migration::prelude::*;
#[derive(DeriveMigrationName)]
pub struct Migration;
#[async_trait::async_trait]
impl MigrationTrait for Migration {
async fn up(&self, manager: &SchemaManager) -> Result<(), DbErr> {
let db = manager.get_connection();
// User table: created_at, updated_at
db.execute_unprepared(
"ALTER TABLE \"user\"
ALTER COLUMN created_at TYPE TIMESTAMP WITH TIME ZONE USING created_at AT TIME ZONE 'UTC',
ALTER COLUMN updated_at TYPE TIMESTAMP WITH TIME ZONE USING updated_at AT TIME ZONE 'UTC'"
).await?;
// Invite table: expires_at, revoked_at, created_at
db.execute_unprepared(
"ALTER TABLE invite
ALTER COLUMN expires_at TYPE TIMESTAMP WITH TIME ZONE USING expires_at AT TIME ZONE 'UTC',
ALTER COLUMN revoked_at TYPE TIMESTAMP WITH TIME ZONE USING revoked_at AT TIME ZONE 'UTC',
ALTER COLUMN created_at TYPE TIMESTAMP WITH TIME ZONE USING created_at AT TIME ZONE 'UTC'"
).await?;
// RecurrenceRule table: created_at, updated_at
db.execute_unprepared(
"ALTER TABLE recurrence_rule
ALTER COLUMN created_at TYPE TIMESTAMP WITH TIME ZONE USING created_at AT TIME ZONE 'UTC',
ALTER COLUMN updated_at TYPE TIMESTAMP WITH TIME ZONE USING updated_at AT TIME ZONE 'UTC'"
).await?;
// Todo table: due_at, created_at, updated_at, check_in_at
db.execute_unprepared(
"ALTER TABLE todo
ALTER COLUMN due_at TYPE TIMESTAMP WITH TIME ZONE USING due_at AT TIME ZONE 'UTC',
ALTER COLUMN created_at TYPE TIMESTAMP WITH TIME ZONE USING created_at AT TIME ZONE 'UTC',
ALTER COLUMN updated_at TYPE TIMESTAMP WITH TIME ZONE USING updated_at AT TIME ZONE 'UTC',
ALTER COLUMN check_in_at TYPE TIMESTAMP WITH TIME ZONE USING check_in_at AT TIME ZONE 'UTC'"
).await?;
// ReminderTask table: due_at, created_at, updated_at
db.execute_unprepared(
"ALTER TABLE reminder_task
ALTER COLUMN due_at TYPE TIMESTAMP WITH TIME ZONE USING due_at AT TIME ZONE 'UTC',
ALTER COLUMN created_at TYPE TIMESTAMP WITH TIME ZONE USING created_at AT TIME ZONE 'UTC',
ALTER COLUMN updated_at TYPE TIMESTAMP WITH TIME ZONE USING updated_at AT TIME ZONE 'UTC'"
).await?;
// ReminderOffset table: created_at
db.execute_unprepared(
"ALTER TABLE reminder_offset
ALTER COLUMN created_at TYPE TIMESTAMP WITH TIME ZONE USING created_at AT TIME ZONE 'UTC'"
).await?;
// Notification table: trigger_at, locked_at, sent_at, read_at, created_at, updated_at
db.execute_unprepared(
"ALTER TABLE notification
ALTER COLUMN trigger_at TYPE TIMESTAMP WITH TIME ZONE USING trigger_at AT TIME ZONE 'UTC',
ALTER COLUMN locked_at TYPE TIMESTAMP WITH TIME ZONE USING locked_at AT TIME ZONE 'UTC',
ALTER COLUMN sent_at TYPE TIMESTAMP WITH TIME ZONE USING sent_at AT TIME ZONE 'UTC',
ALTER COLUMN read_at TYPE TIMESTAMP WITH TIME ZONE USING read_at AT TIME ZONE 'UTC',
ALTER COLUMN created_at TYPE TIMESTAMP WITH TIME ZONE USING created_at AT TIME ZONE 'UTC',
ALTER COLUMN updated_at TYPE TIMESTAMP WITH TIME ZONE USING updated_at AT TIME ZONE 'UTC'"
).await?;
// DeliveryLog table: created_at
db.execute_unprepared(
"ALTER TABLE delivery_log
ALTER COLUMN created_at TYPE TIMESTAMP WITH TIME ZONE USING created_at AT TIME ZONE 'UTC'"
).await?;
Ok(())
}
async fn down(&self, manager: &SchemaManager) -> Result<(), DbErr> {
let db = manager.get_connection();
// Revert User table
db.execute_unprepared(
"ALTER TABLE \"user\"
ALTER COLUMN created_at TYPE TIMESTAMP USING created_at AT TIME ZONE 'UTC',
ALTER COLUMN updated_at TYPE TIMESTAMP USING updated_at AT TIME ZONE 'UTC'"
).await?;
// Revert Invite table
db.execute_unprepared(
"ALTER TABLE invite
ALTER COLUMN expires_at TYPE TIMESTAMP USING expires_at AT TIME ZONE 'UTC',
ALTER COLUMN revoked_at TYPE TIMESTAMP USING revoked_at AT TIME ZONE 'UTC',
ALTER COLUMN created_at TYPE TIMESTAMP USING created_at AT TIME ZONE 'UTC'"
).await?;
// Revert RecurrenceRule table
db.execute_unprepared(
"ALTER TABLE recurrence_rule
ALTER COLUMN created_at TYPE TIMESTAMP USING created_at AT TIME ZONE 'UTC',
ALTER COLUMN updated_at TYPE TIMESTAMP USING updated_at AT TIME ZONE 'UTC'"
).await?;
// Revert Todo table
db.execute_unprepared(
"ALTER TABLE todo
ALTER COLUMN due_at TYPE TIMESTAMP USING due_at AT TIME ZONE 'UTC',
ALTER COLUMN created_at TYPE TIMESTAMP USING created_at AT TIME ZONE 'UTC',
ALTER COLUMN updated_at TYPE TIMESTAMP USING updated_at AT TIME ZONE 'UTC',
ALTER COLUMN check_in_at TYPE TIMESTAMP USING check_in_at AT TIME ZONE 'UTC'"
).await?;
// Revert ReminderTask table
db.execute_unprepared(
"ALTER TABLE reminder_task
ALTER COLUMN due_at TYPE TIMESTAMP USING due_at AT TIME ZONE 'UTC',
ALTER COLUMN created_at TYPE TIMESTAMP USING created_at AT TIME ZONE 'UTC',
ALTER COLUMN updated_at TYPE TIMESTAMP USING updated_at AT TIME ZONE 'UTC'"
).await?;
// Revert ReminderOffset table
db.execute_unprepared(
"ALTER TABLE reminder_offset
ALTER COLUMN created_at TYPE TIMESTAMP USING created_at AT TIME ZONE 'UTC'"
).await?;
// Revert Notification table
db.execute_unprepared(
"ALTER TABLE notification
ALTER COLUMN trigger_at TYPE TIMESTAMP USING trigger_at AT TIME ZONE 'UTC',
ALTER COLUMN locked_at TYPE TIMESTAMP USING locked_at AT TIME ZONE 'UTC',
ALTER COLUMN sent_at TYPE TIMESTAMP USING sent_at AT TIME ZONE 'UTC',
ALTER COLUMN read_at TYPE TIMESTAMP USING read_at AT TIME ZONE 'UTC',
ALTER COLUMN created_at TYPE TIMESTAMP USING created_at AT TIME ZONE 'UTC',
ALTER COLUMN updated_at TYPE TIMESTAMP USING updated_at AT TIME ZONE 'UTC'"
).await?;
// Revert DeliveryLog table
db.execute_unprepared(
"ALTER TABLE delivery_log
ALTER COLUMN created_at TYPE TIMESTAMP USING created_at AT TIME ZONE 'UTC'"
).await?;
Ok(())
}
}

86
backend_rust/migration/src/m20260129_000015_add_user_invite_id.rs Normal file
View File

@@ -0,0 +1,86 @@
use sea_orm_migration::prelude::*;
#[derive(DeriveMigrationName)]
pub struct Migration;
#[async_trait::async_trait]
impl MigrationTrait for Migration {
async fn up(&self, manager: &SchemaManager) -> Result<(), DbErr> {
// Add invite_id column to user table
manager
.alter_table(
Table::alter()
.table(User::Table)
.add_column(ColumnDef::new(User::InviteId).uuid().null())
.to_owned(),
)
.await?;
// Add foreign key constraint
manager
.create_foreign_key(
ForeignKey::create()
.name("fk_user_invite_id")
.from(User::Table, User::InviteId)
.to(Invite::Table, Invite::Id)
.on_delete(ForeignKeyAction::SetNull)
.on_update(ForeignKeyAction::Cascade)
.to_owned(),
)
.await?;
// Add index for better query performance
manager
.create_index(
Index::create()
.name("idx_user_invite_id")
.table(User::Table)
.col(User::InviteId)
.to_owned(),
)
.await?;
Ok(())
}
async fn down(&self, manager: &SchemaManager) -> Result<(), DbErr> {
// Drop index
manager
.drop_index(Index::drop().name("idx_user_invite_id").to_owned())
.await?;
// Drop foreign key
manager
.drop_foreign_key(
ForeignKey::drop()
.table(User::Table)
.name("fk_user_invite_id")
.to_owned(),
)
.await?;
// Drop column
manager
.alter_table(
Table::alter()
.table(User::Table)
.drop_column(User::InviteId)
.to_owned(),
)
.await?;
Ok(())
}
}
#[derive(DeriveIden)]
enum User {
Table,
InviteId,
}
#[derive(DeriveIden)]
enum Invite {
Table,
Id,
}

6
backend_rust/migration/src/main.rs Normal file
View File

@@ -0,0 +1,6 @@
use sea_orm_migration::prelude::*;
#[tokio::main]
async fn main() {
cli::run_cli(migration::Migrator).await;
}

161
backend_rust/src/api/auth.rs Normal file
View File

@@ -0,0 +1,161 @@
use actix_web::{HttpResponse, Responder, Scope, post, web};
use sea_orm::{ActiveModelTrait, ColumnTrait, EntityTrait, QueryFilter, Set, TransactionTrait};
use serde::{Deserialize, Serialize};
use uuid::Uuid;
use crate::app_data::AppData;
use crate::entity::{invite, user};
use crate::error::ApiError;
use crate::middleware::auth::create_token;
#[derive(Debug, Deserialize)]
pub struct RegisterRequest {
pub username: String,
pub password: String,
pub invite_code: String,
}
#[derive(Debug, Deserialize)]
pub struct LoginRequest {
pub username: String,
pub password: String,
}
#[derive(Debug, Serialize)]
pub struct AuthResponse {
pub token: String,
pub user: UserInfo,
}
#[derive(Debug, Serialize)]
pub struct UserInfo {
pub id: Uuid,
pub username: String,
}
#[post("/register")]
async fn register(
app_data: web::Data<AppData>,
body: web::Json<RegisterRequest>,
) -> Result<impl Responder, ApiError> {
if body.username.len() < 3 {
return Err(ApiError::BadRequest("Invalid payload".to_string()));
}
if body.password.len() < 6 {
return Err(ApiError::BadRequest("Invalid payload".to_string()));
}
if body.invite_code.len() < 4 {
return Err(ApiError::BadRequest("Invalid payload".to_string()));
}
let now = chrono::Utc::now().fixed_offset();
let result = app_data
.db
.transaction::<_, (Uuid, String), ApiError>(|txn| {
let username = body.username.clone();
let password = body.password.clone();
let invite_code = body.invite_code.clone();
Box::pin(async move {
// Find valid invite
let inv = invite::Entity::find()
.filter(invite::Column::Code.eq(&invite_code))
.filter(invite::Column::RevokedAt.is_null())
.filter(invite::Column::ExpiresAt.gt(now))
.one(txn)
.await?
.ok_or_else(|| ApiError::BadRequest("Invalid invite".to_string()))?;
if inv.used_count >= inv.max_uses {
return Err(ApiError::BadRequest("Invalid invite".to_string()));
}
// Check username exists
let existing = user::Entity::find()
.filter(user::Column::Username.eq(&username))
.one(txn)
.await?;
if existing.is_some() {
return Err(ApiError::Conflict("Username taken".to_string()));
}
// Create user
let password_hash = bcrypt::hash(&password, 10)?;
let user_id = Uuid::new_v4();
let invite_id = inv.id;
let new_user = user::ActiveModel {
id: Set(user_id),
username: Set(username.clone()),
password_hash: Set(password_hash),
avatar: Set(None),
timezone: Set("Asia/Shanghai".to_string()),
bark_url: Set(None),
inapp_enabled: Set(true),
bark_enabled: Set(false),
invite_id: Set(Some(invite_id)),
created_at: Set(now),
updated_at: Set(now),
};
new_user.insert(txn).await?;
// Update invite used count
let mut inv_active: invite::ActiveModel = inv.into();
inv_active.used_count = Set(inv_active.used_count.unwrap() + 1);
inv_active.update(txn).await?;
Ok((user_id, username))
})
})
.await
.map_err(|e| match e {
sea_orm::TransactionError::Connection(e) => ApiError::Internal(e.to_string()),
sea_orm::TransactionError::Transaction(e) => e,
})?;
let token = create_token(result.0, &app_data.jwt_secret)?;
Ok(HttpResponse::Ok().json(AuthResponse {
token,
user: UserInfo {
id: result.0,
username: result.1,
},
}))
}
#[post("/login")]
async fn login(
app_data: web::Data<AppData>,
body: web::Json<LoginRequest>,
) -> Result<impl Responder, ApiError> {
if body.username.len() < 3 || body.password.len() < 6 {
return Err(ApiError::BadRequest("Invalid payload".to_string()));
}
let user = user::Entity::find()
.filter(user::Column::Username.eq(&body.username))
.one(&app_data.db)
.await?
.ok_or_else(|| ApiError::Unauthorized("Invalid credentials".to_string()))?;
let valid = bcrypt::verify(&body.password, &user.password_hash)?;
if !valid {
return Err(ApiError::Unauthorized("Invalid credentials".to_string()));
}
let token = create_token(user.id, &app_data.jwt_secret)?;
Ok(HttpResponse::Ok().json(AuthResponse {
token,
user: UserInfo {
id: user.id,
username: user.username,
},
}))
}
pub fn routes() -> Scope {
web::scope("/api/auth").service(register).service(login)
}
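
For a quick smoke test of these routes, a hedged curl sketch (host and port assumed from the Dockerfile's `EXPOSE 4000`; the invite code is a placeholder created via the invites API):

```sh
# Register a new account with an invite code
curl -X POST http://localhost:4000/api/auth/register \
  -H 'Content-Type: application/json' \
  -d '{"username":"alice","password":"secret123","invite_code":"INV-abc123"}'

# Log in; the response JSON carries the JWT under "token"
curl -X POST http://localhost:4000/api/auth/login \
  -H 'Content-Type: application/json' \
  -d '{"username":"alice","password":"secret123"}'
```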

10
backend_rust/src/api/health.rs Normal file
View File

@@ -0,0 +1,10 @@
use actix_web::{HttpResponse, Responder, Scope, get, web};
#[get("")]
async fn health() -> impl Responder {
HttpResponse::Ok().body("OK")
}
pub fn routes() -> Scope {
web::scope("/health").service(health)
}
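
A liveness check against this route is a plain unauthenticated GET (port assumed from the Dockerfile):

```sh
curl http://localhost:4000/health   # expected body: OK
```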

167
backend_rust/src/api/invites.rs Normal file
View File

@@ -0,0 +1,167 @@
use actix_web::{HttpResponse, Responder, Scope, get, post, web};
use rand::{Rng, distr::Alphanumeric};
use sea_orm::{ActiveModelTrait, ColumnTrait, EntityTrait, QueryFilter, QueryOrder, Set};
use serde::{Deserialize, Serialize};
use uuid::Uuid;
use crate::app_data::AppData;
use crate::entity::{invite, user};
use crate::error::ApiError;
use crate::middleware::auth::AuthUser;
#[derive(Debug, Deserialize)]
pub struct CreateInviteRequest {
pub max_uses: Option<i32>,
pub expires_in_days: Option<i32>,
}
#[derive(Debug, Serialize)]
pub struct OkResponse {
pub ok: bool,
}
#[derive(Debug, Serialize)]
#[serde(rename_all = "camelCase")]
pub struct RegisteredUserInfo {
pub id: Uuid,
pub username: String,
pub avatar: Option<String>,
pub created_at: chrono::DateTime<chrono::FixedOffset>,
}
#[derive(Debug, Serialize)]
#[serde(rename_all = "camelCase")]
pub struct InviteWithUsers {
pub id: Uuid,
pub code: String,
pub creator_id: Uuid,
pub max_uses: i32,
pub used_count: i32,
pub expires_at: chrono::DateTime<chrono::FixedOffset>,
pub revoked_at: Option<chrono::DateTime<chrono::FixedOffset>>,
pub created_at: chrono::DateTime<chrono::FixedOffset>,
pub registered_users: Vec<RegisteredUserInfo>,
}
#[post("")]
async fn create_invite(
app_data: web::Data<AppData>,
auth: AuthUser,
body: web::Json<CreateInviteRequest>,
) -> Result<impl Responder, ApiError> {
let max_uses = body.max_uses.unwrap_or(5).clamp(1, 20);
let expires_in_days = body.expires_in_days.unwrap_or(7).clamp(1, 30);
let expires_at =
chrono::Utc::now().fixed_offset() + chrono::Duration::days(expires_in_days as i64);
let code = generate_invite_code();
let new_invite = invite::ActiveModel {
id: Set(Uuid::new_v4()),
code: Set(code),
creator_id: Set(auth.user_id),
max_uses: Set(max_uses),
used_count: Set(0),
expires_at: Set(expires_at),
revoked_at: Set(None),
created_at: Set(chrono::Utc::now().fixed_offset()),
};
let result = new_invite.insert(&app_data.db).await?;
Ok(HttpResponse::Ok().json(result))
}
#[get("")]
async fn list_invites(
app_data: web::Data<AppData>,
auth: AuthUser,
) -> Result<impl Responder, ApiError> {
let invites = invite::Entity::find()
.filter(invite::Column::CreatorId.eq(auth.user_id))
.order_by_desc(invite::Column::CreatedAt)
.all(&app_data.db)
.await?;
Ok(HttpResponse::Ok().json(invites))
}
#[get("/{id}")]
async fn get_invite(
app_data: web::Data<AppData>,
auth: AuthUser,
path: web::Path<Uuid>,
) -> Result<impl Responder, ApiError> {
let id = path.into_inner();
let invite = invite::Entity::find_by_id(id)
.filter(invite::Column::CreatorId.eq(auth.user_id))
.one(&app_data.db)
.await?
.ok_or_else(|| ApiError::NotFound("Invite not found".to_string()))?;
// Get users who registered with this invite
let registered_users = user::Entity::find()
.filter(user::Column::InviteId.eq(invite.id))
.order_by_asc(user::Column::CreatedAt)
.all(&app_data.db)
.await?;
let registered_users_info: Vec<RegisteredUserInfo> = registered_users
.into_iter()
.map(|u| RegisteredUserInfo {
id: u.id,
username: u.username,
avatar: u.avatar,
created_at: u.created_at,
})
.collect();
Ok(HttpResponse::Ok().json(InviteWithUsers {
id: invite.id,
code: invite.code,
creator_id: invite.creator_id,
max_uses: invite.max_uses,
used_count: invite.used_count,
expires_at: invite.expires_at,
revoked_at: invite.revoked_at,
created_at: invite.created_at,
registered_users: registered_users_info,
}))
}
#[post("/{id}/revoke")]
async fn revoke_invite(
app_data: web::Data<AppData>,
auth: AuthUser,
path: web::Path<Uuid>,
) -> Result<impl Responder, ApiError> {
let id = path.into_inner();
let invite = invite::Entity::find_by_id(id)
.filter(invite::Column::CreatorId.eq(auth.user_id))
.one(&app_data.db)
.await?
.ok_or_else(|| ApiError::NotFound("Invite not found".to_string()))?;
let mut active: invite::ActiveModel = invite.into();
active.revoked_at = Set(Some(chrono::Utc::now().fixed_offset()));
active.update(&app_data.db).await?;
Ok(HttpResponse::Ok().json(OkResponse { ok: true }))
}
fn generate_invite_code() -> String {
let mut rng = rand::rng();
let suffix: String = (0..6).map(|_| rng.sample(Alphanumeric) as char).collect();
format!("INV-{}", suffix)
}
pub fn routes() -> Scope {
web::scope("/api/invites")
.service(create_invite)
.service(list_invites)
.service(get_invite)
.service(revoke_invite)
}
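
Assuming the `AuthUser` extractor (defined in the auth middleware, not shown in this commit excerpt) reads a Bearer token, creating and inspecting invites might look like this; `$TOKEN` and the UUID are placeholders:

```sh
# Create an invite (request fields are snake_case; values are clamped to 1-20 uses / 1-30 days)
curl -X POST http://localhost:4000/api/invites \
  -H "Authorization: Bearer $TOKEN" \
  -H 'Content-Type: application/json' \
  -d '{"max_uses":5,"expires_in_days":7}'

# List my invites, or fetch one with the users who registered through it
curl -H "Authorization: Bearer $TOKEN" http://localhost:4000/api/invites
curl -H "Authorization: Bearer $TOKEN" http://localhost:4000/api/invites/<invite-uuid>
```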

216
backend_rust/src/api/me.rs Normal file
View File

@@ -0,0 +1,216 @@
use actix_multipart::Multipart;
use actix_web::{HttpResponse, Responder, Scope, get, post, put, web};
use futures_util::StreamExt;
use sea_orm::{ActiveModelTrait, EntityTrait, Set};
use serde::{Deserialize, Serialize};
use std::io::Write;
use uuid::Uuid;
use crate::app_data::AppData;
use crate::entity::user;
use crate::error::ApiError;
use crate::middleware::auth::AuthUser;
#[derive(Debug, Serialize)]
#[serde(rename_all = "camelCase")]
pub struct MeResponse {
pub id: Uuid,
pub username: String,
pub avatar: Option<String>,
pub timezone: String,
pub bark_url: Option<String>,
pub inapp_enabled: bool,
pub bark_enabled: bool,
}
#[derive(Debug, Deserialize)]
#[serde(rename_all = "camelCase")]
pub struct UpdateSettingsRequest {
pub avatar: Option<String>,
pub timezone: Option<String>,
pub bark_url: Option<String>,
pub inapp_enabled: Option<bool>,
pub bark_enabled: Option<bool>,
}
#[get("")]
async fn get_me(
app_data: web::Data<AppData>,
auth: AuthUser,
) -> Result<impl Responder, ApiError> {
let user = user::Entity::find_by_id(auth.user_id)
.one(&app_data.db)
.await?
.ok_or_else(|| ApiError::NotFound("User not found".to_string()))?;
Ok(HttpResponse::Ok().json(MeResponse {
id: user.id,
username: user.username,
avatar: user.avatar,
timezone: user.timezone,
bark_url: user.bark_url,
inapp_enabled: user.inapp_enabled,
bark_enabled: user.bark_enabled,
}))
}
#[put("/settings")]
async fn update_settings(
app_data: web::Data<AppData>,
auth: AuthUser,
body: web::Json<UpdateSettingsRequest>,
) -> Result<impl Responder, ApiError> {
let user = user::Entity::find_by_id(auth.user_id)
.one(&app_data.db)
.await?
.ok_or_else(|| ApiError::NotFound("User not found".to_string()))?;
let mut active: user::ActiveModel = user.into();
if let Some(avatar) = &body.avatar {
active.avatar = Set(Some(avatar.clone()));
}
if let Some(timezone) = &body.timezone {
active.timezone = Set(timezone.clone());
}
if let Some(bark_url) = &body.bark_url {
active.bark_url = Set(Some(bark_url.clone()));
}
if let Some(inapp_enabled) = body.inapp_enabled {
active.inapp_enabled = Set(inapp_enabled);
}
if let Some(bark_enabled) = body.bark_enabled {
active.bark_enabled = Set(bark_enabled);
}
active.updated_at = Set(chrono::Utc::now().fixed_offset());
let updated = active.update(&app_data.db).await?;
Ok(HttpResponse::Ok().json(MeResponse {
id: updated.id,
username: updated.username,
avatar: updated.avatar,
timezone: updated.timezone,
bark_url: updated.bark_url,
inapp_enabled: updated.inapp_enabled,
bark_enabled: updated.bark_enabled,
}))
}
#[derive(Debug, Serialize)]
#[serde(rename_all = "camelCase")]
pub struct UploadAvatarResponse {
pub avatar_url: String,
}
/// Upload a user avatar
/// Supported formats: jpg, jpeg, png, gif, webp
/// Maximum file size: 5MB
#[post("/avatar")]
async fn upload_avatar(
app_data: web::Data<AppData>,
auth: AuthUser,
mut payload: Multipart,
) -> Result<impl Responder, ApiError> {
const MAX_FILE_SIZE: usize = 5 * 1024 * 1024; // 5MB
const ALLOWED_EXTENSIONS: &[&str] = &["jpg", "jpeg", "png", "gif", "webp"];
let mut file_data: Option<(Vec<u8>, String)> = None;
// Parse the multipart form data
while let Some(item) = payload.next().await {
let mut field = item.map_err(|e| ApiError::BadRequest(format!("Multipart error: {}", e)))?;
let content_disposition = field
.content_disposition()
.ok_or_else(|| ApiError::BadRequest("Missing content disposition".to_string()))?;
let field_name = content_disposition.get_name().unwrap_or("");
if field_name != "avatar" {
continue;
}
// Get the filename and extension
let filename = content_disposition
.get_filename()
.ok_or_else(|| ApiError::BadRequest("Missing filename".to_string()))?;
let extension: String = filename
.rsplit('.')
.next()
.map(|s| s.to_lowercase())
.ok_or_else(|| ApiError::BadRequest("Invalid filename".to_string()))?;
if !ALLOWED_EXTENSIONS.contains(&extension.as_str()) {
return Err(ApiError::BadRequest(format!(
"Unsupported file format. Allowed: {}",
ALLOWED_EXTENSIONS.join(", ")
)));
}
// Read the file contents
let mut data = Vec::new();
while let Some(chunk) = field.next().await {
let chunk =
chunk.map_err(|e| ApiError::BadRequest(format!("Error reading chunk: {}", e)))?;
if data.len() + chunk.len() > MAX_FILE_SIZE {
return Err(ApiError::BadRequest(format!(
"File too large. Max size: {}MB",
MAX_FILE_SIZE / 1024 / 1024
)));
}
data.extend_from_slice(&chunk);
}
file_data = Some((data, extension));
break;
}
let (data, extension) = file_data.ok_or_else(|| {
ApiError::BadRequest("No avatar file provided. Use field name 'avatar'".to_string())
})?;
// Generate a unique filename
let file_id = Uuid::new_v4();
let filename = format!("{}.{}", file_id, extension);
let file_path = app_data.upload_dir.join("avatars").join(&filename);
// Save the file
let mut file = std::fs::File::create(&file_path)
.map_err(|e| ApiError::Internal(format!("Failed to create file: {}", e)))?;
file.write_all(&data)
.map_err(|e| ApiError::Internal(format!("Failed to write file: {}", e)))?;
// Build the avatar URL
let avatar_url = format!("{}/uploads/avatars/{}", app_data.base_url, filename);
// Update the user's avatar
let user = user::Entity::find_by_id(auth.user_id)
.one(&app_data.db)
.await?
.ok_or_else(|| ApiError::NotFound("User not found".to_string()))?;
// If the user already had an avatar, try to delete the old file
if let Some(old_avatar) = &user.avatar {
if let Some(old_filename) = old_avatar.rsplit('/').next() {
let old_path = app_data.upload_dir.join("avatars").join(old_filename);
// Ignore deletion errors; the old file may not exist
let _ = std::fs::remove_file(old_path);
}
}
let mut active: user::ActiveModel = user.into();
active.avatar = Set(Some(avatar_url.clone()));
active.updated_at = Set(chrono::Utc::now().fixed_offset());
active.update(&app_data.db).await?;
Ok(HttpResponse::Ok().json(UploadAvatarResponse { avatar_url }))
}
pub fn routes() -> Scope {
web::scope("/api/me")
.service(get_me)
.service(update_settings)
.service(upload_avatar)
}
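
The avatar endpoint expects a multipart form whose field is named `avatar`; a sketch with curl (Bearer auth assumed as above, file path is a placeholder):

```sh
# Upload a new avatar (jpg/jpeg/png/gif/webp, at most 5MB)
curl -X POST http://localhost:4000/api/me/avatar \
  -H "Authorization: Bearer $TOKEN" \
  -F "avatar=@./photo.png"
```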

8
backend_rust/src/api/mod.rs Normal file
View File

@@ -0,0 +1,8 @@
pub mod auth;
pub mod health;
pub mod invites;
pub mod me;
pub mod notifications;
pub mod reminder_tasks;
pub mod todos;
pub mod users;

95
backend_rust/src/api/notifications.rs Normal file
View File

@@ -0,0 +1,95 @@
use actix_web::{HttpResponse, Responder, Scope, get, post, web};
use sea_orm::{ActiveModelTrait, ColumnTrait, EntityTrait, QueryFilter, QueryOrder, Set};
use serde::{Deserialize, Serialize};
use uuid::Uuid;
use crate::app_data::AppData;
use crate::entity::notification;
use crate::error::ApiError;
use crate::middleware::auth::AuthUser;
#[derive(Debug, Deserialize)]
pub struct ListQuery {
pub status: Option<String>,
}
#[derive(Debug, Serialize)]
pub struct OkResponse {
pub ok: bool,
}
#[get("")]
async fn list_notifications(
app_data: web::Data<AppData>,
auth: AuthUser,
query: web::Query<ListQuery>,
) -> Result<impl Responder, ApiError> {
let status = query.status.as_deref().unwrap_or("all");
let mut q =
notification::Entity::find().filter(notification::Column::RecipientId.eq(auth.user_id));
if status == "unread" {
q = q.filter(notification::Column::ReadAt.is_null());
}
let notifications = q
.order_by_desc(notification::Column::TriggerAt)
.all(&app_data.db)
.await?;
Ok(HttpResponse::Ok().json(notifications))
}
#[post("/{id}/read")]
async fn mark_read(
app_data: web::Data<AppData>,
auth: AuthUser,
path: web::Path<Uuid>,
) -> Result<impl Responder, ApiError> {
let id = path.into_inner();
let notif = notification::Entity::find_by_id(id)
.filter(notification::Column::RecipientId.eq(auth.user_id))
.one(&app_data.db)
.await?
.ok_or_else(|| ApiError::NotFound("Not found".to_string()))?;
let mut active: notification::ActiveModel = notif.into();
active.read_at = Set(Some(chrono::Utc::now().fixed_offset()));
active.updated_at = Set(chrono::Utc::now().fixed_offset());
active.update(&app_data.db).await?;
Ok(HttpResponse::Ok().json(OkResponse { ok: true }))
}
#[post("/read-all")]
async fn mark_all_read(
app_data: web::Data<AppData>,
auth: AuthUser,
) -> Result<impl Responder, ApiError> {
let now = chrono::Utc::now().fixed_offset();
notification::Entity::update_many()
.filter(notification::Column::RecipientId.eq(auth.user_id))
.filter(notification::Column::ReadAt.is_null())
.col_expr(
notification::Column::ReadAt,
sea_orm::sea_query::Expr::value(now),
)
.col_expr(
notification::Column::UpdatedAt,
sea_orm::sea_query::Expr::value(now),
)
.exec(&app_data.db)
.await?;
Ok(HttpResponse::Ok().json(OkResponse { ok: true }))
}
pub fn routes() -> Scope {
web::scope("/api/notifications")
.service(list_notifications)
.service(mark_all_read)
.service(mark_read)
}
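
A hedged sketch of exercising these routes (Bearer auth assumed; the notification id is a placeholder):

```sh
# List unread notifications, mark one read, then mark everything read
curl -H "Authorization: Bearer $TOKEN" "http://localhost:4000/api/notifications?status=unread"
curl -X POST -H "Authorization: Bearer $TOKEN" http://localhost:4000/api/notifications/<notification-uuid>/read
curl -X POST -H "Authorization: Bearer $TOKEN" http://localhost:4000/api/notifications/read-all
```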

451
backend_rust/src/api/reminder_tasks.rs Normal file
View File

@@ -0,0 +1,451 @@
use actix_web::{HttpResponse, Responder, Scope, delete, get, post, put, web};
use sea_orm::{
ActiveModelTrait, ColumnTrait, EntityTrait, QueryFilter, QueryOrder, Set, TransactionTrait,
};
use serde::{Deserialize, Serialize};
use uuid::Uuid;
use crate::app_data::AppData;
use crate::entity::sea_orm_active_enums::{RecurrenceType, TargetType};
use crate::entity::{recurrence_rule, reminder_offset, reminder_task, reminder_task_recipient};
use crate::error::ApiError;
use crate::middleware::auth::AuthUser;
use crate::timer::WorkerCommand;
#[derive(Debug, Deserialize)]
#[serde(rename_all = "camelCase")]
pub struct RecurrenceRuleInput {
pub r#type: String,
pub interval: Option<i32>,
pub by_weekday: Option<i32>,
pub by_monthday: Option<i32>,
pub timezone: Option<String>,
}
#[derive(Debug, Deserialize)]
#[serde(rename_all = "camelCase")]
pub struct OffsetInput {
pub offset_minutes: i32,
pub channel_inapp: Option<bool>,
pub channel_bark: Option<bool>,
/// Custom title for Bark push notification
pub bark_title: Option<String>,
/// Custom subtitle for Bark push notification
pub bark_subtitle: Option<String>,
/// Markdown content for Bark push (overrides body if set)
pub bark_body_markdown: Option<String>,
/// Alert level: active, timeSensitive, passive, critical
pub bark_level: Option<String>,
/// Custom icon URL for Bark push
pub bark_icon: Option<String>,
}
#[derive(Debug, Deserialize)]
#[serde(rename_all = "camelCase")]
pub struct ReminderTaskInput {
pub title: String,
pub description: Option<String>,
pub due_at: chrono::DateTime<chrono::Utc>,
pub recipient_ids: Vec<Uuid>,
pub recurrence_rule: Option<RecurrenceRuleInput>,
pub offsets: Option<Vec<OffsetInput>>,
}
#[derive(Debug, Serialize)]
pub struct OkResponse {
pub ok: bool,
}
#[get("")]
async fn list_tasks(
app_data: web::Data<AppData>,
auth: AuthUser,
) -> Result<impl Responder, ApiError> {
let items = reminder_task::Entity::find()
.filter(reminder_task::Column::CreatorId.eq(auth.user_id))
.order_by_desc(reminder_task::Column::DueAt)
.find_also_related(recurrence_rule::Entity)
.all(&app_data.db)
.await?;
let task_ids: Vec<Uuid> = items.iter().map(|(t, _)| t.id).collect();
let recipients = reminder_task_recipient::Entity::find()
.filter(reminder_task_recipient::Column::TaskId.is_in(task_ids.clone()))
.all(&app_data.db)
.await?;
let offsets = reminder_offset::Entity::find()
.filter(reminder_offset::Column::TargetType.eq(TargetType::ReminderTask))
.filter(reminder_offset::Column::TargetId.is_in(task_ids))
.all(&app_data.db)
.await?;
let result: Vec<serde_json::Value> = items
.into_iter()
.map(|(t, rule)| {
let task_recipients: Vec<_> = recipients
.iter()
.filter(|r| r.task_id == t.id)
.cloned()
.collect();
let task_offsets: Vec<_> = offsets
.iter()
.filter(|o| o.target_id == t.id)
.cloned()
.collect();
serde_json::json!({
"id": t.id,
"creatorId": t.creator_id,
"title": t.title,
"description": t.description,
"dueAt": t.due_at,
"recurrenceRuleId": t.recurrence_rule_id,
"createdAt": t.created_at,
"updatedAt": t.updated_at,
"recurrenceRule": rule,
"recipients": task_recipients,
"offsets": task_offsets,
})
})
.collect();
Ok(HttpResponse::Ok().json(result))
}
#[post("")]
async fn create_task(
app_data: web::Data<AppData>,
auth: AuthUser,
body: web::Json<ReminderTaskInput>,
) -> Result<impl Responder, ApiError> {
if body.title.is_empty() {
return Err(ApiError::BadRequest("Invalid payload".to_string()));
}
let now = chrono::Utc::now().fixed_offset();
let body = body.into_inner();
let user_id = auth.user_id;
let result = app_data
.db
.transaction::<_, reminder_task::Model, ApiError>(|txn| {
Box::pin(async move {
// Create recurrence rule if provided
let rule_id = if let Some(rule_input) = body.recurrence_rule {
let rule = recurrence_rule::ActiveModel {
id: Set(Uuid::new_v4()),
r#type: Set(parse_recurrence_type(&rule_input.r#type)?),
interval: Set(rule_input.interval.unwrap_or(1)),
by_weekday: Set(rule_input.by_weekday),
by_monthday: Set(rule_input.by_monthday),
timezone: Set(rule_input.timezone.unwrap_or("Asia/Shanghai".to_string())),
created_at: Set(now),
updated_at: Set(now),
};
let created = rule.insert(txn).await?;
Some(created.id)
} else {
None
};
// Create task
let new_task = reminder_task::ActiveModel {
id: Set(Uuid::new_v4()),
creator_id: Set(user_id),
title: Set(body.title),
description: Set(body.description),
due_at: Set(body.due_at.fixed_offset()),
recurrence_rule_id: Set(rule_id),
created_at: Set(now),
updated_at: Set(now),
};
let created_task = new_task.insert(txn).await?;
// Create recipients
for recipient_id in body.recipient_ids {
let new_recipient = reminder_task_recipient::ActiveModel {
task_id: Set(created_task.id),
user_id: Set(recipient_id),
};
new_recipient.insert(txn).await?;
}
// Create offsets
if let Some(offsets) = body.offsets {
for offset in offsets {
let new_offset = reminder_offset::ActiveModel {
id: Set(Uuid::new_v4()),
target_type: Set(TargetType::ReminderTask),
target_id: Set(created_task.id),
offset_minutes: Set(offset.offset_minutes),
channel_inapp: Set(offset.channel_inapp.unwrap_or(true)),
channel_bark: Set(offset.channel_bark.unwrap_or(false)),
created_at: Set(now),
bark_title: Set(offset.bark_title),
bark_subtitle: Set(offset.bark_subtitle),
bark_body_markdown: Set(offset.bark_body_markdown),
bark_level: Set(offset.bark_level),
bark_icon: Set(offset.bark_icon),
};
new_offset.insert(txn).await?;
}
}
Ok(created_task)
})
})
.await
.map_err(|e| match e {
sea_orm::TransactionError::Connection(e) => ApiError::Internal(e.to_string()),
sea_orm::TransactionError::Transaction(e) => e,
})?;
// Trigger notification generation
let _ = app_data
.send_worker_command(WorkerCommand::GenerateNotifications {
target_type: TargetType::ReminderTask,
target_id: result.id,
})
.await;
Ok(HttpResponse::Ok().json(result))
}
#[get("/{id}")]
async fn get_task(
app_data: web::Data<AppData>,
auth: AuthUser,
path: web::Path<Uuid>,
) -> Result<impl Responder, ApiError> {
let id = path.into_inner();
let (t, rule) = reminder_task::Entity::find_by_id(id)
.filter(reminder_task::Column::CreatorId.eq(auth.user_id))
.find_also_related(recurrence_rule::Entity)
.one(&app_data.db)
.await?
.ok_or_else(|| ApiError::NotFound("Not found".to_string()))?;
let recipients = reminder_task_recipient::Entity::find()
.filter(reminder_task_recipient::Column::TaskId.eq(t.id))
.all(&app_data.db)
.await?;
let offsets = reminder_offset::Entity::find()
.filter(reminder_offset::Column::TargetType.eq(TargetType::ReminderTask))
.filter(reminder_offset::Column::TargetId.eq(t.id))
.all(&app_data.db)
.await?;
let result = serde_json::json!({
"id": t.id,
"creatorId": t.creator_id,
"title": t.title,
"description": t.description,
"dueAt": t.due_at,
"recurrenceRuleId": t.recurrence_rule_id,
"createdAt": t.created_at,
"updatedAt": t.updated_at,
"recurrenceRule": rule,
"recipients": recipients,
"offsets": offsets,
});
Ok(HttpResponse::Ok().json(result))
}
#[put("/{id}")]
async fn update_task(
app_data: web::Data<AppData>,
auth: AuthUser,
path: web::Path<Uuid>,
body: web::Json<ReminderTaskInput>,
) -> Result<impl Responder, ApiError> {
let id = path.into_inner();
if body.title.is_empty() {
return Err(ApiError::BadRequest("Invalid payload".to_string()));
}
let now = chrono::Utc::now().fixed_offset();
let body = body.into_inner();
let user_id = auth.user_id;
let result = app_data
.db
.transaction::<_, reminder_task::Model, ApiError>(|txn| {
Box::pin(async move {
let existing = reminder_task::Entity::find_by_id(id)
.filter(reminder_task::Column::CreatorId.eq(user_id))
.one(txn)
.await?
.ok_or_else(|| ApiError::NotFound("Not found".to_string()))?;
// Handle recurrence rule
let mut rule_id = existing.recurrence_rule_id;
if let Some(rule_input) = body.recurrence_rule {
if let Some(existing_rule_id) = rule_id {
// Update existing rule
let mut rule: recurrence_rule::ActiveModel =
recurrence_rule::Entity::find_by_id(existing_rule_id)
.one(txn)
.await?
.ok_or_else(|| ApiError::Internal("Rule not found".to_string()))?
.into();
rule.r#type = Set(parse_recurrence_type(&rule_input.r#type)?);
rule.interval = Set(rule_input.interval.unwrap_or(1));
rule.by_weekday = Set(rule_input.by_weekday);
rule.by_monthday = Set(rule_input.by_monthday);
rule.timezone =
Set(rule_input.timezone.unwrap_or("Asia/Shanghai".to_string()));
rule.updated_at = Set(now);
rule.update(txn).await?;
} else {
// Create new rule
let rule = recurrence_rule::ActiveModel {
id: Set(Uuid::new_v4()),
r#type: Set(parse_recurrence_type(&rule_input.r#type)?),
interval: Set(rule_input.interval.unwrap_or(1)),
by_weekday: Set(rule_input.by_weekday),
by_monthday: Set(rule_input.by_monthday),
timezone: Set(rule_input
.timezone
.unwrap_or("Asia/Shanghai".to_string())),
created_at: Set(now),
updated_at: Set(now),
};
let created = rule.insert(txn).await?;
rule_id = Some(created.id);
}
} else if let Some(existing_rule_id) = rule_id {
// Delete existing rule
recurrence_rule::Entity::delete_by_id(existing_rule_id)
.exec(txn)
.await?;
rule_id = None;
}
// Delete existing recipients and create new ones
reminder_task_recipient::Entity::delete_many()
.filter(reminder_task_recipient::Column::TaskId.eq(id))
.exec(txn)
.await?;
for recipient_id in body.recipient_ids {
let new_recipient = reminder_task_recipient::ActiveModel {
task_id: Set(id),
user_id: Set(recipient_id),
};
new_recipient.insert(txn).await?;
}
// Delete existing offsets and create new ones
reminder_offset::Entity::delete_many()
.filter(reminder_offset::Column::TargetType.eq(TargetType::ReminderTask))
.filter(reminder_offset::Column::TargetId.eq(id))
.exec(txn)
.await?;
if let Some(offsets) = body.offsets {
for offset in offsets {
let new_offset = reminder_offset::ActiveModel {
id: Set(Uuid::new_v4()),
target_type: Set(TargetType::ReminderTask),
target_id: Set(id),
offset_minutes: Set(offset.offset_minutes),
channel_inapp: Set(offset.channel_inapp.unwrap_or(true)),
channel_bark: Set(offset.channel_bark.unwrap_or(false)),
created_at: Set(now),
bark_title: Set(offset.bark_title),
bark_subtitle: Set(offset.bark_subtitle),
bark_body_markdown: Set(offset.bark_body_markdown),
bark_level: Set(offset.bark_level),
bark_icon: Set(offset.bark_icon),
};
new_offset.insert(txn).await?;
}
}
// Update task
let mut active: reminder_task::ActiveModel = existing.into();
active.title = Set(body.title);
active.description = Set(body.description);
active.due_at = Set(body.due_at.fixed_offset());
active.recurrence_rule_id = Set(rule_id);
active.updated_at = Set(now);
let updated = active.update(txn).await?;
Ok(updated)
})
})
.await
.map_err(|e| match e {
sea_orm::TransactionError::Connection(e) => ApiError::Internal(e.to_string()),
sea_orm::TransactionError::Transaction(e) => e,
})?;
// Trigger regeneration of notifications
let _ = app_data
.send_worker_command(WorkerCommand::GenerateNotifications {
target_type: TargetType::ReminderTask,
target_id: result.id,
})
.await;
Ok(HttpResponse::Ok().json(result))
}
#[delete("/{id}")]
async fn delete_task(
app_data: web::Data<AppData>,
auth: AuthUser,
path: web::Path<Uuid>,
) -> Result<impl Responder, ApiError> {
let id = path.into_inner();
let result = reminder_task::Entity::delete_many()
.filter(reminder_task::Column::Id.eq(id))
.filter(reminder_task::Column::CreatorId.eq(auth.user_id))
.exec(&app_data.db)
.await?;
if result.rows_affected == 0 {
return Err(ApiError::NotFound("Not found".to_string()));
}
// Delete offsets
reminder_offset::Entity::delete_many()
.filter(reminder_offset::Column::TargetType.eq(TargetType::ReminderTask))
.filter(reminder_offset::Column::TargetId.eq(id))
.exec(&app_data.db)
.await?;
// Delete recipients
reminder_task_recipient::Entity::delete_many()
.filter(reminder_task_recipient::Column::TaskId.eq(id))
.exec(&app_data.db)
.await?;
Ok(HttpResponse::Ok().json(OkResponse { ok: true }))
}
fn parse_recurrence_type(s: &str) -> Result<RecurrenceType, ApiError> {
match s {
"hourly" => Ok(RecurrenceType::Hourly),
"daily" => Ok(RecurrenceType::Daily),
"weekly" => Ok(RecurrenceType::Weekly),
"monthly" => Ok(RecurrenceType::Monthly),
"yearly" => Ok(RecurrenceType::Yearly),
_ => Err(ApiError::BadRequest("Invalid recurrence type".to_string())),
}
}
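// Route summary (for reference): this scope mounts
//   GET  /api/reminder-tasks              -> list_tasks
//   POST /api/reminder-tasks              -> create_task
//   GET / PUT / DELETE /api/reminder-tasks/{id}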
pub fn routes() -> Scope {
web::scope("/api/reminder-tasks")
.service(list_tasks)
.service(create_task)
.service(get_task)
.service(update_task)
.service(delete_task)
}

View File

@@ -0,0 +1,432 @@
use actix_web::{HttpResponse, Responder, Scope, delete, get, post, put, web};
use sea_orm::{
ActiveModelTrait, ColumnTrait, EntityTrait, QueryFilter, QueryOrder, Set, TransactionTrait,
};
use serde::{Deserialize, Serialize};
use uuid::Uuid;
use crate::app_data::AppData;
use crate::entity::sea_orm_active_enums::{RecurrenceType, TargetType};
use crate::entity::{recurrence_rule, reminder_offset, todo};
use crate::error::ApiError;
use crate::middleware::auth::AuthUser;
use crate::timer::WorkerCommand;
#[derive(Debug, Deserialize)]
#[serde(rename_all = "camelCase")]
pub struct RecurrenceRuleInput {
pub r#type: String,
pub interval: Option<i32>,
pub by_weekday: Option<i32>,
pub by_monthday: Option<i32>,
pub timezone: Option<String>,
}
#[derive(Debug, Deserialize)]
#[serde(rename_all = "camelCase")]
pub struct OffsetInput {
pub offset_minutes: i32,
pub channel_inapp: Option<bool>,
pub channel_bark: Option<bool>,
}
#[derive(Debug, Deserialize)]
#[serde(rename_all = "camelCase")]
pub struct TodoInput {
pub title: String,
pub description: Option<String>,
pub due_at: chrono::DateTime<chrono::Utc>,
pub recurrence_rule: Option<RecurrenceRuleInput>,
pub offsets: Option<Vec<OffsetInput>>,
}
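// Illustrative request body for create_todo / update_todo (field names follow the
// serde camelCase rename; all values below are examples only):
// {
//   "title": "Water the plants",
//   "description": "Balcony only",
//   "dueAt": "2024-06-01T09:00:00Z",
//   "recurrenceRule": { "type": "daily", "interval": 1, "timezone": "Asia/Shanghai" },
//   "offsets": [ { "offsetMinutes": 30, "channelInapp": true, "channelBark": false } ]
// }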
#[derive(Debug, Serialize)]
pub struct OkResponse {
pub ok: bool,
}
#[get("")]
async fn list_todos(
app_data: web::Data<AppData>,
auth: AuthUser,
) -> Result<impl Responder, ApiError> {
let items = todo::Entity::find()
.filter(todo::Column::OwnerId.eq(auth.user_id))
.order_by_asc(todo::Column::DueAt)
.find_also_related(recurrence_rule::Entity)
.all(&app_data.db)
.await?;
let todo_ids: Vec<Uuid> = items.iter().map(|(t, _)| t.id).collect();
let offsets = reminder_offset::Entity::find()
.filter(reminder_offset::Column::TargetType.eq(TargetType::Todo))
.filter(reminder_offset::Column::TargetId.is_in(todo_ids))
.all(&app_data.db)
.await?;
let result: Vec<serde_json::Value> = items
.into_iter()
.map(|(t, rule)| {
let todo_offsets: Vec<_> = offsets
.iter()
.filter(|o| o.target_id == t.id)
.cloned()
.collect();
serde_json::json!({
"id": t.id,
"ownerId": t.owner_id,
"title": t.title,
"description": t.description,
"dueAt": t.due_at,
"recurrenceRuleId": t.recurrence_rule_id,
"createdAt": t.created_at,
"updatedAt": t.updated_at,
"checkInAt": t.check_in_at,
"checkInCount": t.check_in_count,
"isCheckedIn": t.is_checked_in,
"recurrenceRule": rule,
"offsets": todo_offsets,
})
})
.collect();
Ok(HttpResponse::Ok().json(result))
}
#[post("")]
async fn create_todo(
app_data: web::Data<AppData>,
auth: AuthUser,
body: web::Json<TodoInput>,
) -> Result<impl Responder, ApiError> {
if body.title.is_empty() {
return Err(ApiError::BadRequest("Invalid payload".to_string()));
}
let now = chrono::Utc::now().fixed_offset();
let body = body.into_inner();
let user_id = auth.user_id;
let result = app_data
.db
.transaction::<_, todo::Model, ApiError>(|txn| {
Box::pin(async move {
// Create recurrence rule if provided
let rule_id = if let Some(rule_input) = body.recurrence_rule {
let rule = recurrence_rule::ActiveModel {
id: Set(Uuid::new_v4()),
r#type: Set(parse_recurrence_type(&rule_input.r#type)?),
interval: Set(rule_input.interval.unwrap_or(1)),
by_weekday: Set(rule_input.by_weekday),
by_monthday: Set(rule_input.by_monthday),
timezone: Set(rule_input.timezone.unwrap_or("Asia/Shanghai".to_string())),
created_at: Set(now),
updated_at: Set(now),
};
let created = rule.insert(txn).await?;
Some(created.id)
} else {
None
};
// Create todo
let new_todo = todo::ActiveModel {
id: Set(Uuid::new_v4()),
owner_id: Set(user_id),
title: Set(body.title),
description: Set(body.description),
due_at: Set(body.due_at.fixed_offset()),
recurrence_rule_id: Set(rule_id),
created_at: Set(now),
updated_at: Set(now),
check_in_at: Set(None),
check_in_count: Set(0),
is_checked_in: Set(false),
};
let created_todo = new_todo.insert(txn).await?;
// Create offsets
if let Some(offsets) = body.offsets {
for offset in offsets {
let new_offset = reminder_offset::ActiveModel {
id: Set(Uuid::new_v4()),
target_type: Set(TargetType::Todo),
target_id: Set(created_todo.id),
offset_minutes: Set(offset.offset_minutes),
channel_inapp: Set(offset.channel_inapp.unwrap_or(true)),
channel_bark: Set(offset.channel_bark.unwrap_or(false)),
created_at: Set(now),
bark_title: Set(None),
bark_subtitle: Set(None),
bark_body_markdown: Set(None),
bark_level: Set(None),
bark_icon: Set(None),
};
new_offset.insert(txn).await?;
}
}
Ok(created_todo)
})
})
.await
.map_err(|e| match e {
sea_orm::TransactionError::Connection(e) => ApiError::Internal(e.to_string()),
sea_orm::TransactionError::Transaction(e) => e,
})?;
// Trigger notification generation
let _ = app_data
.send_worker_command(WorkerCommand::GenerateNotifications {
target_type: TargetType::Todo,
target_id: result.id,
})
.await;
Ok(HttpResponse::Ok().json(result))
}
#[get("/{id}")]
async fn get_todo(
app_data: web::Data<AppData>,
auth: AuthUser,
path: web::Path<Uuid>,
) -> Result<impl Responder, ApiError> {
let id = path.into_inner();
let (t, rule) = todo::Entity::find_by_id(id)
.filter(todo::Column::OwnerId.eq(auth.user_id))
.find_also_related(recurrence_rule::Entity)
.one(&app_data.db)
.await?
.ok_or_else(|| ApiError::NotFound("Not found".to_string()))?;
let offsets = reminder_offset::Entity::find()
.filter(reminder_offset::Column::TargetType.eq(TargetType::Todo))
.filter(reminder_offset::Column::TargetId.eq(t.id))
.all(&app_data.db)
.await?;
let result = serde_json::json!({
"id": t.id,
"ownerId": t.owner_id,
"title": t.title,
"description": t.description,
"dueAt": t.due_at,
"recurrenceRuleId": t.recurrence_rule_id,
"createdAt": t.created_at,
"updatedAt": t.updated_at,
"checkInAt": t.check_in_at,
"checkInCount": t.check_in_count,
"isCheckedIn": t.is_checked_in,
"recurrenceRule": rule,
"offsets": offsets,
});
Ok(HttpResponse::Ok().json(result))
}
#[put("/{id}")]
async fn update_todo(
app_data: web::Data<AppData>,
auth: AuthUser,
path: web::Path<Uuid>,
body: web::Json<TodoInput>,
) -> Result<impl Responder, ApiError> {
let id = path.into_inner();
if body.title.is_empty() {
return Err(ApiError::BadRequest("Invalid payload".to_string()));
}
let now = chrono::Utc::now().fixed_offset();
let body = body.into_inner();
let user_id = auth.user_id;
let result = app_data
.db
.transaction::<_, todo::Model, ApiError>(|txn| {
Box::pin(async move {
let existing = todo::Entity::find_by_id(id)
.filter(todo::Column::OwnerId.eq(user_id))
.one(txn)
.await?
.ok_or_else(|| ApiError::NotFound("Not found".to_string()))?;
// Handle recurrence rule
let mut rule_id = existing.recurrence_rule_id;
if let Some(rule_input) = body.recurrence_rule {
if let Some(existing_rule_id) = rule_id {
// Update existing rule
let mut rule: recurrence_rule::ActiveModel =
recurrence_rule::Entity::find_by_id(existing_rule_id)
.one(txn)
.await?
.ok_or_else(|| ApiError::Internal("Rule not found".to_string()))?
.into();
rule.r#type = Set(parse_recurrence_type(&rule_input.r#type)?);
rule.interval = Set(rule_input.interval.unwrap_or(1));
rule.by_weekday = Set(rule_input.by_weekday);
rule.by_monthday = Set(rule_input.by_monthday);
rule.timezone =
Set(rule_input.timezone.unwrap_or("Asia/Shanghai".to_string()));
rule.updated_at = Set(now);
rule.update(txn).await?;
} else {
// Create new rule
let rule = recurrence_rule::ActiveModel {
id: Set(Uuid::new_v4()),
r#type: Set(parse_recurrence_type(&rule_input.r#type)?),
interval: Set(rule_input.interval.unwrap_or(1)),
by_weekday: Set(rule_input.by_weekday),
by_monthday: Set(rule_input.by_monthday),
timezone: Set(rule_input
.timezone
.unwrap_or("Asia/Shanghai".to_string())),
created_at: Set(now),
updated_at: Set(now),
};
let created = rule.insert(txn).await?;
rule_id = Some(created.id);
}
} else if let Some(existing_rule_id) = rule_id {
// Delete existing rule
recurrence_rule::Entity::delete_by_id(existing_rule_id)
.exec(txn)
.await?;
rule_id = None;
}
// Delete existing offsets and create new ones
reminder_offset::Entity::delete_many()
.filter(reminder_offset::Column::TargetType.eq(TargetType::Todo))
.filter(reminder_offset::Column::TargetId.eq(id))
.exec(txn)
.await?;
if let Some(offsets) = body.offsets {
for offset in offsets {
let new_offset = reminder_offset::ActiveModel {
id: Set(Uuid::new_v4()),
target_type: Set(TargetType::Todo),
target_id: Set(id),
offset_minutes: Set(offset.offset_minutes),
channel_inapp: Set(offset.channel_inapp.unwrap_or(true)),
channel_bark: Set(offset.channel_bark.unwrap_or(false)),
created_at: Set(now),
bark_title: Set(None),
bark_subtitle: Set(None),
bark_body_markdown: Set(None),
bark_level: Set(None),
bark_icon: Set(None),
};
new_offset.insert(txn).await?;
}
}
// Update todo
let mut active: todo::ActiveModel = existing.into();
active.title = Set(body.title);
active.description = Set(body.description);
active.due_at = Set(body.due_at.fixed_offset());
active.recurrence_rule_id = Set(rule_id);
active.updated_at = Set(now);
let updated = active.update(txn).await?;
Ok(updated)
})
})
.await
.map_err(|e| match e {
sea_orm::TransactionError::Connection(e) => ApiError::Internal(e.to_string()),
sea_orm::TransactionError::Transaction(e) => e,
})?;
// Trigger regeneration of notifications
let _ = app_data
.send_worker_command(WorkerCommand::GenerateNotifications {
target_type: TargetType::Todo,
target_id: result.id,
})
.await;
Ok(HttpResponse::Ok().json(result))
}
#[post("/{id}/check-in")]
async fn check_in_todo(
app_data: web::Data<AppData>,
auth: AuthUser,
path: web::Path<Uuid>,
) -> Result<impl Responder, ApiError> {
let id = path.into_inner();
// Find and verify ownership
let existing = todo::Entity::find_by_id(id)
.filter(todo::Column::OwnerId.eq(auth.user_id))
.one(&app_data.db)
.await?
.ok_or_else(|| ApiError::NotFound("Not found".to_string()))?;
let now = chrono::Utc::now().fixed_offset();
// Update check-in fields
let mut active: todo::ActiveModel = existing.clone().into();
active.is_checked_in = Set(true);
active.check_in_at = Set(Some(now));
active.check_in_count = Set(existing.check_in_count + 1);
active.updated_at = Set(now);
let updated = active.update(&app_data.db).await?;
Ok(HttpResponse::Ok().json(updated))
}
#[delete("/{id}")]
async fn delete_todo(
app_data: web::Data<AppData>,
auth: AuthUser,
path: web::Path<Uuid>,
) -> Result<impl Responder, ApiError> {
let id = path.into_inner();
let result = todo::Entity::delete_many()
.filter(todo::Column::Id.eq(id))
.filter(todo::Column::OwnerId.eq(auth.user_id))
.exec(&app_data.db)
.await?;
if result.rows_affected == 0 {
return Err(ApiError::NotFound("Not found".to_string()));
}
// Delete offsets
reminder_offset::Entity::delete_many()
.filter(reminder_offset::Column::TargetType.eq(TargetType::Todo))
.filter(reminder_offset::Column::TargetId.eq(id))
.exec(&app_data.db)
.await?;
Ok(HttpResponse::Ok().json(OkResponse { ok: true }))
}
fn parse_recurrence_type(s: &str) -> Result<RecurrenceType, ApiError> {
match s {
"hourly" => Ok(RecurrenceType::Hourly),
"daily" => Ok(RecurrenceType::Daily),
"weekly" => Ok(RecurrenceType::Weekly),
"monthly" => Ok(RecurrenceType::Monthly),
"yearly" => Ok(RecurrenceType::Yearly),
_ => Err(ApiError::BadRequest("Invalid recurrence type".to_string())),
}
}
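// Route summary (for reference): this scope mounts
//   GET  /api/todos                   -> list_todos
//   POST /api/todos                   -> create_todo
//   GET / PUT / DELETE /api/todos/{id}
//   POST /api/todos/{id}/check-in     -> check_in_todo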
pub fn routes() -> Scope {
web::scope("/api/todos")
.service(list_todos)
.service(create_todo)
.service(get_todo)
.service(update_todo)
.service(check_in_todo)
.service(delete_todo)
}

View File

@@ -0,0 +1,64 @@
use actix_web::{HttpResponse, Responder, Scope, get, web};
use sea_orm::sea_query::extension::postgres::PgExpr;
use sea_orm::sea_query::Expr;
use sea_orm::{EntityTrait, QueryFilter, QueryOrder};
use serde::{Deserialize, Serialize};
use uuid::Uuid;
use crate::app_data::AppData;
use crate::entity::user;
use crate::error::ApiError;
use crate::middleware::auth::AuthUser;
#[derive(Debug, Deserialize)]
pub struct SearchQuery {
pub query: Option<String>,
}
#[derive(Debug, Serialize)]
pub struct UserResponse {
pub id: Uuid,
pub username: String,
pub avatar: Option<String>,
}
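// Illustrative request: GET /api/users?query=ali returns users whose username
// contains "ali" (case-insensitive); a missing or empty query returns all users.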
#[get("")]
async fn search_users(
app_data: web::Data<AppData>,
_auth: AuthUser,
query: web::Query<SearchQuery>,
) -> Result<impl Responder, ApiError> {
let mut q = user::Entity::find();
if let Some(search) = &query.query {
let search = search.trim();
if !search.is_empty() {
// Case-insensitive search via ILIKE (PostgreSQL-specific)
let pattern = format!("%{}%", search);
q = q.filter(
Expr::col((user::Entity, user::Column::Username))
.ilike(&pattern)
);
}
}
let users = q
.order_by_asc(user::Column::Username)
.all(&app_data.db)
.await?;
let result: Vec<UserResponse> = users
.into_iter()
.map(|u| UserResponse {
id: u.id,
username: u.username,
avatar: u.avatar,
})
.collect();
Ok(HttpResponse::Ok().json(result))
}
pub fn routes() -> Scope {
web::scope("/api/users").service(search_users)
}

View File

@@ -0,0 +1,71 @@
use migration::{Migrator, MigratorTrait};
use sea_orm::{ConnectOptions, Database, DbConn};
use std::env;
use std::path::PathBuf;
use tokio::sync::mpsc;
use tracing::info;
use crate::timer::{NotificationWorker, SharedTimeWheel, WorkerCommand};
#[derive(Clone)]
pub struct AppData {
pub db: DbConn,
pub jwt_secret: String,
pub worker_tx: mpsc::Sender<WorkerCommand>,
/// Base URL of the server, used to build full URLs for uploaded assets such as avatars.
/// Local development: http://localhost:4000
/// Production: https://notify.michaelandmeryl.xyz
pub base_url: String,
/// Storage directory for uploaded files
pub upload_dir: PathBuf,
}
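// Environment variables read in new() below, with their development defaults
// (illustrative; override these in production):
//   DATABASE_URL = postgres://postgres:postgres@localhost:5432/notify
//   JWT_SECRET   = dev-secret
//   BASE_URL     = http://localhost:4000
//   UPLOAD_DIR   = ./uploads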
impl AppData {
pub async fn new() -> Result<Self, anyhow::Error> {
let url = env::var("DATABASE_URL")
.unwrap_or_else(|_| "postgres://postgres:postgres@localhost:5432/notify".to_string());
let mut opt = ConnectOptions::new(url);
opt.max_connections(10).sqlx_logging(false);
let db = Database::connect(opt).await?;
// Run database migrations automatically on startup
info!("Running database migrations...");
Migrator::up(&db, None).await?;
info!("Database migrations completed.");
let jwt_secret = env::var("JWT_SECRET").unwrap_or_else(|_| "dev-secret".to_string());
// Read BASE_URL from the environment; default to the local development address
let base_url = env::var("BASE_URL").unwrap_or_else(|_| "http://localhost:4000".to_string());
// Upload directory; defaults to ./uploads under the current directory
let upload_dir = env::var("UPLOAD_DIR")
.map(PathBuf::from)
.unwrap_or_else(|_| PathBuf::from("./uploads"));
// Make sure the upload directories exist
tokio::fs::create_dir_all(&upload_dir).await?;
tokio::fs::create_dir_all(upload_dir.join("avatars")).await?;
// Create and start the time-wheel notification worker
let time_wheel = SharedTimeWheel::new();
let worker = NotificationWorker::new(db.clone(), time_wheel);
let worker_tx = worker.start().await;
Ok(Self {
db,
jwt_secret,
worker_tx,
base_url,
upload_dir,
})
}
/// Send a command to the worker
pub async fn send_worker_command(
&self,
cmd: WorkerCommand,
) -> Result<(), mpsc::error::SendError<WorkerCommand>> {
self.worker_tx.send(cmd).await
}
}

View File

@@ -0,0 +1,41 @@
//! `SeaORM` Entity, @generated by sea-orm-codegen 2.0
use super::sea_orm_active_enums::ChannelType;
use super::sea_orm_active_enums::NotificationStatus;
use sea_orm::entity::prelude::*;
use serde::{Deserialize, Serialize};
#[derive(Clone, Debug, PartialEq, Eq, DeriveEntityModel, Serialize, Deserialize)]
#[sea_orm(table_name = "delivery_log")]
#[sea_orm::model]
pub struct Model {
#[sea_orm(primary_key, auto_increment = false)]
pub id: Uuid,
pub notification_id: Uuid,
pub attempt_no: i32,
pub channel: ChannelType,
pub status: NotificationStatus,
#[sea_orm(column_type = "JsonBinary", nullable)]
pub response_meta: Option<Json>,
pub created_at: DateTimeWithTimeZone,
}
#[derive(Copy, Clone, Debug, EnumIter, DeriveRelation)]
pub enum Relation {
#[sea_orm(
belongs_to = "super::notification::Entity",
from = "Column::NotificationId",
to = "super::notification::Column::Id",
on_update = "Cascade",
on_delete = "Cascade"
)]
Notification,
}
impl Related<super::notification::Entity> for Entity {
fn to() -> RelationDef {
Relation::Notification.def()
}
}
impl ActiveModelBehavior for ActiveModel {}

View File

@@ -0,0 +1,40 @@
//! `SeaORM` Entity, @generated by sea-orm-codegen 2.0
use sea_orm::entity::prelude::*;
use serde::{Deserialize, Serialize};
#[derive(Clone, Debug, PartialEq, Eq, DeriveEntityModel, Serialize, Deserialize)]
#[sea_orm(table_name = "invite")]
#[sea_orm::model]
pub struct Model {
#[sea_orm(primary_key, auto_increment = false)]
pub id: Uuid,
#[sea_orm(unique)]
pub code: String,
pub creator_id: Uuid,
pub max_uses: i32,
pub used_count: i32,
pub expires_at: DateTimeWithTimeZone,
pub revoked_at: Option<DateTimeWithTimeZone>,
pub created_at: DateTimeWithTimeZone,
}
#[derive(Copy, Clone, Debug, EnumIter, DeriveRelation)]
pub enum Relation {
#[sea_orm(
belongs_to = "super::user::Entity",
from = "Column::CreatorId",
to = "super::user::Column::Id",
on_update = "Cascade",
on_delete = "Cascade"
)]
User,
}
impl Related<super::user::Entity> for Entity {
fn to() -> RelationDef {
Relation::User.def()
}
}
impl ActiveModelBehavior for ActiveModel {}

View File

@@ -0,0 +1,14 @@
//! `SeaORM` Entity, @generated by sea-orm-codegen 2.0
pub mod prelude;
pub mod delivery_log;
pub mod invite;
pub mod notification;
pub mod recurrence_rule;
pub mod reminder_offset;
pub mod reminder_task;
pub mod reminder_task_recipient;
pub mod sea_orm_active_enums;
pub mod todo;
pub mod user;

View File

@@ -0,0 +1,61 @@
//! `SeaORM` Entity, @generated by sea-orm-codegen 2.0
use super::sea_orm_active_enums::ChannelType;
use super::sea_orm_active_enums::NotificationStatus;
use super::sea_orm_active_enums::TargetType;
use sea_orm::entity::prelude::*;
use serde::{Deserialize, Serialize};
#[derive(Clone, Debug, PartialEq, Eq, DeriveEntityModel, Serialize, Deserialize)]
#[sea_orm(table_name = "notification")]
#[sea_orm::model]
#[serde(rename_all = "camelCase")]
pub struct Model {
#[sea_orm(primary_key, auto_increment = false)]
pub id: Uuid,
#[sea_orm(unique_key = "UQ_notification_recipient_target_trigger_channel")]
pub recipient_id: Uuid,
#[sea_orm(unique_key = "UQ_notification_recipient_target_trigger_channel")]
pub target_type: TargetType,
#[sea_orm(unique_key = "UQ_notification_recipient_target_trigger_channel")]
pub target_id: Uuid,
#[sea_orm(unique_key = "UQ_notification_recipient_target_trigger_channel")]
pub trigger_at: DateTimeWithTimeZone,
#[sea_orm(unique_key = "UQ_notification_recipient_target_trigger_channel")]
pub channel: ChannelType,
pub status: NotificationStatus,
pub locked_at: Option<DateTimeWithTimeZone>,
pub sent_at: Option<DateTimeWithTimeZone>,
pub read_at: Option<DateTimeWithTimeZone>,
pub created_at: DateTimeWithTimeZone,
pub updated_at: DateTimeWithTimeZone,
pub offset_id: Option<Uuid>,
}
#[derive(Copy, Clone, Debug, EnumIter, DeriveRelation)]
pub enum Relation {
#[sea_orm(has_many = "super::delivery_log::Entity")]
DeliveryLog,
#[sea_orm(
belongs_to = "super::user::Entity",
from = "Column::RecipientId",
to = "super::user::Column::Id",
on_update = "Cascade",
on_delete = "Cascade"
)]
User,
}
impl Related<super::delivery_log::Entity> for Entity {
fn to() -> RelationDef {
Relation::DeliveryLog.def()
}
}
impl Related<super::user::Entity> for Entity {
fn to() -> RelationDef {
Relation::User.def()
}
}
impl ActiveModelBehavior for ActiveModel {}

View File

@@ -0,0 +1,11 @@
//! `SeaORM` Entity, @generated by sea-orm-codegen 2.0
pub use super::delivery_log::Entity as DeliveryLog;
pub use super::invite::Entity as Invite;
pub use super::notification::Entity as Notification;
pub use super::recurrence_rule::Entity as RecurrenceRule;
pub use super::reminder_offset::Entity as ReminderOffset;
pub use super::reminder_task::Entity as ReminderTask;
pub use super::reminder_task_recipient::Entity as ReminderTaskRecipient;
pub use super::todo::Entity as Todo;
pub use super::user::Entity as User;

View File

@@ -0,0 +1,43 @@
//! `SeaORM` Entity, @generated by sea-orm-codegen 2.0
use super::sea_orm_active_enums::RecurrenceType;
use sea_orm::entity::prelude::*;
use serde::{Deserialize, Serialize};
#[derive(Clone, Debug, PartialEq, Eq, DeriveEntityModel, Serialize, Deserialize)]
#[sea_orm(table_name = "recurrence_rule")]
#[sea_orm::model]
#[serde(rename_all = "camelCase")]
pub struct Model {
#[sea_orm(primary_key, auto_increment = false)]
pub id: Uuid,
pub r#type: RecurrenceType,
pub interval: i32,
pub by_weekday: Option<i32>,
pub by_monthday: Option<i32>,
pub timezone: String,
pub created_at: DateTimeWithTimeZone,
pub updated_at: DateTimeWithTimeZone,
}
#[derive(Copy, Clone, Debug, EnumIter, DeriveRelation)]
pub enum Relation {
#[sea_orm(has_many = "super::reminder_task::Entity")]
ReminderTask,
#[sea_orm(has_many = "super::todo::Entity")]
Todo,
}
impl Related<super::reminder_task::Entity> for Entity {
fn to() -> RelationDef {
Relation::ReminderTask.def()
}
}
impl Related<super::todo::Entity> for Entity {
fn to() -> RelationDef {
Relation::Todo.def()
}
}
impl ActiveModelBehavior for ActiveModel {}

View File

@@ -0,0 +1,31 @@
//! `SeaORM` Entity, @generated by sea-orm-codegen 2.0
use super::sea_orm_active_enums::TargetType;
use sea_orm::entity::prelude::*;
use serde::{Deserialize, Serialize};
#[derive(Clone, Debug, PartialEq, Eq, DeriveEntityModel, Serialize, Deserialize)]
#[sea_orm(table_name = "reminder_offset")]
#[sea_orm::model]
#[serde(rename_all = "camelCase")]
pub struct Model {
#[sea_orm(primary_key, auto_increment = false)]
pub id: Uuid,
pub target_type: TargetType,
pub target_id: Uuid,
pub offset_minutes: i32,
pub channel_inapp: bool,
pub channel_bark: bool,
pub created_at: DateTimeWithTimeZone,
pub bark_title: Option<String>,
pub bark_subtitle: Option<String>,
#[sea_orm(column_type = "Text", nullable)]
pub bark_body_markdown: Option<String>,
pub bark_level: Option<String>,
pub bark_icon: Option<String>,
}
#[derive(Copy, Clone, Debug, EnumIter, DeriveRelation)]
pub enum Relation {}
impl ActiveModelBehavior for ActiveModel {}

View File

@@ -0,0 +1,68 @@
//! `SeaORM` Entity, @generated by sea-orm-codegen 2.0
use sea_orm::entity::prelude::*;
use serde::{Deserialize, Serialize};
#[derive(Clone, Debug, PartialEq, Eq, DeriveEntityModel, Serialize, Deserialize)]
#[sea_orm(table_name = "reminder_task")]
#[sea_orm::model]
pub struct Model {
#[sea_orm(primary_key, auto_increment = false)]
pub id: Uuid,
pub creator_id: Uuid,
pub title: String,
pub description: Option<String>,
pub due_at: DateTimeWithTimeZone,
pub recurrence_rule_id: Option<Uuid>,
pub created_at: DateTimeWithTimeZone,
pub updated_at: DateTimeWithTimeZone,
}
#[derive(Copy, Clone, Debug, EnumIter, DeriveRelation)]
pub enum Relation {
#[sea_orm(
belongs_to = "super::recurrence_rule::Entity",
from = "Column::RecurrenceRuleId",
to = "super::recurrence_rule::Column::Id",
on_update = "Cascade",
on_delete = "SetNull"
)]
RecurrenceRule,
#[sea_orm(has_many = "super::reminder_task_recipient::Entity")]
ReminderTaskRecipient,
#[sea_orm(
belongs_to = "super::user::Entity",
from = "Column::CreatorId",
to = "super::user::Column::Id",
on_update = "Cascade",
on_delete = "Cascade"
)]
User,
}
impl Related<super::recurrence_rule::Entity> for Entity {
fn to() -> RelationDef {
Relation::RecurrenceRule.def()
}
}
impl Related<super::reminder_task_recipient::Entity> for Entity {
fn to() -> RelationDef {
Relation::ReminderTaskRecipient.def()
}
}
impl Related<super::user::Entity> for Entity {
fn to() -> RelationDef {
super::reminder_task_recipient::Relation::User.def()
}
fn via() -> Option<RelationDef> {
Some(
super::reminder_task_recipient::Relation::ReminderTask
.def()
.rev(),
)
}
}
impl ActiveModelBehavior for ActiveModel {}

View File

@@ -0,0 +1,49 @@
//! `SeaORM` Entity, @generated by sea-orm-codegen 2.0
use sea_orm::entity::prelude::*;
use serde::{Deserialize, Serialize};
#[derive(Clone, Debug, PartialEq, Eq, DeriveEntityModel, Serialize, Deserialize)]
#[sea_orm(table_name = "reminder_task_recipient")]
#[sea_orm::model]
#[serde(rename_all = "camelCase")]
pub struct Model {
#[sea_orm(primary_key, auto_increment = false)]
pub task_id: Uuid,
#[sea_orm(primary_key, auto_increment = false)]
pub user_id: Uuid,
}
#[derive(Copy, Clone, Debug, EnumIter, DeriveRelation)]
pub enum Relation {
#[sea_orm(
belongs_to = "super::reminder_task::Entity",
from = "Column::TaskId",
to = "super::reminder_task::Column::Id",
on_update = "Cascade",
on_delete = "Cascade"
)]
ReminderTask,
#[sea_orm(
belongs_to = "super::user::Entity",
from = "Column::UserId",
to = "super::user::Column::Id",
on_update = "Cascade",
on_delete = "Cascade"
)]
User,
}
impl Related<super::reminder_task::Entity> for Entity {
fn to() -> RelationDef {
Relation::ReminderTask.def()
}
}
impl Related<super::user::Entity> for Entity {
fn to() -> RelationDef {
Relation::User.def()
}
}
impl ActiveModelBehavior for ActiveModel {}

View File

@@ -0,0 +1,51 @@
//! `SeaORM` Entity, @generated by sea-orm-codegen 2.0
use sea_orm::entity::prelude::*;
use serde::{Deserialize, Serialize};
#[derive(Debug, Clone, PartialEq, Eq, EnumIter, DeriveActiveEnum, Serialize, Deserialize)]
#[sea_orm(rs_type = "String", db_type = "Enum", enum_name = "channel_type")]
pub enum ChannelType {
#[sea_orm(string_value = "inapp")]
Inapp,
#[sea_orm(string_value = "bark")]
Bark,
}
#[derive(Debug, Clone, PartialEq, Eq, EnumIter, DeriveActiveEnum, Serialize, Deserialize)]
#[sea_orm(
rs_type = "String",
db_type = "Enum",
enum_name = "notification_status"
)]
pub enum NotificationStatus {
#[sea_orm(string_value = "pending")]
Pending,
#[sea_orm(string_value = "queued")]
Queued,
#[sea_orm(string_value = "sent")]
Sent,
#[sea_orm(string_value = "failed")]
Failed,
}
#[derive(Debug, Clone, PartialEq, Eq, EnumIter, DeriveActiveEnum, Serialize, Deserialize)]
#[sea_orm(rs_type = "String", db_type = "Enum", enum_name = "recurrence_type")]
pub enum RecurrenceType {
#[sea_orm(string_value = "hourly")]
Hourly,
#[sea_orm(string_value = "daily")]
Daily,
#[sea_orm(string_value = "weekly")]
Weekly,
#[sea_orm(string_value = "monthly")]
Monthly,
#[sea_orm(string_value = "yearly")]
Yearly,
}
#[derive(Debug, Clone, PartialEq, Eq, EnumIter, DeriveActiveEnum, Serialize, Deserialize)]
#[sea_orm(rs_type = "String", db_type = "Enum", enum_name = "target_type")]
pub enum TargetType {
#[sea_orm(string_value = "todo")]
Todo,
#[sea_orm(string_value = "reminder_task")]
ReminderTask,
}

View File

@@ -0,0 +1,56 @@
//! `SeaORM` Entity, @generated by sea-orm-codegen 2.0
use sea_orm::entity::prelude::*;
use serde::{Deserialize, Serialize};
#[derive(Clone, Debug, PartialEq, Eq, DeriveEntityModel, Serialize, Deserialize)]
#[sea_orm(table_name = "todo")]
#[sea_orm::model]
pub struct Model {
#[sea_orm(primary_key, auto_increment = false)]
pub id: Uuid,
pub owner_id: Uuid,
pub title: String,
pub description: Option<String>,
pub due_at: DateTimeWithTimeZone,
pub recurrence_rule_id: Option<Uuid>,
pub created_at: DateTimeWithTimeZone,
pub updated_at: DateTimeWithTimeZone,
pub check_in_at: Option<DateTimeWithTimeZone>,
pub check_in_count: i32,
pub is_checked_in: bool,
}
#[derive(Copy, Clone, Debug, EnumIter, DeriveRelation)]
pub enum Relation {
#[sea_orm(
belongs_to = "super::recurrence_rule::Entity",
from = "Column::RecurrenceRuleId",
to = "super::recurrence_rule::Column::Id",
on_update = "Cascade",
on_delete = "SetNull"
)]
RecurrenceRule,
#[sea_orm(
belongs_to = "super::user::Entity",
from = "Column::OwnerId",
to = "super::user::Column::Id",
on_update = "Cascade",
on_delete = "Cascade"
)]
User,
}
impl Related<super::recurrence_rule::Entity> for Entity {
fn to() -> RelationDef {
Relation::RecurrenceRule.def()
}
}
impl Related<super::user::Entity> for Entity {
fn to() -> RelationDef {
Relation::User.def()
}
}
impl ActiveModelBehavior for ActiveModel {}

View File

@@ -0,0 +1,79 @@
//! `SeaORM` Entity, @generated by sea-orm-codegen 2.0
use sea_orm::entity::prelude::*;
use serde::{Deserialize, Serialize};
#[derive(Clone, Debug, PartialEq, Eq, DeriveEntityModel, Serialize, Deserialize)]
#[sea_orm(table_name = "user")]
#[sea_orm::model]
pub struct Model {
#[sea_orm(primary_key, auto_increment = false)]
pub id: Uuid,
#[sea_orm(unique)]
pub username: String,
#[serde(skip_serializing)]
pub password_hash: String,
pub avatar: Option<String>,
pub timezone: String,
pub bark_url: Option<String>,
pub inapp_enabled: bool,
pub bark_enabled: bool,
pub created_at: DateTimeWithTimeZone,
pub updated_at: DateTimeWithTimeZone,
pub invite_id: Option<Uuid>,
}
#[derive(Copy, Clone, Debug, EnumIter, DeriveRelation)]
pub enum Relation {
#[sea_orm(
belongs_to = "super::invite::Entity",
from = "Column::InviteId",
to = "super::invite::Column::Id",
on_update = "Cascade",
on_delete = "SetNull"
)]
Invite,
#[sea_orm(has_many = "super::notification::Entity")]
Notification,
#[sea_orm(has_many = "super::reminder_task::Entity")]
ReminderTask,
#[sea_orm(has_many = "super::reminder_task_recipient::Entity")]
ReminderTaskRecipient,
#[sea_orm(has_many = "super::todo::Entity")]
Todo,
}
impl Related<super::invite::Entity> for Entity {
fn to() -> RelationDef {
Relation::Invite.def()
}
}
impl Related<super::notification::Entity> for Entity {
fn to() -> RelationDef {
Relation::Notification.def()
}
}
impl Related<super::reminder_task_recipient::Entity> for Entity {
fn to() -> RelationDef {
Relation::ReminderTaskRecipient.def()
}
}
impl Related<super::todo::Entity> for Entity {
fn to() -> RelationDef {
Relation::Todo.def()
}
}
impl Related<super::reminder_task::Entity> for Entity {
fn to() -> RelationDef {
super::reminder_task_recipient::Relation::ReminderTask.def()
}
fn via() -> Option<RelationDef> {
Some(super::reminder_task_recipient::Relation::User.def().rev())
}
}
impl ActiveModelBehavior for ActiveModel {}

69
backend_rust/src/error.rs Normal file
View File

@@ -0,0 +1,69 @@
use actix_web::{HttpResponse, ResponseError, http::StatusCode};
use serde::Serialize;
use std::fmt;
#[derive(Debug)]
pub enum ApiError {
BadRequest(String),
Unauthorized(String),
Forbidden(String),
NotFound(String),
Conflict(String),
Internal(String),
}
#[derive(Serialize)]
struct ErrorResponse {
error: String,
}
impl fmt::Display for ApiError {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
match self {
ApiError::BadRequest(msg) => write!(f, "{}", msg),
ApiError::Unauthorized(msg) => write!(f, "{}", msg),
ApiError::Forbidden(msg) => write!(f, "{}", msg),
ApiError::NotFound(msg) => write!(f, "{}", msg),
ApiError::Conflict(msg) => write!(f, "{}", msg),
ApiError::Internal(msg) => write!(f, "{}", msg),
}
}
}
impl ResponseError for ApiError {
fn error_response(&self) -> HttpResponse {
let error_response = ErrorResponse {
error: self.to_string(),
};
HttpResponse::build(self.status_code()).json(error_response)
}
fn status_code(&self) -> StatusCode {
match self {
ApiError::BadRequest(_) => StatusCode::BAD_REQUEST,
ApiError::Unauthorized(_) => StatusCode::UNAUTHORIZED,
ApiError::Forbidden(_) => StatusCode::FORBIDDEN,
ApiError::NotFound(_) => StatusCode::NOT_FOUND,
ApiError::Conflict(_) => StatusCode::CONFLICT,
ApiError::Internal(_) => StatusCode::INTERNAL_SERVER_ERROR,
}
}
}
impl From<sea_orm::DbErr> for ApiError {
fn from(err: sea_orm::DbErr) -> Self {
ApiError::Internal(err.to_string())
}
}
impl From<bcrypt::BcryptError> for ApiError {
fn from(err: bcrypt::BcryptError) -> Self {
ApiError::Internal(err.to_string())
}
}
impl From<jsonwebtoken::errors::Error> for ApiError {
fn from(_: jsonwebtoken::errors::Error) -> Self {
ApiError::Unauthorized("Invalid token".to_string())
}
}

6
backend_rust/src/lib.rs Normal file
View File

@@ -0,0 +1,6 @@
pub mod api;
pub mod app_data;
pub mod entity;
pub mod error;
pub mod middleware;
pub mod timer;

64
backend_rust/src/main.rs Normal file
View File

@@ -0,0 +1,64 @@
use actix_cors::Cors;
use actix_files::Files;
use actix_web::{App, HttpServer, web};
use backend_rust::api::{auth, health, invites, me, notifications, reminder_tasks, todos, users};
use backend_rust::app_data::AppData;
use std::env;
use std::path::PathBuf;
use tracing::{error, info};
#[actix_web::main]
async fn main() -> std::io::Result<()> {
let format = tracing_subscriber::fmt::format().pretty();
tracing_subscriber::fmt().event_format(format).init();
// Read configuration from environment variables, falling back to defaults
let host = env::var("HOST").unwrap_or_else(|_| "0.0.0.0".to_string());
let port: u16 = env::var("PORT")
.unwrap_or_else(|_| "4000".to_string())
.parse()
.expect("PORT must be a valid number");
// Upload directory configuration
let upload_dir = env::var("UPLOAD_DIR")
.map(PathBuf::from)
.unwrap_or_else(|_| PathBuf::from("./uploads"));
let app_data = match AppData::new().await {
Ok(app_data) => web::Data::new(app_data),
Err(e) => {
error!("Failed to connect to database: {}", e);
std::process::exit(1);
}
};
info!("Starting server on {}:{}", host, port);
info!("Upload directory: {:?}", upload_dir);
HttpServer::new(move || {
let cors = Cors::default()
.allow_any_header()
.allow_any_method()
.allow_any_origin();
App::new()
.wrap(cors)
.app_data(app_data.clone())
.configure(configure_routes)
// Serve uploaded files (e.g. avatars) as static assets
.service(Files::new("/uploads", upload_dir.clone()).show_files_listing())
})
.bind((host, port))?
.run()
.await
}
fn configure_routes(m: &mut web::ServiceConfig) {
m.service(health::routes())
.service(auth::routes())
.service(invites::routes())
.service(me::routes())
.service(notifications::routes())
.service(todos::routes())
.service(reminder_tasks::routes())
.service(users::routes());
}

View File

@@ -0,0 +1,76 @@
use actix_web::{FromRequest, HttpRequest, dev::Payload, web};
use jsonwebtoken::{DecodingKey, Validation, decode};
use serde::{Deserialize, Serialize};
use std::future::{Ready, ready};
use uuid::Uuid;
use crate::app_data::AppData;
use crate::error::ApiError;
#[derive(Debug, Serialize, Deserialize)]
pub struct Claims {
pub user_id: Uuid,
pub exp: usize,
}
#[derive(Debug, Clone)]
pub struct AuthUser {
pub user_id: Uuid,
}
impl FromRequest for AuthUser {
type Error = ApiError;
type Future = Ready<Result<Self, Self::Error>>;
fn from_request(req: &HttpRequest, _payload: &mut Payload) -> Self::Future {
let result = extract_auth_user(req);
ready(result)
}
}
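// Expects an `Authorization: Bearer <JWT>` header; the token is decoded with
// `Validation::default()` against `AppData::jwt_secret`.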
fn extract_auth_user(req: &HttpRequest) -> Result<AuthUser, ApiError> {
let app_data = req
.app_data::<web::Data<AppData>>()
.ok_or_else(|| ApiError::Internal("AppData not found".to_string()))?;
let auth_header = req
.headers()
.get("Authorization")
.and_then(|h| h.to_str().ok())
.ok_or_else(|| ApiError::Unauthorized("Missing authorization header".to_string()))?;
let token = auth_header
.strip_prefix("Bearer ")
.ok_or_else(|| ApiError::Unauthorized("Invalid authorization header".to_string()))?;
let token_data = decode::<Claims>(
token,
&DecodingKey::from_secret(app_data.jwt_secret.as_bytes()),
&Validation::default(),
)
.map_err(|_| ApiError::Unauthorized("Invalid token".to_string()))?;
Ok(AuthUser {
user_id: token_data.claims.user_id,
})
}
pub fn create_token(user_id: Uuid, secret: &str) -> Result<String, jsonwebtoken::errors::Error> {
use jsonwebtoken::{EncodingKey, Header, encode};
let expiration = chrono::Utc::now()
.checked_add_signed(chrono::Duration::days(7))
.expect("valid timestamp")
.timestamp() as usize;
let claims = Claims {
user_id,
exp: expiration,
};
encode(
&Header::default(),
&claims,
&EncodingKey::from_secret(secret.as_bytes()),
)
}
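// Illustrative usage (hypothetical call site): issue a token at login with
// `create_token(user.id, &app_data.jwt_secret)?`; tokens expire after 7 days.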

View File

@@ -0,0 +1 @@
pub mod auth;

View File

@@ -0,0 +1,7 @@
pub mod recurrence;
pub mod time_wheel;
pub mod worker;
pub use recurrence::calculate_next_due;
pub use time_wheel::{SharedTimeWheel, TimerTask, TimeWheel};
pub use worker::{NotificationWorker, WorkerCommand};

View File

@@ -0,0 +1,207 @@
use chrono::{DateTime, Datelike, Duration, FixedOffset, NaiveDateTime, TimeZone, Weekday};
use crate::entity::recurrence_rule;
use crate::entity::sea_orm_active_enums::RecurrenceType;
/// Compute the next trigger time from a recurrence rule
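/// For example (mirroring the unit tests below): a daily rule with interval 3 moves
/// a due time of 2024-01-15 10:30 to 2024-01-18 10:30, and a monthly rule pinned to
/// day 31 clamps January 31 to February 29 in a leap year.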
pub fn calculate_next_due(
rule: &recurrence_rule::Model,
current_due: DateTime<FixedOffset>,
) -> Option<DateTime<FixedOffset>> {
let interval = rule.interval.max(1) as i64;
let offset = current_due.offset().clone();
match rule.r#type {
RecurrenceType::Hourly => {
// Every N hours
Some(current_due + Duration::hours(interval))
}
RecurrenceType::Daily => {
// Every N days
Some(current_due + Duration::days(interval))
}
RecurrenceType::Weekly => {
// Every N weeks, optionally on a specific weekday
if let Some(weekday) = rule.by_weekday {
// Find the next occurrence of the specified weekday
let target_weekday = num_to_weekday(weekday);
let next = current_due + Duration::weeks(interval);
// Adjust forward to the target weekday
let current_weekday = next.weekday();
let days_ahead = (target_weekday.num_days_from_monday() as i64
- current_weekday.num_days_from_monday() as i64
+ 7)
% 7;
if days_ahead == 0 {
// Already on the target weekday; keep the computed time
Some(next)
} else {
Some(next + Duration::days(days_ahead))
}
} else {
Some(current_due + Duration::weeks(interval))
}
}
RecurrenceType::Monthly => {
// Every N months, optionally on a specific day of the month
let target_day = rule.by_monthday.unwrap_or(current_due.day() as i32) as u32;
let mut year = current_due.year();
let mut month = current_due.month() as i32 + interval as i32;
// Carry overflow months into the following year(s)
while month > 12 {
month -= 12;
year += 1;
}
// Clamp the day when the target month is shorter (e.g. February has no 31st)
let day = target_day.min(days_in_month(year, month as u32));
let naive = NaiveDateTime::new(
chrono::NaiveDate::from_ymd_opt(year, month as u32, day)?,
current_due.time(),
);
Some(offset.from_local_datetime(&naive).single()?)
}
RecurrenceType::Yearly => {
// Every N years
let year = current_due.year() + interval as i32;
let month = current_due.month();
let day = current_due.day().min(days_in_month(year, month));
let naive = NaiveDateTime::new(
chrono::NaiveDate::from_ymd_opt(year, month, day)?,
current_due.time(),
);
Some(offset.from_local_datetime(&naive).single()?)
}
}
}
fn num_to_weekday(num: i32) -> Weekday {
match num % 7 {
0 => Weekday::Sun,
1 => Weekday::Mon,
2 => Weekday::Tue,
3 => Weekday::Wed,
4 => Weekday::Thu,
5 => Weekday::Fri,
6 => Weekday::Sat,
_ => Weekday::Mon,
}
}
fn days_in_month(year: i32, month: u32) -> u32 {
match month {
1 | 3 | 5 | 7 | 8 | 10 | 12 => 31,
4 | 6 | 9 | 11 => 30,
2 => {
if is_leap_year(year) {
29
} else {
28
}
}
_ => 30,
}
}
fn is_leap_year(year: i32) -> bool {
(year % 4 == 0 && year % 100 != 0) || (year % 400 == 0)
}
#[cfg(test)]
mod tests {
use super::*;
use chrono::{NaiveDate, Timelike};
use uuid::Uuid;
fn make_rule(
rule_type: RecurrenceType,
interval: i32,
by_weekday: Option<i32>,
by_monthday: Option<i32>,
) -> recurrence_rule::Model {
recurrence_rule::Model {
id: Uuid::new_v4(),
r#type: rule_type,
interval,
by_weekday,
by_monthday,
timezone: "Asia/Shanghai".to_string(),
created_at: chrono::Utc::now().fixed_offset(),
updated_at: chrono::Utc::now().fixed_offset(),
}
}
fn make_datetime(year: i32, month: u32, day: u32, hour: u32, min: u32) -> DateTime<FixedOffset> {
let naive = NaiveDate::from_ymd_opt(year, month, day)
.unwrap()
.and_hms_opt(hour, min, 0)
.unwrap();
FixedOffset::east_opt(0).unwrap().from_local_datetime(&naive).unwrap()
}
#[test]
fn test_hourly() {
let rule = make_rule(RecurrenceType::Hourly, 2, None, None);
let current = make_datetime(2024, 1, 15, 10, 30);
let next = calculate_next_due(&rule, current).unwrap();
assert_eq!(next.hour(), 12);
assert_eq!(next.minute(), 30);
}
#[test]
fn test_daily() {
let rule = make_rule(RecurrenceType::Daily, 3, None, None);
let current = make_datetime(2024, 1, 15, 10, 30);
let next = calculate_next_due(&rule, current).unwrap();
assert_eq!(next.day(), 18);
}
#[test]
fn test_weekly() {
let rule = make_rule(RecurrenceType::Weekly, 1, None, None);
let current = make_datetime(2024, 1, 15, 10, 30);
let next = calculate_next_due(&rule, current).unwrap();
assert_eq!(next.day(), 22);
}
#[test]
fn test_monthly() {
let rule = make_rule(RecurrenceType::Monthly, 1, None, Some(15));
let current = make_datetime(2024, 1, 15, 10, 30);
let next = calculate_next_due(&rule, current).unwrap();
assert_eq!(next.month(), 2);
assert_eq!(next.day(), 15);
}
#[test]
fn test_monthly_overflow() {
// Jan 31 -> February has no 31st, so the result should be the 28th or 29th
let rule = make_rule(RecurrenceType::Monthly, 1, None, Some(31));
let current = make_datetime(2024, 1, 31, 10, 30);
let next = calculate_next_due(&rule, current).unwrap();
assert_eq!(next.month(), 2);
assert_eq!(next.day(), 29); // 2024 is a leap year
}
#[test]
fn test_yearly() {
let rule = make_rule(RecurrenceType::Yearly, 1, None, None);
let current = make_datetime(2024, 6, 15, 10, 30);
let next = calculate_next_due(&rule, current).unwrap();
assert_eq!(next.year(), 2025);
assert_eq!(next.month(), 6);
assert_eq!(next.day(), 15);
}
}

View File

@@ -0,0 +1,395 @@
use std::collections::HashMap;
use std::sync::Arc;
use tokio::sync::RwLock;
use uuid::Uuid;
/// A task scheduled on the time wheel
#[derive(Clone, Debug)]
pub struct TimerTask {
pub id: Uuid,
pub notification_id: Uuid,
pub trigger_at: i64, // Unix timestamp in seconds
}
/// A single level of the time wheel
struct WheelLevel {
slots: Vec<Vec<TimerTask>>,
current: usize,
slot_count: usize,
interval_secs: i64, // seconds covered by each slot
}
impl WheelLevel {
fn new(slot_count: usize, interval_secs: i64) -> Self {
let slots = (0..slot_count).map(|_| Vec::new()).collect();
Self {
slots,
current: 0,
slot_count,
interval_secs,
}
}
/// Add a task to the given slot
fn add_to_slot(&mut self, slot: usize, task: TimerTask) {
self.slots[slot].push(task);
}
/// Take all tasks out of the current slot, leaving it empty
fn take_current(&mut self) -> Vec<TimerTask> {
std::mem::take(&mut self.slots[self.current])
}
/// Advance to the next slot; returns true when a full revolution completes
fn advance(&mut self) -> bool {
self.current = (self.current + 1) % self.slot_count;
self.current == 0
}
}
/// Multi-level time wheel
///
/// Structure:
/// - Level 0: second wheel (60 slots, 1 second per slot)
/// - Level 1: minute wheel (60 slots, 60 seconds per slot)
/// - Level 2: hour wheel (24 slots, 3600 seconds per slot)
/// - Level 3: day wheel (30 slots, 86400 seconds per slot)
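///
/// Nominal coverage is therefore roughly 30 days from the current tick; tasks
/// scheduled further out are parked in the last slot of the day wheel and
/// re-evaluated when the wheel comes around to that slot.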
pub struct TimeWheel {
levels: Vec<WheelLevel>,
current_time: i64,               // current wheel time (Unix seconds)
task_index: HashMap<Uuid, Uuid>, // task_id -> notification_id, used for cancellation
}
impl TimeWheel {
pub fn new() -> Self {
let now = chrono::Utc::now().timestamp();
Self {
levels: vec![
WheelLevel::new(60, 1),     // second wheel: 60 slots, 1 second each
WheelLevel::new(60, 60),    // minute wheel: 60 slots, 60 seconds each
WheelLevel::new(24, 3600),  // hour wheel: 24 slots, 1 hour each
WheelLevel::new(30, 86400), // day wheel: 30 slots, 1 day each
],
current_time: now,
task_index: HashMap::new(),
}
}
/// Determine which level and slot a task should be placed in.
///
/// Wheel layout:
/// - Level 0 (second wheel): 60 slots, interval=1s, covers delays of 1s-60s
/// - Level 1 (minute wheel): 60 slots, interval=60s, covers delays of 60s-1h
/// - Level 2 (hour wheel): 24 slots, interval=3600s, covers delays of 1h-24h
/// - Level 3 (day wheel): 30 slots, interval=86400s, covers delays of 1d-30d
///
/// Cascading: when level N completes a full revolution, the tasks in level N+1's
/// current slot are taken out and demoted.
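///
/// Worked example: a delay of 65s exceeds the second wheel's 60s range, so the
/// task goes to the minute wheel at slots_away = (65 - 60 - 1) / 60 = 0, i.e. the
/// current minute slot; when the second wheel next completes a revolution the task
/// cascades down and the remaining ~5s are handled by the second wheel.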
fn calculate_slot(&self, trigger_at: i64) -> Option<(usize, usize)> {
let delay = trigger_at - self.current_time;
if delay <= 0 {
// Already overdue: place it in the slot the second wheel will process on its
// next tick, so it fires almost immediately (tick() advances before draining).
return Some((0, (self.levels[0].current + 1) % self.levels[0].slot_count));
}
// Compute the cumulative range covered by each level
let mut level_ranges: Vec<i64> = Vec::new();
let mut cumulative = 0i64;
for level in &self.levels {
cumulative += level.slot_count as i64 * level.interval_secs;
level_ranges.push(cumulative);
}
// Find the appropriate level
let mut prev_range = 0i64;
for (level_idx, &range) in level_ranges.iter().enumerate() {
if delay <= range {
let level = &self.levels[level_idx];
// Compute the slot offset.
// For the second wheel: delay=5 -> 5 slots ahead of the current position.
// For higher-level wheels we must account for when the task will cascade.
let slots_away = if level_idx == 0 {
delay as usize
} else {
// In a higher-level wheel, a task cascades down after
// (slots_away * total ticks of the lower levels).
// We need the smallest slots_away such that the cascade happens no earlier
// than needed, but it must not be too large or the task is delayed too long.
//
// Example: delay=65, lower_level_range=60
// We want to cascade at tick 60 (slots_away=0), leaving the remaining
// 5 seconds to be handled by Level 0.
//
// Formula: slots_away = (delay - prev_range - 1) / lower_level_range
// delay=65,  prev_range=60: (65-60-1)/60  = 0 -> cascade at the current slot
// delay=120, prev_range=60: (120-60-1)/60 = 0 -> cascade at the current slot
// delay=121, prev_range=60: (121-60-1)/60 = 1 -> cascade at the next slot
// Put more simply: count how many slots ahead of the current slot the task belongs.
// delay falls in the interval (prev_range, range], and each slot covers
// level.interval_secs seconds. Counting from the current slot: slot[0] is
// processed at the next cascade and covers (prev_range, prev_range + interval];
// slot[1] covers (prev_range + interval, prev_range + 2*interval].
//
// slots_away = (delay - prev_range - 1) / interval
((delay - prev_range - 1) / level.interval_secs) as usize
};
let target_slot = (level.current + slots_away) % level.slot_count;
return Some((level_idx, target_slot));
}
prev_range = range;
}
// Beyond the maximum range: park the task in the last slot of the highest level
let last_level = self.levels.len() - 1;
Some((
last_level,
(self.levels[last_level].current + self.levels[last_level].slot_count - 1)
% self.levels[last_level].slot_count,
))
}
/// Add a timer task
pub fn add_task(&mut self, task: TimerTask) -> bool {
if let Some((level, slot)) = self.calculate_slot(task.trigger_at) {
self.task_index.insert(task.id, task.notification_id);
self.levels[level].add_to_slot(slot, task);
true
} else {
false
}
}
/// Cancel a task (lazy removal; it is filtered out during tick)
pub fn cancel_task(&mut self, task_id: Uuid) -> bool {
self.task_index.remove(&task_id).is_some()
}
/// Check whether a task is still valid (i.e. has not been cancelled)
fn is_task_valid(&self, task: &TimerTask) -> bool {
self.task_index.contains_key(&task.id)
}
/// Advance the time wheel by one second.
/// Returns the tasks that are now due for execution.
pub fn tick(&mut self) -> Vec<TimerTask> {
// Move to the next slot first
let cascade = self.levels[0].advance();
self.current_time += 1;
let mut ready_tasks = Vec::new();
// Cascade from the higher-level wheels (before taking the current slot's tasks)
if cascade {
self.cascade_from_level(1);
}
// Take the tasks in the second wheel's current slot
let tasks = self.levels[0].take_current();
for task in tasks {
if self.is_task_valid(&task) {
if task.trigger_at <= self.current_time {
// The task is due; queue it for execution
self.task_index.remove(&task.id);
ready_tasks.push(task);
} else {
// Recompute the slot (should not happen in practice)
if let Some((level, slot)) = self.calculate_slot(task.trigger_at) {
self.levels[level].add_to_slot(slot, task);
}
}
}
}
ready_tasks
}
/// Cascade processing starting from the given level
fn cascade_from_level(&mut self, start_level: usize) {
if start_level >= self.levels.len() {
return;
}
// Take the tasks from the higher-level wheel's current slot and demote them
let tasks = self.levels[start_level].take_current();
for task in tasks {
if self.is_task_valid(&task) {
// Recompute the slot so the task drops to a lower level
if let Some((new_level, slot)) = self.calculate_slot(task.trigger_at) {
self.levels[new_level].add_to_slot(slot, task);
}
}
}
// Advance the higher-level wheel
let cascade = self.levels[start_level].advance();
if cascade {
self.cascade_from_level(start_level + 1);
}
}
/// Snapshot of time-wheel statistics
pub fn stats(&self) -> TimeWheelStats {
let mut total_tasks = 0;
let mut level_counts = Vec::new();
for level in &self.levels {
let count: usize = level.slots.iter().map(|s| s.len()).sum();
level_counts.push(count);
total_tasks += count;
}
TimeWheelStats {
total_tasks,
level_counts,
current_time: self.current_time,
}
}
}
impl Default for TimeWheel {
fn default() -> Self {
Self::new()
}
}
#[derive(Debug, Clone)]
pub struct TimeWheelStats {
pub total_tasks: usize,
pub level_counts: Vec<usize>,
pub current_time: i64,
}
/// Thread-safe wrapper around the time wheel
pub struct SharedTimeWheel {
inner: Arc<RwLock<TimeWheel>>,
}
impl SharedTimeWheel {
pub fn new() -> Self {
Self {
inner: Arc::new(RwLock::new(TimeWheel::new())),
}
}
pub async fn add_task(&self, task: TimerTask) -> bool {
let mut wheel = self.inner.write().await;
wheel.add_task(task)
}
pub async fn cancel_task(&self, task_id: Uuid) -> bool {
let mut wheel = self.inner.write().await;
wheel.cancel_task(task_id)
}
pub async fn tick(&self) -> Vec<TimerTask> {
let mut wheel = self.inner.write().await;
wheel.tick()
}
pub async fn stats(&self) -> TimeWheelStats {
let wheel = self.inner.read().await;
wheel.stats()
}
}
impl Default for SharedTimeWheel {
fn default() -> Self {
Self::new()
}
}
impl Clone for SharedTimeWheel {
fn clone(&self) -> Self {
Self {
inner: Arc::clone(&self.inner),
}
}
}
#[cfg(test)]
mod tests {
use super::*;
#[test]
fn test_add_task_immediate() {
let mut wheel = TimeWheel::new();
let now = wheel.current_time;
let task = TimerTask {
id: Uuid::new_v4(),
notification_id: Uuid::new_v4(),
trigger_at: now + 5, // 5 seconds from now
};
assert!(wheel.add_task(task));
assert_eq!(wheel.stats().total_tasks, 1);
}
#[test]
fn test_tick_executes_task() {
let mut wheel = TimeWheel::new();
let now = wheel.current_time;
let task = TimerTask {
id: Uuid::new_v4(),
notification_id: Uuid::new_v4(),
trigger_at: now + 1, // 1 second from now
};
wheel.add_task(task.clone());
// Advance by one second
let tasks = wheel.tick();
assert_eq!(tasks.len(), 1);
assert_eq!(tasks[0].notification_id, task.notification_id);
}
#[test]
fn test_cascade_from_minute_level() {
let mut wheel = TimeWheel::new();
let now = wheel.current_time;
let task = TimerTask {
id: Uuid::new_v4(),
notification_id: Uuid::new_v4(),
trigger_at: now + 65, // 65 seconds from now; should go into the minute wheel
};
wheel.add_task(task.clone());
// The minute wheel (or, at the boundary, the second wheel) should hold the task
assert!(wheel.stats().level_counts[1] > 0 || wheel.stats().level_counts[0] > 0);
// Advance 66 ticks to get past the trigger time
let mut executed = Vec::new();
for _ in 0..66 {
executed.extend(wheel.tick());
}
assert_eq!(executed.len(), 1);
assert_eq!(executed[0].notification_id, task.notification_id);
}
#[test]
fn test_cancel_task() {
let mut wheel = TimeWheel::new();
let now = wheel.current_time;
let task = TimerTask {
id: Uuid::new_v4(),
notification_id: Uuid::new_v4(),
trigger_at: now + 5,
};
wheel.add_task(task.clone());
assert!(wheel.cancel_task(task.id));
// Advance past the trigger time; the cancelled task must not fire
let mut executed = Vec::new();
for _ in 0..6 {
executed.extend(wheel.tick());
}
assert!(executed.is_empty());
}
}

View File

@@ -0,0 +1,842 @@
use sea_orm::{
ActiveModelTrait, ColumnTrait, DatabaseConnection, EntityTrait, PaginatorTrait, QueryFilter,
QueryOrder, Set,
};
use serde_json::json;
use std::sync::Arc;
use tokio::sync::mpsc;
use tokio::time::{Duration, interval};
use tracing::{error, info, warn};
use uuid::Uuid;
use crate::entity::sea_orm_active_enums::{ChannelType, NotificationStatus, TargetType};
use crate::entity::{
delivery_log, notification, recurrence_rule, reminder_offset, reminder_task, todo, user,
};
use crate::timer::recurrence::calculate_next_due;
use crate::timer::{SharedTimeWheel, TimerTask};
/// Retry backoff steps, in milliseconds
const BACKOFF_STEPS_MS: [i64; 5] = [
60_000,          // 1 minute
5 * 60_000,      // 5 minutes
15 * 60_000,     // 15 minutes
60 * 60_000,     // 1 hour
4 * 60 * 60_000, // 4 hours
];
/// Maximum number of retry attempts
const MAX_RETRY_ATTEMPTS: i32 = 5;
/// Lock timeout (5 minutes)
const LOCK_TIMEOUT_MS: i64 = 5 * 60 * 1000;
/// Compute the backoff delay for the given attempt number
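/// For example: attempt 1 -> 1 minute, attempt 3 -> 15 minutes, and any attempt
/// from 5 onwards stays at the 4 hour cap.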
fn calc_backoff_ms(attempt_no: i32) -> i64 {
let index = (attempt_no - 1)
.min(BACKOFF_STEPS_MS.len() as i32 - 1)
.max(0) as usize;
BACKOFF_STEPS_MS[index]
}
/// Default reminder offset configuration
struct DefaultOffset {
offset_minutes: i32,
channel_inapp: bool,
channel_bark: bool,
}
impl Default for DefaultOffset {
fn default() -> Self {
Self {
offset_minutes: 0,
channel_inapp: true,
channel_bark: false,
}
}
}
/// Notification delivery worker
pub struct NotificationWorker {
db: DatabaseConnection,
time_wheel: SharedTimeWheel,
bark_client: reqwest::Client,
}
impl NotificationWorker {
pub fn new(db: DatabaseConnection, time_wheel: SharedTimeWheel) -> Self {
Self {
db,
time_wheel,
bark_client: reqwest::Client::new(),
}
}
/// Start the worker.
/// Returns a channel sender used to issue commands to it.
pub async fn start(self) -> mpsc::Sender<WorkerCommand> {
let (tx, mut rx) = mpsc::channel::<WorkerCommand>(1000);
let worker = Arc::new(self);
// Time-wheel tick loop
let tick_worker = Arc::clone(&worker);
tokio::spawn(async move {
let mut ticker = interval(Duration::from_secs(1));
loop {
ticker.tick().await;
let tasks = tick_worker.time_wheel.tick().await;
for task in tasks {
if let Err(e) = tick_worker.execute_notification(task.notification_id).await {
error!(
"Failed to execute notification {}: {}",
task.notification_id, e
);
}
}
}
});
// Periodically load pending notifications from the database
let load_worker = Arc::clone(&worker);
tokio::spawn(async move {
let mut ticker = interval(Duration::from_secs(60)); // check once per minute
loop {
ticker.tick().await;
if let Err(e) = load_worker.load_pending_notifications().await {
error!("Failed to load pending notifications: {}", e);
}
}
});
// Command handling
let cmd_worker = Arc::clone(&worker);
tokio::spawn(async move {
while let Some(cmd) = rx.recv().await {
match cmd {
WorkerCommand::ScheduleNotification {
notification_id,
trigger_at,
} => {
let task = TimerTask {
id: Uuid::new_v4(),
notification_id,
trigger_at,
};
cmd_worker.time_wheel.add_task(task).await;
}
WorkerCommand::CancelNotification { notification_id } => {
// Mark the notification as cancelled in the database
if let Err(e) = cmd_worker.cancel_notification(notification_id).await {
error!("Failed to cancel notification {}: {}", notification_id, e);
}
}
WorkerCommand::GenerateNotifications {
target_type,
target_id,
} => {
if let Err(e) = cmd_worker
.generate_notifications(target_type.clone(), target_id)
.await
{
error!(
"Failed to generate notifications for {:?}/{}: {}",
target_type, target_id, e
);
}
}
}
}
});
tx
}
/// Load pending notifications from the database into the time wheel
async fn load_pending_notifications(
&self,
) -> Result<(), Box<dyn std::error::Error + Send + Sync>> {
let now = chrono::Utc::now().fixed_offset();
let future_limit = now + chrono::Duration::hours(25); // load notifications due within the next 25 hours
let expired_lock = now - chrono::Duration::milliseconds(LOCK_TIMEOUT_MS);
// Find pending notifications (including ones whose lock has expired)
let notifications = notification::Entity::find()
.filter(notification::Column::Status.eq(NotificationStatus::Pending))
.filter(notification::Column::TriggerAt.lte(future_limit))
.filter(
sea_orm::Condition::any()
.add(notification::Column::LockedAt.is_null())
.add(notification::Column::LockedAt.lt(expired_lock)),
)
.order_by_asc(notification::Column::TriggerAt)
.all(&self.db)
.await?;
info!(
"Loading {} pending notifications into time wheel",
notifications.len()
);
for notif in notifications {
let trigger_at = notif.trigger_at.timestamp();
let task = TimerTask {
id: Uuid::new_v4(),
notification_id: notif.id,
trigger_at,
};
self.time_wheel.add_task(task).await;
}
Ok(())
}
/// Execute delivery of a single notification
async fn execute_notification(
&self,
notification_id: Uuid,
) -> Result<(), Box<dyn std::error::Error + Send + Sync>> {
// Fetch the notification
let notif = notification::Entity::find_by_id(notification_id)
.one(&self.db)
.await?
.ok_or("Notification not found")?;
// Check its status
if notif.status != NotificationStatus::Pending {
warn!("Notification {} is not pending, skipping", notification_id);
return Ok(());
}
let now = chrono::Utc::now().fixed_offset();
// Lock the notification (to guard against duplicate processing)
let mut active: notification::ActiveModel = notif.clone().into();
active.status = Set(NotificationStatus::Queued);
active.locked_at = Set(Some(now));
active.updated_at = Set(now);
active.update(&self.db).await?;
// Determine the current attempt number
let last_log = delivery_log::Entity::find()
.filter(delivery_log::Column::NotificationId.eq(notification_id))
.order_by_desc(delivery_log::Column::AttemptNo)
.one(&self.db)
.await?;
let attempt_no = last_log.map(|l| l.attempt_no + 1).unwrap_or(1);
// Fetch the recipient
let recipient = user::Entity::find_by_id(notif.recipient_id)
.one(&self.db)
.await?
.ok_or("Recipient not found")?;
// Send the notification according to its channel
let result: Result<(), String> = match notif.channel {
ChannelType::Inapp => {
// In-app notification: only the status update below is needed
Ok(())
}
ChannelType::Bark => {
if !recipient.bark_enabled {
Err("bark_disabled".to_string())
} else if let Some(bark_url) = &recipient.bark_url {
// Fetch the offset configuration, if any
let offset = if let Some(offset_id) = notif.offset_id {
reminder_offset::Entity::find_by_id(offset_id)
.one(&self.db)
.await
.ok()
.flatten()
} else {
None
};
// Fetch the target details
match self.get_notification_content(&notif).await {
Ok((default_title, default_body)) => {
// Use the custom parameters from the offset, falling back to the defaults
let title = offset
.as_ref()
.and_then(|o| o.bark_title.clone())
.unwrap_or(default_title);
let subtitle = offset.as_ref().and_then(|o| o.bark_subtitle.clone());
let body_or_markdown = offset
.as_ref()
.and_then(|o| o.bark_body_markdown.clone())
.or(Some(default_body));
let level = offset.as_ref().and_then(|o| o.bark_level.clone());
let icon = offset
.as_ref()
.and_then(|o| o.bark_icon.clone())
.or_else(|| recipient.avatar.clone());
tracing::info!("Sending Bark notification with icon: {:?}", icon);
self.send_bark_notification(
bark_url,
&title,
subtitle.as_deref(),
body_or_markdown.as_deref(),
level.as_deref(),
icon.as_deref(),
offset
.as_ref()
.and_then(|o| o.bark_body_markdown.as_ref())
.is_some(),
)
.await
.map_err(|e| e.to_string())
}
Err(e) => Err(e.to_string()),
}
} else {
Err("no_bark_url".to_string())
}
}
};
// Update the status according to the result
let target_type = notif.target_type.clone();
let target_id = notif.target_id;
let channel = notif.channel.clone();
let original_trigger_at = notif.trigger_at;
match &result {
Ok(_) => {
// Delivery succeeded
let mut active: notification::ActiveModel = notif.into();
active.status = Set(NotificationStatus::Sent);
active.sent_at = Set(Some(now));
active.locked_at = Set(None);
active.updated_at = Set(now);
active.update(&self.db).await?;
// Record a success log entry
self.create_delivery_log(
notification_id,
attempt_no,
channel,
NotificationStatus::Sent,
None,
)
.await?;
// Check whether the recurrence cycle should be advanced
self.check_and_advance_recurrence(target_type, target_id)
.await?;
}
Err(error_msg) => {
// Delivery failed; check whether a retry is needed
let should_retry = attempt_no < MAX_RETRY_ATTEMPTS;
if should_retry {
// Compute the retry time
let backoff_ms = calc_backoff_ms(attempt_no);
let retry_at = now + chrono::Duration::milliseconds(backoff_ms);
let mut active: notification::ActiveModel = notif.into();
active.status = Set(NotificationStatus::Pending);
active.trigger_at = Set(retry_at);
active.locked_at = Set(None);
active.updated_at = Set(now);
active.update(&self.db).await?;
// Re-add the task to the time wheel
let task = TimerTask {
id: Uuid::new_v4(),
notification_id,
trigger_at: retry_at.timestamp(),
};
self.time_wheel.add_task(task).await;
info!(
"Notification {} scheduled for retry at {} (attempt {})",
notification_id, retry_at, attempt_no
);
// Record a retry log entry
self.create_delivery_log(
notification_id,
attempt_no,
channel,
NotificationStatus::Pending,
Some(json!({ "message": error_msg })),
)
.await?;
} else {
// Maximum retry count exceeded; mark as failed
let mut active: notification::ActiveModel = notif.into();
active.status = Set(NotificationStatus::Failed);
active.trigger_at = Set(original_trigger_at); // restore the original trigger_at
active.locked_at = Set(None);
active.updated_at = Set(now);
active.update(&self.db).await?;
error!(
"Notification {} failed after {} attempts: {}",
notification_id, attempt_no, error_msg
);
// Record a failure log entry
self.create_delivery_log(
notification_id,
attempt_no,
channel,
NotificationStatus::Failed,
Some(json!({ "message": error_msg })),
)
.await?;
}
}
}
Ok(())
}
/// Create a delivery log entry
async fn create_delivery_log(
&self,
notification_id: Uuid,
attempt_no: i32,
channel: ChannelType,
status: NotificationStatus,
response_meta: Option<serde_json::Value>,
) -> Result<(), Box<dyn std::error::Error + Send + Sync>> {
let log = delivery_log::ActiveModel {
id: Set(Uuid::new_v4()),
notification_id: Set(notification_id),
attempt_no: Set(attempt_no),
channel: Set(channel),
status: Set(status),
response_meta: Set(response_meta),
created_at: Set(chrono::Utc::now().fixed_offset()),
};
log.insert(&self.db).await?;
Ok(())
}
/// Check whether the target has a recurrence rule; if every notification for the current cycle has been handled, advance to the next cycle
async fn check_and_advance_recurrence(
&self,
target_type: TargetType,
target_id: Uuid,
) -> Result<(), Box<dyn std::error::Error + Send + Sync>> {
// Check whether this target still has pending notifications
let pending_count = notification::Entity::find()
.filter(notification::Column::TargetType.eq(target_type.clone()))
.filter(notification::Column::TargetId.eq(target_id))
.filter(notification::Column::Status.eq(NotificationStatus::Pending))
.count(&self.db)
.await?;
if pending_count > 0 {
// Unprocessed notifications remain; do not advance the cycle
return Ok(());
}
// Fetch the target and its recurrence rule
match target_type {
TargetType::Todo => {
self.advance_todo_recurrence(target_id).await?;
}
TargetType::ReminderTask => {
self.advance_reminder_task_recurrence(target_id).await?;
}
}
Ok(())
}
/// Advance a Todo to its next cycle
async fn advance_todo_recurrence(
&self,
todo_id: Uuid,
) -> Result<(), Box<dyn std::error::Error + Send + Sync>> {
let todo_item = todo::Entity::find_by_id(todo_id).one(&self.db).await?;
let todo_item = match todo_item {
Some(t) => t,
None => return Ok(()), // the Todo has been deleted
};
// Check for a recurrence rule
let rule_id = match todo_item.recurrence_rule_id {
Some(id) => id,
None => return Ok(()), // not a recurring task
};
let rule = recurrence_rule::Entity::find_by_id(rule_id)
.one(&self.db)
.await?;
let rule = match rule {
Some(r) => r,
None => return Ok(()), // the rule has been deleted
};
// Compute the next due_at
let next_due = match calculate_next_due(&rule, todo_item.due_at) {
Some(d) => d,
None => {
warn!("Failed to calculate next due for todo {}", todo_id);
return Ok(());
}
};
info!(
"Advancing todo {} from {} to {}",
todo_id, todo_item.due_at, next_due
);
// Update the Todo's due_at and reset its check-in state
let mut active: todo::ActiveModel = todo_item.into();
active.due_at = Set(next_due);
active.is_checked_in = Set(false); // reset check-in so the next cycle can be checked in
active.updated_at = Set(chrono::Utc::now().fixed_offset());
active.update(&self.db).await?;
// Generate notifications for the next cycle
self.generate_notifications(TargetType::Todo, todo_id)
.await?;
Ok(())
}
/// Advance a ReminderTask to its next cycle
async fn advance_reminder_task_recurrence(
&self,
task_id: Uuid,
) -> Result<(), Box<dyn std::error::Error + Send + Sync>> {
let task = reminder_task::Entity::find_by_id(task_id)
.one(&self.db)
.await?;
let task = match task {
Some(t) => t,
None => return Ok(()), // the task has been deleted
};
// Check for a recurrence rule
let rule_id = match task.recurrence_rule_id {
Some(id) => id,
None => return Ok(()), // not a recurring task
};
let rule = recurrence_rule::Entity::find_by_id(rule_id)
.one(&self.db)
.await?;
let rule = match rule {
Some(r) => r,
None => return Ok(()), // the rule has been deleted
};
// Compute the next due_at
let next_due = match calculate_next_due(&rule, task.due_at) {
Some(d) => d,
None => {
warn!("Failed to calculate next due for reminder_task {}", task_id);
return Ok(());
}
};
info!(
"Advancing reminder_task {} from {} to {}",
task_id, task.due_at, next_due
);
// Update the ReminderTask's due_at
let mut active: reminder_task::ActiveModel = task.into();
active.due_at = Set(next_due);
active.updated_at = Set(chrono::Utc::now().fixed_offset());
active.update(&self.db).await?;
// Generate notifications for the next cycle
self.generate_notifications(TargetType::ReminderTask, task_id)
.await?;
Ok(())
}
/// Get the notification content (title and body) from its target
async fn get_notification_content(
&self,
notif: &notification::Model,
) -> Result<(String, String), Box<dyn std::error::Error + Send + Sync>> {
match notif.target_type {
TargetType::Todo => {
let todo = todo::Entity::find_by_id(notif.target_id)
.one(&self.db)
.await?
.ok_or("Todo not found")?;
Ok((todo.title, todo.description.unwrap_or_default()))
}
TargetType::ReminderTask => {
let task = reminder_task::Entity::find_by_id(notif.target_id)
.one(&self.db)
.await?
.ok_or("ReminderTask not found")?;
Ok((task.title, task.description.unwrap_or_default()))
}
}
}
/// Send a Bark push notification
async fn send_bark_notification(
&self,
bark_url: &str,
title: &str,
subtitle: Option<&str>,
body: Option<&str>,
level: Option<&str>,
icon: Option<&str>,
is_markdown: bool,
) -> Result<(), Box<dyn std::error::Error + Send + Sync>> {
// Build the Bark push payload
let mut payload = json!({
"title": title,
"group": "notify",
});
// Add optional parameters
if let Some(sub) = subtitle {
payload["subtitle"] = json!(sub);
}
// Use the markdown field for markdown content; otherwise use the body field
if is_markdown {
if let Some(content) = body {
payload["markdown"] = json!(content);
}
} else if let Some(content) = body {
payload["body"] = json!(content);
}
// Add the push level
if let Some(lvl) = level {
payload["level"] = json!(lvl);
}
// Add the icon
if let Some(ic) = icon {
payload["icon"] = json!(ic);
}
let response = self
.bark_client
.post(bark_url)
.header("Content-Type", "application/json")
.json(&payload)
.timeout(Duration::from_secs(10))
.send()
.await?;
if !response.status().is_success() {
let status = response.status();
let text = response.text().await.unwrap_or_default();
return Err(format!("Bark API error: {} - {}", status, text).into());
}
info!("Bark notification sent successfully");
Ok(())
}
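// Illustrative only: for a markdown push with a level and an icon, the JSON body
// POSTed to the user's Bark endpoint looks roughly like
// { "title": "...", "group": "notify", "subtitle": "...", "markdown": "...",
// "level": "...", "icon": "https://..." }
// with "body" used in place of "markdown" for plain-text content.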
/// Cancel a pending notification
async fn cancel_notification(
&self,
notification_id: Uuid,
) -> Result<(), Box<dyn std::error::Error + Send + Sync>> {
let notif = notification::Entity::find_by_id(notification_id)
.one(&self.db)
.await?;
if let Some(notif) = notif {
if notif.status == NotificationStatus::Pending {
let mut active: notification::ActiveModel = notif.into();
active.status = Set(NotificationStatus::Failed);
active.updated_at = Set(chrono::Utc::now().fixed_offset());
active.update(&self.db).await?;
}
}
Ok(())
}
/// Generate notifications for a target.
/// Called whenever a Todo/ReminderTask is created or updated.
pub async fn generate_notifications(
&self,
target_type: TargetType,
target_id: Uuid,
) -> Result<(), Box<dyn std::error::Error + Send + Sync>> {
// Delete old pending notifications
notification::Entity::delete_many()
.filter(notification::Column::TargetType.eq(target_type.clone()))
.filter(notification::Column::TargetId.eq(target_id))
.filter(notification::Column::Status.eq(NotificationStatus::Pending))
.exec(&self.db)
.await?;
// Fetch the target's due time and its recipients
let (due_at, recipient_ids) = match target_type {
TargetType::Todo => {
let todo = todo::Entity::find_by_id(target_id)
.one(&self.db)
.await?
.ok_or("Todo not found")?;
(todo.due_at, vec![todo.owner_id])
}
TargetType::ReminderTask => {
let task = reminder_task::Entity::find_by_id(target_id)
.one(&self.db)
.await?
.ok_or("ReminderTask not found")?;
// Fetch all recipients
use crate::entity::reminder_task_recipient;
let recipients = reminder_task_recipient::Entity::find()
.filter(reminder_task_recipient::Column::TaskId.eq(target_id))
.all(&self.db)
.await?;
let recipient_ids: Vec<Uuid> = recipients.into_iter().map(|r| r.user_id).collect();
(task.due_at, recipient_ids)
}
};
// Fetch the reminder offset configuration
let db_offsets = reminder_offset::Entity::find()
.filter(reminder_offset::Column::TargetType.eq(target_type.clone()))
.filter(reminder_offset::Column::TargetId.eq(target_id))
.all(&self.db)
.await?;
// If no offsets are configured, use the default offset
// (offset_minutes, channel_inapp, channel_bark, offset_id)
let offsets: Vec<(i32, bool, bool, Option<Uuid>)> = if db_offsets.is_empty() {
let default = DefaultOffset::default();
vec![(
default.offset_minutes,
default.channel_inapp,
default.channel_bark,
None, // No offset_id for default offset
)]
} else {
db_offsets
.iter()
.map(|o| {
(
o.offset_minutes,
o.channel_inapp,
o.channel_bark,
Some(o.id),
)
})
.collect()
};
let now = chrono::Utc::now().fixed_offset();
// Generate notifications for every recipient and every offset
for recipient_id in &recipient_ids {
// Fetch the recipient's notification preferences
let user = user::Entity::find_by_id(*recipient_id)
.one(&self.db)
.await?;
let (inapp_enabled, bark_enabled, has_bark_url) = match &user {
Some(u) => (u.inapp_enabled, u.bark_enabled, u.bark_url.is_some()),
None => continue, // user does not exist; skip
};
for (offset_minutes, channel_inapp, channel_bark, offset_id) in &offsets {
let trigger_at = due_at - chrono::Duration::minutes(*offset_minutes as i64);
// Skip notifications whose trigger time has already passed
if trigger_at <= now {
continue;
}
// Generate the in-app notification (only if the user has in-app enabled)
if *channel_inapp && inapp_enabled {
let notif = notification::ActiveModel {
id: Set(Uuid::new_v4()),
recipient_id: Set(*recipient_id),
target_type: Set(target_type.clone()),
target_id: Set(target_id),
trigger_at: Set(trigger_at),
channel: Set(ChannelType::Inapp),
status: Set(NotificationStatus::Pending),
locked_at: Set(None),
sent_at: Set(None),
read_at: Set(None),
created_at: Set(now),
updated_at: Set(now),
offset_id: Set(*offset_id),
};
if let Ok(created) = notif.insert(&self.db).await {
// Add to the time wheel
let task = TimerTask {
id: Uuid::new_v4(),
notification_id: created.id,
trigger_at: trigger_at.timestamp(),
};
self.time_wheel.add_task(task).await;
}
}
// Generate the Bark notification (only if the user has Bark enabled and a URL configured)
if *channel_bark && bark_enabled && has_bark_url {
let notif = notification::ActiveModel {
id: Set(Uuid::new_v4()),
recipient_id: Set(*recipient_id),
target_type: Set(target_type.clone()),
target_id: Set(target_id),
trigger_at: Set(trigger_at),
channel: Set(ChannelType::Bark),
status: Set(NotificationStatus::Pending),
locked_at: Set(None),
sent_at: Set(None),
read_at: Set(None),
created_at: Set(now),
updated_at: Set(now),
offset_id: Set(*offset_id),
};
if let Ok(created) = notif.insert(&self.db).await {
// Add to the time wheel
let task = TimerTask {
id: Uuid::new_v4(),
notification_id: created.id,
trigger_at: trigger_at.timestamp(),
};
self.time_wheel.add_task(task).await;
}
}
}
}
info!(
"Generated notifications for {:?}/{}",
target_type, target_id
);
Ok(())
}
}
/// Worker commands
#[derive(Debug)]
pub enum WorkerCommand {
/// Schedule a notification
ScheduleNotification {
notification_id: Uuid,
trigger_at: i64,
},
/// Cancel a notification
CancelNotification { notification_id: Uuid },
/// Generate notifications for a target
GenerateNotifications {
target_type: TargetType,
target_id: Uuid,
},
}
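// A minimal usage sketch (illustrative; the bindings `db`, `time_wheel`, and `todo_id`
// are assumed to exist in the caller): start the worker once at boot, keep the returned
// sender, and push commands to it from request handlers.
//
// let cmd_tx = NotificationWorker::new(db, time_wheel).start().await;
// cmd_tx
//     .send(WorkerCommand::GenerateNotifications {
//         target_type: TargetType::Todo,
//         target_id: todo_id,
//     })
//     .await
//     .expect("notification worker channel closed");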

Binary file not shown.
