feat: add basic webui

2024-12-30 06:39:09 +08:00
parent 608a7fb9c6
commit a4c549e7c3
462 changed files with 35900 additions and 2491 deletions

View File

@@ -0,0 +1,73 @@
use std::collections::HashMap;
use fancy_regex::Regex as FancyRegex;
use lazy_static::lazy_static;
use maplit::hashmap;
use regex::Regex;
const LANG_ZH_TW: &str = "zh-tw";
const LANG_ZH: &str = "zh";
const LANG_EN: &str = "en";
const LANG_JP: &str = "jp";
lazy_static! {
pub static ref SEASON_REGEX: Regex =
Regex::new(r"(S\|[Ss]eason\s+)(\d+)").expect("Invalid regex");
pub static ref TORRENT_PARSE_RULE_REGS: Vec<FancyRegex> = vec![
FancyRegex::new(
r"(.*) - (\d{1,4}(?!\d|p)|\d{1,4}\.\d{1,2}(?!\d|p))(?:v\d{1,2})?(?: )?(?:END)?(.*)"
)
.unwrap(),
FancyRegex::new(
r"(.*)[\[\ E](\d{1,4}|\d{1,4}\.\d{1,2})(?:v\d{1,2})?(?: )?(?:END)?[\]\ ](.*)"
)
.unwrap(),
FancyRegex::new(r"(.*)\[(?:第)?(\d*\.*\d*)[话集話](?:END)?\](.*)").unwrap(),
FancyRegex::new(r"(.*)第?(\d*\.*\d*)[话話集](?:END)?(.*)").unwrap(),
FancyRegex::new(r"(.*)(?:S\d{2})?EP?(\d+)(.*)").unwrap(),
];
pub static ref SUBTITLE_LANG: Vec<(&'static str, Vec<&'static str>)> = {
vec![
(LANG_ZH_TW, vec!["tc", "cht", "繁", "zh-tw"]),
(LANG_ZH, vec!["sc", "chs", "简", "zh", "zh-cn"]),
(LANG_EN, vec!["en", "eng", "英"]),
(LANG_JP, vec!["jp", "jpn", "日"]),
]
};
pub static ref BRACKETS_REG: Regex = Regex::new(r"[\[\]()【】()]").unwrap();
pub static ref DIGIT_1PLUS_REG: Regex = Regex::new(r"\d+").unwrap();
pub static ref ZH_NUM_MAP: HashMap<&'static str, i32> = {
hashmap! {
"" => 0,
"" => 1,
"" => 2,
"" => 3,
"" => 4,
"" => 5,
"" => 6,
"" => 7,
"" => 8,
"" => 9,
"" => 10,
"廿" => 20,
"" => 100,
"" => 1000,
"" => 0,
"" => 1,
"" => 2,
"" => 3,
"" => 4,
"" => 5,
"" => 6,
"" => 7,
"" => 8,
"" => 9,
"" => 10,
"" => 20,
"" => 100,
"" => 1000,
}
};
pub static ref ZH_NUM_RE: Regex =
Regex::new(r"[〇一二三四五六七八九十廿百千零壹贰叁肆伍陆柒捌玖拾念佰仟]").unwrap();
}
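// Illustrative sketch (not part of the original commit): ZH_NUM_RE locates a
// Chinese numeral and ZH_NUM_MAP resolves it to its integer value.
#[cfg(test)]
mod tests {
use super::{ZH_NUM_MAP, ZH_NUM_RE};
#[test]
fn test_zh_num_lookup() {
let m = ZH_NUM_RE.find("第三季").expect("should match a Chinese numeral");
assert_eq!(ZH_NUM_MAP[m.as_str()], 3);
let m = ZH_NUM_RE.find("第拾期").expect("should match a Chinese numeral");
assert_eq!(ZH_NUM_MAP[m.as_str()], 10);
}
}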

View File

@@ -0,0 +1,19 @@
use thiserror::Error;
#[derive(Error, Debug)]
pub enum ParseError {
#[error("Parse bangumi season error: {0}")]
BangumiSeasonError(#[from] std::num::ParseIntError),
#[error("Parse file url error: {0}")]
FileUrlError(#[from] url::ParseError),
#[error("Parse {desc} with mime error, expected {expected}, but got {found}")]
MimeError {
desc: String,
expected: String,
found: String,
},
#[error("Parse mikan rss {url} format error")]
MikanRssFormatError { url: String },
#[error("Parse mikan rss item format error, {reason}")]
MikanRssItemFormatError { reason: String },
}

View File

@@ -0,0 +1,3 @@
pub mod styles;
pub use styles::parse_style_attr;

View File

@@ -0,0 +1,6 @@
use lightningcss::declaration::DeclarationBlock;
pub fn parse_style_attr(style_attr: &str) -> Option<DeclarationBlock> {
let result = DeclarationBlock::parse_string(style_attr, Default::default()).ok()?;
Some(result)
}
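// Minimal usage sketch (not part of the original commit): parse_style_attr
// should return Some for a well-formed inline declaration block.
#[cfg(test)]
mod tests {
use super::parse_style_attr;
#[test]
fn test_parse_style_attr_accepts_inline_style() {
let style = parse_style_attr("background-image: url('/images/Bangumi/202309/5ce9fed1.jpg');");
assert!(style.is_some());
}
}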

View File

@@ -0,0 +1,64 @@
use std::ops::Deref;
use loco_rs::app::{AppContext, Initializer};
use once_cell::sync::OnceCell;
use super::{AppMikanConfig, MIKAN_BASE_URL};
use crate::{config::AppConfigExt, fetch::HttpClient};
static APP_MIKAN_CLIENT: OnceCell<AppMikanClient> = OnceCell::new();
pub struct AppMikanClient {
http_client: HttpClient,
base_url: String,
}
impl AppMikanClient {
pub fn new(mut config: AppMikanConfig) -> loco_rs::Result<Self> {
let http_client =
HttpClient::new(config.http_client.take()).map_err(loco_rs::Error::wrap)?;
let base_url = config
.base_url
.unwrap_or_else(|| String::from(MIKAN_BASE_URL));
Ok(Self {
http_client,
base_url,
})
}
pub fn global() -> &'static AppMikanClient {
APP_MIKAN_CLIENT
.get()
.expect("Global mikan http client is not initialized")
}
pub fn base_url(&self) -> &str {
&self.base_url
}
}
impl Deref for AppMikanClient {
type Target = HttpClient;
fn deref(&self) -> &Self::Target {
&self.http_client
}
}
pub struct AppMikanClientInitializer;
#[async_trait::async_trait]
impl Initializer for AppMikanClientInitializer {
fn name(&self) -> String {
"AppMikanClientInitializer".to_string()
}
async fn before_run(&self, app_context: &AppContext) -> loco_rs::Result<()> {
let config = &app_context.config;
let app_mikan_conf = config.get_mikan_conf()?.unwrap_or_default();
APP_MIKAN_CLIENT.get_or_try_init(|| AppMikanClient::new(app_mikan_conf))?;
Ok(())
}
}

View File

@@ -0,0 +1,11 @@
use serde::{Deserialize, Serialize};
use crate::fetch::HttpClientConfig;
pub const MIKAN_CONF_KEY: &str = "mikan";
#[derive(Debug, Clone, PartialEq, Eq, Serialize, Deserialize, Default)]
pub struct AppMikanConfig {
pub http_client: Option<HttpClientConfig>,
pub base_url: Option<String>,
}

View File

@@ -0,0 +1,4 @@
pub const MIKAN_BUCKET_KEY: &str = "mikan";
pub const MIKAN_BASE_URL: &str = "https://mikanani.me/";
pub const MIKAN_UNKNOWN_FANSUB_NAME: &str = "生肉/不明字幕";
pub const MIKAN_UNKNOWN_FANSUB_ID: &str = "202";

View File

@@ -0,0 +1,22 @@
pub mod client;
pub mod config;
pub mod constants;
pub mod rss_parser;
pub mod web_parser;
pub use client::{AppMikanClient, AppMikanClientInitializer};
pub use config::{AppMikanConfig, MIKAN_CONF_KEY};
pub use constants::{MIKAN_BASE_URL, MIKAN_BUCKET_KEY};
pub use rss_parser::{
build_mikan_bangumi_rss_link, build_mikan_subscriber_aggregation_rss_link,
parse_mikan_bangumi_id_from_rss_link, parse_mikan_rss_channel_from_rss_link,
parse_mikan_rss_items_from_rss_link, parse_mikan_subscriber_aggregation_id_from_rss_link,
MikanBangumiAggregationRssChannel, MikanBangumiRssChannel, MikanBangumiRssLink,
MikanRssChannel, MikanRssItem, MikanSubscriberAggregationRssChannel,
MikanSubscriberAggregationRssLink,
};
pub use web_parser::{
build_mikan_bangumi_homepage, build_mikan_episode_homepage,
parse_mikan_bangumi_meta_from_mikan_homepage, parse_mikan_episode_meta_from_mikan_homepage,
MikanBangumiMeta, MikanEpisodeMeta,
};

View File

@@ -0,0 +1,353 @@
use std::ops::Deref;
use chrono::DateTime;
use itertools::Itertools;
use reqwest::IntoUrl;
use serde::{Deserialize, Serialize};
use torrent::core::BITTORRENT_MIME_TYPE;
use url::Url;
use super::{
web_parser::{parse_mikan_episode_id_from_homepage, MikanEpisodeHomepage},
AppMikanClient,
};
use crate::{extract::errors::ParseError, fetch::bytes::download_bytes_with_client};
#[derive(Debug, Clone, Serialize, Deserialize, PartialEq, Eq)]
pub struct MikanRssItem {
pub title: String,
pub homepage: Url,
pub url: Url,
pub content_length: Option<u64>,
pub mime: String,
pub pub_date: Option<i64>,
pub mikan_episode_id: String,
}
#[derive(Debug, Clone, Serialize, Deserialize, PartialEq, Eq)]
pub struct MikanBangumiRssChannel {
pub name: String,
pub url: Url,
pub mikan_bangumi_id: String,
pub mikan_fansub_id: String,
pub items: Vec<MikanRssItem>,
}
#[derive(Debug, Clone, Serialize, Deserialize, PartialEq, Eq)]
pub struct MikanBangumiAggregationRssChannel {
pub name: String,
pub url: Url,
pub mikan_bangumi_id: String,
pub items: Vec<MikanRssItem>,
}
#[derive(Debug, Clone, Serialize, Deserialize, PartialEq, Eq)]
pub struct MikanSubscriberAggregationRssChannel {
pub mikan_aggregation_id: String,
pub url: Url,
pub items: Vec<MikanRssItem>,
}
#[derive(Debug, Clone, Serialize, Deserialize, PartialEq, Eq)]
pub enum MikanRssChannel {
Bangumi(MikanBangumiRssChannel),
BangumiAggregation(MikanBangumiAggregationRssChannel),
SubscriberAggregation(MikanSubscriberAggregationRssChannel),
}
impl MikanRssChannel {
pub fn items(&self) -> &[MikanRssItem] {
match &self {
Self::Bangumi(MikanBangumiRssChannel { items, .. })
| Self::BangumiAggregation(MikanBangumiAggregationRssChannel { items, .. })
| Self::SubscriberAggregation(MikanSubscriberAggregationRssChannel { items, .. }) => {
items
}
}
}
pub fn into_items(self) -> Vec<MikanRssItem> {
match self {
Self::Bangumi(MikanBangumiRssChannel { items, .. })
| Self::BangumiAggregation(MikanBangumiAggregationRssChannel { items, .. })
| Self::SubscriberAggregation(MikanSubscriberAggregationRssChannel { items, .. }) => {
items
}
}
}
pub fn name(&self) -> Option<&str> {
match &self {
Self::Bangumi(MikanBangumiRssChannel { name, .. })
| Self::BangumiAggregation(MikanBangumiAggregationRssChannel { name, .. }) => {
Some(name.as_str())
}
Self::SubscriberAggregation(MikanSubscriberAggregationRssChannel { .. }) => None,
}
}
pub fn url(&self) -> &Url {
match &self {
Self::Bangumi(MikanBangumiRssChannel { url, .. })
| Self::BangumiAggregation(MikanBangumiAggregationRssChannel { url, .. })
| Self::SubscriberAggregation(MikanSubscriberAggregationRssChannel { url, .. }) => url,
}
}
}
impl TryFrom<rss::Item> for MikanRssItem {
type Error = ParseError;
fn try_from(item: rss::Item) -> Result<Self, Self::Error> {
let mime_type = item
.enclosure()
.map(|x| x.mime_type.to_string())
.unwrap_or_default();
if mime_type == BITTORRENT_MIME_TYPE {
let enclosure = item.enclosure.unwrap();
let homepage = item
.link
.ok_or_else(|| ParseError::MikanRssItemFormatError {
reason: String::from("must to have link for homepage"),
})?;
let homepage = Url::parse(&homepage)?;
let enclosure_url = Url::parse(&enclosure.url)?;
let MikanEpisodeHomepage {
mikan_episode_id, ..
} = parse_mikan_episode_id_from_homepage(&homepage).ok_or_else(|| {
ParseError::MikanRssItemFormatError {
reason: String::from("homepage link format invalid"),
}
})?;
Ok(MikanRssItem {
title: item.title.unwrap_or_default(),
homepage,
url: enclosure_url,
content_length: enclosure.length.parse().ok(),
mime: enclosure.mime_type,
pub_date: item
.pub_date
.and_then(|s| DateTime::parse_from_rfc2822(&s).ok())
.map(|s| s.timestamp_millis()),
mikan_episode_id,
})
} else {
Err(ParseError::MimeError {
expected: String::from(BITTORRENT_MIME_TYPE),
found: mime_type,
desc: String::from("MikanRssItem"),
})
}
}
}
#[derive(Debug, Clone)]
pub struct MikanBangumiRssLink {
pub mikan_bangumi_id: String,
pub mikan_fansub_id: Option<String>,
}
#[derive(Debug, Clone)]
pub struct MikanSubscriberAggregationRssLink {
pub mikan_aggregation_id: String,
}
pub fn build_mikan_bangumi_rss_link(
mikan_base_url: &str,
mikan_bangumi_id: &str,
mikan_fansub_id: Option<&str>,
) -> eyre::Result<Url> {
let mut url = Url::parse(mikan_base_url)?;
url.set_path("/RSS/Bangumi");
url.query_pairs_mut()
.append_pair("bangumiId", mikan_bangumi_id);
if let Some(mikan_fansub_id) = mikan_fansub_id {
url.query_pairs_mut()
.append_pair("subgroupid", mikan_fansub_id);
};
Ok(url)
}
pub fn build_mikan_subscriber_aggregation_rss_link(
mikan_base_url: &str,
mikan_aggregation_id: &str,
) -> eyre::Result<Url> {
let mut url = Url::parse(mikan_base_url)?;
url.set_path("/RSS/MyBangumi");
url.query_pairs_mut()
.append_pair("token", mikan_aggregation_id);
Ok(url)
}
pub fn parse_mikan_bangumi_id_from_rss_link(url: &Url) -> Option<MikanBangumiRssLink> {
if url.path() == "/RSS/Bangumi" {
url.query_pairs()
.find(|(k, _)| k == "bangumiId")
.map(|(_, v)| MikanBangumiRssLink {
mikan_bangumi_id: v.to_string(),
mikan_fansub_id: url
.query_pairs()
.find(|(k, _)| k == "subgroupid")
.map(|(_, v)| v.to_string()),
})
} else {
None
}
}
pub fn parse_mikan_subscriber_aggregation_id_from_rss_link(
url: &Url,
) -> Option<MikanSubscriberAggregationRssLink> {
if url.path() == "/RSS/MyBangumi" {
url.query_pairs().find(|(k, _)| k == "token").map(|(_, v)| {
MikanSubscriberAggregationRssLink {
mikan_aggregation_id: v.to_string(),
}
})
} else {
None
}
}
pub async fn parse_mikan_rss_items_from_rss_link(
client: Option<&AppMikanClient>,
url: impl IntoUrl,
) -> eyre::Result<Vec<MikanRssItem>> {
let channel = parse_mikan_rss_channel_from_rss_link(client, url).await?;
Ok(channel.into_items())
}
pub async fn parse_mikan_rss_channel_from_rss_link(
client: Option<&AppMikanClient>,
url: impl IntoUrl,
) -> eyre::Result<MikanRssChannel> {
let http_client = client.map(|s| s.deref());
let bytes = download_bytes_with_client(http_client, url.as_str()).await?;
let channel = rss::Channel::read_from(&bytes[..])?;
let channel_link = Url::parse(channel.link())?;
if let Some(MikanBangumiRssLink {
mikan_bangumi_id,
mikan_fansub_id,
}) = parse_mikan_bangumi_id_from_rss_link(&channel_link)
{
let channel_name = channel.title().replace("Mikan Project - ", "");
let items = channel
.items
.into_iter()
// @TODO log error
.flat_map(MikanRssItem::try_from)
.collect_vec();
if let Some(mikan_fansub_id) = mikan_fansub_id {
Ok(MikanRssChannel::Bangumi(MikanBangumiRssChannel {
name: channel_name,
mikan_bangumi_id,
mikan_fansub_id,
url: channel_link,
items,
}))
} else {
Ok(MikanRssChannel::BangumiAggregation(
MikanBangumiAggregationRssChannel {
name: channel_name,
mikan_bangumi_id,
url: channel_link,
items,
},
))
}
} else if let Some(MikanSubscriberAggregationRssLink {
mikan_aggregation_id,
..
}) = parse_mikan_subscriber_aggregation_id_from_rss_link(&channel_link)
{
let items = channel
.items
.into_iter()
// @TODO log error
.flat_map(MikanRssItem::try_from)
.collect_vec();
return Ok(MikanRssChannel::SubscriberAggregation(
MikanSubscriberAggregationRssChannel {
mikan_aggregation_id,
items,
url: channel_link,
},
));
} else {
return Err(ParseError::MikanRssFormatError {
url: url.as_str().into(),
}
.into());
}
}
#[cfg(test)]
mod tests {
use std::assert_matches::assert_matches;
use torrent::core::BITTORRENT_MIME_TYPE;
use crate::extract::mikan::{
parse_mikan_rss_channel_from_rss_link, MikanBangumiAggregationRssChannel,
MikanBangumiRssChannel, MikanRssChannel,
};
#[tokio::test]
pub async fn test_parse_mikan_rss_channel_from_rss_link() {
{
let bangumi_url = "https://mikanani.me/RSS/Bangumi?bangumiId=3141&subgroupid=370";
let channel = parse_mikan_rss_channel_from_rss_link(None, bangumi_url)
.await
.expect("should get mikan channel from rss url");
assert_matches!(
&channel,
MikanRssChannel::Bangumi(MikanBangumiRssChannel { .. })
);
assert_matches!(&channel.name(), Some("葬送的芙莉莲"));
let items = channel.items();
let first_sub_item = items
.first()
.expect("mikan subscriptions should have at least one subs");
assert_eq!(first_sub_item.mime, BITTORRENT_MIME_TYPE);
assert!(&first_sub_item
.homepage
.as_str()
.starts_with("https://mikanani.me/Home/Episode"));
let name = first_sub_item.title.as_str();
assert!(name.contains("葬送的芙莉莲"));
}
{
let bangumi_url = "https://mikanani.me/RSS/Bangumi?bangumiId=3416";
let channel = parse_mikan_rss_channel_from_rss_link(None, bangumi_url)
.await
.expect("should get mikan channel from rss url");
assert_matches!(
&channel,
MikanRssChannel::BangumiAggregation(MikanBangumiAggregationRssChannel { .. })
);
assert_matches!(&channel.name(), Some("叹气的亡灵想隐退"));
}
}
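// Illustrative sketch (not part of the original commit): the rss link builder
// and parser should round-trip bangumi and fansub ids without any network access.
#[test]
fn test_mikan_bangumi_rss_link_round_trip() {
use crate::extract::mikan::{
build_mikan_bangumi_rss_link, parse_mikan_bangumi_id_from_rss_link,
};
let link = build_mikan_bangumi_rss_link("https://mikanani.me/", "3141", Some("370"))
.expect("should build mikan bangumi rss link");
let parsed = parse_mikan_bangumi_id_from_rss_link(&link)
.expect("should parse bangumi and fansub ids back from the built link");
assert_eq!(parsed.mikan_bangumi_id, "3141");
assert_eq!(parsed.mikan_fansub_id.as_deref(), Some("370"));
}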
}

View File

@@ -0,0 +1,493 @@
use std::ops::Deref;
use bytes::Bytes;
use eyre::ContextCompat;
use html_escape::decode_html_entities;
use itertools::Itertools;
use lazy_static::lazy_static;
use lightningcss::{properties::Property, values::image::Image as CSSImage};
use loco_rs::app::AppContext;
use regex::Regex;
use scraper::Html;
use url::Url;
use super::{
parse_mikan_bangumi_id_from_rss_link, AppMikanClient, MikanBangumiRssLink, MIKAN_BUCKET_KEY,
};
use crate::{
app::AppContextExt,
dal::DalContentCategory,
extract::html::parse_style_attr,
fetch::{html::download_html_with_client, image::download_image_with_client},
models::subscribers,
};
#[derive(Clone, Debug, PartialEq)]
pub struct MikanEpisodeMeta {
pub homepage: Url,
pub origin_poster_src: Option<Url>,
pub bangumi_title: String,
pub episode_title: String,
pub fansub: String,
pub mikan_bangumi_id: String,
pub mikan_fansub_id: String,
pub mikan_episode_id: String,
}
#[derive(Clone, Debug, PartialEq)]
pub struct MikanBangumiMeta {
pub homepage: Url,
pub origin_poster_src: Option<Url>,
pub bangumi_title: String,
pub mikan_bangumi_id: String,
pub mikan_fansub_id: Option<String>,
pub fansub: Option<String>,
pub mikan_fansub_candidates: Vec<(String, String)>,
}
#[derive(Clone, Debug, PartialEq)]
pub struct MikanBangumiPosterMeta {
pub origin_poster_src: Url,
pub poster_data: Option<Bytes>,
pub poster_src: Option<String>,
}
#[derive(Clone, Debug, PartialEq)]
pub struct MikanEpisodeHomepage {
pub mikan_episode_id: String,
}
lazy_static! {
static ref MIKAN_TITLE_SEASON: Regex = Regex::new("第.*季").unwrap();
}
pub fn build_mikan_bangumi_homepage(
mikan_base_url: &str,
mikan_bangumi_id: &str,
mikan_fansub_id: Option<&str>,
) -> eyre::Result<Url> {
let mut url = Url::parse(mikan_base_url)?;
url.set_path(&format!("/Home/Bangumi/{mikan_bangumi_id}"));
url.set_fragment(mikan_fansub_id);
Ok(url)
}
pub fn build_mikan_episode_homepage(
mikan_base_url: &str,
mikan_episode_id: &str,
) -> eyre::Result<Url> {
let mut url = Url::parse(mikan_base_url)?;
url.set_path(&format!("/Home/Episode/{mikan_episode_id}"));
Ok(url)
}
pub fn parse_mikan_episode_id_from_homepage(url: &Url) -> Option<MikanEpisodeHomepage> {
if url.path().starts_with("/Home/Episode/") {
let mikan_episode_id = url.path().replace("/Home/Episode/", "");
Some(MikanEpisodeHomepage { mikan_episode_id })
} else {
None
}
}
pub async fn parse_mikan_bangumi_poster_from_origin_poster_src(
client: Option<&AppMikanClient>,
origin_poster_src: Url,
) -> eyre::Result<MikanBangumiPosterMeta> {
let http_client = client.map(|s| s.deref());
let poster_data = download_image_with_client(http_client, origin_poster_src.clone()).await?;
Ok(MikanBangumiPosterMeta {
origin_poster_src,
poster_data: Some(poster_data),
poster_src: None,
})
}
pub async fn parse_mikan_bangumi_poster_from_origin_poster_src_with_cache(
ctx: &AppContext,
origin_poster_src: Url,
subscriber_id: i32,
) -> eyre::Result<MikanBangumiPosterMeta> {
let dal_client = ctx.get_dal_client();
let mikan_client = ctx.get_mikan_client();
let subscriber_pid = &subscribers::Model::find_pid_by_id_with_cache(ctx, subscriber_id).await?;
if let Some(poster_src) = dal_client
.exists_object(
DalContentCategory::Image,
subscriber_pid,
Some(MIKAN_BUCKET_KEY),
&origin_poster_src.path().replace("/images/Bangumi/", ""),
)
.await?
{
return Ok(MikanBangumiPosterMeta {
origin_poster_src,
poster_data: None,
poster_src: Some(poster_src.to_string()),
});
}
let poster_data =
download_image_with_client(Some(mikan_client.deref()), origin_poster_src.clone()).await?;
let poster_str = dal_client
.store_object(
DalContentCategory::Image,
subscriber_pid,
Some(MIKAN_BUCKET_KEY),
&origin_poster_src.path().replace("/images/Bangumi/", ""),
poster_data.clone(),
)
.await?;
Ok(MikanBangumiPosterMeta {
origin_poster_src,
poster_data: Some(poster_data),
poster_src: Some(poster_str.to_string()),
})
}
pub async fn parse_mikan_bangumi_meta_from_mikan_homepage(
client: Option<&AppMikanClient>,
url: Url,
) -> eyre::Result<MikanBangumiMeta> {
let http_client = client.map(|s| s.deref());
let url_host = url.origin().unicode_serialization();
let content = download_html_with_client(http_client, url.as_str()).await?;
let html = Html::parse_document(&content);
let bangumi_fansubs = html
.select(&scraper::Selector::parse(".subgroup-text").unwrap())
.filter_map(|el| {
if let (Some(fansub_id), Some(fansub_name)) = (
el.value()
.attr("id")
.map(|s| decode_html_entities(s).trim().to_string()),
el.select(&scraper::Selector::parse("a:nth-child(1)").unwrap())
.next()
.map(|child| {
let mut s = String::from(
child
.prev_sibling()
.and_then(|t| t.value().as_text())
.map(|s| s.trim())
.unwrap_or_default(),
);
s.extend(child.text());
decode_html_entities(&s).trim().to_string()
}),
) {
Some((fansub_id, fansub_name))
} else {
None
}
})
.collect_vec();
let fansub_info = url.fragment().and_then(|b| {
bangumi_fansubs
.iter()
.find_map(|(id, name)| if id == b { Some((id, name)) } else { None })
});
let bangumi_title = html
.select(&scraper::Selector::parse(".bangumi-title").unwrap())
.next()
.map(|el| {
decode_html_entities(&el.text().collect::<String>())
.trim()
.to_string()
})
.and_then(|title| if title.is_empty() { None } else { Some(title) })
.wrap_err_with(|| {
// todo: error handler
format!("Missing mikan bangumi official title for {}", url)
})?;
let MikanBangumiRssLink {
mikan_bangumi_id, ..
} = html
.select(&scraper::Selector::parse(".bangumi-title > .mikan-rss").unwrap())
.next()
.and_then(|el| el.value().attr("href"))
.as_ref()
.and_then(|s| url.join(s).ok())
.and_then(|rss_link_url| parse_mikan_bangumi_id_from_rss_link(&rss_link_url))
.wrap_err_with(|| {
// todo: error handler
format!("Missing mikan bangumi rss link or error format for {}", url)
})?;
let origin_poster_src = html
.select(&scraper::Selector::parse(".bangumi-poster").unwrap())
.next()
.and_then(|el| el.value().attr("style"))
.as_ref()
.and_then(|s| parse_style_attr(s))
.and_then(|style| {
style.iter().find_map(|(prop, _)| {
match prop {
Property::BackgroundImage(images) => {
for img in images {
if let CSSImage::Url(path) = img {
if let Ok(url) =
Url::parse(&url_host).and_then(|s| s.join(path.url.trim()))
{
return Some(url);
}
}
}
}
Property::Background(backgrounds) => {
for bg in backgrounds {
if let CSSImage::Url(path) = &bg.image {
if let Ok(url) =
Url::parse(&url_host).and_then(|s| s.join(path.url.trim()))
{
return Some(url);
}
}
}
}
_ => {}
}
None
})
})
.map(|mut origin_poster_src| {
origin_poster_src.set_query(None);
origin_poster_src
});
Ok(MikanBangumiMeta {
homepage: url,
bangumi_title,
origin_poster_src,
mikan_bangumi_id,
fansub: fansub_info.map(|s| s.1.to_string()),
mikan_fansub_id: fansub_info.map(|s| s.0.to_string()),
mikan_fansub_candidates: bangumi_fansubs.clone(),
})
}
pub async fn parse_mikan_episode_meta_from_mikan_homepage(
client: Option<&AppMikanClient>,
url: Url,
) -> eyre::Result<MikanEpisodeMeta> {
let http_client = client.map(|s| s.deref());
let url_host = url.origin().unicode_serialization();
let content = download_html_with_client(http_client, url.as_str()).await?;
let html = Html::parse_document(&content);
let bangumi_title = html
.select(&scraper::Selector::parse(".bangumi-title").unwrap())
.next()
.map(|el| {
decode_html_entities(&el.text().collect::<String>())
.trim()
.to_string()
})
.and_then(|title| if title.is_empty() { None } else { Some(title) })
.wrap_err_with(|| {
// todo: error handler
format!("Missing mikan bangumi official title for {}", url)
})?;
let episode_title = html
.select(&scraper::Selector::parse("title").unwrap())
.next()
.map(|el| {
decode_html_entities(&el.text().collect::<String>())
.replace(" - Mikan Project", "")
.trim()
.to_string()
})
.and_then(|title| if title.is_empty() { None } else { Some(title) })
.wrap_err_with(|| {
// todo: error handler
format!("Missing mikan episode official title for {}", url)
})?;
let (mikan_bangumi_id, mikan_fansub_id) = html
.select(&scraper::Selector::parse(".bangumi-title > .mikan-rss").unwrap())
.next()
.and_then(|el| el.value().attr("href"))
.as_ref()
.and_then(|s| url.join(s).ok())
.and_then(|rss_link_url| parse_mikan_bangumi_id_from_rss_link(&rss_link_url))
.and_then(
|MikanBangumiRssLink {
mikan_bangumi_id,
mikan_fansub_id,
..
}| {
mikan_fansub_id.map(|mikan_fansub_id| (mikan_bangumi_id, mikan_fansub_id))
},
)
.wrap_err_with(|| {
// todo: error handler
format!("Missing mikan bangumi rss link or error format for {}", url)
})?;
let fansub = html
.select(&scraper::Selector::parse(".bangumi-info>.magnet-link-wrap").unwrap())
.next()
.map(|el| {
decode_html_entities(&el.text().collect::<String>())
.trim()
.to_string()
})
.wrap_err_with(|| {
// todo: error handler
format!("Missing mikan bangumi fansub name for {}", url)
})?;
let origin_poster_src = html
.select(&scraper::Selector::parse(".bangumi-poster").unwrap())
.next()
.and_then(|el| el.value().attr("style"))
.as_ref()
.and_then(|s| parse_style_attr(s))
.and_then(|style| {
style.iter().find_map(|(prop, _)| {
match prop {
Property::BackgroundImage(images) => {
for img in images {
if let CSSImage::Url(path) = img {
if let Ok(url) =
Url::parse(&url_host).and_then(|s| s.join(path.url.trim()))
{
return Some(url);
}
}
}
}
Property::Background(backgrounds) => {
for bg in backgrounds {
if let CSSImage::Url(path) = &bg.image {
if let Ok(url) =
Url::parse(&url_host).and_then(|s| s.join(path.url.trim()))
{
return Some(url);
}
}
}
}
_ => {}
}
None
})
})
.map(|mut origin_poster_src| {
origin_poster_src.set_query(None);
origin_poster_src
});
let MikanEpisodeHomepage {
mikan_episode_id, ..
} = parse_mikan_episode_id_from_homepage(&url)
.wrap_err_with(|| format!("Failed to extract mikan_episode_id from {}", &url))?;
Ok(MikanEpisodeMeta {
mikan_bangumi_id,
mikan_fansub_id,
bangumi_title,
episode_title,
homepage: url,
origin_poster_src,
fansub,
mikan_episode_id,
})
}
#[cfg(test)]
mod test {
use std::assert_matches::assert_matches;
use url::Url;
use zune_image::{codecs::ImageFormat, image::Image};
use super::{
parse_mikan_bangumi_meta_from_mikan_homepage,
parse_mikan_bangumi_poster_from_origin_poster_src,
parse_mikan_episode_meta_from_mikan_homepage,
};
#[tokio::test]
async fn test_parse_mikan_episode() {
let test_fn = async || -> eyre::Result<()> {
let url_str =
"https://mikanani.me/Home/Episode/475184dce83ea2b82902592a5ac3343f6d54b36a";
let url = Url::parse(url_str)?;
let ep_meta = parse_mikan_episode_meta_from_mikan_homepage(None, url.clone()).await?;
assert_eq!(ep_meta.homepage, url);
assert_eq!(ep_meta.bangumi_title, "葬送的芙莉莲");
assert_eq!(
ep_meta.origin_poster_src,
Some(Url::parse(
"https://mikanani.me/images/Bangumi/202309/5ce9fed1.jpg"
)?)
);
assert_eq!(ep_meta.fansub, "LoliHouse");
assert_eq!(ep_meta.mikan_fansub_id, "370");
assert_eq!(ep_meta.mikan_bangumi_id, "3141");
assert_matches!(ep_meta.origin_poster_src, Some(..));
let bgm_poster = parse_mikan_bangumi_poster_from_origin_poster_src(
None,
ep_meta.origin_poster_src.unwrap(),
)
.await?;
let u8_data = bgm_poster.poster_data.expect("should have poster data");
let image = Image::read(u8_data.to_vec(), Default::default());
assert!(
image.is_ok_and(|img| img
.metadata()
.get_image_format()
.is_some_and(|fmt| matches!(fmt, ImageFormat::JPEG))),
"should start with valid jpeg data magic number"
);
Ok(())
};
test_fn().await.expect("test parse mikan failed");
}
#[tokio::test]
async fn test_parse_mikan_bangumi() {
let test_fn = async || -> eyre::Result<()> {
let url_str = "https://mikanani.me/Home/Bangumi/3416#370";
let url = Url::parse(url_str)?;
let bgm_meta = parse_mikan_bangumi_meta_from_mikan_homepage(None, url.clone()).await?;
assert_eq!(bgm_meta.homepage, url);
assert_eq!(bgm_meta.bangumi_title, "叹气的亡灵想隐退");
assert_eq!(
bgm_meta.origin_poster_src,
Some(Url::parse(
"https://mikanani.me/images/Bangumi/202410/480ef127.jpg"
)?)
);
assert_eq!(bgm_meta.fansub, Some(String::from("LoliHouse")));
assert_eq!(bgm_meta.mikan_fansub_id, Some(String::from("370")));
assert_eq!(bgm_meta.mikan_bangumi_id, "3416");
assert_eq!(
bgm_meta.homepage.as_str(),
"https://mikanani.me/Home/Bangumi/3416#370"
);
assert_eq!(bgm_meta.mikan_fansub_candidates.len(), 6);
Ok(())
};
test_fn().await.expect("test parse mikan failed");
}
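// Illustrative sketch (not part of the original commit): episode homepage
// building and id extraction should round-trip without touching the network.
#[test]
fn test_mikan_episode_homepage_round_trip() {
use super::{build_mikan_episode_homepage, parse_mikan_episode_id_from_homepage};
let url = build_mikan_episode_homepage(
"https://mikanani.me/",
"475184dce83ea2b82902592a5ac3343f6d54b36a",
)
.expect("should build mikan episode homepage");
let homepage = parse_mikan_episode_id_from_homepage(&url)
.expect("should extract mikan_episode_id back from the built homepage");
assert_eq!(
homepage.mikan_episode_id,
"475184dce83ea2b82902592a5ac3343f6d54b36a"
);
}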
}

View File

@@ -0,0 +1,6 @@
pub mod defs;
pub mod errors;
pub mod html;
pub mod mikan;
pub mod rawname;
pub mod torrent;

View File

@@ -0,0 +1,5 @@
pub mod parser;
pub use parser::{
extract_season_from_title_body, parse_episode_meta_from_raw_name, RawEpisodeMeta,
};

View File

@@ -0,0 +1,843 @@
use std::borrow::Cow;
use itertools::Itertools;
use lazy_static::lazy_static;
use regex::Regex;
use serde::{Deserialize, Serialize};
use crate::extract::defs::{DIGIT_1PLUS_REG, ZH_NUM_MAP, ZH_NUM_RE};
const NAME_EXTRACT_REPLACE_ADHOC1_REPLACED: &str = "$1/$2";
lazy_static! {
static ref TITLE_RE: Regex = Regex::new(
r#"(.*|\[.*])( -? \d+|\[\d+]|\[\d+.?[vV]\d]|第\d+[话話集]|\[第?\d+[话話集]]|\[\d+.?END]|[Ee][Pp]?\d+|\[\s*\d+\s*[\-\~]\s*\d+\s*\p{scx=Han}*[话話集]\s*])(.*)"#
).unwrap();
static ref EP_COLLECTION_RE:Regex = Regex::new(r#"\[?\s*\d+\s*[\-\~]\s*\d+\s*\p{scx=Han}*合?[话話集]\s*]?"#).unwrap();
static ref MOVIE_TITLE_RE:Regex = Regex::new(r#"(.*|\[.*])(剧场版|[Mm]ovie|电影)(.*?)$"#).unwrap();
static ref RESOLUTION_RE: Regex = Regex::new(r"1080|720|2160|4K|2K").unwrap();
static ref SOURCE_L1_RE: Regex = Regex::new(r"B-Global|[Bb]aha|[Bb]ilibili|AT-X|W[Ee][Bb][Rr][Ii][Pp]|Sentai|B[Dd][Rr][Ii][Pp]|UHD[Rr][Ii][Pp]|NETFLIX").unwrap();
static ref SOURCE_L2_RE: Regex = Regex::new(r"AMZ|CR|W[Ee][Bb]|B[Dd]").unwrap();
static ref SUB_RE: Regex = Regex::new(r"[简繁日字幕]|CH|BIG5|GB").unwrap();
static ref PREFIX_RE: Regex =
Regex::new(r"[^\w\s\p{Unified_Ideograph}\p{scx=Han}\p{scx=Hira}\p{scx=Kana}-]").unwrap();
static ref EN_BRACKET_SPLIT_RE: Regex = Regex::new(r"[\[\]]").unwrap();
static ref MOVIE_SEASON_EXTRACT_RE: Regex = Regex::new(r"剧场版|Movie|电影").unwrap();
static ref MAIN_TITLE_PREFIX_PROCESS_RE1: Regex = Regex::new(r"新番|月?番").unwrap();
static ref MAIN_TITLE_PREFIX_PROCESS_RE2: Regex = Regex::new(r"[港澳台]{1,3}地区").unwrap();
static ref MAIN_TITLE_PRE_PROCESS_BRACKETS_RE: Regex = Regex::new(r"\[.+\]").unwrap();
static ref MAIN_TITLE_PRE_PROCESS_BRACKETS_RE_SUB1: Regex = Regex::new(r"^.*?\[").unwrap();
static ref SEASON_EXTRACT_SEASON_ALL_RE: Regex = Regex::new(r"S\d{1,2}|Season \d{1,2}|[第].[季期]|1st|2nd|3rd|\d{1,2}th").unwrap();
static ref SEASON_EXTRACT_SEASON_EN_PREFIX_RE: Regex = Regex::new(r"Season|S").unwrap();
static ref SEASON_EXTRACT_SEASON_EN_NTH_RE: Regex = Regex::new(r"1st|2nd|3rd|\d{1,2}th").unwrap();
static ref SEASON_EXTRACT_SEASON_ZH_PREFIX_RE: Regex = Regex::new(r"[第 ].*[季期(部分)]|部分").unwrap();
static ref SEASON_EXTRACT_SEASON_ZH_PREFIX_SUB_RE: Regex = Regex::new(r"[第季期 ]").unwrap();
static ref NAME_EXTRACT_REMOVE_RE: Regex = Regex::new(r"[(]仅限[港澳台]{1,3}地区[)]").unwrap();
static ref NAME_EXTRACT_SPLIT_RE: Regex = Regex::new(r"/|\s{2}|-\s{2}|\]\[").unwrap();
static ref NAME_EXTRACT_REPLACE_ADHOC1_RE: Regex = Regex::new(r"([\p{scx=Han}\s\(\)]{5,})_([a-zA-Z]{2,})").unwrap();
static ref NAME_JP_TEST: Regex = Regex::new(r"[\p{scx=Hira}\p{scx=Kana}]{2,}").unwrap();
static ref NAME_ZH_TEST: Regex = Regex::new(r"[\p{scx=Han}]{2,}").unwrap();
static ref NAME_EN_TEST: Regex = Regex::new(r"[a-zA-Z]{3,}").unwrap();
static ref TAGS_EXTRACT_SPLIT_RE: Regex = Regex::new(r"[\[\]()_]").unwrap();
static ref CLEAR_SUB_RE: Regex = Regex::new(r"_MP4|_MKV").unwrap();
}
#[derive(Debug, Clone, Serialize, Deserialize, PartialEq, Eq, Default)]
pub struct RawEpisodeMeta {
pub name_en: Option<String>,
pub name_en_no_season: Option<String>,
pub name_jp: Option<String>,
pub name_jp_no_season: Option<String>,
pub name_zh: Option<String>,
pub name_zh_no_season: Option<String>,
pub season: i32,
pub season_raw: Option<String>,
pub episode_index: i32,
pub subtitle: Option<String>,
pub source: Option<String>,
pub fansub: Option<String>,
pub resolution: Option<String>,
}
fn extract_fansub(raw_name: &str) -> Option<&str> {
let mut groups = EN_BRACKET_SPLIT_RE.splitn(raw_name, 3);
groups.nth(1)
}
fn replace_ch_bracket_to_en(raw_name: &str) -> String {
raw_name.replace('【', "[").replace('】', "]")
}
fn title_body_pre_process(title_body: &str, fansub: Option<&str>) -> eyre::Result<String> {
let raw_without_fansub = if let Some(fansub) = fansub {
// escape the fansub name so regex metacharacters in it cannot break the pattern
let fan_sub_re = Regex::new(&format!(".{}.", regex::escape(fansub)))?;
fan_sub_re.replace_all(title_body, "")
} else {
Cow::Borrowed(title_body)
};
let raw_with_prefix_replaced = PREFIX_RE.replace_all(&raw_without_fansub, "/");
let mut arg_group = raw_with_prefix_replaced
.split('/')
.map(|s| s.trim())
.filter(|s| !s.is_empty())
.collect::<Vec<_>>();
if arg_group.len() == 1 {
arg_group = arg_group.first_mut().unwrap().split(' ').collect();
}
let mut raw = raw_without_fansub.to_string();
for arg in arg_group.iter() {
if (arg_group.len() <= 5 && MAIN_TITLE_PREFIX_PROCESS_RE1.is_match(arg))
|| (MAIN_TITLE_PREFIX_PROCESS_RE2.is_match(arg))
{
let sub = Regex::new(&format!(".{}.", regex::escape(arg)))?;
raw = sub.replace_all(&raw, "").to_string();
}
}
if let Some(m) = MAIN_TITLE_PRE_PROCESS_BRACKETS_RE.find(&raw) {
if m.len() as f32 > (raw.len() as f32) * 0.5 {
let mut raw1 = MAIN_TITLE_PRE_PROCESS_BRACKETS_RE_SUB1
.replace(&raw, "")
.chars()
.collect_vec();
while let Some(ch) = raw1.pop() {
if ch == ']' {
break;
}
}
raw = raw1.into_iter().collect();
}
}
Ok(raw.to_string())
}
pub fn extract_season_from_title_body(title_body: &str) -> (String, Option<String>, i32) {
let name_and_season = EN_BRACKET_SPLIT_RE.replace_all(title_body, " ");
let seasons = SEASON_EXTRACT_SEASON_ALL_RE
.find(&name_and_season)
.into_iter()
.map(|s| s.as_str())
.collect_vec();
if seasons.is_empty() {
return (title_body.to_string(), None, 1);
}
let mut season = 1;
let mut season_raw = None;
let name = SEASON_EXTRACT_SEASON_ALL_RE.replace_all(&name_and_season, "");
for s in seasons {
season_raw = Some(s);
if let Some(m) = SEASON_EXTRACT_SEASON_EN_PREFIX_RE.find(s) {
if let Ok(s) = SEASON_EXTRACT_SEASON_ALL_RE
.replace_all(m.as_str(), "")
.parse::<i32>()
{
season = s;
break;
}
}
if let Some(m) = SEASON_EXTRACT_SEASON_EN_NTH_RE.find(s) {
if let Some(s) = DIGIT_1PLUS_REG
.find(m.as_str())
.and_then(|s| s.as_str().parse::<i32>().ok())
{
season = s;
break;
}
}
if let Some(m) = SEASON_EXTRACT_SEASON_ZH_PREFIX_RE.find(s) {
if let Ok(s) = SEASON_EXTRACT_SEASON_ZH_PREFIX_SUB_RE
.replace(m.as_str(), "")
.parse::<i32>()
{
season = s;
break;
}
if let Some(m) = ZH_NUM_RE.find(m.as_str()) {
season = ZH_NUM_MAP[m.as_str()];
break;
}
}
}
(name.to_string(), season_raw.map(|s| s.to_string()), season)
}
fn extract_name_from_title_body_name_section(
title_body_name_section: &str,
) -> (Option<String>, Option<String>, Option<String>) {
let mut name_en = None;
let mut name_zh = None;
let mut name_jp = None;
let replaced1 = NAME_EXTRACT_REMOVE_RE.replace_all(title_body_name_section, "");
let replaced2 = NAME_EXTRACT_REPLACE_ADHOC1_RE
.replace_all(&replaced1, NAME_EXTRACT_REPLACE_ADHOC1_REPLACED);
let trimmed = replaced2.trim();
let mut split = NAME_EXTRACT_SPLIT_RE
.split(trimmed)
.map(|s| s.trim())
.filter(|s| !s.is_empty())
.map(|s| s.to_string())
.collect_vec();
if split.len() == 1 {
let mut split_space = split[0].split(' ').collect_vec();
let mut search_indices = vec![0];
if split_space.len() > 1 {
search_indices.push(split_space.len() - 1);
}
for i in search_indices {
if NAME_ZH_TEST.is_match(split_space[i]) {
let chs = split_space[i];
split_space.remove(i);
split = vec![chs.to_string(), split_space.join(" ")];
break;
}
}
}
for item in split {
if NAME_JP_TEST.is_match(&item) && name_jp.is_none() {
name_jp = Some(item);
} else if NAME_ZH_TEST.is_match(&item) && name_zh.is_none() {
name_zh = Some(item);
} else if NAME_EN_TEST.is_match(&item) && name_en.is_none() {
name_en = Some(item);
}
}
(name_en, name_zh, name_jp)
}
fn extract_episode_index_from_title_episode(title_episode: &str) -> Option<i32> {
DIGIT_1PLUS_REG
.find(title_episode)?
.as_str()
.parse::<i32>()
.ok()
}
fn clear_sub(sub: Option<String>) -> Option<String> {
sub.map(|s| CLEAR_SUB_RE.replace_all(&s, "").to_string())
}
fn extract_tags_from_title_extra(
title_extra: &str,
) -> (Option<String>, Option<String>, Option<String>) {
let replaced = TAGS_EXTRACT_SPLIT_RE.replace_all(title_extra, " ");
let elements = replaced
.split(' ')
.map(|s| s.trim())
.filter(|s| !s.is_empty())
.collect_vec();
let mut sub = None;
let mut resolution = None;
let mut source = None;
for element in elements.iter() {
if SUB_RE.is_match(element) {
sub = Some(element.to_string())
} else if RESOLUTION_RE.is_match(element) {
resolution = Some(element.to_string())
} else if SOURCE_L1_RE.is_match(element) {
source = Some(element.to_string())
}
}
if source.is_none() {
for element in elements {
if SOURCE_L2_RE.is_match(element) {
source = Some(element.to_string())
}
}
}
(clear_sub(sub), resolution, source)
}
pub fn check_is_movie(title: &str) -> bool {
MOVIE_TITLE_RE.is_match(title)
}
pub fn parse_episode_meta_from_raw_name(s: &str) -> eyre::Result<RawEpisodeMeta> {
let raw_title = s.trim();
let raw_title_without_ch_brackets = replace_ch_bracket_to_en(raw_title);
let fansub = extract_fansub(&raw_title_without_ch_brackets);
let movie_capture = check_is_movie(&raw_title_without_ch_brackets);
if let Some(title_re_match_obj) = MOVIE_TITLE_RE
.captures(&raw_title_without_ch_brackets)
.or(TITLE_RE.captures(&raw_title_without_ch_brackets))
{
let mut title_body = title_re_match_obj
.get(1)
.map(|s| s.as_str().trim())
.unwrap_or_else(|| unreachable!("TITLE_RE has at least 3 capture groups"))
.to_string();
let mut title_episode = title_re_match_obj
.get(2)
.map(|s| s.as_str().trim())
.unwrap_or_else(|| unreachable!("TITLE_RE has at least 3 capture groups"));
let title_extra = title_re_match_obj
.get(3)
.map(|s| s.as_str().trim())
.unwrap_or_else(|| unreachable!("TITLE_RE has at least 3 capture groups"));
if movie_capture {
title_body += title_episode;
title_episode = "";
} else if EP_COLLECTION_RE.is_match(title_episode) {
title_episode = "";
}
let title_body = title_body_pre_process(&title_body, fansub)?;
let (name_without_season, season_raw, season) = extract_season_from_title_body(&title_body);
let (name_en, name_zh, name_jp) = extract_name_from_title_body_name_section(&title_body);
let (name_en_no_season, name_zh_no_season, name_jp_no_season) =
extract_name_from_title_body_name_section(&name_without_season);
let episode_index = extract_episode_index_from_title_episode(title_episode).unwrap_or(1);
let (sub, resolution, source) = extract_tags_from_title_extra(title_extra);
Ok(RawEpisodeMeta {
name_en,
name_en_no_season,
name_jp,
name_jp_no_season,
name_zh,
name_zh_no_season,
season,
season_raw,
episode_index,
subtitle: sub,
source,
fansub: fansub.map(|s| s.to_string()),
resolution,
})
} else {
Err(eyre::eyre!(
"Can not parse episode meta from raw filename {}",
raw_title
))
}
}
#[cfg(test)]
mod tests {
use super::{parse_episode_meta_from_raw_name, RawEpisodeMeta};
fn test_raw_ep_parser_case(raw_name: &str, expected: &str) {
let expected: Option<RawEpisodeMeta> = serde_json::from_str(expected).unwrap_or_default();
let found = parse_episode_meta_from_raw_name(raw_name).ok();
if expected != found {
println!(
"expected {} and found {} are not equal",
serde_json::to_string_pretty(&expected).unwrap(),
serde_json::to_string_pretty(&found).unwrap()
)
}
assert_eq!(expected, found);
}
#[test]
fn test_parse_ep_with_all_parts_wrapped() {
test_raw_ep_parser_case(
r#"[新Sub][1月新番][我心里危险的东西 第二季][05][HEVC][10Bit][1080P][简日双语][招募翻译]"#,
r#"{
"name_zh": "我心里危险的东西",
"name_zh_no_season": "我心里危险的东西",
"season": 2,
"season_raw": "第二季",
"episode_index": 5,
"subtitle": "简日双语",
"source": null,
"fansub": "新Sub",
"resolution": "1080P"
}"#,
)
}
#[test]
fn test_parse_ep_with_title_wrapped_by_one_square_bracket_and_season_prefix() {
test_raw_ep_parser_case(
r#"【喵萌奶茶屋】★01月新番★[我内心的糟糕念头 / Boku no Kokoro no Yabai Yatsu][18][1080p][简日双语][招募翻译]"#,
r#"{
"name_en": "Boku no Kokoro no Yabai Yatsu",
"name_en_no_season": "Boku no Kokoro no Yabai Yatsu",
"name_zh": "我内心的糟糕念头",
"name_zh_no_season": "我内心的糟糕念头",
"season": 1,
"season_raw": null,
"episode_index": 18,
"subtitle": "简日双语",
"source": null,
"fansub": "喵萌奶茶屋",
"resolution": "1080p"
}"#,
);
}
#[test]
fn test_parse_ep_with_ep_and_version() {
test_raw_ep_parser_case(
r#"[LoliHouse] 因为不是真正的伙伴而被逐出勇者队伍,流落到边境展开慢活人生 2nd / Shin no Nakama 2nd - 08v2 [WebRip 1080p HEVC-10bit AAC][简繁内封字幕]"#,
r#"{
"name_en": "Shin no Nakama 2nd",
"name_en_no_season": "Shin no Nakama",
"name_zh": "因为不是真正的伙伴而被逐出勇者队伍,流落到边境展开慢活人生 2nd",
"name_zh_no_season": "因为不是真正的伙伴而被逐出勇者队伍,流落到边境展开慢活人生",
"season": 2,
"season_raw": "2nd",
"episode_index": 8,
"subtitle": "简繁内封字幕",
"source": "WebRip",
"fansub": "LoliHouse",
"resolution": "1080p"
}"#,
)
}
#[test]
fn test_parse_ep_with_en_title_only() {
test_raw_ep_parser_case(
r"[动漫国字幕组&LoliHouse] THE MARGINAL SERVICE - 08 [WebRip 1080p HEVC-10bit AAC][简繁内封字幕]",
r#"{
"name_en": "THE MARGINAL SERVICE",
"name_en_no_season": "THE MARGINAL SERVICE",
"season": 1,
"episode_index": 8,
"subtitle": "简繁内封字幕",
"source": "WebRip",
"fansub": "动漫国字幕组&LoliHouse",
"resolution": "1080p"
}"#,
)
}
#[test]
fn test_parse_ep_with_two_zh_title() {
test_raw_ep_parser_case(
r#"[LoliHouse] 事与愿违的不死冒险者 / 非自愿的不死冒险者 / Nozomanu Fushi no Boukensha - 01 [WebRip 1080p HEVC-10bit AAC][简繁内封字幕]"#,
r#"{
"name_en": "Nozomanu Fushi no Boukensha",
"name_en_no_season": "Nozomanu Fushi no Boukensha",
"name_zh": "事与愿违的不死冒险者",
"name_zh_no_season": "事与愿违的不死冒险者",
"season": 1,
"season_raw": null,
"episode_index": 1,
"subtitle": "简繁内封字幕",
"source": "WebRip",
"fansub": "LoliHouse",
"resolution": "1080p"
}"#,
)
}
#[test]
fn test_parse_ep_with_en_zh_jp_titles() {
test_raw_ep_parser_case(
r#"[喵萌奶茶屋&LoliHouse] 碰之道 / ぽんのみち / Pon no Michi - 07 [WebRip 1080p HEVC-10bit AAC][简繁日内封字幕]"#,
r#"{
"name_en": "Pon no Michi",
"name_jp": "ぽんのみち",
"name_zh": "碰之道",
"name_en_no_season": "Pon no Michi",
"name_jp_no_season": "ぽんのみち",
"name_zh_no_season": "碰之道",
"season": 1,
"season_raw": null,
"episode_index": 7,
"subtitle": "简繁日内封字幕",
"source": "WebRip",
"fansub": "喵萌奶茶屋&LoliHouse",
"resolution": "1080p"
}"#,
)
}
#[test]
fn test_parse_ep_with_nth_season() {
test_raw_ep_parser_case(
r#"[ANi] Yowai Character Tomozakikun / 弱角友崎同学 2nd STAGE - 09 [1080P][Baha][WEB-DL][AAC AVC][CHT][MP4]"#,
r#"{
"name_en": "Yowai Character Tomozakikun",
"name_en_no_season": "Yowai Character Tomozakikun",
"name_zh": "弱角友崎同学 2nd STAGE",
"name_zh_no_season": "弱角友崎同学",
"season": 2,
"season_raw": "2nd",
"episode_index": 9,
"subtitle": "CHT",
"source": "Baha",
"fansub": "ANi",
"resolution": "1080P"
}"#,
)
}
#[test]
fn test_parse_ep_with_season_en_and_season_zh() {
test_raw_ep_parser_case(
r#"[豌豆字幕组&LoliHouse] 王者天下 第五季 / Kingdom S5 - 07 [WebRip 1080p HEVC-10bit AAC][简繁外挂字幕]"#,
r#"{
"name_en": "Kingdom S5",
"name_en_no_season": "Kingdom",
"name_zh": "王者天下 第五季",
"name_zh_no_season": "王者天下",
"season": 5,
"season_raw": "第五季",
"episode_index": 7,
"subtitle": "简繁外挂字幕",
"source": "WebRip",
"fansub": "豌豆字幕组&LoliHouse",
"resolution": "1080p"
}"#,
)
}
#[test]
fn test_parse_ep_with_airota_fansub_style_case1() {
test_raw_ep_parser_case(
r#"【千夏字幕组】【爱丽丝与特蕾丝的虚幻工厂_Alice to Therese no Maboroshi Koujou】[剧场版][WebRip_1080p_HEVC][简繁内封][招募新人]"#,
r#"{
"name_en": "Alice to Therese no Maboroshi Koujou",
"name_en_no_season": "Alice to Therese no Maboroshi Koujou",
"name_zh": "爱丽丝与特蕾丝的虚幻工厂",
"name_zh_no_season": "爱丽丝与特蕾丝的虚幻工厂",
"season": 1,
"episode_index": 1,
"subtitle": "简繁内封",
"source": "WebRip",
"fansub": "千夏字幕组",
"resolution": "1080p"
}"#,
)
}
#[test]
fn test_parse_ep_with_airota_fansub_style_case2() {
test_raw_ep_parser_case(
r#"[千夏字幕组&喵萌奶茶屋][电影 轻旅轻营 (摇曳露营) _Yuru Camp Movie][剧场版][UHDRip_2160p_HEVC][繁体][千夏15周年]"#,
r#"{
"name_en": "Yuru Camp Movie",
"name_en_no_season": "Yuru Camp Movie",
"name_zh": "电影 轻旅轻营 (摇曳露营)",
"name_zh_no_season": "电影 轻旅轻营 (摇曳露营)",
"season": 1,
"episode_index": 1,
"subtitle": "繁体",
"source": "UHDRip",
"fansub": "千夏字幕组&喵萌奶茶屋",
"resolution": "2160p"
}"#,
)
}
#[test]
fn test_parse_ep_with_large_episode_style() {
test_raw_ep_parser_case(
r#"[梦蓝字幕组]New Doraemon 哆啦A梦新番[747][2023.02.25][AVC][1080P][GB_JP][MP4]"#,
r#"{
"name_en": "New Doraemon",
"name_en_no_season": "New Doraemon",
"name_zh": "哆啦A梦新番",
"name_zh_no_season": "哆啦A梦新番",
"season": 1,
"episode_index": 747,
"subtitle": "GB",
"fansub": "梦蓝字幕组",
"resolution": "1080P"
}"#,
)
}
#[test]
fn test_parse_ep_with_many_square_brackets_split_title() {
test_raw_ep_parser_case(
r#"【MCE汉化组】[剧场版-摇曳露营][Yuru Camp][Movie][简日双语][1080P][x264 AAC]"#,
r#"{
"name_en": "Yuru Camp",
"name_en_no_season": "Yuru Camp",
"name_zh": "剧场版-摇曳露营",
"name_zh_no_season": "剧场版-摇曳露营",
"season": 1,
"episode_index": 1,
"subtitle": "简日双语",
"fansub": "MCE汉化组",
"resolution": "1080P"
}"#,
)
}
#[test]
fn test_parse_ep_with_implicit_lang_title_sep() {
test_raw_ep_parser_case(
r#"[织梦字幕组][尼尔:机械纪元 NieR Automata Ver1.1a][02集][1080P][AVC][简日双语]"#,
r#"{
"name_en": "NieR Automata Ver1.1a",
"name_en_no_season": "NieR Automata Ver1.1a",
"name_zh": "尼尔:机械纪元",
"name_zh_no_season": "尼尔:机械纪元",
"season": 1,
"episode_index": 2,
"subtitle": "简日双语",
"fansub": "织梦字幕组",
"resolution": "1080P"
}"#,
)
}
#[test]
fn test_parse_ep_with_square_brackets_wrapped_and_space_split() {
test_raw_ep_parser_case(
r#"[天月搬运组][迷宫饭 Delicious in Dungeon][03][日语中字][MKV][1080P][NETFLIX][高画质版]"#,
r#"
{
"name_en": "Delicious in Dungeon",
"name_en_no_season": "Delicious in Dungeon",
"name_zh": "迷宫饭",
"name_zh_no_season": "迷宫饭",
"season": 1,
"episode_index": 3,
"subtitle": "日语中字",
"source": "NETFLIX",
"fansub": "天月搬运组",
"resolution": "1080P"
}
"#,
)
}
#[test]
fn test_parse_ep_with_start_with_brackets_wrapped_season_info_prefix() {
test_raw_ep_parser_case(
r#"[爱恋字幕社][1月新番][迷宫饭][Dungeon Meshi][01][1080P][MP4][简日双语] "#,
r#"{
"name_en": "Dungeon Meshi",
"name_en_no_season": "Dungeon Meshi",
"name_zh": "迷宫饭",
"name_zh_no_season": "迷宫饭",
"season": 1,
"episode_index": 1,
"subtitle": "简日双语",
"fansub": "爱恋字幕社",
"resolution": "1080P"
}"#,
)
}
#[test]
fn test_parse_ep_with_small_no_title_extra_brackets_case() {
test_raw_ep_parser_case(
r#"[ANi] Mahou Shoujo ni Akogarete / 梦想成为魔法少女 [年龄限制版] - 09 [1080P][Baha][WEB-DL][AAC AVC][CHT][MP4]"#,
r#"{
"name_en": "Mahou Shoujo ni Akogarete",
"name_en_no_season": "Mahou Shoujo ni Akogarete",
"name_zh": "梦想成为魔法少女 [年龄限制版]",
"name_zh_no_season": "梦想成为魔法少女 [年龄限制版]",
"season": 1,
"episode_index": 9,
"subtitle": "CHT",
"source": "Baha",
"fansub": "ANi",
"resolution": "1080P"
}"#,
)
}
#[test]
fn test_parse_ep_title_leading_space_style() {
test_raw_ep_parser_case(
r#"[ANi] 16bit 的感动 ANOTHER LAYER - 01 [1080P][Baha][WEB-DL][AAC AVC][CHT][MP4]"#,
r#"{
"name_zh": "16bit 的感动 ANOTHER LAYER",
"name_zh_no_season": "16bit 的感动 ANOTHER LAYER",
"season": 1,
"season_raw": null,
"episode_index": 1,
"subtitle": "CHT",
"source": "Baha",
"fansub": "ANi",
"resolution": "1080P"
}"#,
)
}
#[test]
fn test_parse_ep_title_leading_month_and_wrapped_brackets_style() {
test_raw_ep_parser_case(
r#"【喵萌奶茶屋】★07月新番★[银砂糖师与黑妖精 ~ Sugar Apple Fairy Tale ~][13][1080p][简日双语][招募翻译]"#,
r#"{
"name_en": "~ Sugar Apple Fairy Tale ~",
"name_en_no_season": "~ Sugar Apple Fairy Tale ~",
"name_zh": "银砂糖师与黑妖精",
"name_zh_no_season": "银砂糖师与黑妖精",
"season": 1,
"episode_index": 13,
"subtitle": "简日双语",
"fansub": "喵萌奶茶屋",
"resolution": "1080p"
}"#,
)
}
#[test]
fn test_parse_ep_title_leading_month_style() {
test_raw_ep_parser_case(
r#"【极影字幕社】★4月新番 天国大魔境 Tengoku Daimakyou 第05话 GB 720P MP4字幕社招人内详"#,
r#"{
"name_en": "Tengoku Daimakyou",
"name_en_no_season": "Tengoku Daimakyou",
"name_zh": "天国大魔境",
"name_zh_no_season": "天国大魔境",
"season": 1,
"episode_index": 5,
"subtitle": "字幕社招人内详",
"source": null,
"fansub": "极影字幕社",
"resolution": "720P"
}"#,
)
}
#[test]
fn test_parse_ep_tokusatsu_style() {
test_raw_ep_parser_case(
r#"[MagicStar] 假面骑士Geats / 仮面ライダーギーツ EP33 [WEBDL] [1080p] [TTFC]【生】"#,
r#"{
"name_jp": "仮面ライダーギーツ",
"name_jp_no_season": "仮面ライダーギーツ",
"name_zh": "假面骑士Geats",
"name_zh_no_season": "假面骑士Geats",
"season": 1,
"episode_index": 33,
"source": "WEBDL",
"fansub": "MagicStar",
"resolution": "1080p"
}"#,
)
}
#[test]
fn test_parse_ep_with_multi_lang_zh_title() {
test_raw_ep_parser_case(
r#"[百冬练习组&LoliHouse] BanG Dream! 少女乐团派对☆PICO FEVER / Garupa Pico: Fever! - 26 [WebRip 1080p HEVC-10bit AAC][简繁内封字幕][END] [101.69 MB]"#,
r#"{
"name_en": "Garupa Pico: Fever!",
"name_en_no_season": "Garupa Pico: Fever!",
"name_zh": "BanG Dream! 少女乐团派对☆PICO FEVER",
"name_zh_no_season": "BanG Dream! 少女乐团派对☆PICO FEVER",
"season": 1,
"episode_index": 26,
"subtitle": "简繁内封字幕",
"source": "WebRip",
"fansub": "百冬练习组&LoliHouse",
"resolution": "1080p"
}"#,
)
}
#[test]
fn test_ep_collections() {
test_raw_ep_parser_case(
r#"[奶²&LoliHouse] 蘑菇狗 / Kinokoinu: Mushroom Pup [01-12 精校合集][WebRip 1080p HEVC-10bit AAC][简日内封字幕]"#,
r#"{
"name_en": "Kinokoinu: Mushroom Pup",
"name_en_no_season": "Kinokoinu: Mushroom Pup",
"name_zh": "蘑菇狗",
"name_zh_no_season": "蘑菇狗",
"season": 1,
"episode_index": 1,
"subtitle": "简日内封字幕",
"source": "WebRip",
"fansub": "奶²&LoliHouse",
"resolution": "1080p",
"name": " 蘑菇狗 / Kinokoinu: Mushroom Pup [01-12 精校合集]"
}"#,
);
test_raw_ep_parser_case(
r#"[LoliHouse] 叹气的亡灵想隐退 / Nageki no Bourei wa Intai shitai [01-13 合集][WebRip 1080p HEVC-10bit AAC][简繁内封字幕][Fin]"#,
r#"{
"name_en": "Nageki no Bourei wa Intai shitai",
"name_en_no_season": "Nageki no Bourei wa Intai shitai",
"name_jp": null,
"name_jp_no_season": null,
"name_zh": "叹气的亡灵想隐退",
"name_zh_no_season": "叹气的亡灵想隐退",
"season": 1,
"season_raw": null,
"episode_index": 1,
"subtitle": "简繁内封字幕",
"source": "WebRip",
"fansub": "LoliHouse",
"resolution": "1080p"
}"#,
);
test_raw_ep_parser_case(
r#"[LoliHouse] 精灵幻想记 第二季 / Seirei Gensouki S2 [01-12 合集][WebRip 1080p HEVC-10bit AAC][简繁内封字幕][Fin]"#,
r#"{
"name_en": "Seirei Gensouki S2",
"name_en_no_season": "Seirei Gensouki",
"name_zh": "精灵幻想记 第二季",
"name_zh_no_season": "精灵幻想记",
"season": 2,
"season_raw": "第二季",
"episode_index": 1,
"subtitle": "简繁内封字幕",
"source": "WebRip",
"fansub": "LoliHouse",
"resolution": "1080p"
}"#,
);
test_raw_ep_parser_case(
r#"[喵萌奶茶屋&LoliHouse] 超自然武装当哒当 / 胆大党 / Dandadan [01-12 精校合集][WebRip 1080p HEVC-10bit AAC][简繁日内封字幕][Fin]"#,
r#" {
"name_en": "Dandadan",
"name_en_no_season": "Dandadan",
"name_zh": "超自然武装当哒当",
"name_zh_no_season": "超自然武装当哒当",
"season": 1,
"episode_index": 1,
"subtitle": "简繁日内封字幕",
"source": "WebRip",
"fansub": "喵萌奶茶屋&LoliHouse",
"resolution": "1080p"
}"#,
);
}
// TODO: FIXME
#[test]
fn test_bad_cases() {
test_raw_ep_parser_case(
r#"[7³ACG x 桜都字幕组] 摇曳露营△ 剧场版/映画 ゆるキャン△/Eiga Yuru Camp△ [简繁字幕] BDrip 1080p x265 FLAC 2.0"#,
r#"{
"name_zh": "摇曳露营△剧场版",
"name_zh_no_season": "摇曳露营△剧场版",
"season": 1,
"season_raw": null,
"episode_index": 1,
"subtitle": "简繁字幕",
"source": "BDrip",
"fansub": "7³ACG x 桜都字幕组",
"resolution": "1080p"
}"#,
);
test_raw_ep_parser_case(
r#"【幻樱字幕组】【4月新番】【古见同学有交流障碍症 第二季 Komi-san wa, Komyushou Desu. S02】【22】【GB_MP4】【1920X1080】"#,
r#"{
"name_en": "第二季 Komi-san wa, Komyushou Desu. S02",
"name_en_no_season": "Komi-san wa, Komyushou Desu.",
"name_zh": "古见同学有交流障碍症",
"name_zh_no_season": "古见同学有交流障碍症",
"season": 2,
"season_raw": "第二季",
"episode_index": 22,
"subtitle": "GB",
"fansub": "幻樱字幕组",
"resolution": "1920X1080"
}"#,
);
}
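// Illustrative sketch (not part of the original commit): exercising the
// season-extraction and fansub-extraction helpers directly.
#[test]
fn test_extract_season_and_fansub_helpers() {
let (name, season_raw, season) =
super::extract_season_from_title_body("王者天下 第五季 / Kingdom S5");
assert!(name.contains("王者天下"));
assert_eq!(season_raw.as_deref(), Some("第五季"));
assert_eq!(season, 5);
assert_eq!(
super::extract_fansub("[LoliHouse] Kingdom S5 - 07"),
Some("LoliHouse")
);
}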
}

View File

@@ -0,0 +1,3 @@
mod parser;
pub use parser::*;

View File

@@ -0,0 +1,316 @@
use eyre::OptionExt;
use fancy_regex::Regex as FancyRegex;
use lazy_static::lazy_static;
use quirks_path::Path;
use regex::Regex;
use serde::{Deserialize, Serialize};
use crate::extract::defs::SUBTITLE_LANG;
lazy_static! {
static ref TORRENT_EP_PARSE_RULES: Vec<FancyRegex> = {
vec![
FancyRegex::new(
r"(.*) - (\d{1,4}(?!\d|p)|\d{1,4}\.\d{1,2}(?!\d|p))(?:v\d{1,2})?(?: )?(?:END)?(.*)",
)
.unwrap(),
FancyRegex::new(
r"(.*)[\[\ E](\d{1,4}|\d{1,4}\.\d{1,2})(?:v\d{1,2})?(?: )?(?:END)?[\]\ ](.*)",
)
.unwrap(),
FancyRegex::new(r"(.*)\[(?:第)?(\d*\.*\d*)[话集話](?:END)?\](.*)").unwrap(),
FancyRegex::new(r"(.*)第?(\d*\.*\d*)[话話集](?:END)?(.*)").unwrap(),
FancyRegex::new(r"(.*)(?:S\d{2})?EP?(\d+)(.*)").unwrap(),
]
};
static ref GET_FANSUB_SPLIT_RE: Regex = Regex::new(r"[\[\]()【】()]").unwrap();
static ref GET_FANSUB_FULL_MATCH_RE: Regex = Regex::new(r"^\d+$").unwrap();
static ref GET_SEASON_AND_TITLE_SUB_RE: Regex = Regex::new(r"([Ss]|Season )\d{1,3}").unwrap();
static ref GET_SEASON_AND_TITLE_FIND_RE: Regex =
Regex::new(r"([Ss]|Season )(\d{1,3})").unwrap();
}
#[derive(Clone, Debug, Serialize, Deserialize, PartialEq, Eq)]
pub struct TorrentEpisodeMediaMeta {
pub fansub: Option<String>,
pub title: String,
pub season: i32,
pub episode_index: i32,
pub extname: String,
}
#[derive(Clone, Debug, Serialize, Deserialize, PartialEq, Eq)]
pub struct TorrentEpisodeSubtitleMeta {
pub media: TorrentEpisodeMediaMeta,
pub lang: Option<String>,
}
fn get_fansub(group_and_title: &str) -> (Option<&str>, &str) {
let n = GET_FANSUB_SPLIT_RE
.split(group_and_title)
.map(|s| s.trim())
.filter(|s| !s.is_empty())
.collect::<Vec<_>>();
match (n.first(), n.get(1)) {
(None, None) => (None, ""),
(Some(n0), None) => (None, *n0),
(Some(n0), Some(n1)) => {
if GET_FANSUB_FULL_MATCH_RE.is_match(n1) {
(None, group_and_title)
} else {
(Some(*n0), *n1)
}
}
_ => unreachable!("a vec that contains n1 must also contain n0"),
}
}
fn get_season_and_title(season_and_title: &str) -> (String, i32) {
let replaced_title = GET_SEASON_AND_TITLE_SUB_RE.replace_all(season_and_title, "");
let title = replaced_title.trim().to_string();
let season = GET_SEASON_AND_TITLE_FIND_RE
.captures(season_and_title)
.map(|m| {
m.get(2)
.unwrap_or_else(|| unreachable!("season regex should have 2 groups"))
.as_str()
.parse::<i32>()
.unwrap_or_else(|_| unreachable!("season should be a number"))
})
.unwrap_or(1);
(title, season)
}
fn get_subtitle_lang(media_name: &str) -> Option<&str> {
let media_name_lower = media_name.to_lowercase();
for (lang, lang_aliases) in SUBTITLE_LANG.iter() {
if lang_aliases
.iter()
.any(|alias| media_name_lower.contains(alias))
{
return Some(lang);
}
}
None
}
pub fn parse_episode_media_meta_from_torrent(
torrent_path: &Path,
torrent_name: Option<&str>,
season: Option<i32>,
) -> eyre::Result<TorrentEpisodeMediaMeta> {
let media_name = torrent_path
.file_name()
.ok_or_else(|| eyre::eyre!("failed to get file name of {}", torrent_path))?;
let mut match_obj = None;
for rule in TORRENT_EP_PARSE_RULES.iter() {
match_obj = if let Some(torrent_name) = torrent_name.as_ref() {
rule.captures(torrent_name)?
} else {
rule.captures(media_name)?
};
if match_obj.is_some() {
break;
}
}
if let Some(match_obj) = match_obj {
let group_season_and_title = match_obj
.get(1)
.ok_or_else(|| eyre::eyre!("should have capture group 1"))?
.as_str();
let (fansub, season_and_title) = get_fansub(group_season_and_title);
let (title, season) = if let Some(season) = season {
let (title, _) = get_season_and_title(season_and_title);
(title, season)
} else {
get_season_and_title(season_and_title)
};
let episode_index = match_obj
.get(2)
.ok_or_eyre("should have capture group 2")?
.as_str()
.parse::<i32>()
.unwrap_or(1);
let extname = torrent_path
.extension()
.map(|e| format!(".{}", e))
.unwrap_or_default();
Ok(TorrentEpisodeMediaMeta {
fansub: fansub.map(|s| s.to_string()),
title,
season,
episode_index,
extname,
})
} else {
Err(eyre::eyre!(
"failed to parse episode media meta from torrent_path='{}' torrent_name='{:?}'",
torrent_path,
torrent_name
))
}
}
pub fn parse_episode_subtitle_meta_from_torrent(
torrent_path: &Path,
torrent_name: Option<&str>,
season: Option<i32>,
) -> eyre::Result<TorrentEpisodeSubtitleMeta> {
let media_meta = parse_episode_media_meta_from_torrent(torrent_path, torrent_name, season)?;
let media_name = torrent_path
.file_name()
.ok_or_else(|| eyre::eyre!("failed to get file name of {}", torrent_path))?;
let lang = get_subtitle_lang(media_name);
Ok(TorrentEpisodeSubtitleMeta {
media: media_meta,
lang: lang.map(|s| s.to_string()),
})
}
#[cfg(test)]
mod tests {
use quirks_path::Path;
use super::{
parse_episode_media_meta_from_torrent, parse_episode_subtitle_meta_from_torrent,
TorrentEpisodeMediaMeta, TorrentEpisodeSubtitleMeta,
};
#[test]
fn test_lilith_raws_media() {
test_torrent_ep_parser(
r#"[Lilith-Raws] Boku no Kokoro no Yabai Yatsu - 01 [Baha][WEB-DL][1080p][AVC AAC][CHT][MP4].mp4"#,
r#"{"fansub": "Lilith-Raws", "title": "Boku no Kokoro no Yabai Yatsu", "season": 1, "episode_index": 1, "extname": ".mp4"}"#,
);
}
#[test]
fn test_sakurato_media() {
test_torrent_ep_parser(
r#"[Sakurato] Tonikaku Kawaii S2 [03][AVC-8bit 1080p AAC][CHS].mp4"#,
r#"{"fansub": "Sakurato", "title": "Tonikaku Kawaii", "season": 2, "episode_index": 3, "extname": ".mp4"}"#,
)
}
#[test]
fn test_lolihouse_media() {
test_torrent_ep_parser(
r#"[SweetSub&LoliHouse] Heavenly Delusion - 08 [WebRip 1080p HEVC-10bit AAC ASSx2].mkv"#,
r#"{"fansub": "SweetSub&LoliHouse", "title": "Heavenly Delusion", "season": 1, "episode_index": 8, "extname": ".mkv"}"#,
)
}
#[test]
fn test_sbsub_media() {
test_torrent_ep_parser(
r#"[SBSUB][CONAN][1082][V2][1080P][AVC_AAC][CHS_JP](C1E4E331).mp4"#,
r#"{"fansub": "SBSUB", "title": "CONAN", "season": 1, "episode_index": 1082, "extname": ".mp4"}"#,
)
}
#[test]
fn test_non_fansub_media() {
test_torrent_ep_parser(
r#"海盗战记 (2019) S04E11.mp4"#,
r#"{"title": "海盗战记 (2019)", "season": 4, "episode_index": 11, "extname": ".mp4"}"#,
)
}
#[test]
fn test_non_fansub_media_with_dirname() {
test_torrent_ep_parser(
r#"海盗战记/海盗战记 S01E01.mp4"#,
r#"{"title": "海盗战记", "season": 1, "episode_index": 1, "extname": ".mp4"}"#,
);
}
#[test]
fn test_non_fansub_tc_subtitle() {
test_torrent_ep_parser(
r#"海盗战记 S01E08.zh-tw.ass"#,
r#"{"media": { "title": "海盗战记", "season": 1, "episode_index": 8, "extname": ".ass" }, "lang": "zh-tw"}"#,
);
}
#[test]
fn test_non_fansub_sc_subtitle() {
test_torrent_ep_parser(
r#"海盗战记 S01E01.SC.srt"#,
r#"{ "media": { "title": "海盗战记", "season": 1, "episode_index": 1, "extname": ".srt" }, "lang": "zh" }"#,
)
}
#[test]
fn test_non_fansub_media_with_season_zero() {
test_torrent_ep_parser(
r#"水星的魔女(2022) S00E19.mp4"#,
r#"{"fansub": null,"title": "水星的魔女(2022)","season": 0,"episode_index": 19,"extname": ".mp4"}"#,
)
}
#[test]
fn test_shimian_fansub_media() {
test_torrent_ep_parser(
r#"【失眠搬运组】放学后失眠的你-Kimi wa Houkago Insomnia - 06 [bilibili - 1080p AVC1 CHS-JP].mp4"#,
r#"{"fansub": "失眠搬运组","title": "放学后失眠的你-Kimi wa Houkago Insomnia","season": 1,"episode_index": 6,"extname": ".mp4"}"#,
)
}
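// Illustrative sketch (not part of the original commit): the private helpers
// behind the torrent parser, exercised directly.
#[test]
fn test_get_season_and_title_and_subtitle_lang() {
let (title, season) = super::get_season_and_title("海盗战记 S04");
assert_eq!(title, "海盗战记");
assert_eq!(season, 4);
assert_eq!(
super::get_subtitle_lang("Vinland Saga S01E08.zh-tw.ass"),
Some("zh-tw")
);
}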
pub fn test_torrent_ep_parser(raw_name: &str, expected: &str) {
let extname = Path::new(raw_name)
.extension()
.map(|e| format!(".{}", e))
.unwrap_or_default()
.to_lowercase();
if extname == ".srt" || extname == ".ass" {
let expected: Option<TorrentEpisodeSubtitleMeta> = serde_json::from_str(expected).ok();
let found_raw =
parse_episode_subtitle_meta_from_torrent(Path::new(raw_name), None, None);
let found = found_raw.as_ref().ok().cloned();
if expected != found {
if found_raw.is_ok() {
println!(
"expected {} and found {} are not equal",
serde_json::to_string_pretty(&expected).unwrap(),
serde_json::to_string_pretty(&found).unwrap()
)
} else {
println!(
"expected {} and found {:#?} are not equal",
serde_json::to_string_pretty(&expected).unwrap(),
found_raw
)
}
}
assert_eq!(expected, found);
} else {
let expected: Option<TorrentEpisodeMediaMeta> = serde_json::from_str(expected).ok();
let found_raw = parse_episode_media_meta_from_torrent(Path::new(raw_name), None, None);
let found = found_raw.as_ref().ok().cloned();
if expected != found {
if found_raw.is_ok() {
println!(
"expected {} and found {} are not equal",
serde_json::to_string_pretty(&expected).unwrap(),
serde_json::to_string_pretty(&found).unwrap()
)
} else {
println!(
"expected {} and found {:#?} are not equal",
serde_json::to_string_pretty(&expected).unwrap(),
found_raw
)
}
}
assert_eq!(expected, found);
}
}
}