Merge pull request 'dev' (#2) from dev into main

Reviewed-on: #2
This commit is contained in:
岛风 2024-06-27 01:27:45 +08:00
commit a517fabd68

View File

@ -3,60 +3,75 @@ import urllib.parse
import argparse import argparse
import re import re
import time import time
import yaml
def natural_sort_key(s):
    """Sort key for natural ordering: runs of digits compare as integers.

    Splitting on digit groups turns e.g. "file10" into ['file', 10, ''],
    so "file2" sorts before "file10" instead of after it.
    """
    parts = re.split(r'([0-9]+)', s)
    return [int(part) if part.isdigit() else part.lower() for part in parts]
def generate_urls(file_paths, base_url, sub_directory, min_size):
    """Build "size:timestamp:url" entries for each file, grouped by directory.

    Args:
        file_paths: iterable of file paths (as produced by list_files_recursive).
        base_url: URL prefix; a trailing '/' is appended if missing.
        sub_directory: optional path segment inserted after base_url;
            a trailing '/' is appended if missing.
        min_size: files smaller than this many bytes are skipped.

    Returns:
        dict mapping each relative directory name ('.' for the top level)
        to a list of "size:timestamp:url" strings, in natural sort order.
    """
    if not base_url.endswith('/'):
        base_url += '/'
    if sub_directory and not sub_directory.endswith('/'):
        sub_directory += '/'
    urls = {}
    current_timestamp = int(time.time())  # one timestamp shared by the whole run
    for path in sorted(file_paths, key=natural_sort_key):
        file_size_bytes = os.path.getsize(path)
        if file_size_bytes < min_size:
            continue
        relative_path = os.path.relpath(path, start='.')
        # Fix: URLs always use '/', but os.path.relpath joins with os.sep
        # ('\\' on Windows), which urllib.parse.quote would percent-encode
        # into a broken link. Normalize separators before encoding.
        url_path = relative_path.replace(os.sep, '/')
        encoded_path = urllib.parse.quote(url_path)
        url = f"{base_url}{sub_directory}{encoded_path}"
        dir_name = os.path.dirname(relative_path)
        urls.setdefault(dir_name, []).append(
            f"{file_size_bytes}:{current_timestamp}:{url}"
        )
    return urls
def save_urls(urls, output_file, root_folder):
    """Write the grouped URL entries to output_file in a YAML-like layout.

    Top-level files ('.' group) are indented two spaces under root_folder;
    each other directory gets its own "  <dir>:" key with entries indented
    four spaces beneath it.
    """
    lines = [f"{root_folder}:\n"]
    for dir_name, files in urls.items():
        if dir_name == '.':
            lines.extend(f"  {file}\n" for file in files)
        else:
            lines.append(f"  {dir_name}:\n")
            lines.extend(f"    {file}\n" for file in files)
    with open(output_file, 'w', encoding='utf-8') as f:
        f.writelines(lines)
def parse_arguments():
    """Parse command-line options for URL generation.

    --rf is required; everything else has a sensible default.
    """
    parser = argparse.ArgumentParser(description='Generate URLs from file names.')
    # (flag, kwargs) table keeps the option list easy to scan and extend.
    option_specs = [
        ('--dir', dict(type=str, default='',
                       help='Sub-directory for generating file URLs (optional)')),
        ('--output', dict(type=str, default='urls.yaml',
                          help='Output file name (default: urls.yaml)')),
        ('--base-url', dict(type=str, default='https://link.kite.kim/feng',
                            help='Base URL for generating file URLs (default: https://link.kite.kim/feng)')),
        ('--min-size', dict(type=int, default=128*1024,
                            help='Minimum file size in bytes (default: 128KB)')),
        ('--rf', dict(type=str, required=True,
                      help='Root folder name for YAML output')),
    ]
    for flag, kwargs in option_specs:
        parser.add_argument(flag, **kwargs)
    return parser.parse_args()
def list_files_recursive(start_path='.', exclude_files=None):
    """Recursively collect all file paths under start_path.

    Args:
        start_path: directory to walk (default: current directory).
        exclude_files: optional set of bare file names to skip.

    Returns:
        list of paths (joined with the walk root for each file).
    """
    excluded = exclude_files if exclude_files is not None else set()
    collected = []
    for root, _dirs, names in os.walk(start_path):
        collected.extend(
            os.path.join(root, name) for name in names if name not in excluded
        )
    return collected
def main():
    """Entry point: scan the current tree, build URL entries, write the output file."""
    args = parse_arguments()
    # Exclude this script itself from the generated listing.
    this_script = os.path.basename(__file__)
    paths = list_files_recursive('.', {this_script})
    url_map = generate_urls(paths, args.base_url, args.dir, args.min_size)
    save_urls(url_map, args.output, args.rf)
    print(f"URL链接已保存到{args.output}")


if __name__ == '__main__':
    main()