优化程序,剔除128k以下文件
This commit is contained in:
parent
3a04b4a504
commit
fcc54a6685
26
urls/main.py
26
urls/main.py
@ -5,19 +5,21 @@ import re
|
|||||||
import time
|
import time
|
||||||
|
|
||||||
def natural_sort_key(s):
    """Sort key for natural ordering: digit runs compare as integers.

    Splits *s* on runs of digits (the capture group keeps the digit runs
    in the result), converts those runs to int and lowercases the rest,
    so e.g. "file2" sorts before "file10".
    """
    pieces = re.split('([0-9]+)', s)
    return [int(piece) if piece.isdigit() else piece.lower() for piece in pieces]
|
|
||||||
def generate_urls(file_paths, base_url, sub_directory):
|
def generate_urls(file_paths, base_url, sub_directory, min_size):
|
||||||
"""根据文件路径、基础URL和子目录生成URL链接"""
|
"""根据文件路径、基础URL、子目录和最小文件大小生成URL链接"""
|
||||||
urls = []
|
urls = []
|
||||||
if not base_url.endswith('/'):
|
if not base_url.endswith('/'):
|
||||||
base_url += '/'
|
base_url += '/'
|
||||||
if sub_directory and not sub_directory.endswith('/'):
|
if sub_directory and not sub_directory.endswith('/'):
|
||||||
sub_directory += '/'
|
sub_directory += '/'
|
||||||
current_timestamp = int(time.time())
|
current_timestamp = int(time.time()) # 移到循环外
|
||||||
for path in sorted(file_paths, key=natural_sort_key):
|
for path in sorted(file_paths, key=natural_sort_key):
|
||||||
file_size_bytes = os.path.getsize(path)
|
file_size_bytes = os.path.getsize(path)
|
||||||
|
if file_size_bytes < min_size:
|
||||||
|
continue
|
||||||
relative_path = os.path.relpath(path, start='.')
|
relative_path = os.path.relpath(path, start='.')
|
||||||
encoded_path = urllib.parse.quote(relative_path)
|
encoded_path = urllib.parse.quote(relative_path)
|
||||||
url = f"{base_url}{sub_directory}{encoded_path}"
|
url = f"{base_url}{sub_directory}{encoded_path}"
|
||||||
@ -36,22 +38,26 @@ def parse_arguments():
|
|||||||
parser.add_argument('--dir', type=str, default='', help='Sub-directory for generating file URLs (optional)')
|
parser.add_argument('--dir', type=str, default='', help='Sub-directory for generating file URLs (optional)')
|
||||||
parser.add_argument('--output', type=str, default='urls.txt', help='Output file name (default: urls.txt)')
|
parser.add_argument('--output', type=str, default='urls.txt', help='Output file name (default: urls.txt)')
|
||||||
parser.add_argument('--base-url', type=str, default='https://link.kite.kim/feng', help='Base URL for generating file URLs (default: https://link.kite.kim/feng)')
|
parser.add_argument('--base-url', type=str, default='https://link.kite.kim/feng', help='Base URL for generating file URLs (default: https://link.kite.kim/feng)')
|
||||||
|
parser.add_argument('--min-size', type=int, default=128*1024, help='Minimum file size in bytes (default: 128KB)')
|
||||||
return parser.parse_args()
|
return parser.parse_args()
|
||||||
|
|
||||||
def list_files_recursive(start_path='.', exclude_files=None):
    """Recursively list every file under *start_path*, skipping excluded names.

    ``exclude_files`` is an optional collection of base file names to omit;
    matching is by file name only, so an excluded name is skipped in every
    directory. Returns a list of joined paths.
    """
    # Avoid a mutable default argument: normalize None to an empty set here.
    excluded = set() if exclude_files is None else exclude_files
    collected = []
    for root, _dirs, names in os.walk(start_path):
        collected.extend(
            os.path.join(root, name) for name in names if name not in excluded
        )
    return collected
||||||
|
|
||||||
def main():
    """Entry point: collect files under the current directory, build their
    URLs (honoring --min-size), and write them to the output file."""
    args = parse_arguments()
    # Exclude the running script itself from the generated URL list.
    current_script = os.path.basename(__file__)
    exclude_files = {current_script}
    file_paths = list_files_recursive('.', exclude_files)
    # BUGFIX: the exclude_files refactor dropped the previous filter that
    # skipped hidden files — restore it so dotfiles (.gitignore, .DS_Store,
    # ...) do not get download URLs.
    file_paths = [f for f in file_paths if not os.path.basename(f).startswith('.')]
    urls = generate_urls(file_paths, args.base_url, args.dir, args.min_size)
    save_urls(urls, args.output)
    print(f"URL链接已保存到{args.output}")
||||||
|
|
||||||
|
Loading…
Reference in New Issue
Block a user