This commit is contained in:
honghuayuan 2026-03-16 23:36:00 +08:00
parent 5a1d91b5da
commit 03a140397a
31 changed files with 6765 additions and 966 deletions

View File

@ -0,0 +1,7 @@
{
"permissions": {
"allow": [
"Bash(python -c \":*)"
]
}
}

13
config/database.json Normal file
View File

@ -0,0 +1,13 @@
{
"database": {
"host": "192.168.100.33",
"port": 3306,
"username": "zhenggantian",
"password": "123456",
"database": "logistics",
"charset": "utf8mb4",
"pool_size": 10,
"max_overflow": 5,
"pool_recycle": 3600
}
}

BIN
data/售价尾端价格.xlsx (Stored with Git LFS)

Binary file not shown.

4
dataaccess/__init__.py Normal file
View File

@ -0,0 +1,4 @@
"""数据访问层模块"""
from dataaccess.base_dao import BaseDAO
__all__ = ["BaseDAO"]

65
dataaccess/base_dao.py Normal file
View File

@ -0,0 +1,65 @@
"""基础数据访问类"""
import pandas as pd
from typing import Any, Dict, List, Optional
from utils.gtools import MySQLconnect
class BaseDAO:
    """Base data-access object bound to one table.

    Subclasses pass their table name to ``__init__`` and reuse the generic
    query helpers here.  ``get_all`` results are memoized in a cache that is
    shared class-wide (one cache for every DAO subclass and instance), keyed
    by ``"{dbname}:{table}"``.
    """

    # Class-wide cache shared by all DAO instances/subclasses.
    _cache: Dict[str, pd.DataFrame] = {}
    _cache_enabled: bool = True

    def __init__(self, table_name: str):
        # NOTE: table_name is interpolated into SQL text below, so it must be
        # a trusted identifier supplied by subclass code, never user input.
        self.table_name = table_name

    def _get_connection(self, dbname: Optional[str] = None) -> "MySQLconnect":
        """Open a database connection (used as a context manager) for *dbname*."""
        return MySQLconnect(dbname)

    def get_all(self, dbname: Optional[str] = None) -> pd.DataFrame:
        """Return the whole table, serving repeated calls from the cache."""
        cache_key = f"{dbname}:{self.table_name}"
        if self._cache_enabled and cache_key in self._cache:
            return self._cache[cache_key]
        with self._get_connection(dbname) as conn:
            df = pd.read_sql(f"SELECT * FROM {self.table_name}", conn.con)
        if self._cache_enabled:
            self._cache[cache_key] = df
        return df

    def get_by_condition(self, conditions: Dict[str, Any], dbname: Optional[str] = None) -> pd.DataFrame:
        """Return rows matching every equality condition in *conditions*.

        Values are sent as bound ``%s`` parameters rather than formatted into
        the SQL string, closing the SQL-injection hole of the previous
        f-string version.  Keys are used as column names and therefore must
        be trusted identifiers.
        """
        where_clause = " AND ".join(f"{k} = %s" for k in conditions)
        query = f"SELECT * FROM {self.table_name} WHERE {where_clause}"
        with self._get_connection(dbname) as conn:
            df = pd.read_sql(query, conn.con, params=list(conditions.values()))
        return df

    def execute_query(self, query: str, dbname: Optional[str] = None) -> pd.DataFrame:
        """Run an arbitrary SQL *query* and return the result as a DataFrame."""
        with self._get_connection(dbname) as conn:
            df = pd.read_sql(query, conn.con)
        return df

    @classmethod
    def clear_cache(cls):
        """Drop every cached table snapshot."""
        cls._cache.clear()

    @classmethod
    def disable_cache(cls):
        """Turn caching off for all DAOs (existing entries are kept, not served)."""
        cls._cache_enabled = False

    @classmethod
    def enable_cache(cls):
        """Turn caching back on for all DAOs."""
        cls._cache_enabled = True

49
dataaccess/company_dao.py Normal file
View File

@ -0,0 +1,49 @@
"""物流公司数据访问类"""
import pandas as pd
from typing import Dict, List, Optional
from dataaccess.base_dao import BaseDAO
class CompanyDAO(BaseDAO):
    """Data access for the ``logistics_company`` table."""

    def __init__(self):
        super().__init__("logistics_company")

    def get_all_companies(self, country: str = None, active_only: bool = True) -> List[Dict]:
        """Return companies as dicts, optionally filtered by country and active flag."""
        conditions = {}
        if country:
            conditions["country"] = country
        if active_only:
            conditions["active"] = 1
        df = self.get_by_condition(conditions) if conditions else self.get_all()
        return [] if df.empty else df.to_dict("records")

    def get_company_info(self, company_code: str) -> Optional[Dict]:
        """Return the first row matching *company_code* as a dict, or None."""
        df = self.get_by_condition({"company_code": company_code})
        if df.empty:
            return None
        return df.iloc[0].to_dict()

    def get_companies_by_type(self, country: str, logistics_type: str) -> List[Dict]:
        """Return active companies of *logistics_type* in *country*.

        Rewritten to go through ``get_by_condition`` instead of formatting the
        caller-supplied values directly into an SQL string (SQL injection),
        matching how the other lookups in this class build their queries.
        """
        df = self.get_by_condition({
            "country": country,
            "logistics_type": logistics_type,
            "active": 1,
        })
        return [] if df.empty else df.to_dict("records")

View File

@ -0,0 +1,65 @@
"""英国物流价格数据访问类"""
import pandas as pd
from typing import Dict, List, Optional
from dataaccess.base_dao import BaseDAO
class UKPriceDAO(BaseDAO):
    """Data access for UK logistics prices (table ``uk_logistics_price``)."""

    def __init__(self):
        super().__init__("uk_logistics_price")

    def get_all_companies(self) -> List[str]:
        """Return the distinct UK carrier names, or [] when the table is empty."""
        df = self.get_all()
        return [] if df.empty else df["company"].unique().tolist()

    def get_company_price(self, company: str) -> pd.DataFrame:
        """Return every price row belonging to *company*."""
        return self.get_by_condition({"company": company})

    def get_company_config(self, company: str) -> Dict:
        """Return *company*'s config rows as a {config_key: config_value} mapping.

        Rows with a missing/empty ``config_key`` are skipped; duplicate keys
        keep the last occurrence, as in a plain assignment loop.
        """
        df = self.get_by_condition({"company": company})
        if df.empty:
            return {}
        return {
            rec.get("config_key"): rec.get("config_value")
            for _, rec in df.iterrows()
            if rec.get("config_key")
        }
class UKPostcodeDAO(BaseDAO):
    """Data access for UK postcode zoning (table ``uk_postcode_zone``)."""

    # Outward-code prefixes that can be remote; only these warrant a DB lookup.
    _REMOTE_PREFIXES = ["BT", "IM", "JE", "ZE", "GY", "HS", "PO", "IV", "KA", "KW", "PH", "PA"]

    def __init__(self):
        super().__init__("uk_postcode_zone")

    @staticmethod
    def _outward_code(postcode: str) -> Optional[str]:
        """Return the upper-cased first token of *postcode*, or None for blank input."""
        parts = postcode.split()
        return parts[0].upper() if parts else None

    def get_zone(self, postcode: str) -> Optional[str]:
        """Return the delivery zone for *postcode*, or None when unknown.

        Blank/whitespace-only postcodes now return None instead of raising
        IndexError (the old ``postcode.split()[0]`` had no guard).
        """
        prefix = self._outward_code(postcode)
        if prefix is None:
            return None
        df = self.get_by_condition({"postcode_prefix": prefix})
        if df.empty:
            return None
        return df.iloc[0].get("zone")

    def is_remote(self, postcode: str) -> bool:
        """Return True when the postcode is flagged remote in the DB.

        Blank input and prefixes outside the known remote list short-circuit
        to False without touching the database.
        """
        prefix = self._outward_code(postcode)
        if prefix is None:
            return False
        if not any(prefix.startswith(p) for p in self._REMOTE_PREFIXES):
            return False
        df = self.get_by_condition({"postcode_prefix": prefix, "is_remote": "1"})
        return not df.empty

View File

@ -0,0 +1,70 @@
"""美国物流价格数据访问类"""
import pandas as pd
from typing import Dict, List, Optional
from dataaccess.base_dao import BaseDAO
class USPriceDAO(BaseDAO):
    """Data access for US logistics prices (table ``us_logistics_price``)."""

    def __init__(self):
        super().__init__("us_logistics_price")

    def get_all_companies(self) -> List[str]:
        """Return the distinct US carrier names, or [] when the table is empty."""
        df = self.get_all()
        return [] if df.empty else df["company"].unique().tolist()

    def get_company_price(self, company: str) -> pd.DataFrame:
        """Return every price row belonging to *company*."""
        return self.get_by_condition({"company": company})

    def get_company_config(self, company: str) -> Dict:
        """Return *company*'s config rows as a {config_key: config_value} mapping.

        Rows with a missing/empty ``config_key`` are skipped; duplicate keys
        keep the last occurrence, as in a plain assignment loop.
        """
        df = self.get_by_condition({"company": company})
        if df.empty:
            return {}
        return {
            rec.get("config_key"): rec.get("config_value")
            for _, rec in df.iterrows()
            if rec.get("config_key")
        }
class USPostcodeDAO(BaseDAO):
    """Data access for US zip-code zoning (table ``us_postcode_zone``)."""

    def __init__(self):
        super().__init__("us_postcode_zone")

    def get_zone(self, postcode: str, port: str = "west") -> Optional[str]:
        """Return the shipping zone for the 5-digit zip relative to *port*."""
        zip5 = postcode[:5]
        df = self.get_by_condition({"postcode": zip5, "port": port})
        if df.empty:
            return None
        return df.iloc[0].get("zone")

    def get_remote_type(self, postcode: str) -> int:
        """Return the remoteness level: 0 normal, 1 remote, 2 super, 3 extreme.

        A missing row, or a NULL/NaN ``remote_type`` value, now maps to 0;
        the old ``int(...)`` call raised ValueError when pandas surfaced the
        column as NaN.
        """
        zip5 = postcode[:5]
        df = self.get_by_condition({"postcode": zip5})
        if df.empty:
            return 0
        value = df.iloc[0].get("remote_type", 0)
        if value is None or pd.isna(value):
            return 0
        return int(value)

    def is_contiguous(self, postcode: str) -> bool:
        """Return True when the zip exists in the zone table (contiguous US)."""
        df = self.get_by_condition({"postcode": postcode[:5]})
        return not df.empty

View File

@ -5,14 +5,15 @@ import re
import pytesseract import pytesseract
from PIL import Image from PIL import Image
from tempfile import NamedTemporaryFile from tempfile import NamedTemporaryFile
from selenium.webdriver.chrome.service import Service
from selenium.webdriver.common.by import By
from selenium.webdriver.remote.file_detector import UselessFileDetector #跳过文件检测 1
from selenium import webdriver
import redis import redis
def vercode(cookie=None): def vercode(cookie=None):
urls = 'https://cp.maso.hk:4433/index.php?main=login&act=vercode'#图片链接(每秒更新,距离当前时间最近的时间戳最近为可使用图片) urls = 'https://cp.baycheer.com:4433/index.php?main=login&act=vercode'#图片链接(每秒更新,距离当前时间最近的时间戳最近为可使用图片)
if cookie is not None: if cookie is not None:
haders = { haders = {
'Cookie': cookie #使用之前的图片链接Cooike 'Cookie': cookie #使用之前的图片链接Cooike
@ -43,7 +44,7 @@ def Vc(user='honghuayuan',pswd= 'a12345'):
try : try :
r = redis.StrictRedis(host='192.168.100.44', port=7379, db=11,password="123456") r = redis.StrictRedis(host='192.168.100.44', port=7379, db=11,password="123456")
viewcookie = r.get('cpmasosessid'+user) viewcookie = r.get('cpmasosessid'+user)
url="https://cp.maso.hk/index.php?main=panel"#主页 url="https://cp.baycheer.com/index.php?main=panel"#主页
header={'Cookie':viewcookie.decode('utf8')} header={'Cookie':viewcookie.decode('utf8')}
resp = requests.get(url=url,headers=header) resp = requests.get(url=url,headers=header)
exists = re.findall(r'欢迎进入本公司后台管理系统',resp.text) exists = re.findall(r'欢迎进入本公司后台管理系统',resp.text)
@ -57,7 +58,7 @@ def Vc(user='honghuayuan',pswd= 'a12345'):
while resetcookie ==1: while resetcookie ==1:
urls = 'https://cp.maso.hk:4433/index.php?main=login&act=vercode'#图片链接(每秒更新,距离当前时间最近的时间戳最近为可使用图片) urls = 'https://cp.baycheer.com:4433/index.php?main=login&act=vercode'#图片链接(每秒更新,距离当前时间最近的时间戳最近为可使用图片)
res = requests.post(url=urls) res = requests.post(url=urls)
ress = re.findall('Cookie (.*?) for',str(res.cookies))#获取图片链接Cooike图片Cooike必须和登录请求的Cooike保持一致 ress = re.findall('Cookie (.*?) for',str(res.cookies))#获取图片链接Cooike图片Cooike必须和登录请求的Cooike保持一致
f1 = NamedTemporaryFile(mode='wb+',suffix='.png') f1 = NamedTemporaryFile(mode='wb+',suffix='.png')
@ -77,7 +78,7 @@ def Vc(user='honghuayuan',pswd= 'a12345'):
bim.save(f2)#处理完成导入 bim.save(f2)#处理完成导入
f2.seek(0) f2.seek(0)
text = pytesseract.image_to_string(Image.open(f2))#开始识别 text = pytesseract.image_to_string(Image.open(f2))#开始识别
url = 'https://cp.maso.hk:4433/index.php?main=login&act=check' url = 'https://cp.baycheer.com:4433/index.php?main=login&act=check'
haders = { haders = {
'Cookie': ress[0] #使用之前的图片链接Cooike 'Cookie': ress[0] #使用之前的图片链接Cooike
} }
@ -204,9 +205,9 @@ def zenid():
options.add_argument('--disable-gpu') options.add_argument('--disable-gpu')
options.add_argument(' -port=9222') options.add_argument(' -port=9222')
driver=webdriver.Remote(service.service_url,options=options) driver=webdriver.Remote(service.service_url,options=options)
mainurl='http://cp.maso.hk/index.php?main=main' mainurl='http://cp.baycheer.com/index.php?main=main'
panelUrl = "http://cp.maso.hk/index.php?main=panel"#地址 panelUrl = "http://cp.baycheer.com/index.php?main=panel"#地址
remoteurl='http://cp.maso.hk/index.php?main=sys_remote_login&act=remotelogin&id=303' remoteurl='http://cp.baycheer.com/index.php?main=sys_remote_login&act=remotelogin&id=303'
driver.get(mainurl) driver.get(mainurl)
driver.delete_all_cookies() driver.delete_all_cookies()
driver.add_cookie({'name':'sessid','value':f'{t}'}) driver.add_cookie({'name':'sessid','value':f'{t}'})

View File

@ -5,6 +5,7 @@ import re
import pandas import pandas
from logisticsClass.logisticsBaseClass import LogisticsType, TailLogistics from logisticsClass.logisticsBaseClass import LogisticsType, TailLogistics
from utils.gtools import MySQLconnect
class DPDLogistics_UK(TailLogistics): class DPDLogistics_UK(TailLogistics):
@ -218,53 +219,75 @@ class KPNVlogistics_UK(TailLogistics):
country = 'United Kingdom' country = 'United Kingdom'
company = '卡派-NV' company = '卡派-NV'
currency = 'GBP' currency = 'GBP'
logistics_type = LogisticsType.COURIER logistics_type = LogisticsType.COURIER
# 数据库表数据(内存缓存)
_zone_cache = None # 邮编分区缓存
_price_cache = None # 价格缓存
parent_current_directory = Path(__file__).parent.parent
price_path = parent_current_directory.joinpath("data")
_price_files = price_path.joinpath("英国卡派.xlsx")
ltl_cost = None
ltl_zone = None
def __new__(cls): def __new__(cls):
"""实现单例模式,只加载一次文件""" """实现单例模式,从数据库加载数据"""
if cls.ltl_cost is None or cls.ltl_zone is None: if cls._zone_cache is None or cls._price_cache is None:
cls.ltl_cost = pandas.read_excel(cls._price_files,sheet_name="运费") cls._load_from_db()
cls.ltl_zone = pandas.read_excel(cls._price_files,sheet_name="分区")
return super().__new__(cls) return super().__new__(cls)
@classmethod
def _load_from_db(cls):
"""从数据库加载分区和价格数据"""
conn = MySQLconnect('logistics')
with conn as c:
# 加载分区表
c.cur.execute("SELECT `邮编`, `区域` FROM uk_XLCarrier_postcode_partition")
cls._zone_cache = {}
for row in c.cur.fetchall():
postcode = str(row[0]) if row[0] else ""
zone = str(row[1]) if row[1] else ""
cls._zone_cache[postcode] = zone
# 加载价格表
c.cur.execute("SELECT `分区`, `托盘`, `运费` FROM uk_XLCarrier_postcode_fee")
cls._price_cache = {}
for row in c.cur.fetchall():
zone = str(row[0]) if row[0] else ""
tuopan = int(row[1]) if row[1] else 0
fee = float(row[2]) if row[2] else 0
cls._price_cache[(zone, tuopan)] = fee
def __init__(self): def __init__(self):
super().__init__() super().__init__()
self.base_fee = 0 self.base_fee = 0
self.fuel_rate = 0.1 self.fuel_rate = 0.1
def is_remote(self,postcode):
def is_remote(self, postcode):
"""根据邮编分区,返回分区""" """根据邮编分区,返回分区"""
postcode_prefix = postcode.split()[0].upper() postcode_prefix = postcode.split()[0].upper()
postcode_prefix = str(postcode_prefix) zone = self._zone_cache.get(postcode_prefix)
zone_df = self.ltl_zone[self.ltl_zone['邮编']== postcode_prefix] if zone:
if not zone_df.empty: return zone
return zone_df['区域'].values[0]
return "不在配送范围内" return "不在配送范围内"
def calculate_fee(self, packages, postcode): def calculate_fee(self, packages, postcode):
detail_amount = { detail_amount = {
"base":0.00, "base": 0.00,
"fuel":0.00, "fuel": 0.00,
"tail_amount":0.00 "tail_amount": 0.00
} }
zone = self.is_remote(postcode) zone = self.is_remote(postcode)
if zone == "不在配送范围内": if zone == "不在配送范围内":
detail_amount['tail_amount'] = 99999 detail_amount['tail_amount'] = 99999
return detail_amount return detail_amount
for package in packages: for package in packages:
tuopan = math.ceil(package.fst_size/120) tuopan = math.ceil(package.fst_size / 120)
tuopan = min(tuopan, 7) tuopan = min(tuopan, 7)
base_df = self.ltl_cost[(self.ltl_cost['分区']==zone)&(self.ltl_cost['托盘']==tuopan)] fee = self._price_cache.get((zone, tuopan))
if base_df.empty: if fee is None:
detail_amount['tail_amount'] = 99999 detail_amount['tail_amount'] = 99999
return detail_amount return detail_amount
self.base_fee = base_df['运费'].values[0] self.base_fee = fee
price = self.base_fee * tuopan/len(packages) price = self.base_fee * tuopan / len(packages)
detail_amount['base'] += price detail_amount['base'] += price
detail_amount['fuel'] = detail_amount['base'] * self.fuel_rate detail_amount['fuel'] = detail_amount['base'] * self.fuel_rate
detail_amount['tail_amount'] = detail_amount['base']+detail_amount['fuel'] detail_amount['tail_amount'] = detail_amount['base'] + detail_amount['fuel']
return detail_amount return detail_amount
# class KPDXLogistics_UK(TailLogistics): # class KPDXLogistics_UK(TailLogistics):

View File

@ -5,6 +5,7 @@ import pandas
from logisticsClass.logisticsBaseClass import LogisticsType, TailLogistics from logisticsClass.logisticsBaseClass import LogisticsType, TailLogistics
from data.us_zone import zone_west, zone_east from data.us_zone import zone_west, zone_east
from pathlib import Path from pathlib import Path
from utils.gtools import MySQLconnect
""" """
port:west(default),east port:west(default),east
@ -84,6 +85,10 @@ class FedexLogistics(WestLogistics_US):
"""Fedex""" """Fedex"""
country = "United States" country = "United States"
country_code = "US" country_code = "US"
# 价格缓存(子类可覆盖)
_price_cache = None
def __init__(self): def __init__(self):
super().__init__() super().__init__()
self.volume_weight_ratio:int # lbs抛重系数 self.volume_weight_ratio:int # lbs抛重系数
@ -109,6 +114,16 @@ class FedexLogistics(WestLogistics_US):
self.bigpackage_2:float # 大包裹费 self.bigpackage_2:float # 大包裹费
self.bigpackage_3:float self.bigpackage_3:float
self.bigpackage_5:float self.bigpackage_5:float
def get_price(self, lbs: int, zone: int) -> float:
"""获取价格(子类可覆盖)"""
if self._price_cache is not None:
return self._price_cache.get(lbs, {}).get(zone, 0)
# 默认从DataFrame获取(兼容旧代码)
if self.base_price is not None:
result = self.base_price[self.base_price['lbs.'] == lbs][zone]
return result.values[0] if len(result) > 0 else 0
return 0
self.bigpackage_7:float self.bigpackage_7:float
self.bigpackage_peak:float # 大包裹旺季附加费 self.bigpackage_peak:float # 大包裹旺季附加费
self.return_package:float # 超大包裹(不可发) self.return_package:float # 超大包裹(不可发)
@ -185,8 +200,8 @@ class FedexLogistics(WestLogistics_US):
detail_amount['big_package_peak'] += self.bigpackage_peak if package.girth_inch >130 or package.fst_inch >96 else 0 detail_amount['big_package_peak'] += self.bigpackage_peak if package.girth_inch >130 or package.fst_inch >96 else 0
detail_amount['residential_delivery'] += self.residential detail_amount['residential_delivery'] += self.residential
detail_amount['residential_peak'] += self.residential_peak detail_amount['residential_peak'] += self.residential_peak
detail_amount['base'] +=self.base_price[self.base_price['lbs.']==math.ceil(cal_weight)][zone].values[0] detail_amount['base'] += self.get_price(math.ceil(cal_weight), zone)
for key in detail_amount: for key in detail_amount:
if key!= 'tail_amount' and key!= 'fuel': if key!= 'tail_amount' and key!= 'fuel':
@ -198,18 +213,37 @@ class FedexLogistics(WestLogistics_US):
class FedexPPLogistics_US(FedexLogistics): class FedexPPLogistics_US(FedexLogistics):
company="Fedex-彩虹小马" company="Fedex-彩虹小马"
parent_current_directory = Path(__file__).parent.parent # 数据库价格缓存
price_path = parent_current_directory.joinpath("data") _price_cache = None
_price_files = price_path.joinpath("美国快递.xlsx")
base_price = None
def __new__(cls): def __new__(cls):
if cls.base_price is None: if cls._price_cache is None:
cls.base_price = pandas.read_excel(cls._price_files,sheet_name='邮差小马') cls._load_from_db()
return super().__new__(cls) return super().__new__(cls)
@classmethod
def _load_from_db(cls):
"""从数据库加载价格数据"""
conn = MySQLconnect('logistics')
with conn as c:
c.cur.execute("SELECT `lbs.`, `2`, `3`, `4`, `5`, `6`, `7`, `8` FROM us_delivery_postpony")
cls._price_cache = {}
for row in c.cur.fetchall():
lbs = int(row[0])
cls._price_cache[lbs] = {
2: float(row[1]) if row[1] else 0,
3: float(row[2]) if row[2] else 0,
4: float(row[3]) if row[3] else 0,
5: float(row[4]) if row[4] else 0,
6: float(row[5]) if row[5] else 0,
7: float(row[6]) if row[6] else 0,
8: float(row[7]) if row[7] else 0,
}
def __init__(self): def __init__(self):
super().__init__() super().__init__()
self.volume_weight_ratio=250 # lbs抛重系数 self.volume_weight_ratio=250 # lbs抛重系数
self.residential = 6.38 self.residential = 6.38
self.residential_peak = 0 # 0.33 0.6 # 报价表没写,账单有 self.residential_peak = 0 # 0.33 0.6 # 报价表没写,账单有
self.oversize_2 = 4.50 self.oversize_2 = 4.50
self.oversize_3 = 4.99 self.oversize_3 = 4.99
@ -231,7 +265,7 @@ class FedexPPLogistics_US(FedexLogistics):
self.bigpackage_3 = 36.16 self.bigpackage_3 = 36.16
self.bigpackage_5 = 38.57 self.bigpackage_5 = 38.57
self.bigpackage_7 = 41.78 self.bigpackage_7 = 41.78
self.bigpackage_peak = 0 # 45.26 53.56# 大包裹旺季附加费 self.bigpackage_peak = 0 # 45.26 53.56# 大包裹旺季附加费
self.fuel_rate = 0.16 # 燃油费率 self.fuel_rate = 0.16 # 燃油费率
self.return_package = 1419.34 # 超大包裹(不可发) self.return_package = 1419.34 # 超大包裹(不可发)
@ -239,14 +273,33 @@ class FedexKHLogistics_US(FedexLogistics):
"""金宏亚""" """金宏亚"""
company = "Fedex-金宏亚" company = "Fedex-金宏亚"
parent_current_directory = Path(__file__).parent.parent # 数据库价格缓存
price_path = parent_current_directory.joinpath("data") _price_cache = None
_price_files = price_path.joinpath("美国快递.xlsx")
base_price = None
def __new__(cls): def __new__(cls):
if cls.base_price is None: if cls._price_cache is None:
cls.base_price = pandas.read_excel(cls._price_files,sheet_name='金宏亚') cls._load_from_db()
return super().__new__(cls) return super().__new__(cls)
@classmethod
def _load_from_db(cls):
"""从数据库加载价格数据"""
conn = MySQLconnect('logistics')
with conn as c:
c.cur.execute("SELECT `lbs.`, `2`, `3`, `4`, `5`, `6`, `7`, `8` FROM us_delivery_kinghood")
cls._price_cache = {}
for row in c.cur.fetchall():
lbs = int(row[0])
cls._price_cache[lbs] = {
2: float(row[1]) if row[1] else 0,
3: float(row[2]) if row[2] else 0,
4: float(row[3]) if row[3] else 0,
5: float(row[4]) if row[4] else 0,
6: float(row[5]) if row[5] else 0,
7: float(row[6]) if row[6] else 0,
8: float(row[7]) if row[7] else 0,
}
def __init__(self): def __init__(self):
super().__init__() super().__init__()
self.volume_weight_ratio = 250 # lbs抛重系数 self.volume_weight_ratio = 250 # lbs抛重系数
@ -280,14 +333,33 @@ class FedexHOMELogistics_US(FedexLogistics):
"""FEDEX-HOME (1-35%-30%)""" """FEDEX-HOME (1-35%-30%)"""
company = "Fedex-HOME" company = "Fedex-HOME"
parent_current_directory = Path(__file__).parent.parent # 数据库价格缓存
price_path = parent_current_directory.joinpath("data") _price_cache = None
_price_files = price_path.joinpath("美国快递.xlsx")
base_price = None
def __new__(cls): def __new__(cls):
if cls.base_price is None: if cls._price_cache is None:
cls.base_price = pandas.read_excel(cls._price_files,sheet_name='FEDEX') cls._load_from_db()
return super().__new__(cls) return super().__new__(cls)
@classmethod
def _load_from_db(cls):
"""从数据库加载价格数据"""
conn = MySQLconnect('logistics')
with conn as c:
c.cur.execute("SELECT `lbs.`, `2`, `3`, `4`, `5`, `6`, `7`, `8` FROM us_fedex_home")
cls._price_cache = {}
for row in c.cur.fetchall():
lbs = int(row[0])
cls._price_cache[lbs] = {
2: float(row[1]) if row[1] else 0,
3: float(row[2]) if row[2] else 0,
4: float(row[3]) if row[3] else 0,
5: float(row[4]) if row[4] else 0,
6: float(row[5]) if row[5] else 0,
7: float(row[6]) if row[6] else 0,
8: float(row[7]) if row[7] else 0,
}
def __init__(self): def __init__(self):
super().__init__() super().__init__()
self.volume_weight_ratio = 250 # lbs抛重系数 self.volume_weight_ratio = 250 # lbs抛重系数
@ -316,18 +388,38 @@ class FedexHOMELogistics_US(FedexLogistics):
self.bigpackage_peak =0 # 42.25# 大包裹旺季附加费 self.bigpackage_peak =0 # 42.25# 大包裹旺季附加费
self.return_package = 1325 # 超大包裹(不可发) self.return_package = 1325 # 超大包裹(不可发)
self.fuel_rate = 0.18 # 燃油费率 self.fuel_rate = 0.18 # 燃油费率
class FedexGROUDLogistics_US(FedexLogistics): class FedexGROUDLogistics_US(FedexLogistics):
"""FEDEX-GROUD (1-35%-30%)""" """FEDEX-GROUD (1-35%-30%)"""
company = "Fedex-GROUD" company = "Fedex-GROUD"
parent_current_directory = Path(__file__).parent.parent # 数据库价格缓存(与FedexHOMELogistics_US共享us_fedex_home表)
price_path = parent_current_directory.joinpath("data") _price_cache = None
_price_files = price_path.joinpath("美国快递.xlsx")
base_price = None
def __new__(cls): def __new__(cls):
if cls.base_price is None: if cls._price_cache is None:
cls.base_price = pandas.read_excel(cls._price_files,sheet_name='FEDEX') cls._load_from_db()
return super().__new__(cls) return super().__new__(cls)
@classmethod
def _load_from_db(cls):
"""从数据库加载价格数据"""
conn = MySQLconnect('logistics')
with conn as c:
c.cur.execute("SELECT `lbs.`, `2`, `3`, `4`, `5`, `6`, `7`, `8` FROM us_fedex_home")
cls._price_cache = {}
for row in c.cur.fetchall():
lbs = int(row[0])
cls._price_cache[lbs] = {
2: float(row[1]) if row[1] else 0,
3: float(row[2]) if row[2] else 0,
4: float(row[3]) if row[3] else 0,
5: float(row[4]) if row[4] else 0,
6: float(row[5]) if row[5] else 0,
7: float(row[6]) if row[6] else 0,
8: float(row[7]) if row[7] else 0,
}
def __init__(self): def __init__(self):
super().__init__() super().__init__()
self.volume_weight_ratio = 250 # lbs抛重系数 self.volume_weight_ratio = 250 # lbs抛重系数

165
logistics_service.py Normal file
View File

@ -0,0 +1,165 @@
"""物流费用计算统一入口服务"""
import re
from typing import Dict, List, Any, Optional
from utils.Package import Package, Package_group
from utils.countryOperator import OperateCountry
from logisticsClass.logisticsBaseClass import PortType
class LogisticsService:
    """Unified entry point for logistics fee calculation."""

    @staticmethod
    def _detect_country(postcode: str) -> str:
        """Infer the destination country from the postcode format.

        Raises:
            ValueError: when the postcode matches no supported format.
        """
        postcode = postcode.strip().upper()
        # UK: 1-2 letters + digit (+ alnum), optional space, digit + 2 letters
        # e.g. SW1A 1AA, M1 1AA, BT1 1AA, AA11 1AA
        if re.match(r'^[A-Z]{1,2}[0-9][A-Z0-9]?\s?[0-9][A-Z]{2}$', postcode):
            return "UK"
        # US: 5 digits or ZIP+4.  NOTE: a bare 5-digit code is ambiguous with
        # several European countries (e.g. Germany, France); US takes
        # precedence here.  The old trailing `^\d{5}$` -> "DE" branch was
        # unreachable (fully subsumed by this pattern) and has been removed.
        if re.match(r'^\d{5}(-\d{4})?$', postcode):
            return "US"
        # Australia: exactly 4 digits.
        if re.match(r'^\d{4}$', postcode):
            return "AU"
        raise ValueError(f"无法识别的邮编格式: {postcode}")

    @staticmethod
    def _parse_packages(packages_data: List[Dict]) -> Package_group:
        """Build a Package_group from raw dicts (dimensions in cm, weight in g)."""
        packages = []
        for i, pkg in enumerate(packages_data):
            name = pkg.get("name", f"包裹{i+1}")
            package = Package(
                name,
                pkg.get("length", 0),
                pkg.get("width", 0),
                pkg.get("height", 0),
                pkg.get("weight", 0),
            )
            packages.append(package)
        return Package_group(packages)

    @staticmethod
    def calculate(postcode: str, packages_data: List[Dict],
                  port: PortType = PortType.DEFAULT) -> Dict[str, Any]:
        """Compute fees across all channels and pick the cheapest one.

        Args:
            postcode: recipient postcode; the country is auto-detected.
            packages_data: list of dicts like
                {"length": 63, "width": 59, "height": 48, "weight": 8000}
                (cm / g).
            port: port type, defaults to PortType.DEFAULT.

        Returns:
            Dict with the optimal channel plus per-channel availability/fees.
        """
        # 1. Detect the destination country from the postcode.
        country = LogisticsService._detect_country(postcode)
        # 2. Build package objects from the raw payload.
        packages = LogisticsService._parse_packages(packages_data)
        # 3. Country-specific operator.
        op_country = OperateCountry(country, port, packages, postcode)
        # 4. Fees for every channel; 99999 is the "unavailable" sentinel.
        all_fees = op_country.get_all_tail_info()
        # 5. Keep only usable channels and pick the cheapest.
        valid_fees = {k: v for k, v in all_fees.items() if v < 99999}
        if not valid_fees:
            return {
                "country": country,
                "postcode": postcode,
                "optimal_channel": None,
                "optimal_fee": None,
                "currency": None,
                "all_channels": all_fees,
                "error": "所有渠道均不可用"
            }
        optimal_channel = min(valid_fees, key=valid_fees.get)
        optimal_fee = valid_fees[optimal_channel]
        currency = op_country.get_tail_currency(optimal_channel)
        # 6. Assemble the result payload.
        result = {
            "country": country,
            "postcode": postcode,
            "optimal_channel": optimal_channel,
            "optimal_fee": optimal_fee,
            "currency": currency,
            "all_channels": {},
            "package_count": len(packages_data),
            # assumes Package_group is iterable and weight is grams — TODO confirm
            "total_weight": sum(p.weight for p in packages) / 1000,  # kg
        }
        # 7. Per-channel details (None fee marks an unavailable channel).
        for company, fee in all_fees.items():
            result["all_channels"][company] = {
                "fee": fee if fee < 99999 else None,
                "currency": op_country.get_tail_currency(company),
                "type": op_country.get_logistic_type(company),
                "available": fee < 99999
            }
        return result

    @staticmethod
    def calculate_us(postcode: str, packages_data: List[Dict],
                     port: PortType = PortType.DEFAULT) -> Dict[str, Any]:
        """Convenience wrapper: compute US logistics fees."""
        return LogisticsService.calculate(postcode, packages_data, port)

    @staticmethod
    def calculate_uk(postcode: str, packages_data: List[Dict],
                     port: PortType = PortType.DEFAULT) -> Dict[str, Any]:
        """Convenience wrapper: compute UK logistics fees."""
        return LogisticsService.calculate(postcode, packages_data, port)

    @staticmethod
    def calculate_au(postcode: str, packages_data: List[Dict],
                     port: PortType = PortType.DEFAULT) -> Dict[str, Any]:
        """Convenience wrapper: compute Australian logistics fees."""
        return LogisticsService.calculate(postcode, packages_data, port)

    @staticmethod
    def calculate_eur(postcode: str, packages_data: List[Dict],
                      port: PortType = PortType.DEFAULT) -> Dict[str, Any]:
        """Convenience wrapper: compute European logistics fees."""
        return LogisticsService.calculate(postcode, packages_data, port)

    @staticmethod
    def get_company_detail(postcode: str, packages_data: List[Dict],
                           company_name: str) -> Dict[str, Any]:
        """Return the fee breakdown for one specific logistics company."""
        country = LogisticsService._detect_country(postcode)
        packages = LogisticsService._parse_packages(packages_data)
        op_country = OperateCountry(country, PortType.DEFAULT, packages, postcode)
        detail = op_country.get_detail_amount(company_name, packages, postcode)
        currency = op_country.get_tail_currency(company_name)
        return {
            "company": company_name,
            "currency": currency,
            "detail": detail,
            "total": detail.get("tail_amount", 0)
        }

469
scripts/import_data.py Normal file
View File

@ -0,0 +1,469 @@
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
数据导入脚本
将XLSX文件中的数据导入到数据库
"""
import pandas as pd
import numpy as np
from pathlib import Path
from utils.gtools import MySQLconnect
from utils.config_manager import config
class DataImporter:
"""数据导入类"""
def __init__(self):
self.data_dir = Path(__file__).parent.parent / "data"
def get_connection(self, dbname: str = None):
    """Create a MySQLconnect for *dbname* (None selects the default database)."""
    connection = MySQLconnect(dbname)
    return connection
def execute_sql(self, sql: str, dbname: str = None):
    """Execute a single SQL statement against *dbname* and commit."""
    conn = self.get_connection(dbname)
    with conn as c:
        c.cur.execute(sql)
        c.con.commit()
def execute_many(self, sql: str, data: list, dbname: str = None):
    """Batch-execute *sql* once per row of *data*, then commit."""
    conn = self.get_connection(dbname)
    with conn as c:
        c.cur.executemany(sql, data)
        c.con.commit()
def truncate_table(self, table_name: str, dbname: str = None):
    """Remove every row from *table_name* (TRUNCATE is DDL — not rolled back)."""
    statement = f"TRUNCATE TABLE {table_name}"
    self.execute_sql(statement, dbname)
# ==================== 英国数据导入 ====================
def import_uk_postcode_zone(self):
    """Load the UK postcode->zone sheet into ``uk_postcode_zone``."""
    print("导入英国邮编分区...")
    df = pd.read_excel(self.data_dir / "英国卡派.xlsx", sheet_name="分区")
    df.columns = ["postcode_prefix", "zone"]
    # is_remote is not present in the sheet; every row defaults to 0.
    rows = [
        (str(rec["postcode_prefix"]).strip(), str(rec["zone"]).strip(), 0)
        for _, rec in df.iterrows()
    ]
    sql = "INSERT IGNORE INTO uk_postcode_zone (postcode_prefix, zone, is_remote) VALUES (%s, %s, %s)"
    self.execute_many(sql, rows)
    print(f" 已导入 {len(rows)} 条记录")
def import_uk_kp_nv_price(self):
    """Load the UK KP-NV freight sheet into ``uk_kp_nv_price``."""
    print("导入英国卡派NV运费...")
    df = pd.read_excel(self.data_dir / "英国卡派.xlsx", sheet_name="运费")
    df.columns = ["zone", "tuopan", "fee"]
    rows = [
        (str(rec["zone"]).strip(), int(rec["tuopan"]), float(rec["fee"]))
        for _, rec in df.iterrows()
    ]
    sql = "INSERT INTO uk_kp_nv_price (zone, tuopan, fee) VALUES (%s, %s, %s)"
    self.execute_many(sql, rows)
    print(f" 已导入 {len(rows)} 条记录")
# ==================== 美国数据导入 ====================
def import_us_fedex_pp_price(self):
    """Load the Fedex 'PostPony' zone-price sheet into ``us_fedex_pp_price``."""
    print("导入美国Fedex邮差小马价格...")
    df = pd.read_excel(self.data_dir / "美国快递.xlsx", sheet_name="邮差小马")
    # Normalise headers: first column is the weight in lbs, the rest are zones.
    df.columns = ["lbs"] + [str(c) for c in df.columns[1:]]
    rows = []
    for _, rec in df.iterrows():
        fees = []
        for zone in range(2, 9):
            val = rec.get(str(zone), 0)
            fees.append(float(val) if pd.notna(val) else 0)
        rows.append((int(rec["lbs"]), *fees))
    sql = """INSERT INTO us_fedex_pp_price
            (lbs, zone_2, zone_3, zone_4, zone_5, zone_6, zone_7, zone_8)
            VALUES (%s, %s, %s, %s, %s, %s, %s, %s)"""
    self.execute_many(sql, rows)
    print(f" 已导入 {len(rows)} 条记录")
def import_us_fedex_kh_price(self):
    """Load the Fedex 'Kinghood' zone-price sheet into ``us_fedex_kh_price``."""
    print("导入美国Fedex金宏亚价格...")
    df = pd.read_excel(self.data_dir / "美国快递.xlsx", sheet_name="金宏亚")
    # Normalise headers: first column is the weight in lbs, the rest are zones.
    df.columns = ["lbs"] + [str(c) for c in df.columns[1:]]
    rows = []
    for _, rec in df.iterrows():
        fees = []
        for zone in range(2, 9):
            val = rec.get(str(zone), 0)
            fees.append(float(val) if pd.notna(val) else 0)
        rows.append((int(rec["lbs"]), *fees))
    sql = """INSERT INTO us_fedex_kh_price
            (lbs, zone_2, zone_3, zone_4, zone_5, zone_6, zone_7, zone_8)
            VALUES (%s, %s, %s, %s, %s, %s, %s, %s)"""
    self.execute_many(sql, rows)
    print(f" 已导入 {len(rows)} 条记录")
def import_us_fedex_price(self):
    """Load the Fedex zone-price sheets into ``us_fedex_price``.

    Both the "FEDEX" and "FEDEX国内" sheets are imported.  The original code
    picked the target table with a conditional whose two branches were the
    SAME string, so both sheets have always landed in ``us_fedex_price``;
    the dead conditional is removed here with behavior unchanged.
    TODO(review): if "FEDEX国内" was meant to feed its own table, add it.
    """
    print("导入美国Fedex价格...")
    for sheet_name in ["FEDEX", "FEDEX国内"]:
        try:
            df = pd.read_excel(self.data_dir / "美国快递.xlsx", sheet_name=sheet_name)
            # Sheets without the expected 'lbs.' header are silently skipped,
            # matching the original behavior.
            if "lbs." not in df.columns:
                continue
            df.columns = ["lbs"] + [str(c) for c in df.columns[1:8]]
            data = []
            for _, row in df.iterrows():
                row_data = [int(row["lbs"])]
                for i in range(2, 9):
                    val = row.get(str(i), 0)
                    row_data.append(float(val) if pd.notna(val) else 0)
                data.append(tuple(row_data))
            sql = """INSERT INTO us_fedex_price
                    (lbs, zone_2, zone_3, zone_4, zone_5, zone_6, zone_7, zone_8)
                    VALUES (%s, %s, %s, %s, %s, %s, %s, %s)"""
            self.execute_many(sql, data)
            print(f" 已导入 {len(data)} 条记录 ({sheet_name})")
        except Exception as e:
            # Best-effort import: a bad/missing sheet must not abort the run.
            print(f" 跳过 {sheet_name}: {e}")
def import_us_giga_price(self):
    """Load the GIGA local-fee sheet into ``us_giga_price``."""
    print("导入美国GIGA价格...")
    df = pd.read_excel(self.data_dir / "GIGA base_fee_20240607223514.xlsx", sheet_name="Local Fee Data")

    def text(rec, col):
        # NaN cells become empty strings.
        return str(rec[col]) if pd.notna(rec[col]) else ""

    def number(rec, col):
        # NaN cells become 0.
        return float(rec[col]) if pd.notna(rec[col]) else 0

    rows = []
    for _, rec in df.iterrows():
        # Zip codes are read as numbers by pandas; round-trip through int
        # to drop the trailing ".0" before storing as text.
        zip_code = str(int(rec["Zip Code"])) if pd.notna(rec["Zip Code"]) else ""
        rows.append((
            zip_code,
            text(rec, "Delivery Warehouse"),
            text(rec, "General Area"),
            text(rec, "Fee Type"),
            text(rec, "Zone"),
            number(rec, "Local Pickup Fee"),
            number(rec, "Warehouse Handling Fee"),
            number(rec, "Delivery Fee Rate"),
            number(rec, "Additional Delivery Fee"),
            number(rec, "Assembly Fee"),
        ))
    sql = """INSERT INTO us_giga_price
            (zip_code, delivery_warehouse, general_area, fee_type, zone,
            local_pickup_fee, warehouse_handling_fee, delivery_fee_rate,
            additional_delivery_fee, assembly_fee)
            VALUES (%s, %s, %s, %s, %s, %s, %s, %s, %s, %s)"""
    self.execute_many(sql, rows)
    print(f" 已导入 {len(rows)} 条记录")
def import_us_ceva_price(self):
    """Import US CEVA base rates and remote-zone postcodes from CEVA.xlsx."""
    print("导入美国CEVA价格...")
    # --- base rate sheet ---
    rate_df = pd.read_excel(self.data_dir / "CEVA.xlsx", sheet_name="ceva_base_rate")
    rate_df.columns = ["ceva_weight"] + list(rate_df.columns[1:])
    rate_rows = []
    for _, record in rate_df.iterrows():
        weight = record["ceva_weight"]
        if pd.isna(weight):
            continue  # skip blank spacer rows in the sheet
        values = [weight]
        values.extend(float(record[c]) if pd.notna(record[c]) else 0
                      for c in rate_df.columns[1:])
        rate_rows.append(tuple(values))
    insert_rates = """INSERT INTO us_ceva_price
        (ceva_weight, zone_ca, zone_wa, zone_or, zone_nv, zone_az,
        zone_co, zone_ut, zone_nm, remote_area_surcharge)
        VALUES (%s, %s, %s, %s, %s, %s, %s, %s, %s, %s)"""
    self.execute_many(insert_rates, rate_rows)
    print(f" 已导入 {len(rate_rows)} 条记录 (ceva_base_rate)")
    # --- remote zone sheet ---
    zone_df = pd.read_excel(self.data_dir / "CEVA.xlsx", sheet_name="remote_zone")
    zone_df.columns = ["postal_code", "state", "beyond_zone", "remote_type"]
    zone_rows = []
    for _, record in zone_df.iterrows():
        zone_rows.append((
            str(int(record["postal_code"])) if pd.notna(record["postal_code"]) else "",
            str(record["state"]) if pd.notna(record["state"]) else "",
            str(record["beyond_zone"]) if pd.notna(record["beyond_zone"]) else "",
            # rows without an explicit type default to "standard"
            str(record["remote_type"]) if pd.notna(record["remote_type"]) else "standard",
        ))
    insert_zones = "INSERT IGNORE INTO us_ceva_zone (postal_code, state, beyond_zone, remote_type) VALUES (%s, %s, %s, %s)"
    self.execute_many(insert_zones, zone_rows)
    print(f" 已导入 {len(zone_rows)} 条记录 (remote_zone)")
def import_us_metro_price(self):
    """Import US Metro trucking prices and the Metro zip-to-zone mapping.

    Iterates the four price sheets in Metro.xlsx (25/35-cuft tiers plus
    over-35 per-cuft and minimum tables); sheets without an "Origins"
    column are skipped. Also loads the "zone_table" sheet into
    ``us_metro_zone``. Per-sheet failures are reported and do not abort
    the remaining sheets.
    """
    print("导入美国Metro价格...")
    # Single INSERT template shared by all four price sheets. The previous
    # version built a broken f-string template here and immediately
    # overwrote it with this literal; the dead code has been removed.
    price_sql = "INSERT INTO us_metro_price (origins, zone_1l, zone_2l, zone_3l, zone_4l, zone_5l, zone_6l, zone_7l, zone_8l, zone_9l) VALUES (%s, %s, %s, %s, %s, %s, %s, %s, %s, %s)"
    for sheet_name in ["cuft_25", "cuft_35", "over35_per_cuft", "over35_min"]:
        try:
            df = pd.read_excel(self.data_dir / "Metro.xlsx", sheet_name=sheet_name)
            if "Origins" not in df.columns:
                continue  # not a price sheet layout we understand
            data = []
            for _, row in df.iterrows():
                origins = str(row["Origins"]) if pd.notna(row["Origins"]) else ""
                row_data = [origins]
                for col in df.columns[1:]:
                    val = row[col]
                    row_data.append(float(val) if pd.notna(val) else 0)
                data.append(tuple(row_data))
            self.execute_many(price_sql, data)
            print(f" 已导入 {len(data)} 条记录 ({sheet_name})")
        except Exception as e:
            print(f" 跳过 {sheet_name}: {e}")
    # Metro zone table
    try:
        df = pd.read_excel(self.data_dir / "Metro.xlsx", sheet_name="zone_table")
        df.columns = ["zip_code", "new_zone_name"]
        data = []
        for _, row in df.iterrows():
            zip_code = str(int(row["zip_code"])) if pd.notna(row["zip_code"]) else ""
            zone = str(row["new_zone_name"]) if pd.notna(row["new_zone_name"]) else ""
            data.append((zip_code, zone))
        sql = "INSERT IGNORE INTO us_metro_zone (zip_code, new_zone_name) VALUES (%s, %s)"
        self.execute_many(sql, data)
        print(f" 已导入 {len(data)} 条记录 (zone_table)")
    except Exception as e:
        print(f" zone_table: {e}")
def import_us_xmiles_zone(self):
    """Import the US XMILES postcode-to-area mapping from XMILES.xlsx.

    Normalises the first two columns of the "postcode_table" sheet to
    (postcode, area) regardless of their header spelling; extra trailing
    columns are preserved and ignored.

    Raises:
        ValueError: if the sheet has fewer than two columns.
    """
    print("导入美国XMILES邮编...")
    df = pd.read_excel(self.data_dir / "XMILES.xlsx", sheet_name="postcode_table")
    cols = df.columns.tolist()
    if len(cols) < 2:
        # The old guard silently fell through and later crashed on
        # row["postcode"]; fail fast with a clear message instead.
        raise ValueError(f"postcode_table 需要至少两列, 实际: {cols}")
    # Only rename the first two columns. The previous
    # `df.columns = ["postcode", "area"]` raised ValueError whenever the
    # sheet carried more than two columns.
    df.columns = ["postcode", "area"] + [str(c) for c in cols[2:]]
    data = []
    for _, row in df.iterrows():
        postcode = str(int(row["postcode"])) if pd.notna(row["postcode"]) else ""
        area = str(row["area"]) if pd.notna(row["area"]) else ""
        data.append((postcode, area))
    sql = "INSERT IGNORE INTO us_xmiles_zone (postcode, area) VALUES (%s, %s)"
    self.execute_many(sql, data)
    print(f" 已导入 {len(data)} 条记录")
def import_us_am_price(self):
    """Import US AM trucking prices and the AM zip-to-zone mapping."""
    print("导入美国AM卡派价格...")
    # --- price sheet ---
    price_df = pd.read_excel(self.data_dir / "美国卡派-AM.xlsx", sheet_name="price")
    price_df.columns = ["pu_zone", "dl_zone", "zone_combo", "minimum", "maximum",
                        "fee_without_sc", "shipping_cost", "internalid",
                        "externalid", "surcharge"]
    text_cols = ("pu_zone", "dl_zone", "zone_combo")
    num_cols = ("minimum", "maximum", "fee_without_sc", "shipping_cost", "surcharge")
    records = []
    for _, rec in price_df.iterrows():
        texts = [str(rec[c]) if pd.notna(rec[c]) else "" for c in text_cols]
        nums = [float(rec[c]) if pd.notna(rec[c]) else 0 for c in num_cols]
        records.append(tuple(texts + nums))
    sql = """INSERT INTO us_am_price
        (pu_zone, dl_zone, zone_combo, minimum_weight, maximum_weight,
        fee_without_sc, shipping_cost, surcharge)
        VALUES (%s, %s, %s, %s, %s, %s, %s, %s)"""
    self.execute_many(sql, records)
    print(f" 已导入 {len(records)} 条记录 (price)")
    # --- postcode sheet ---
    zone_df = pd.read_excel(self.data_dir / "美国卡派-AM.xlsx", sheet_name="postcode_table")
    zone_df.columns = ["zip_code", "zone"]
    zone_records = []
    for _, rec in zone_df.iterrows():
        zip_code = str(int(rec["zip_code"])) if pd.notna(rec["zip_code"]) else ""
        zone = str(rec["zone"]) if pd.notna(rec["zone"]) else ""
        zone_records.append((zip_code, zone))
    self.execute_many(
        "INSERT IGNORE INTO us_am_postcode (zip_code, zone) VALUES (%s, %s)",
        zone_records)
    print(f" 已导入 {len(zone_records)} 条记录 (postcode_table)")
# ==================== 澳洲数据导入 ====================
def import_au_eparcel_price(self):
    """Import Australian eparcel prices (one row per postage service)."""
    print("导入澳洲eparcel价格...")
    sheet = pd.read_excel(self.data_dir / "澳洲三大渠道.xlsx", sheet_name="eparcel")
    # First column is the service name; the rest are weight-break headers.
    headers = ["post"] + [str(c) for c in sheet.columns[1:]]
    sheet.columns = headers
    records = []
    for _, rec in sheet.iterrows():
        entry = [str(rec["post"]) if pd.notna(rec["post"]) else ""]
        for header in headers[1:]:
            cell = rec.get(header, 0)
            entry.append(float(cell) if pd.notna(cell) else 0)
        records.append(tuple(entry))
    sql = """INSERT INTO au_eparcel_price
        (post, weight_0_5, weight_1, weight_2, weight_3, weight_4,
        weight_5, weight_7, weight_10, weight_15)
        VALUES (%s, %s, %s, %s, %s, %s, %s, %s, %s, %s)"""
    self.execute_many(sql, records)
    print(f" 已导入 {len(records)} 条记录")
def import_au_all(self):
    """Import Australian toll and allied channel prices.

    Both sheets in 澳洲三大渠道.xlsx share the same layout (post name plus
    four zone price columns) and differ only in sheet/table name, so one
    loop handles both. A failure in one channel is reported and does not
    block the other — same behaviour as the previous copy-pasted blocks.
    """
    print("导入澳洲toll和allied数据...")
    channels = [("toll", "au_toll_price"), ("allied", "au_allied_price")]
    for sheet_name, table in channels:
        try:
            df = pd.read_excel(self.data_dir / "澳洲三大渠道.xlsx", sheet_name=sheet_name)
            df.columns = ["post", "zone_1", "zone_2", "zone_3", "zone_4"]
            data = []
            for _, row in df.iterrows():
                post = str(row["post"]) if pd.notna(row["post"]) else ""
                zones = tuple(float(row[z]) if pd.notna(row[z]) else 0
                              for z in ("zone_1", "zone_2", "zone_3", "zone_4"))
                data.append((post,) + zones)
            sql = f"INSERT INTO {table} (post, zone_1, zone_2, zone_3, zone_4) VALUES (%s, %s, %s, %s, %s)"
            self.execute_many(sql, data)
            print(f" 已导入 {len(data)} 条记录 ({sheet_name})")
        except Exception as e:
            print(f" {sheet_name}: {e}")
# ==================== 欧洲数据导入 ====================
def import_eur_dhl_price(self):
    """Import European DHL trucking (IP tier) prices from 欧洲卡派.xlsx."""
    print("导入欧洲DHL价格...")
    try:
        sheet = pd.read_excel(self.data_dir / "欧洲卡派.xlsx", sheet_name="DHL卡派IP报价")
        sheet.columns = ["type", "country", "postalcode", "country_postalcode",
                         "ip_1", "ip_2", "ip_3", "ip_4", "ip_5", "ip_6"]
        ip_cols = ("ip_1", "ip_2", "ip_3", "ip_4", "ip_5", "ip_6")
        records = []
        for _, rec in sheet.iterrows():
            head = tuple(str(rec[c]) if pd.notna(rec[c]) else ""
                         for c in ("type", "country", "postalcode"))
            tiers = tuple(float(rec[c]) if pd.notna(rec[c]) else 0 for c in ip_cols)
            records.append(head + tiers)
        sql = """INSERT INTO eur_dhl_price
            (price_type, country, postalcode, ip_1, ip_2, ip_3, ip_4, ip_5, ip_6)
            VALUES (%s, %s, %s, %s, %s, %s, %s, %s, %s)"""
        self.execute_many(sql, records)
        print(f" 已导入 {len(records)} 条记录")
    except Exception as e:
        print(f" 欧洲DHL: {e}")
# ==================== 主函数 ====================
def import_all(self):
    """Run every importer, grouped by destination country (UK, US, AU, EUR)."""
    print("开始导入数据...")
    steps = [
        # UK
        self.import_uk_postcode_zone,
        self.import_uk_kp_nv_price,
        # US
        self.import_us_fedex_pp_price,
        self.import_us_fedex_kh_price,
        self.import_us_fedex_price,
        self.import_us_giga_price,
        self.import_us_ceva_price,
        self.import_us_metro_price,
        self.import_us_xmiles_zone,
        self.import_us_am_price,
        # AU
        self.import_au_eparcel_price,
        self.import_au_all,
        # EUR
        self.import_eur_dhl_price,
    ]
    for step in steps:
        step()
    print("\n数据导入完成!")
if __name__ == "__main__":
    # Script entry point: build the importer and run the full pipeline.
    importer = DataImporter()
    importer.import_all()

View File

@ -1,11 +1,37 @@
# 英国海运订单费用,返回单个sku的订单费用和订单类型 # 英国海运订单费用,返回单个sku的订单费用和订单类型
def uk_ocean_order_price(packages,k): import sys
sys.path.append(r'D:\workspace\dags\logistics')
import pandas as pd
import math
from utils.Package import Package, Package_group
import re
def uk_ocean_order_price(packages_dict_str,k):
""" """
入参:packages的类型是Package_group,里面包含多个Package,这是一个类,这个类今天在群里发过 入参:packages的类型是Package_group,里面包含多个Package,这是一个类,这个类今天在群里发过
入参:k是物流分摊费,也就是k 入参:k是物流分摊费,也就是k
出参: 订单物流费订单类型 出参: 订单物流费订单类型
""" """
packages = Package_group()
def extract_number(value):
# 提取字符串中的第一个数字
match = re.search(r"[-+]?\d*\.\d+|\d+", str(value))
return float(match.group()) if match else 0.0
packages_dict = eval(packages_dict_str)
if len(packages_dict) == 0:
return (0,0)
for key, package in packages_dict.items():
package[''] = extract_number(package[''])
package[''] = extract_number(package[''])
package[''] = extract_number(package[''])
package['重量'] = extract_number(package['重量'])
if package[''] == 0 or package[''] == 0 or package[''] == 0 or package['重量'] == 0:
return (0,0)
packages.add_package(Package(key,package[''], package[''], package[''], package['重量']))
# 计算uk经济直达费用 # 计算uk经济直达费用
order_fee = 0 order_fee = 0
express_fee = 0 express_fee = 0
@ -20,7 +46,7 @@ def uk_ocean_order_price(packages,k):
base_fee = 3.7/0.359*1.3-k-0.8*package.get_volume_weight(6000) base_fee = 3.7/0.359*1.3-k-0.8*package.get_volume_weight(6000)
else: else:
base_fee = 999999 base_fee = 999999
if package.fst_size >=100 and package.sed_size >=60 and package.weight >=30000: if package.fst_size >=100 or package.sed_size >=60 or package.weight >=30000:
other_fee1 =45 other_fee1 =45
order_type1 += '大包裹' order_type1 += '大包裹'
@ -44,4 +70,45 @@ def uk_ocean_order_price(packages,k):
else: else:
order_fee = ltl_fee order_fee = ltl_fee
order_type = order_type2 order_type = order_type2
return max(round(order_fee,2),2), order_type return max(round(order_fee,2),2), order_type
if __name__ == '__main__':
import sys
sys.path.append(r'D:\workspace\dags\logistics')
sql = """SELECT
t1.*,
t2.`物流分摊`
FROM
`dim_erp_sku_package_vol_info`t1 left join ods.stg_bayshop_litfad_sku t2 on t1.erp_sku = t2.SKU
where id >=%s AND id <=%s
AND uk_price IS NULL
AND `物流分摊` IS NOT NULL
"""
from utils.gtools import MySQLconnect
from tqdm import tqdm
import pandas as pd
with MySQLconnect('dwd') as db:
for i in tqdm(range(0,150)):
# count=0
print(i,"开始")
dfsql = sql % (i*100000, (i+1)*100000-1)
df = pd.read_sql(dfsql, db.engine())
if len(df) == 0:
continue
df[['us_price','logitcs_type']] = df.apply(lambda x: uk_ocean_order_price(x['erp_package_vol'],x['物流分摊']), axis=1, result_type='expand')
pd.io.sql.to_sql(df, "temp_update",db.eng, if_exists='replace', index=False )
#添加主键ID
modifysql = """ALTER TABLE `temp_update` ADD PRIMARY KEY (`id`)
"""
db.cur.execute(modifysql)
# se = [tuple([round(x['us_price'],2),x['id']]) for y,x in df.iterrows()]
update_sql = """
UPDATE dim_erp_sku_package_vol_info AS target
JOIN temp_update AS src
ON target.id = src.id -- 根据主键关联
SET target.uk_price = src.us_price;"""
# db.cur.executemany(update_sql, se)
db.cur.execute(update_sql)
db.con.commit()
print(i,"结束")

View File

@ -1,6 +1,11 @@
import sys
sys.path.append(r'D:\workspace\dags\logistics')
import pandas as pd import pandas as pd
import math import math
express_price = pd.read_excel(r'D:\test\logistics\data\售价尾端价格.xlsx', sheet_name='Sheet1') from utils.Package import Package, Package_group
import re
express_price = pd.read_excel(r'D:\workspace\dags\logistics\data\售价尾端价格.xlsx', sheet_name='Sheet1')
key_column = express_price.iloc[:, 8] # 第 I 列 key_column = express_price.iloc[:, 8] # 第 I 列
value_column = express_price.iloc[:, 9] # 第 J 列 value_column = express_price.iloc[:, 9] # 第 J 列
small_column = express_price.iloc[:, 10] # 第 K 列 small_column = express_price.iloc[:, 10] # 第 K 列
@ -9,7 +14,26 @@ air_small_dict = dict(zip(key_column, small_column))
air_big_dict = dict(zip(key_column, big_column)) air_big_dict = dict(zip(key_column, big_column))
# 转换成字典 # 转换成字典
ocean_price_dict = dict(zip(key_column, value_column)) ocean_price_dict = dict(zip(key_column, value_column))
def ocean_order_price(packages): def ocean_order_price(packages_dict_str):
packages = Package_group()
def extract_number(value):
# 提取字符串中的第一个数字
match = re.search(r"[-+]?\d*\.\d+|\d+", str(value))
return float(match.group()) if match else 0.0
packages_dict = eval(packages_dict_str)
if len(packages_dict) == 0:
return (0,0)
for key, package in packages_dict.items():
package[''] = extract_number(package[''])
package[''] = extract_number(package[''])
package[''] = extract_number(package[''])
package['重量'] = extract_number(package['重量'])
if package[''] == 0 or package[''] == 0 or package[''] == 0 or package['重量'] == 0:
return (0,0)
packages.add_package(Package(key,package[''], package[''], package[''], package['重量']))
express_fee = 0 # 快递基础费 express_fee = 0 # 快递基础费
long_fee = 0 # 超长费 long_fee = 0 # 超长费
weight_fee = 0 # 超重费 weight_fee = 0 # 超重费
@ -19,7 +43,8 @@ def ocean_order_price(packages):
express_type_length = '' express_type_length = ''
for package in packages: for package in packages:
for key, value in ocean_price_dict.items(): for key, value in ocean_price_dict.items():
if package.weight <=key:
if max(package.get_volume_weight(8500)*1000, package.weight) <=key:
express_fee+=value express_fee+=value
break break
if package.fst_size>=116 or package.sed_size>=71 or package.girth>=251: if package.fst_size>=116 or package.sed_size>=71 or package.girth>=251:
@ -32,6 +57,7 @@ def ocean_order_price(packages):
if package.fst_size>=238 or package.girth>=315: if package.fst_size>=238 or package.girth>=315:
big_fee+=61.6 big_fee+=61.6
express_type_length ="大包裹" express_type_length ="大包裹"
express_fee = 9999999 if express_fee ==0 else express_fee
express_fee = express_fee + long_fee + weight_fee + big_fee express_fee = express_fee + long_fee + weight_fee + big_fee
express_type = express_type_length + express_type_weight express_type = express_type_length + express_type_weight
@ -127,4 +153,34 @@ def air_order_price(packages):
else: else:
express_fee+=(((min(max(package.density,37),337)*0.093+27.7-1.08)/6+0.65-1.06)*package.get_volume_weight(8500))/0.45+price express_fee+=(((min(max(package.density,37),337)*0.093+27.7-1.08)/6+0.65-1.06)*package.get_volume_weight(8500))/0.45+price
express_type='FEDEX' express_type='FEDEX'
return express_fee, express_type return express_fee, express_type
if __name__ == '__main__':
sql = "SELECT * FROM `dim_erp_sku_package_vol_info` where id >=%s AND id <=%s AND logis_type IS NULL"
from utils.gtools import MySQLconnect
from tqdm import tqdm
with MySQLconnect('dwd') as db:
for i in tqdm(range(1,150)):
# count=0
print(i,"开始")
dfsql = sql % (i*100000, (i+1)*100000-1)
df = pd.read_sql(dfsql, db.engine())
if len(df) == 0:
continue
df[['us_price','logitcs_type']] = df.apply(lambda x: ocean_order_price(x['erp_package_vol']), axis=1, result_type='expand')
pd.io.sql.to_sql(df, "temp_update",db.eng, if_exists='replace', index=False )
#添加主键ID
modifysql = """ALTER TABLE `temp_update` ADD PRIMARY KEY (`id`)
"""
db.cur.execute(modifysql)
# se = [tuple([round(x['us_price'],2),x['id']]) for y,x in df.iterrows()]
update_sql = """
UPDATE dim_erp_sku_package_vol_info AS target
JOIN temp_update AS src
ON target.id = src.id -- 根据主键关联
SET target.logis_type = src.logitcs_type;"""
# db.cur.executemany(update_sql, se)
db.cur.execute(update_sql)
db.con.commit()
print(i,"结束")

337
sql/create_tables.sql Normal file
View File

@ -0,0 +1,337 @@
-- Schema for the logistics fee calculation system.
-- Target database: logistics
-- ============================================
-- Logistics company configuration table
-- ============================================
-- One row per carrier/channel; company_code is the unique business key
-- the application uses when selecting a shipping channel.
CREATE TABLE IF NOT EXISTS logistics_company (
    id INT AUTO_INCREMENT PRIMARY KEY,
    company_code VARCHAR(50) NOT NULL COMMENT '公司代码',
    company_name VARCHAR(100) NOT NULL COMMENT '公司名称',
    country VARCHAR(10) NOT NULL COMMENT '国家代码: US, UK, AU, DE等',
    logistics_type VARCHAR(20) NOT NULL COMMENT '物流类型: EXPRESS, COURIER, OCEAN, AIR',
    port VARCHAR(20) DEFAULT 'DEFAULT' COMMENT '港口: DEFAULT, WEST, EAST等',
    currency VARCHAR(10) DEFAULT 'USD' COMMENT '货币',
    active TINYINT DEFAULT 1 COMMENT '是否启用: 0-禁用, 1-启用',
    created_at TIMESTAMP DEFAULT CURRENT_TIMESTAMP,
    updated_at TIMESTAMP DEFAULT CURRENT_TIMESTAMP ON UPDATE CURRENT_TIMESTAMP,
    UNIQUE KEY uk_company_code (company_code)
) ENGINE=InnoDB DEFAULT CHARSET=utf8mb4 COMMENT='物流公司配置表';
-- ============================================
-- UK logistics price tables
-- ============================================
-- UK trucking: postcode zone table
CREATE TABLE IF NOT EXISTS uk_postcode_zone (
    id INT AUTO_INCREMENT PRIMARY KEY,
    postcode_prefix VARCHAR(10) NOT NULL COMMENT '邮编前缀',
    zone VARCHAR(10) NOT NULL COMMENT '分区',
    is_remote TINYINT DEFAULT 0 COMMENT '是否偏远: 0-否, 1-是',
    UNIQUE KEY uk_postcode (postcode_prefix)
) ENGINE=InnoDB DEFAULT CHARSET=utf8mb4 COMMENT='英国邮编分区表';
-- UK trucking: NV freight table (fee per zone/pallet count)
CREATE TABLE IF NOT EXISTS uk_kp_nv_price (
    id INT AUTO_INCREMENT PRIMARY KEY,
    zone VARCHAR(10) NOT NULL COMMENT '分区',
    tuopan INT NOT NULL COMMENT '托盘数',
    fee DECIMAL(10,2) NOT NULL COMMENT '运费',
    INDEX idx_zone (zone)
) ENGINE=InnoDB DEFAULT CHARSET=utf8mb4 COMMENT='英国卡派NV运费表';
-- ============================================
-- US logistics price tables
-- ============================================
-- US postcode zone table
CREATE TABLE IF NOT EXISTS us_postcode_zone (
    id INT AUTO_INCREMENT PRIMARY KEY,
    postcode VARCHAR(5) NOT NULL COMMENT '邮编(5位)',
    port VARCHAR(20) DEFAULT 'west' COMMENT '港口: west, east',
    zone VARCHAR(10) COMMENT '分区',
    remote_type INT DEFAULT 0 COMMENT '偏远类型: 0-非偏远, 1-偏远, 2-超偏远, 3-超超偏远',
    INDEX idx_postcode (postcode)
) ENGINE=InnoDB DEFAULT CHARSET=utf8mb4 COMMENT='美国邮编分区表';
-- US express: Fedex "邮差小马" price table.
-- NOTE: the three Fedex tables below share an identical layout
-- (weight in lbs, price per zone 2-8) and differ only by provider.
CREATE TABLE IF NOT EXISTS us_fedex_pp_price (
    id INT AUTO_INCREMENT PRIMARY KEY,
    lbs INT NOT NULL COMMENT '重量(磅)',
    zone_2 DECIMAL(10,2) COMMENT '2区价格',
    zone_3 DECIMAL(10,2) COMMENT '3区价格',
    zone_4 DECIMAL(10,2) COMMENT '4区价格',
    zone_5 DECIMAL(10,2) COMMENT '5区价格',
    zone_6 DECIMAL(10,2) COMMENT '6区价格',
    zone_7 DECIMAL(10,2) COMMENT '7区价格',
    zone_8 DECIMAL(10,2) COMMENT '8区价格',
    INDEX idx_lbs (lbs)
) ENGINE=InnoDB DEFAULT CHARSET=utf8mb4 COMMENT='美国Fedex邮差小马价格表';
-- US express: Fedex "金宏亚" price table
CREATE TABLE IF NOT EXISTS us_fedex_kh_price (
    id INT AUTO_INCREMENT PRIMARY KEY,
    lbs INT NOT NULL COMMENT '重量(磅)',
    zone_2 DECIMAL(10,2) COMMENT '2区价格',
    zone_3 DECIMAL(10,2) COMMENT '3区价格',
    zone_4 DECIMAL(10,2) COMMENT '4区价格',
    zone_5 DECIMAL(10,2) COMMENT '5区价格',
    zone_6 DECIMAL(10,2) COMMENT '6区价格',
    zone_7 DECIMAL(10,2) COMMENT '7区价格',
    zone_8 DECIMAL(10,2) COMMENT '8区价格',
    INDEX idx_lbs (lbs)
) ENGINE=InnoDB DEFAULT CHARSET=utf8mb4 COMMENT='美国Fedex金宏亚价格表';
-- US express: FEDEX price table
CREATE TABLE IF NOT EXISTS us_fedex_price (
    id INT AUTO_INCREMENT PRIMARY KEY,
    lbs INT NOT NULL COMMENT '重量(磅)',
    zone_2 DECIMAL(10,2) COMMENT '2区价格',
    zone_3 DECIMAL(10,2) COMMENT '3区价格',
    zone_4 DECIMAL(10,2) COMMENT '4区价格',
    zone_5 DECIMAL(10,2) COMMENT '5区价格',
    zone_6 DECIMAL(10,2) COMMENT '6区价格',
    zone_7 DECIMAL(10,2) COMMENT '7区价格',
    zone_8 DECIMAL(10,2) COMMENT '8区价格',
    INDEX idx_lbs (lbs)
) ENGINE=InnoDB DEFAULT CHARSET=utf8mb4 COMMENT='美国FEDEX价格表';
-- US trucking: GIGA price table
CREATE TABLE IF NOT EXISTS us_giga_price (
    id INT AUTO_INCREMENT PRIMARY KEY,
    zip_code VARCHAR(5) NOT NULL COMMENT '邮编',
    delivery_warehouse VARCHAR(50) COMMENT '仓库',
    general_area VARCHAR(50) COMMENT '地区',
    fee_type VARCHAR(20) COMMENT '费用类型',
    zone VARCHAR(10) COMMENT '分区',
    local_pickup_fee DECIMAL(10,2) COMMENT '本地取货费',
    warehouse_handling_fee DECIMAL(10,2) COMMENT '仓库操作费',
    delivery_fee_rate DECIMAL(10,2) COMMENT '配送费率',
    additional_delivery_fee DECIMAL(10,2) COMMENT '额外配送费',
    assembly_fee DECIMAL(10,2) COMMENT '装配费',
    INDEX idx_zip (zip_code)
) ENGINE=InnoDB DEFAULT CHARSET=utf8mb4 COMMENT='美国GIGA价格表';
-- US trucking: CEVA price table
CREATE TABLE IF NOT EXISTS us_ceva_price (
    id INT AUTO_INCREMENT PRIMARY KEY,
    ceva_weight INT NOT NULL COMMENT 'CEVA重量',
    zone_ca DECIMAL(10,2) COMMENT 'CA区价格',
    zone_wa DECIMAL(10,2) COMMENT 'WA区价格',
    zone_or DECIMAL(10,2) COMMENT 'OR区价格',
    zone_nv DECIMAL(10,2) COMMENT 'NV区价格',
    zone_az DECIMAL(10,2) COMMENT 'AZ区价格',
    zone_co DECIMAL(10,2) COMMENT 'CO区价格',
    zone_ut DECIMAL(10,2) COMMENT 'UT区价格',
    zone_nm DECIMAL(10,2) COMMENT 'NM区价格',
    remote_area_surcharge DECIMAL(10,2) COMMENT '偏远地区附加费',
    INDEX idx_weight (ceva_weight)
) ENGINE=InnoDB DEFAULT CHARSET=utf8mb4 COMMENT='美国CEVA价格表';
-- US trucking: CEVA postcode zone table
CREATE TABLE IF NOT EXISTS us_ceva_zone (
    id INT AUTO_INCREMENT PRIMARY KEY,
    postal_code VARCHAR(5) NOT NULL COMMENT '邮编',
    state VARCHAR(20) COMMENT '州',  -- was COMMENT '': filled in the missing column comment
    beyond_zone VARCHAR(10) COMMENT '超出分区',
    remote_type VARCHAR(20) DEFAULT 'standard' COMMENT '偏远类型: standard, remote',
    INDEX idx_postal (postal_code)
) ENGINE=InnoDB DEFAULT CHARSET=utf8mb4 COMMENT='美国CEVA邮编分区表';
-- US trucking: CEVA zone grade table
CREATE TABLE IF NOT EXISTS us_ceva_zone_grade (
    id INT AUTO_INCREMENT PRIMARY KEY,
    to_state VARCHAR(20) NOT NULL COMMENT '目的地州',
    ca DECIMAL(10,2) COMMENT 'CA分区',
    wa DECIMAL(10,2) COMMENT 'WA分区',
    INDEX idx_state (to_state)
) ENGINE=InnoDB DEFAULT CHARSET=utf8mb4 COMMENT='美国CEVA分区等级表';
-- US trucking: Metro price table (price per origin and zone)
CREATE TABLE IF NOT EXISTS us_metro_price (
    id INT AUTO_INCREMENT PRIMARY KEY,
    origins VARCHAR(20) NOT NULL COMMENT '出发地',
    zone_1l DECIMAL(10,2) COMMENT 'Zone 1L价格',
    zone_2l DECIMAL(10,2) COMMENT 'Zone 2L价格',
    zone_3l DECIMAL(10,2) COMMENT 'Zone 3L价格',
    zone_4l DECIMAL(10,2) COMMENT 'Zone 4L价格',
    zone_5l DECIMAL(10,2) COMMENT 'Zone 5L价格',
    zone_6l DECIMAL(10,2) COMMENT 'Zone 6L价格',
    zone_7l DECIMAL(10,2) COMMENT 'Zone 7L价格',
    zone_8l DECIMAL(10,2) COMMENT 'Zone 8L价格',
    zone_9l DECIMAL(10,2) COMMENT 'Zone 9L价格',
    INDEX idx_origins (origins)
) ENGINE=InnoDB DEFAULT CHARSET=utf8mb4 COMMENT='美国Metro价格表';
-- US trucking: Metro zip-to-zone table
CREATE TABLE IF NOT EXISTS us_metro_zone (
    id INT AUTO_INCREMENT PRIMARY KEY,
    zip_code VARCHAR(5) NOT NULL COMMENT '邮编',
    new_zone_name VARCHAR(20) COMMENT '新分区名',
    INDEX idx_zip (zip_code)
) ENGINE=InnoDB DEFAULT CHARSET=utf8mb4 COMMENT='美国Metro邮编分区表';
-- US trucking: Metro remote area table
CREATE TABLE IF NOT EXISTS us_metro_remote (
    id INT AUTO_INCREMENT PRIMARY KEY,
    zip_code VARCHAR(5) NOT NULL COMMENT '邮编',
    area_type VARCHAR(50) COMMENT '区域类型',
    INDEX idx_zip (zip_code)
) ENGINE=InnoDB DEFAULT CHARSET=utf8mb4 COMMENT='美国Metro偏远表';
-- US trucking: XMILES postcode table
CREATE TABLE IF NOT EXISTS us_xmiles_zone (
    id INT AUTO_INCREMENT PRIMARY KEY,
    postcode VARCHAR(5) NOT NULL COMMENT '邮编',
    area VARCHAR(20) COMMENT '地区',
    INDEX idx_postcode (postcode)
) ENGINE=InnoDB DEFAULT CHARSET=utf8mb4 COMMENT='美国XMILES邮编分区表';
-- US trucking: AM price table (price per pickup/delivery zone pair and weight band)
CREATE TABLE IF NOT EXISTS us_am_price (
    id INT AUTO_INCREMENT PRIMARY KEY,
    pu_zone VARCHAR(5) NOT NULL COMMENT 'PU区',
    dl_zone VARCHAR(5) NOT NULL COMMENT 'DL区',
    zone_combo VARCHAR(10) NOT NULL COMMENT '分区组合',
    minimum_weight DECIMAL(10,2) NOT NULL COMMENT '最小重量',
    maximum_weight DECIMAL(10,2) NOT NULL COMMENT '最大重量',
    fee_without_sc DECIMAL(10,2) COMMENT '无附加费价格',
    shipping_cost DECIMAL(10,2) COMMENT '运费',
    surcharge DECIMAL(10,2) DEFAULT 0 COMMENT '附加费',
    INDEX idx_zone_combo (zone_combo)
) ENGINE=InnoDB DEFAULT CHARSET=utf8mb4 COMMENT='美国AM卡派价格表';
-- US trucking: AM postcode table
CREATE TABLE IF NOT EXISTS us_am_postcode (
    id INT AUTO_INCREMENT PRIMARY KEY,
    zip_code VARCHAR(5) NOT NULL COMMENT '邮编',
    zone VARCHAR(5) NOT NULL COMMENT '分区',
    INDEX idx_zip (zip_code)
) ENGINE=InnoDB DEFAULT CHARSET=utf8mb4 COMMENT='美国AM卡派邮编表';
-- ============================================
-- AU logistics price tables
-- ============================================
-- AU eparcel price table (price per service and weight break, kg)
CREATE TABLE IF NOT EXISTS au_eparcel_price (
    id INT AUTO_INCREMENT PRIMARY KEY,
    post VARCHAR(20) NOT NULL COMMENT '邮寄方式',
    weight_0_5 DECIMAL(10,2) COMMENT '0.5kg价格',
    weight_1 DECIMAL(10,2) COMMENT '1kg价格',
    weight_2 DECIMAL(10,2) COMMENT '2kg价格',
    weight_3 DECIMAL(10,2) COMMENT '3kg价格',
    weight_4 DECIMAL(10,2) COMMENT '4kg价格',
    weight_5 DECIMAL(10,2) COMMENT '5kg价格',
    weight_7 DECIMAL(10,2) COMMENT '7kg价格',
    weight_10 DECIMAL(10,2) COMMENT '10kg价格',
    weight_15 DECIMAL(10,2) COMMENT '15kg价格',
    INDEX idx_post (post)
) ENGINE=InnoDB DEFAULT CHARSET=utf8mb4 COMMENT='澳洲eparcel价格表';
-- AU eparcel postcode table
CREATE TABLE IF NOT EXISTS au_eparcel_zone (
    id INT AUTO_INCREMENT PRIMARY KEY,
    postcode VARCHAR(10) NOT NULL COMMENT '邮编',
    zone VARCHAR(10) COMMENT '分区',
    INDEX idx_postcode (postcode)
) ENGINE=InnoDB DEFAULT CHARSET=utf8mb4 COMMENT='澳洲eparcel邮编表';
-- AU toll price table
CREATE TABLE IF NOT EXISTS au_toll_price (
    id INT AUTO_INCREMENT PRIMARY KEY,
    post VARCHAR(20) NOT NULL COMMENT '邮寄方式',
    zone_1 DECIMAL(10,2) COMMENT '1区价格',
    zone_2 DECIMAL(10,2) COMMENT '2区价格',
    zone_3 DECIMAL(10,2) COMMENT '3区价格',
    zone_4 DECIMAL(10,2) COMMENT '4区价格',
    INDEX idx_post (post)
) ENGINE=InnoDB DEFAULT CHARSET=utf8mb4 COMMENT='澳洲toll价格表';
-- AU toll postcode table
CREATE TABLE IF NOT EXISTS au_toll_zone (
    id INT AUTO_INCREMENT PRIMARY KEY,
    postcode VARCHAR(10) NOT NULL COMMENT '邮编',
    zone VARCHAR(10) COMMENT '分区',
    INDEX idx_postcode (postcode)
) ENGINE=InnoDB DEFAULT CHARSET=utf8mb4 COMMENT='澳洲toll邮编表';
-- AU toll remote area table
CREATE TABLE IF NOT EXISTS au_toll_remote (
    id INT AUTO_INCREMENT PRIMARY KEY,
    postcode VARCHAR(10) NOT NULL COMMENT '邮编',
    remote_type VARCHAR(20) COMMENT '偏远类型',
    INDEX idx_postcode (postcode)
) ENGINE=InnoDB DEFAULT CHARSET=utf8mb4 COMMENT='澳洲toll偏远表';
-- AU allied price table
CREATE TABLE IF NOT EXISTS au_allied_price (
    id INT AUTO_INCREMENT PRIMARY KEY,
    post VARCHAR(20) NOT NULL COMMENT '邮寄方式',
    zone_1 DECIMAL(10,2) COMMENT '1区价格',
    zone_2 DECIMAL(10,2) COMMENT '2区价格',
    zone_3 DECIMAL(10,2) COMMENT '3区价格',
    zone_4 DECIMAL(10,2) COMMENT '4区价格',
    INDEX idx_post (post)
) ENGINE=InnoDB DEFAULT CHARSET=utf8mb4 COMMENT='澳洲allied价格表';
-- AU allied postcode table
CREATE TABLE IF NOT EXISTS au_allied_zone (
    id INT AUTO_INCREMENT PRIMARY KEY,
    postcode VARCHAR(10) NOT NULL COMMENT '邮编',
    zone VARCHAR(10) COMMENT '分区',
    remote_zone VARCHAR(10) COMMENT '偏远分区',
    INDEX idx_postcode (postcode)
) ENGINE=InnoDB DEFAULT CHARSET=utf8mb4 COMMENT='澳洲allied邮编表';
-- AU allied remote area table
CREATE TABLE IF NOT EXISTS au_allied_remote (
    id INT AUTO_INCREMENT PRIMARY KEY,
    postcode VARCHAR(10) NOT NULL COMMENT '邮编',
    remote_type VARCHAR(20) COMMENT '偏远类型',
    INDEX idx_postcode (postcode)
) ENGINE=InnoDB DEFAULT CHARSET=utf8mb4 COMMENT='澳洲allied偏远表';
-- ============================================
-- EUR logistics price tables
-- ============================================
-- EUR trucking: DHL price table (price per IP tier)
CREATE TABLE IF NOT EXISTS eur_dhl_price (
    id INT AUTO_INCREMENT PRIMARY KEY,
    price_type VARCHAR(20) NOT NULL COMMENT '价格类型',
    country VARCHAR(50) NOT NULL COMMENT '国家',
    postalcode VARCHAR(10) COMMENT '邮编',
    ip_1 DECIMAL(10,2) COMMENT '1IP价格',
    ip_2 DECIMAL(10,2) COMMENT '2IP价格',
    ip_3 DECIMAL(10,2) COMMENT '3IP价格',
    ip_4 DECIMAL(10,2) COMMENT '4IP价格',
    ip_5 DECIMAL(10,2) COMMENT '5IP价格',
    ip_6 DECIMAL(10,2) COMMENT '6IP价格',
    INDEX idx_country (country)
) ENGINE=InnoDB DEFAULT CHARSET=utf8mb4 COMMENT='欧洲DHL卡派价格表';
-- ============================================
-- Seed data: logistics company configuration
-- ============================================
INSERT INTO logistics_company (company_code, company_name, country, logistics_type, port, currency) VALUES
-- UK
('UK_DPD', '智谷-DPD', 'UK', 'EXPRESS', 'DEFAULT', 'GBP'),
('UK_BIG', '智谷-大件', 'UK', 'COURIER', 'DEFAULT', 'GBP'),
('UK_KPZG', '海GB-卡派', 'UK', 'COURIER', 'DEFAULT', 'GBP'),
('UK_KPNV', '卡派-NV', 'UK', 'COURIER', 'DEFAULT', 'GBP'),
-- US
('US_FEDEX_PP', 'Fedex-邮差小马', 'US', 'EXPRESS', 'WEST', 'USD'),
('US_FEDEX_KH', 'Fedex-金宏亚', 'US', 'EXPRESS', 'WEST', 'USD'),
('US_FEDEX_HOME', 'Fedex-HOME', 'US', 'EXPRESS', 'WEST', 'USD'),
('US_FEDEX_GROUND', 'Fedex-GROUND', 'US', 'EXPRESS', 'WEST', 'USD'),
('US_GIGA', '大健-GIGA', 'US', 'COURIER', 'DEFAULT', 'USD'),
('US_CEVA', '大健-CEVA', 'US', 'COURIER', 'DEFAULT', 'USD'),
('US_METRO', 'Metro-SAIR', 'US', 'COURIER', 'DEFAULT', 'USD'),
('US_XMILES', 'XMILES-SAIR', 'US', 'COURIER', 'DEFAULT', 'USD'),
('US_AM_WEST', 'AM-美西', 'US', 'COURIER', 'WEST', 'USD'),
('US_AM_EAST', 'AM-美东', 'US', 'COURIER', 'EAST', 'USD'),
-- AU
('AU_EPARCEL', 'AU-eparcel', 'AU', 'EXPRESS', 'DEFAULT', 'AUD'),
('AU_TOLL', 'AU-Toll', 'AU', 'COURIER', 'DEFAULT', 'AUD'),
('AU_ALLIED', 'AU-Allied', 'AU', 'COURIER', 'DEFAULT', 'AUD');

View File

@ -0,0 +1,176 @@
"""物流费用计算服务测试文件"""
import pytest
from logistics_service import LogisticsService
from logisticsClass.logisticsBaseClass import PortType
class TestLogisticsService:
"""物流服务测试类"""
def test_detect_country_uk(self):
"""测试英国邮编识别"""
assert LogisticsService._detect_country("PA2 9BF") == "UK"
assert LogisticsService._detect_country("SW1A 1AA") == "UK"
assert LogisticsService._detect_country("BT1 1AA") == "UK"
def test_detect_country_us(self):
"""测试美国邮编识别"""
assert LogisticsService._detect_country("10001") == "US"
assert LogisticsService._detect_country("10001-1234") == "US"
assert LogisticsService._detect_country("90210") == "US"
def test_detect_country_au(self):
"""测试澳洲邮编识别"""
assert LogisticsService._detect_country("2000") == "AU"
assert LogisticsService._detect_country("3000") == "AU"
def test_parse_packages(self):
"""测试包裹解析"""
packages_data = [
{"name": "包裹1", "length": 63, "width": 59, "height": 48, "weight": 8000},
{"name": "包裹2", "length": 50, "width": 40, "height": 30, "weight": 5000},
]
packages = LogisticsService._parse_packages(packages_data)
assert len(packages.packages) == 2
assert packages.packages[0].fst_size == 63
assert packages.packages[1].fst_size == 50
def test_parse_packages_default_name(self):
"""测试包裹解析-默认名称"""
packages_data = [
{"length": 63, "width": 59, "height": 48, "weight": 8000},
]
packages = LogisticsService._parse_packages(packages_data)
assert len(packages.packages) == 1
def test_calculate_uk(self):
"""测试英国物流费用计算"""
packages = [{"length": 63, "width": 59, "height": 48, "weight": 8000}]
result = LogisticsService.calculate_uk("PA2 9BF", packages)
assert result["country"] == "UK"
assert result["postcode"] == "PA2 9BF"
assert result["optimal_channel"] is not None
assert result["optimal_fee"] is not None
assert result["currency"] == "GBP"
assert len(result["all_channels"]) > 0
def test_calculate_uk_london(self):
"""测试英国伦敦邮编"""
packages = [{"length": 30, "width": 20, "height": 10, "weight": 2000}]
result = LogisticsService.calculate_uk("SW1A 1AA", packages)
assert result["country"] == "UK"
assert result["optimal_channel"] is not None
def test_calculate_us(self):
"""测试美国物流费用计算"""
packages = [{"length": 63, "width": 59, "height": 48, "weight": 8000}]
result = LogisticsService.calculate_us("10001", packages)
assert result["country"] == "US"
assert result["postcode"] == "10001"
assert result["optimal_channel"] is not None
assert result["optimal_fee"] is not None
assert result["currency"] == "USD"
def test_calculate_us_west(self):
"""测试美国美西邮编"""
packages = [{"length": 50, "width": 40, "height": 30, "weight": 5000}]
result = LogisticsService.calculate_us("90001", packages)
assert result["country"] == "US"
assert result["optimal_channel"] is not None
def test_calculate_auto_detect(self):
    """calculate() infers the destination country from the postcode format."""
    cargo = [{"length": 63, "width": 59, "height": 48, "weight": 8000}]
    # UK-style postcode routes to the UK calculator.
    uk_quote = LogisticsService.calculate("PA2 9BF", cargo)
    assert uk_quote["country"] == "UK"
    # Five-digit ZIP routes to the US calculator.
    us_quote = LogisticsService.calculate("10001", cargo)
    assert us_quote["country"] == "US"
def test_calculate_multiple_packages(self):
    """Multi-package shipments report the package count and summed weight."""
    cargo = [
        {"length": 63, "width": 59, "height": 48, "weight": 8000},
        {"length": 50, "width": 40, "height": 30, "weight": 5000},
        {"length": 40, "width": 30, "height": 20, "weight": 3000},
    ]
    quote = LogisticsService.calculate_uk("PA2 9BF", cargo)
    assert quote["package_count"] == 3
    # 8000 + 5000 + 3000 grams, reported in kilograms.
    assert quote["total_weight"] == 16.0
def test_get_company_detail(self):
    """Requesting a named carrier returns its breakdown and total."""
    cargo = [{"length": 63, "width": 59, "height": 48, "weight": 8000}]
    breakdown = LogisticsService.get_company_detail("PA2 9BF", cargo, "智谷-DPD")
    assert breakdown["company"] == "智谷-DPD"
    assert breakdown["currency"] == "GBP"
    assert "detail" in breakdown
    assert "total" in breakdown
def test_invalid_postcode(self):
    """A postcode that matches neither country raises ValueError."""
    cargo = [{"length": 63, "width": 59, "height": 48, "weight": 8000}]
    with pytest.raises(ValueError):
        LogisticsService.calculate("invalid", cargo)
def test_result_structure(self):
    """Every quote exposes the expected top-level and per-channel keys."""
    cargo = [{"length": 63, "width": 59, "height": 48, "weight": 8000}]
    quote = LogisticsService.calculate_uk("PA2 9BF", cargo)
    # Top-level contract of the quote payload.
    required = (
        "country", "postcode", "optimal_channel", "optimal_fee",
        "currency", "all_channels", "package_count", "total_weight",
    )
    for field in required:
        assert field in quote, f"缺少字段: {field}"
    # Each channel entry carries fee, currency, type and availability.
    for info in quote["all_channels"].values():
        assert "fee" in info
        assert "currency" in info
        assert "type" in info
        assert "available" in info
class TestPackageCalculation:
    """Edge-case checks for package sizes and weights."""

    def test_small_package(self):
        """A small, light parcel still receives a priced optimal channel."""
        cargo = [{"length": 20, "width": 15, "height": 10, "weight": 500}]
        quote = LogisticsService.calculate_uk("SW1A 1AA", cargo)
        assert quote["optimal_fee"] is not None

    def test_large_package(self):
        """An oversized parcel does not crash the UK calculation."""
        cargo = [{"length": 200, "width": 100, "height": 80, "weight": 50000}]
        quote = LogisticsService.calculate_uk("PA2 9BF", cargo)
        # Individual channels may reject the size; the call must still return.
        assert quote is not None

    def test_heavy_package(self):
        """A very heavy parcel does not crash the US calculation."""
        cargo = [{"length": 50, "width": 40, "height": 30, "weight": 80000}]
        quote = LogisticsService.calculate_us("10001", cargo)
        assert quote is not None
# Allow running this test module directly (outside a pytest invocation).
if __name__ == "__main__":
    pytest.main([__file__, "-v"])

53
utils/config_manager.py Normal file
View File

@ -0,0 +1,53 @@
"""配置管理模块"""
import json
import os
from pathlib import Path
from typing import Any, Dict
class ConfigManager:
    """Singleton that loads and serves settings from config/database.json."""

    _instance = None
    # Shared parsed configuration; empty until the first load.
    _config: Dict[str, Any] = {}

    def __new__(cls):
        # Classic singleton: every construction hands back the same object.
        if cls._instance is None:
            cls._instance = super().__new__(cls)
        return cls._instance

    def __init__(self):
        # Read the JSON file only the first time the singleton is built.
        if not self._config:
            self._load_config()

    def _load_config(self):
        """Read config/database.json (relative to the project root) into memory.

        Raises FileNotFoundError when the file is missing.
        """
        path = Path(__file__).parent.parent / "config" / "database.json"
        if not path.exists():
            raise FileNotFoundError(f"配置文件不存在: {path}")
        with open(path, "r", encoding="utf-8") as fh:
            self._config = json.load(fh)

    def get(self, key: str, default: Any = None) -> Any:
        """Look up a dotted key such as "database.host", else return default."""
        node: Any = self._config
        for part in key.split("."):
            # Descend only through dicts; anything else ends the lookup.
            node = node.get(part) if isinstance(node, dict) else None
            if node is None:
                return default
        return node

    def get_database_config(self) -> Dict[str, Any]:
        """Return the whole "database" section (empty dict when absent)."""
        return self._config.get("database", {})
# Global singleton configuration instance shared by all importers of this module.
config = ConfigManager()

View File

@ -1,13 +1,22 @@
import pymysql import pymysql
from sqlalchemy import create_engine from sqlalchemy import create_engine
from utils.config_manager import config
class MySQLconnect(): class MySQLconnect():
def __init__(self, dbname: str): def __init__(self, dbname: str = None):
if isinstance(dbname, str): db_config = config.get_database_config()
self.dbname = dbname self.host = db_config.get("host", "192.168.100.33")
self.host = '192.168.100.33' self.port = db_config.get("port", 3306)
else: self.user = db_config.get("username", "zhenggantian")
self.password = db_config.get("password", "123456")
self.dbname = dbname or db_config.get("database", "logistics")
self.charset = db_config.get("charset", "utf8")
self.pool_size = db_config.get("pool_size", 10)
self.max_overflow = db_config.get("max_overflow", 5)
self.pool_recycle = db_config.get("pool_recycle", 3600)
if not isinstance(self.dbname, str):
raise TypeError("dbname must be a string") raise TypeError("dbname must be a string")
def __enter__(self): def __enter__(self):
@ -24,9 +33,19 @@ class MySQLconnect():
raise raise
def engine(self): def engine(self):
return create_engine("mysql+pymysql://zhenggantian:123456@" + self.host + f":3306/{self.dbname}", return create_engine(
pool_size=10, max_overflow=5, pool_recycle=3600) f"mysql+pymysql://{self.user}:{self.password}@{self.host}:{self.port}/{self.dbname}",
pool_size=self.pool_size,
max_overflow=self.max_overflow,
pool_recycle=self.pool_recycle
)
def connect(self): def connect(self):
return pymysql.connect(host=self.host, port=3306, database=self.dbname, user="zhenggantian", password="123456", return pymysql.connect(
charset="utf8") host=self.host,
port=self.port,
database=self.dbname,
user=self.user,
password=self.password,
charset=self.charset
)

BIN
~$单包裹SKU售价分析1.xlsx (Stored with Git LFS)

Binary file not shown.

BIN
~$表头更新方案.xlsx (Stored with Git LFS)

Binary file not shown.

BIN
拦截数据/~$1-3月利润分段.xlsx (Stored with Git LFS)

Binary file not shown.

BIN
拦截数据/~$batch_release.xlsx (Stored with Git LFS)

Binary file not shown.

BIN
拦截数据/~$product_property_data.xlsx (Stored with Git LFS)

Binary file not shown.

BIN
拦截数据/~$工作日报-文雪茜.xlsx (Stored with Git LFS)

Binary file not shown.

BIN
拦截数据/~$拦截总表.xlsx (Stored with Git LFS)

Binary file not shown.

Binary file not shown.

BIN
拦截数据/~$订单数据.xlsx (Stored with Git LFS)

Binary file not shown.

File diff suppressed because one or more lines are too long

View File

@ -28,7 +28,15 @@ def get_package_real_vol_by_api(packages_id):
# package_width = resp[0][1] # package_width = resp[0][1]
# package_hight = resp[0][2] # package_hight = resp[0][2]
# 拦截 # 拦截
url = f'https://cp.maso.hk/index.php?main=biphp&act=package_fund&key=W6BOYJ7BH27YCGRFCA0LWBVKMU1KRU5Q&package={package_id}' url = f'https://cp.baycheer.com/index.php?main=biphp&act=package_fund&key=W6BOYJ7BH27YCGRFCA0LWBVKMU1KRU5Q&package={package_id}'
resp = requests.get(url).json() resp = requests.get(url).json()
if resp['code'] == "0": if resp['code'] == "0":
weight = int(float(resp['data'][0]['weight'])*1000) weight = int(float(resp['data'][0]['weight'])*1000)

BIN
跟单测试源文件.xlsx (Stored with Git LFS) Normal file

Binary file not shown.