logistics/cal_sell_price.ipynb

In [2]:
from utils.gtools import MySQLconnect
import pandas as pd
ads = MySQLconnect('ads')
engine = ads.engine()
conn = ads.connect()
# sql = """   SELECT
#                 sku.SKU,
#                 包裹数据,
#                 成本价
#             FROM
#                 ods.`stg_bayshop_litfad_sku` sku
#                 LEFT JOIN ads.new_erp_sku_size size ON sku.SKU=size.SKU
# 							 WHERE sku.状态 = "启用"
# 							 AND EXISTS (SELECT 1 FROM ods.stg_bayshop_litfad_spu s1 where s1.`产品PID` = sku.`产品PID`  AND s1.状态 = "正常销售" and s1.`产品分类` regexp "light")  AND sku.添加时间 >="2024-01-01"
# """
# df = pd.read_sql(sql, conn)
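The UserWarning shown in the output of the next cell comes from handing pd.read_sql the raw DBAPI connection returned by ads.connect(). Passing the SQLAlchemy engine created above avoids it. A minimal sketch, assuming MySQLconnect.engine() returns an ordinary SQLAlchemy Engine; the sample query only reuses table and column names from the commented query above:

# pd.read_sql accepts a SQLAlchemy Engine directly, which is the supported
# connectable type and silences the "Other DBAPI2 objects are not tested" warning.
sample = pd.read_sql(
    'SELECT sku.SKU, 包裹数据, 成本价 FROM ods.`stg_bayshop_litfad_sku` sku LIMIT 10',
    engine,
)
sample.head()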
In [3]:
# Calculate selling prices


from sell.sell_price import call_sell_and_order_price
import json
import pandas as pd
DATE_LIST = [
    "2024-01-01", "2024-02-01", "2024-03-01", "2024-04-01", "2024-05-01",
    "2024-06-01", "2024-07-01", "2024-08-01", "2024-09-01", "2024-10-01",
    "2024-11-01", "2024-12-01", "2025-01-01", "2025-02-01", "2025-03-01",
]
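# The same first-of-month list could be generated instead of hard-coded, e.g.:
# DATE_LIST = pd.date_range("2024-01-01", "2025-03-01", freq="MS").strftime("%Y-%m-%d").tolist()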

def cal_sell_price(df):
    """
    计算所有SKU的售价,物流分摊费
    """
    for index, row in df.iterrows():
        try:
            package_dict = json.loads(row['包裹数据'])
            sell_price,order_price,order_type = call_sell_and_order_price(row['成本价'], package_dict)
        except Exception as e:
            print(f" {row['SKU']} 报错: {e}")
            continue
        df.loc[index, '售价'] = sell_price
        df.loc[index, '物流分摊费'] = order_price
        df.loc[index, '物流类型'] = order_type
        # print(f"sku:{row['SKU']},售价:{sell_price},物流分摊费:{order_price},物流类型:{order_type}")
    return df
for date in DATE_LIST:
    sql = f"""   SELECT
	sku.SKU,
	包裹数据,
	成本价,
	产品售价,
	`产品品类`,
	`产品分类` 
FROM
	ods.`stg_bayshop_litfad_sku` sku
	LEFT JOIN ads.new_erp_sku_size size ON sku.SKU = size.SKU 
	LEFT JOIN ods.stg_bayshop_litfad_spu spu ON sku.`产品PID` =spu.`产品PID`
WHERE
	sku.状态 = "启用" 
	AND spu.`产品品类` REGEXP "66"
	AND spu.`状态` ="正常销售"
	AND DATE_FORMAT( sku.添加时间, "%Y-%m-01" )= "{date}"
    """
    df = pd.read_sql(sql, conn)
    df1 = cal_sell_price(df)
    df1.to_excel(f'售价_{date}.xlsx', index=False)
    print(f"日期:{date}, 售价计算完成")
C:\Users\Admin\AppData\Local\Temp\ipykernel_9392\3242304332.py:43: UserWarning: pandas only supports SQLAlchemy connectable (engine/connection) or database string URI or sqlite3 DBAPI2 connection. Other DBAPI2 objects are not tested. Please consider using SQLAlchemy.
  df = pd.read_sql(sql, conn)
Date: 2024-01-01, sell price calculation finished
C:\Users\Admin\AppData\Local\Temp\ipykernel_9392\3242304332.py:43: UserWarning: pandas only supports SQLAlchemy connectable (engine/connection) or database string URI or sqlite3 DBAPI2 connection. Other DBAPI2 objects are not tested. Please consider using SQLAlchemy.
  df = pd.read_sql(sql, conn)
Date: 2024-02-01, sell price calculation finished
C:\Users\Admin\AppData\Local\Temp\ipykernel_9392\3242304332.py:43: UserWarning: pandas only supports SQLAlchemy connectable (engine/connection) or database string URI or sqlite3 DBAPI2 connection. Other DBAPI2 objects are not tested. Please consider using SQLAlchemy.
  df = pd.read_sql(sql, conn)
Date: 2024-03-01, sell price calculation finished
C:\Users\Admin\AppData\Local\Temp\ipykernel_9392\3242304332.py:43: UserWarning: pandas only supports SQLAlchemy connectable (engine/connection) or database string URI or sqlite3 DBAPI2 connection. Other DBAPI2 objects are not tested. Please consider using SQLAlchemy.
  df = pd.read_sql(sql, conn)
Date: 2024-04-01, sell price calculation finished
C:\Users\Admin\AppData\Local\Temp\ipykernel_9392\3242304332.py:43: UserWarning: pandas only supports SQLAlchemy connectable (engine/connection) or database string URI or sqlite3 DBAPI2 connection. Other DBAPI2 objects are not tested. Please consider using SQLAlchemy.
  df = pd.read_sql(sql, conn)
---------------------------------------------------------------------------
KeyboardInterrupt                         Traceback (most recent call last)
Cell In[3], line 44
     26     sql = f"""   SELECT
     27 	sku.SKU,
     28 	包裹数据,
   (...)
     41 	AND DATE_FORMAT( sku.添加时间, "%Y-%m-01" )= "{date}"
     42     """
     43     df = pd.read_sql(sql, conn)
---> 44     df1 = cal_sell_price(df)
     45     df1.to_excel(f'售价_{date}.xlsx', index=False)
     46     print(f"日期:{date}, 售价计算完成")

Cell In[3], line 16, in cal_sell_price(df)
     14 try:
     15     package_dict = json.loads(row['包裹数据'])
---> 16     sell_price,order_price,order_type = call_sell_and_order_price(row['成本价'], package_dict)
     17 except Exception as e:
     18     print(f" {row['SKU']} 报错: {e}")

File d:\test\logistics\sell\sell_price.py:120, in call_sell_and_order_price(price, package_dict)
    118 # 修改版本
    119 sell_price = litfad.cal_sell_price_2025()
--> 120 order_price, order_type = us_ocean_order_price(packages)
    121 return round(sell_price,2),order_price,order_type

File d:\test\logistics\sell\sell_price.py:23, in us_ocean_order_price(packages)
     21     base_fee = 999999
     22     break
---> 23 base_fee += df1[df1['g'] >= lbs_weight]['费用'].iloc[0]
     24 if package.fst_size >= 116 or package.sed_size >= 71 or package.girth >= 251:
     25     other_fee += 16.3

File c:\ProgramData\anaconda3\Lib\site-packages\pandas\core\frame.py:4093, in DataFrame.__getitem__(self, key)
   4091 # Do we have a (boolean) 1d indexer?
   4092 if com.is_bool_indexer(key):
-> 4093     return self._getitem_bool_array(key)
   4095 # We are left with two options: a single key, and a collection of keys,
   4096 # We interpret tuples as collections only for non-MultiIndex
   4097 is_single_key = isinstance(key, tuple) or not is_list_like(key)

File c:\ProgramData\anaconda3\Lib\site-packages\pandas\core\frame.py:4155, in DataFrame._getitem_bool_array(self, key)
   4152     return self.copy(deep=None)
   4154 indexer = key.nonzero()[0]
-> 4155 return self._take_with_is_copy(indexer, axis=0)

File c:\ProgramData\anaconda3\Lib\site-packages\pandas\core\generic.py:4153, in NDFrame._take_with_is_copy(self, indices, axis)
   4142 @final
   4143 def _take_with_is_copy(self, indices, axis: Axis = 0) -> Self:
   4144     """
   4145     Internal version of the `take` method that sets the `_is_copy`
   4146     attribute to keep track of the parent dataframe (using in indexing
   (...)
   4151     See the docstring of `take` for full explanation of the parameters.
   4152     """
-> 4153     result = self.take(indices=indices, axis=axis)
   4154     # Maybe set copy if we didn't actually change the index.
   4155     if self.ndim == 2 and not result._get_axis(axis).equals(self._get_axis(axis)):

File c:\ProgramData\anaconda3\Lib\site-packages\pandas\core\generic.py:4133, in NDFrame.take(self, indices, axis, **kwargs)
   4128     # We can get here with a slice via DataFrame.__getitem__
   4129     indices = np.arange(
   4130         indices.start, indices.stop, indices.step, dtype=np.intp
   4131     )
-> 4133 new_data = self._mgr.take(
   4134     indices,
   4135     axis=self._get_block_manager_axis(axis),
   4136     verify=True,
   4137 )
   4138 return self._constructor_from_mgr(new_data, axes=new_data.axes).__finalize__(
   4139     self, method="take"
   4140 )

File c:\ProgramData\anaconda3\Lib\site-packages\pandas\core\internals\managers.py:894, in BaseBlockManager.take(self, indexer, axis, verify)
    891 indexer = maybe_convert_indices(indexer, n, verify=verify)
    893 new_labels = self.axes[axis].take(indexer)
--> 894 return self.reindex_indexer(
    895     new_axis=new_labels,
    896     indexer=indexer,
    897     axis=axis,
    898     allow_dups=True,
    899     copy=None,
    900 )

File c:\ProgramData\anaconda3\Lib\site-packages\pandas\core\internals\managers.py:687, in BaseBlockManager.reindex_indexer(self, new_axis, indexer, axis, fill_value, allow_dups, copy, only_slice, use_na_proxy)
    680     new_blocks = self._slice_take_blocks_ax0(
    681         indexer,
    682         fill_value=fill_value,
    683         only_slice=only_slice,
    684         use_na_proxy=use_na_proxy,
    685     )
    686 else:
--> 687     new_blocks = [
    688         blk.take_nd(
    689             indexer,
    690             axis=1,
    691             fill_value=(
    692                 fill_value if fill_value is not None else blk.fill_value
    693             ),
    694         )
    695         for blk in self.blocks
    696     ]
    698 new_axes = list(self.axes)
    699 new_axes[axis] = new_axis

File c:\ProgramData\anaconda3\Lib\site-packages\pandas\core\internals\managers.py:688, in <listcomp>(.0)
    680     new_blocks = self._slice_take_blocks_ax0(
    681         indexer,
    682         fill_value=fill_value,
    683         only_slice=only_slice,
    684         use_na_proxy=use_na_proxy,
    685     )
    686 else:
    687     new_blocks = [
--> 688         blk.take_nd(
    689             indexer,
    690             axis=1,
    691             fill_value=(
    692                 fill_value if fill_value is not None else blk.fill_value
    693             ),
    694         )
    695         for blk in self.blocks
    696     ]
    698 new_axes = list(self.axes)
    699 new_axes[axis] = new_axis

File c:\ProgramData\anaconda3\Lib\site-packages\pandas\core\internals\blocks.py:1307, in Block.take_nd(self, indexer, axis, new_mgr_locs, fill_value)
   1304     allow_fill = True
   1306 # Note: algos.take_nd has upcast logic similar to coerce_to_target_dtype
-> 1307 new_values = algos.take_nd(
   1308     values, indexer, axis=axis, allow_fill=allow_fill, fill_value=fill_value
   1309 )
   1311 # Called from three places in managers, all of which satisfy
   1312 #  these assertions
   1313 if isinstance(self, ExtensionBlock):
   1314     # NB: in this case, the 'axis' kwarg will be ignored in the
   1315     #  algos.take_nd call above.

File c:\ProgramData\anaconda3\Lib\site-packages\pandas\core\array_algos\take.py:117, in take_nd(arr, indexer, axis, fill_value, allow_fill)
    114     return arr.take(indexer, fill_value=fill_value, allow_fill=allow_fill)
    116 arr = np.asarray(arr)
--> 117 return _take_nd_ndarray(arr, indexer, axis, fill_value, allow_fill)

File c:\ProgramData\anaconda3\Lib\site-packages\pandas\core\array_algos\take.py:162, in _take_nd_ndarray(arr, indexer, axis, fill_value, allow_fill)
    157     out = np.empty(out_shape, dtype=dtype)
    159 func = _get_take_nd_function(
    160     arr.ndim, arr.dtype, out.dtype, axis=axis, mask_info=mask_info
    161 )
--> 162 func(arr, indexer, out, fill_value)
    164 if flip_order:
    165     out = out.T

KeyboardInterrupt:
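The traceback shows the run was interrupted inside us_ocean_order_price, at the rate lookup base_fee += df1[df1['g'] >= lbs_weight]['费用'].iloc[0], which rebuilds a boolean mask over the rate table for every package of every SKU. Below is a minimal sketch of a binary-search lookup that selects the same bracket, assuming (as the original expression implies) that df1 is a rate table sorted ascending on the weight-break column 'g' with the charge in '费用'; the names rate_table and lookup_base_fee are illustrative only, not part of the project:

import numpy as np
import pandas as pd

def lookup_base_fee(rate_table: pd.DataFrame, lbs_weight: float) -> float:
    """Fee of the first weight break >= lbs_weight.

    Intended to match rate_table[rate_table['g'] >= lbs_weight]['费用'].iloc[0]
    on a table sorted ascending by 'g', without building a boolean mask per call.
    """
    breaks = rate_table['g'].to_numpy()
    # side='left' gives the first index with breaks[idx] >= lbs_weight
    idx = np.searchsorted(breaks, lbs_weight, side='left')
    if idx == len(breaks):
        # Weight exceeds the largest bracket; sell_price.py appears to handle
        # this case separately (base_fee = 999999), so signal it to the caller.
        raise ValueError(f'weight {lbs_weight} exceeds the largest break {breaks[-1]}')
    return float(rate_table['费用'].iloc[idx])

Converting the 'g' column to a NumPy array once, outside the per-SKU loop, keeps each lookup at O(log n) and removes the per-call row selection that the interrupted stack is sitting in.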