
MOD: add material creative (idea) endpoint

cxyu 3 years ago
parent commit
9c95282ff9
5 changed files with 168 additions and 22 deletions
  1. data_manage/public_analysis.py (+98 -0)
  2. handlers/HandlerBase.py (+18 -20)
  3. handlers/PublicAnalysisHandler.py (+35 -1)
  4. model/CommonUtils.py (+0 -1)
  5. urls.py (+17 -0)

+ 98 - 0
data_manage/public_analysis.py

@@ -610,5 +610,103 @@ def advertisement_rank(user_id, start, end, type, page, page_size, order, order_
     return getLimitSumData(db, sql, sum_sql, page, page_size)
 
 
+"""创意展示"""
+
+
+def idea_rank(user_id, start, end, page, page_size, order, order_by, book, channel, is_singleimg,
+              is_video, labels, collect, data_type):
+    # TODO: switch data access to ClickHouse
+
+    # TODO: time attribution -- fetch all records in the time range and aggregate them (do the aggregation over the weekend; pull the raw data out first)
+
+    # TODO: frontend changes -- add an owner display field and a userid request parameter; drop the upload time and the delivery start time
+
+    if user_id in super_auth():
+        op1 = ''
+    else:
+        user = tuple([i['nick_name'] for i in get_pitcher({'user_id': user_id})] + [get_user_name_by_id(user_id)])
+        if len(user) == 1:
+            op1 = f" and pitcher ='{user[0]}'"
+        else:
+            op1 = f" and pitcher in {str(user)}"
+
+    op4 = f" and channel='{channel}'" if channel else ''
+
+    op10 = f" and book='{book}'" if book else ''
+    # TODO: add label handling -- map label ids to their corresponding plans
+
+    op11 = f" and image_id like '%,%' " if not is_singleimg else ''
+    op12 = f" and is_video" if is_video else ''  # 进行对应修改1----图片
+
+    # Public data: same as the material library, the personal view only shows personal (team) data
+    # TODO: later restrict op_or1 by owner -- classify the owner by dt and cost when dw_image_cost_day is generated
+    op_or1 = f' or (dt<date_add(now(),interval -5 day) or cost>5000) ' if data_type == 'all' else ''
+    # clicktimes, view_times, consume_amount, click_rate -- map each to its database column
+    if order_by == 'clicktimes':
+        order_by = 'click_count'
+    if order_by == 'view_times':
+        order_by = 'view_count'
+    if order_by == 'consume_amount':
+        order_by = 'cost'
+    if order_by == 'click_rate':
+        order_by = 'if(click_count=0 or view_count =0 or view_count is null or click_count is null,0,click_count / view_count) '
+
+    op_order = f" order by {order_by}  {order}" if order_by and order else ''
+
+    # The time range is the base constraint and must always be applied
+    op_time_bigger = f" and dt>='{start}' " if start else ''
+    op_time_small = f" and dt<='{end}' " if end else ''
+
+    db = MysqlUtils().dm
+
+    # TODO: label_id needs a matching change
+    sql = f"""
+    select 
+    row_number () over() as id,
+    book as novels,
+    dt as createTime,
+    `type` as channel,
+    'all' as dataType,
+    owner as creator,
+    0 as delFlag,
+    False as isCollected,
+    '' as labels,
+    download_path as downloadPath,
+    now() as startDate,
+    now() as endDate,
+    height,
+    width,
+    preview_url as media,
+    format as mediaFormat,
+    size as mediaSize,
+    if(click_count=0 or view_count =0 or view_count is null or click_count is null,0,click_count / view_count) as clickRate,
+    width / if(height, height, 1) as aspect_ratio,
+    cost as consumeAmount,
+    view_count as viewTimes,
+    click_count as clickTimes,
+    round((cost/view_count)*1000,2) cpm, 
+    round(click_count/view_count,4) ctr,
+    round(cost/click_count,2) cpc,
+    title,
+    description as article,
+    now() as updateTime,
+    null as updateBy,
+    if(is_video,2,1) as type,
+    videoBitRate,
+    videoFirstPage,
+    videoLength,
+    use_times as userTimes
+    from dw_image_cost_day
+    where replace(preview_url, ' ,', '') != '' 
+    and (1=1 {op1}  {op4}   {op10} {op11}  {op12}  {op_or1}) 
+    {op_time_bigger} {op_time_small} 
+     {op_order} 
+    """
+    print(sql)
+    data, total = getLimitData(db, sql, page, page_size)
+    # ceiling division avoids an extra empty page when total is an exact multiple of page_size
+    pages = (total + page_size - 1) // page_size if page_size else 1
+    data = {'records': data, 'total': total, 'size': page_size, 'current': page, 'pages': pages}
+    return data
+
+
 if __name__ == '__main__':
     print(get_pitcher({"user_id": 78}))
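
The new idea_rank builds its WHERE clause by interpolating request values (book, channel, start, end, user names) straight into an f-string, which is fragile and open to SQL injection. Below is a minimal sketch of building the same optional filters with bound parameters instead; build_idea_filters is a hypothetical helper and the parameter-forwarding call is an assumption, since the real MysqlUtils wrapper may expose parameters differently.

```python
def build_idea_filters(book, channel, start, end):
    """Collect optional WHERE fragments and their bound parameters (sketch)."""
    clauses, params = [], []
    if book:
        clauses.append("book = %s")
        params.append(book)
    if channel:
        clauses.append("channel = %s")
        params.append(channel)
    if start:
        clauses.append("dt >= %s")
        params.append(start)
    if end:
        clauses.append("dt <= %s")
        params.append(end)
    where = (" and " + " and ".join(clauses)) if clauses else ""
    return where, params


# Possible use inside idea_rank (parameter forwarding is assumed, not shown in this commit):
# where_sql, params = build_idea_filters(book, channel, start, end)
# sql = f"select ... from dw_image_cost_day where 1=1{where_sql}"
# data, total = getLimitData(db, sql, page, page_size)  # would need to accept and forward params
```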

+ 18 - 20
handlers/HandlerBase.py

@@ -10,7 +10,8 @@ from model.DateUtils import DateUtils
 
 log = logger()
 
-class BaseHandler(RequestHandler,DateUtils):
+
+class BaseHandler(RequestHandler, DateUtils):
     def __init__(self, application, request, **kwargs):
         RequestHandler.__init__(self, application, request, **kwargs)
         self._status_code = 200
@@ -34,13 +35,16 @@ class BaseHandler(RequestHandler,DateUtils):
         self.set_header("Access-Control-Allow-Headers",
                         "Content-Type, Depth, User-Agent, Token, Origin, X-Requested-With, Accept, Authorization")
         self.set_header("Content-Type", "application/json; charset=UTF-8")
-        self.set_header("Access-Control-Allow-Origin","*")
+        self.set_header("Access-Control-Allow-Origin", "*")
 
+    def write_json_tmp_java(self, data, status_code=200, msg='SUCCESS', total=1, total_data={}):
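+        # Temporary envelope in the Java-style {code, data, msg} format (the method name suggests it mirrors a Java service response)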
+        self.write(json.dumps({'code': status_code, 'data': data, 'msg': msg}))
 
-    def write_json(self, data, status_code=200, msg='success',total=1,total_data={}):
-        self.write(json.dumps({'status': {'msg': msg, "RetCode": status_code},'total':total,'data': data,"total_data":total_data}))
+    def write_json(self, data, status_code=200, msg='success', total=1, total_data={}):
+        self.write(json.dumps(
+            {'status': {'msg': msg, "RetCode": status_code}, 'total': total, 'data': data, "total_data": total_data}))
 
-    def write_fail(self,code=400,msg='error'):
+    def write_fail(self, code=400, msg='error'):
         self.write(json.dumps({'status': {'msg': msg, "RetCode": code}}))
 
     def write_download(self, filename, data):
@@ -55,8 +59,8 @@ class BaseHandler(RequestHandler,DateUtils):
         self.write(df)
 
     def get_args(self):
-        di=json.loads(self.request.body.decode(encoding='utf-8'))
-        if isinstance(di,str):
+        di = json.loads(self.request.body.decode(encoding='utf-8'))
+        if isinstance(di, str):
             di = json.loads(di)
         return di
 
@@ -98,7 +102,7 @@ class BaseHandler(RequestHandler,DateUtils):
     def get_auth(self):
         # Requests that do not require authentication
         authless = ['/api/get_yangguang_data', '/api/git_hook/data_center', '/api/git_hook/qc_web']
-        url = self.request.full_url().split(str(self.settings.get('port'))+'/')[1]
+        url = self.request.full_url().split(str(self.settings.get('port')) + '/')[1]
         if url in authless:
             return True
         else:
@@ -111,21 +115,15 @@ class BaseHandler(RequestHandler,DateUtils):
             except:
                 return False
 
-            origStr += (4-len(origStr)%4)*"="
+            origStr += (4 - len(origStr) % 4) * "="
             print(origStr)
-            print(float(base64.b64decode(origStr.encode('utf-8')).decode(encoding='utf-8',errors='ignore')))
-            b = str(float(base64.b64decode(origStr.encode('utf-8')).decode(encoding='utf-8',errors='ignore')) * int(self.now.day))[:10]
-            print("b:",b)
-            diff =int(time.mktime(time.localtime()))-int(b)
+            print(float(base64.b64decode(origStr.encode('utf-8')).decode(encoding='utf-8', errors='ignore')))
+            b = str(float(base64.b64decode(origStr.encode('utf-8')).decode(encoding='utf-8', errors='ignore')) * int(
+                self.now.day))[:10]
+            print("b:", b)
+            diff = int(time.mktime(time.localtime())) - int(b)
             print(diff)
             if diff < 10:
                 return True
             else:
                 return False
-
-
-
-
-
-
-
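
The token check above base64-decodes origStr, multiplies the decoded value by the current day of month, and accepts it if the result is within 10 seconds of the current Unix timestamp. A minimal client-side sketch consistent with that check follows; how the token actually reaches origStr (header, query parameter, or cookie) is read earlier in get_auth and is not visible in this hunk, so it is assumed here.

```python
import base64
import time
from datetime import datetime


def make_auth_token():
    """Produce a token the get_auth check above would accept (sketch).

    The server computes float(b64decode(token)) * day_of_month and compares it
    to the current Unix timestamp (tolerance < 10 s), so the client encodes
    timestamp / day_of_month. Padding is stripped because the server re-adds
    '=' characters before decoding.
    """
    raw = str(time.time() / datetime.now().day)
    return base64.b64encode(raw.encode("utf-8")).decode("utf-8").rstrip("=")
```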

+ 35 - 1
handlers/PublicAnalysisHandler.py

@@ -1,6 +1,7 @@
 from handlers.HandlerBase import BaseHandler
 from data_manage.public_analysis import *
 import time
+import json
 from model.DateUtils import DateUtils
 
 log = logger()
@@ -148,6 +149,8 @@ class ImageRank(BaseHandler):
 
 
 """素材排行榜"""
+
+
 class AdvertisementRank(BaseHandler):
     def post(self):
         if not self._au:
@@ -155,7 +158,7 @@ class AdvertisementRank(BaseHandler):
         else:
             du = DateUtils()
             arg = self.get_args()
-            show_type = arg.get('show_type', 'public') # display public or private, default public -- public shows data with cost above 5000 or older than 5 days
+            show_type = arg.get('show_type', 'public')  # display public or private, default public -- public shows data with cost above 5000 or older than 5 days
             user_id = arg.get('user_id')
             start = arg.get("start", du.getNow())
             end = arg.get("end")
@@ -172,3 +175,34 @@ class AdvertisementRank(BaseHandler):
             data, total, total_data = advertisement_rank(user_id, start, end, type, page, page_size, order, order_by,
                                                          book, channel, pitcher, has_order, is_video, show_type)
             self.write_json(data=data, total=total, total_data=total_data)
+
+
+"""素材创意"""
+
+
+class AdvertisementIdea(BaseHandler):
+    def post(self):
+        if not self._au:
+            self.write_fail(msg='auth error')
+        else:
+
+            du = DateUtils()
+            arg = self.get_args()
+            data_type = arg.get('dataType', 'all')  # whether the data is personal: 'private' for personal, 'all' for shared
+            channel = arg.get('channel')  # channel -- Moments feed, Douyin, Guangdiantong, etc.; options: Moments feed, official account traffic  # TODO: parked for now
+            labels = arg.get('labels')  # labels
+            collect = arg.get('collect')  # whether to use label data
+            page = arg.get('pageNum', 1)
+            page_size = arg.get('pageSize', 20)
+            order = 'desc' if arg.get('upOrder') else 'asc'
+            order_by = arg.get('sortRule')  # clicktimes, view_times, consume_amount, click_rate -- each maps to a database column
+            start = arg.get('beginDate', du.getNow())
+            end = arg.get('endDate')
+            book = arg.get('novels')
+            is_video = arg.get('type')
+            is_singleimg = arg.get('singleImg')  # whether it is an image group -- none by default
+            user_id = arg.get('userId', '192')  # TODO: 192 is the test default
+
+            data = idea_rank(user_id, start, end, page, page_size, order, order_by,
+                                    book, channel, is_singleimg, is_video, labels, collect, data_type)
+            self.write_json_tmp_java(data=data)
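
For reference, a request against the new route as the handler reads it: the JSON keys below match the arg.get(...) calls in AdvertisementIdea, while the host, port, example values, and the auth token checked by self._au are placeholders not defined in this commit.

```python
import requests

payload = {
    "dataType": "all",          # 'all' = shared data, 'private' = personal data
    "channel": None,            # e.g. Moments feed / Douyin / Guangdiantong
    "labels": None,
    "collect": None,
    "pageNum": 1,
    "pageSize": 20,
    "upOrder": True,            # truthy -> order desc, falsy -> asc
    "sortRule": "consume_amount",
    "beginDate": "2022-01-01",
    "endDate": "2022-01-07",
    "novels": None,
    "type": None,               # truthy selects video material
    "singleImg": None,
    "userId": "192",
}
# Host/port are placeholders; authentication is omitted here.
resp = requests.post("http://localhost:8888/data/advertisement/database/idea", json=payload)
print(resp.json())  # {'code': 200, 'data': {...}, 'msg': 'SUCCESS'} per write_json_tmp_java
```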

+ 0 - 1
model/CommonUtils.py

@@ -34,7 +34,6 @@ def getLimitData(DataBase,sql,page,page_size):
     total = DataBase.getData(f"select count(1) from ({sql}) a")[0][0]
     if page and page_size:
         sql += f" limit {(page - 1) * page_size},{page_size} "
-    # print(sql)
     data = DataBase.getData_json(sql)
     return data,total
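
getLimitData counts the rows by wrapping the query in a subquery, then appends a LIMIT clause with a 1-based page offset. A short worked example of the clauses it produces, using arbitrary numbers:

```python
# What getLimitData(db, sql, page, page_size) builds for page=3, page_size=20:
sql = "select id, book, cost from dw_image_cost_day"
page, page_size = 3, 20

count_sql = f"select count(1) from ({sql}) a"       # total row count, unaffected by paging
paged_sql = sql + f" limit {(page - 1) * page_size},{page_size}"
# -> "... limit 40,20": skip the first two pages and return rows 41-60
```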
 

+ 17 - 0
urls.py

@@ -42,10 +42,27 @@ urls = [
     # Advertisement leaderboard
     (r'/data/advertisement/ad_rank', AdvertisementRank),  # material consumption ranking
 
+    # Ad material library -- creatives
+    (r'/data/advertisement/database/idea', AdvertisementIdea),
+
+    # Ad material library -- images
+    (r'/data/advertisement/database/media', AdvertisementRank),
+
+    # Ad material library -- text
+    (r'/data/advertisement/database/content', AdvertisementRank),
+
+    # Ad material library -- labels
+    (r'/data/advertisement/database/label', AdvertisementRank),
+
+    # Ad material library -- label operations -- for now (from Monday) labels cannot be modified, only existing ones can be used
+    (r'/data/advertisement/database/label/operate', AdvertisementRank),
+
     # CRUD
     (r'/operate/channel_group.*', OperateHandler.ChannelGroupHandler),  # official account grouping settings
 
     # Self-service analysis
     (r'/data/analysis.*', AnalysisHandler.ChannelAnalysisHandler),
 
+
+
 ]
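
The new routes under /data/advertisement/database/ mostly point at AdvertisementRank as placeholders; only /idea is backed by the new handler. For context, a minimal sketch of wiring this urls list into a Tornado application, assuming the usual tornado.web.Application pattern (the actual server entry point and port are not part of this commit); the port value doubles as the setting BaseHandler.get_auth reads.

```python
import tornado.ioloop
import tornado.web

from urls import urls  # the route table extended above


def make_app(port=8888):
    # 'port' is stored in application settings because BaseHandler.get_auth
    # splits the request URL on str(self.settings.get('port')) + '/'
    return tornado.web.Application(urls, port=port)


if __name__ == "__main__":
    port = 8888  # assumed; the real port is configured elsewhere in the repo
    make_app(port).listen(port)
    tornado.ioloop.IOLoop.current().start()
```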