Explorar o código

MOD:素材库添加文本,媒体

cxyu hai 3 anos
pai
achega
65733fd7b1
Modificáronse 3 ficheiros con 262 adicións e 16 borrados
  1. 193 9
      data_manage/public_analysis.py
  2. 60 4
      handlers/PublicAnalysisHandler.py
  3. 9 3
      urls.py

+ 193 - 9
data_manage/public_analysis.py

def idea_rank(user_id, start, end, page, page_size, order, order_by, book, channel, is_singleimg,
              is_video, labels, collect, data_type):
    """Return a paginated ranking of ad-creative material from dw_image_cost_day.

    Args:
        user_id: requesting user; super users see all rows, others only rows
            whose `pitcher` matches themselves or a member of their group.
        start, end: inclusive dt bounds (applied only when truthy).
        page, page_size: pagination (1-based page number, rows per page).
        order: 'asc' or 'desc'.
        order_by: front-end sort key, mapped onto a column expression below.
        book, channel: optional equality filters.
        is_singleimg: when falsy, restrict to multi-image creatives
            (image_id contains a comma).
        is_video: when truthy, restrict to video creatives.
        labels, collect: accepted for interface compatibility; label handling
            is not implemented yet (see TODO below).
        data_type: 'all' additionally exposes shared/public rows.

    Returns:
        dict with keys 'records', 'total', 'size', 'current', 'pages'.
    """
    # TODO: move this query to ClickHouse for faster access.
    # Time attribution: dw_image_cost_day is already aggregated per dt, so a
    # single row carries the full attribution for its day — no re-grouping
    # is needed here (weekend aggregation is deferred).

    # Visibility filter: super users see everything; ordinary users are
    # limited to pitchers in their own group (plus their own name).
    if user_id in super_auth():
        op1 = ''
    else:
        user = tuple([i['nick_name'] for i in get_pitcher({'user_id': user_id})]
                     + [get_user_name_by_id(user_id)])
        if len(user) == 1:
            op1 = f" and pitcher ='{user[0]}'"
        else:
            op1 = f" and pitcher in {str(user)}"

    # NOTE(security): these WHERE fragments interpolate request values
    # directly into SQL (f-strings), so `channel`/`book`/`start`/`end` are
    # injectable. Switch to parameterized queries once the DB helper
    # (getLimitData) supports bind parameters.
    op4 = f" and channel='{channel}'" if channel else ''
    op10 = f" and book='{book}'" if book else ''
    # TODO: label handling — map label ids onto their matching plans.

    op11 = f" and image_id like '%,%' " if not is_singleimg else ''
    op12 = f" and is_video" if is_video else ''  # video-only filter

    # Public data: like the material library, 'all' also exposes rows older
    # than 5 days or with cost > 5000; otherwise only the user's own rows.
    # TODO: replace with an owner-based restriction once dw_image_cost_day
    # classifies `owner` from dt/cost at build time.
    op_or1 = f' or (dt<date_add(now(),interval -5 day) or cost>5000) ' if data_type == 'all' else ''

    # Map front-end sort keys (click_times, view_times, consume_amount,
    # click_rate, ...) onto actual column expressions.
    order_by_map = {
        'click_times': 'click_count',
        'view_times': 'view_count',
        'consume_amount': 'cost',
        'click_rate': ('if(click_count=0 or view_count =0 or view_count is null'
                       ' or click_count is null,0,round(click_count / view_count,2)) '),
        # The table has no creation/start columns; fall back to cost.
        'create_time': 'cost',
        'start_date': 'cost',
    }
    order_by = order_by_map.get(order_by, order_by)

    op_order = f" order by {order_by}  {order}" if order_by and order else ''

    # Time window is the base restriction and is always honoured.
    op_time_bigger = f" and dt>='{start}' " if start else ''
    op_time_small = f" and dt<='{end}' " if end else ''

    db = MysqlUtils().dm

    sql = f"""
    select 
    row_number () over() as id,
    book as novels,
    dt as startDate,
    date_format( now(),'%Y-%m-%d') as endDate,
    `type` as channel,
    'all' as dataType,
    owner as creator,
    0 as delFlag,
    False as isCollected,
    '' as labels,
    download_path as downloadPath,
    height,
    width,
    preview_url as media,
    format as mediaFormat,
    size as mediaSize,
    if(click_count=0 or view_count =0 or view_count is null or click_count is null,0,click_count / view_count) as clickRate,
    round(width/if(height,height,1),2) aspect_ratio,
    cost as consumeAmount,
    view_count as viewTimes,
    click_count as clickTimes,
    round((cost/view_count)*1000,2) cpm, 
    round(click_count/view_count,4) ctr,
    round(cost/click_count,2) cpc,
    title,
    description as article,
    date_format( now(),'%Y-%m-%d %H:%i:%S') as upateTime,
    null as updateBy,
    if(is_video,2,1) as type,
    video_bit_rate  as videoBitRate,
    null as videoFirstPage,
    video_length as videoLength,
    use_times as userTimes
    from dw_image_cost_day
    where replace (preview_url,' ,','') !='' 
    and (1=1 {op1}  {op4}   {op10} {op11}  {op12}  {op_or1}) 
    {op_time_bigger} {op_time_small} 
     {op_order} 
    """
    data, total = getLimitData(db, sql, page, page_size)
    # ceil(total / page_size); never report zero pages for an empty result.
    pages = max(1, -(-total // page_size))
    return {'records': data, 'total': total, 'size': page_size, 'current': page, 'pages': pages}
+
+
+def media_rank(user_id, start, end, page, page_size, order, order_by, book, channel, is_singleimg,
+               is_video, labels, collect, data_type):
+    # TODO:修改为clickhouse来进行数据访问
+
+    # 时间的归因-----获取到所有这段时间内的记录,并进行聚合(聚合周末再做,先把数据拿出来)
+    # 认为素材消耗的数据,已经是一条数据的所有归因直接根据dt来就可以
 
     if user_id in super_auth():
         op1 = ''
@@ -650,7 +743,7 @@ def idea_rank(user_id, start, end, page, page_size, order, order_by, book, chann
         order_by = 'cost'
     if order_by == 'click_rate':
         order_by = 'if(click_count=0 or view_count =0 or view_count is null or click_count is null,0,click_count / view_count) '
-    if order_by == 'create_time' or order_by =='start_date':
+    if order_by == 'create_time' or order_by == 'start_date':
         order_by = 'cost'
 
     op_order = f" order by {order_by}  {order}" if order_by and order else ''
@@ -661,12 +754,105 @@ def idea_rank(user_id, start, end, page, page_size, order, order_by, book, chann
 
     db = MysqlUtils().dm
 
-    # TODO:-----label_id 需要对应修改
     sql = f"""
     select 
     row_number () over() as id,
     book as novels,
-    dt as createTime,
+    dt as startDate,
+    date_format( now(),'%Y-%m-%d') as endDate,
+    `type` as channel,
+    'all' as dataType,
+    owner as creator,
+    0 as delFlag,
+    False as isCollected,
+    '' as labels,
+    download_path as downloadPath,
+    height,
+    width,
+    preview_url as content,
+    format as mediaFormat,
+    size as mediaSize,
+    if(click_count=0 or view_count =0 or view_count is null or click_count is null,0,round(click_count / view_count,2)) as clickRate,
+    round(width/if(height,height,1),2) aspect_ratio,
+    cost as consumeAmount,
+    view_count as viewTimes,
+    click_count as clickTimes,
+    round((cost/view_count)*1000,2) cpm, 
+    round(click_count/view_count,4) ctr,
+    round(cost/click_count,2) cpc,
+    date_format( now(),'%Y-%m-%d %H:%i:%S') as upateTime,
+    null as updateBy,
+    if (image_id not like '%,%'  ,true,false) as singleImg,
+    if(is_video,2,1) as type,
+    video_bit_rate  as videoBitRate,
+    null as videoFirstPage,
+    video_length as videoLength,
+    use_times as userTimes
+    from dw_image_cost_day
+    where replace (preview_url,' ,','') !='' 
+    and (1=1 {op1}  {op4}   {op10} {op11}  {op12}  {op_or1}) 
+    {op_time_bigger} {op_time_small} 
+     {op_order} 
+    """
+    print(sql)
+    data, total = getLimitData(db, sql, page, page_size)
+    data = {'records': data, 'total': total, 'size': page_size, 'current': page, 'pages': int(total / page_size) + 1}
+    return data
+
+
+def content_rank(user_id, start, end, page, page_size, order, order_by, book, channel, is_singleimg,
+                 is_video, labels, collect, data_type):
+    # TODO:修改为clickhouse来进行数据访问
+
+    # 时间的归因-----获取到所有这段时间内的记录,并进行聚合(聚合周末再做,先把数据拿出来)
+    # 认为素材消耗的数据,已经是一条数据的所有归因直接根据dt来就可以
+
+    if user_id in super_auth():
+        op1 = ''
+    else:
+        user = tuple([i['nick_name'] for i in get_pitcher({'user_id': user_id})] + [get_user_name_by_id(user_id)])
+        if len(user) == 1:
+            op1 = f" and pitcher ='{user[0]}'"
+        else:
+            op1 = f" and pitcher in {str(user)}"
+
+    op4 = f" and channel='{channel}'" if channel else ''
+
+    op10 = f" and book='{book}'" if book else ''
+    # TODO:添加标签相关处理------id与对应计划进行--对应
+
+    op11 = f" and image_id like '%,%' " if not is_singleimg else ''
+    op12 = f" and is_video" if is_video else ''  # 进行对应修改1----图片
+
+    # 公共数据,和素材库一样,个人只显示个人(小组)数据
+    # TODO:之后op_or1 变化为owner来限制,------dw_image_cost_day 生成时就根据dt,cost来归类owner
+    op_or1 = f' or (dt<date_add(now(),interval -5 day) or cost>5000) ' if data_type == 'all' else ''
+    # clicktimes,view_times,consume_amount,click_rate---------数据进行一一对应
+    if order_by == 'click_times':
+        order_by = 'click_count'
+    if order_by == 'view_times':
+        order_by = 'view_count'
+    if order_by == 'consume_amount':
+        order_by = 'cost'
+    if order_by == 'click_rate':
+        order_by = 'if(click_count=0 or view_count =0 or view_count is null or click_count is null,0,click_count / view_count) '
+    if order_by == 'create_time' or order_by == 'start_date':
+        order_by = 'cost'
+
+    op_order = f" order by {order_by}  {order}" if order_by and order else ''
+
+    # 时间为基底限制,必须遵守
+    op_time_bigger = f" and dt>='{start}' " if start else ''
+    op_time_small = f" and dt<='{end}' " if end else ''
+
+    db = MysqlUtils().dm
+
+    sql = f"""
+    select 
+    row_number () over() as id,
+    book as novels,
+    dt as startDate,
+    date_format( now(),'%Y-%m-%d') as endDate,
     `type` as channel,
     'all' as dataType,
     owner as creator,
@@ -674,15 +860,13 @@ def idea_rank(user_id, start, end, page, page_size, order, order_by, book, chann
     False as isCollected,
     '' as labels,
     download_path as downloadPath,
-    date_format( now(),'%Y-%m-%d %H:%i:%S') as startDate,
-    date_format( now(),'%Y-%m-%d %H:%i:%S') as endDate,
     height,
     width,
     preview_url as media,
     format as mediaFormat,
     size as mediaSize,
-    if(click_count=0 or view_count =0 or view_count is null or click_count is null,0,click_count / view_count) as clickRate,
-    width/if(height,height,1) aspect_ratio,
+    if(click_count=0 or view_count =0 or view_count is null or click_count is null,0,round(click_count / view_count,2)) as clickRate,
+    round(width/if(height,height,1),2) aspect_ratio,
     cost as consumeAmount,
     view_count as viewTimes,
     click_count as clickTimes,

+ 60 - 4
handlers/PublicAnalysisHandler.py

@@ -192,8 +192,8 @@ class AdvertisementIdea(BaseHandler):
             channel = arg.get('channel')  # 渠道-----朋友圈信息流,抖音,广点通等等,,,,,,选项朋友圈信息流,公众平台流量,先#TODO:先暂时放置
             labels = arg.get('labels')  # 标签
             collect = arg.get('collect')  # 是否用标签的数据
-            page = arg.get('pageNum',1)
-            page_size = arg.get('pageSize',20)
+            page = arg.get('pageNum', 1)
+            page_size = arg.get('pageSize', 20)
             order = 'desc' if arg.get('upOrder') else 'asc'  #
             order_by = arg.get('sortRule')  # clicktimes,view_times,consume_amount,click_rate---------数据进行一一对应
             start = arg.get('beginDate', du.getNow())
@@ -203,6 +203,62 @@ class AdvertisementIdea(BaseHandler):
             is_singleimg = arg.get('singleImg')  # 是否是组图-----默认是没有
             user_id = arg.get('userId', '192')  # TODO:测试默认192
 
-            data= idea_rank(user_id, start, end, page, page_size, order, order_by,
-                                    book, channel, is_singleimg, is_video, labels, collect, data_type)
+            data = idea_rank(user_id, start, end, page, page_size, order, order_by,
+                             book, channel, is_singleimg, is_video, labels, collect, data_type)
+            self.write_json_tmp_java(data=data)
+
+
class AdvertisementMedia(BaseHandler):
    """POST handler for the ad-material-library media (image/video) ranking.

    Parses paging/filter arguments from the JSON body and delegates to
    media_rank(); replies via write_json_tmp_java().
    """

    def post(self):
        # Guard clause: reject unauthenticated requests up front.
        if not self._au:
            self.write_fail(msg='auth error')
            return

        du = DateUtils()
        arg = self.get_args()
        data_type = arg.get('dataType', 'all')  # 'private' = own data, 'all' = shared
        channel = arg.get('channel')  # ad channel (Moments feed, Douyin, GDT, ...); TODO: placeholder for now
        labels = arg.get('labels')  # label filter
        collect = arg.get('collect')  # whether to use the labelled/collected data
        page = arg.get('pageNum', 1)
        page_size = arg.get('pageSize', 20)
        order = 'desc' if arg.get('upOrder') else 'asc'
        order_by = arg.get('sortRule')  # click_times / view_times / consume_amount / click_rate
        start = arg.get('beginDate', du.getNow())
        end = arg.get('endDate')
        book = arg.get('novels')
        is_video = arg.get('type')
        is_singleimg = arg.get('singleImg')  # multi-image creative or not (default: absent)
        user_id = arg.get('userId', '192')  # TODO: test default 192

        data = media_rank(user_id, start, end, page, page_size, order, order_by,
                          book, channel, is_singleimg, is_video, labels, collect, data_type)
        self.write_json_tmp_java(data=data)
+
+
class AdvertisementContent(BaseHandler):
    """POST handler for the ad-material-library text/content ranking.

    Reads filter and paging parameters from the request body and forwards
    them to content_rank(); the result is written back in the Java-style
    JSON envelope.
    """

    def post(self):
        if not self._au:
            self.write_fail(msg='auth error')
        else:
            dates = DateUtils()
            params = self.get_args()
            # Positional order must match content_rank's signature.
            result = content_rank(
                params.get('userId', '192'),               # TODO: test default 192
                params.get('beginDate', dates.getNow()),   # start of the dt window
                params.get('endDate'),                     # end of the dt window
                params.get('pageNum', 1),
                params.get('pageSize', 20),
                'desc' if params.get('upOrder') else 'asc',
                params.get('sortRule'),                    # click_times / view_times / consume_amount / click_rate
                params.get('novels'),                      # book filter
                params.get('channel'),                     # ad channel filter; TODO: placeholder for now
                params.get('singleImg'),                   # multi-image creative or not
                params.get('type'),                        # video flag
                params.get('labels'),                      # label filter
                params.get('collect'),                     # use labelled/collected data
                params.get('dataType', 'all'),             # 'private' = own, 'all' = shared
            )
            self.write_json_tmp_java(data=result)

+ 9 - 3
urls.py

@@ -46,16 +46,22 @@ urls = [
     (r'/data/advertisement/database/idea', AdvertisementIdea),
 
     # 广告素材库----图片
-    (r'/data/advertisement/database/media', AdvertisementRank),
+    (r'/data/advertisement/database/media', AdvertisementMedia),
 
     # 广告素材库----文本
-    (r'/data/advertisement/database/content', AdvertisementRank),
+    (r'/data/advertisement/database/content', AdvertisementContent),
 
     # 广告素材库----标签
     (r'/data/advertisement/database/label', AdvertisementRank),
 
     # 广告素材库----标签对应操作-----周一先标签全部无法操作,只能用现有
-    (r'/data/advertisement/database/label/operate', AdvertisementRank),
+    (r'/data/advertisement/database/label/delete', AdvertisementRank),
+
+    # 广告素材库----标签对应操作-----周一先标签全部无法操作,只能用现有
+    (r'/data/advertisement/database/label/add', AdvertisementRank),
+
+    # 广告收藏
+    (r'/data/advertisement/database/collects', AdvertisementRank),
 
     # CRUD
     (r'/operate/channel_group.*', OperateHandler.ChannelGroupHandler),  # 公众号分组设置