# cost_util.py

import json
import requests
import time
import random  # random.randint() is used below to build request nonces
import pymysql
import logging
import pandas
from concurrent.futures import ThreadPoolExecutor
from model.DataBaseUtils import MysqlUtils
from model.ComUtils import *
from model.DateUtils import DateUtils
from PIL import Image
from io import BytesIO
import cv2
import oss2
import os
import ffmpeg

du = DateUtils()
db = MysqlUtils()
max_workers = 10
count = []
t = du.get_n_days(-10)


def get_campaign(account_id, access_token, flag, campaign_ids, dt):
    path = 'campaigns/get'
    fields = ('campaign_id', 'campaign_name', 'configured_status', 'campaign_type', 'promoted_object_type',
              'daily_budget', 'budget_reach_date', 'created_time', 'last_modified_time', 'speed_mode', 'is_deleted')
    url = 'https://api.e.qq.com/v1.3/' + path
    li = []
    page = 1
    while True:
        parameters = {
            'access_token': access_token,
            'timestamp': int(time.time()),
            'nonce': str(time.time()) + str(random.randint(0, 999999)),
            'fields': fields,
            "filtering": [{
                "field": "campaign_id",
                "operator": "IN",
                "values": campaign_ids.split(',')
            }],
            "account_id": account_id,
            "page": page,
            "page_size": 100,
            "is_deleted": False
        }
        for k in parameters:
            if type(parameters[k]) is not str:
                parameters[k] = json.dumps(parameters[k])
        while True:
            r = requests.get(url, params=parameters).json()
            code = r['code']
            if code == 11017:  # frequency limit: back off for a minute and retry
                time.sleep(61)
            else:
                break
        # logging.info(r)
        total_page = r['data']['page_info']['total_page']
        if page > total_page:
            break
        else:
            page += 1
        if r.get("data"):
            for i in r['data']['list']:
                li.append((str(i['campaign_id']), i['campaign_name'], i['configured_status'], i['campaign_type'],
                           i['promoted_object_type'], i['daily_budget'], i.get('budget_reach_date'),
                           DateUtils.stamp_to_str(i['created_time']),
                           DateUtils.stamp_to_str(i['last_modified_time']), i.get('speed_mode'), i.get('is_deleted'),
                           account_id, flag, dt))
    # logging.info(li)
    """MP accounts do not return speed_mode, is_deleted or budget_reach_date."""
    if len(li) > 0:
        logging.info(f"{account_id} campaigns fetched: {len(li)}")
        sql = "replace into campaign_info values(%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s)"
        db.quchen_text.executeMany(sql, li)
    db.close()


def get_adcreatives(account_id, access_token, flag, adc_ids, dt):  # fetch ad creatives
    # API: https://developers.e.qq.com/docs/api/adsmanagement/adcreatives/adcreatives_get?version=1.3
    url = 'https://api.e.qq.com/v1.1/adcreatives/get'
    li = []
    page = 1
    logging.info(f"{account_id} start fetching creatives")
    while True:
        parameters = {
            'access_token': access_token,
            'timestamp': int(time.time()),
            'nonce': str(time.time()) + str(random.randint(0, 999999)),
            'fields': ('campaign_id', 'adcreative_id', 'adcreative_name', 'adcreative_elements', 'promoted_object_type',
                       'page_type',
                       'page_spec', 'link_page_spec', 'universal_link_url', 'promoted_object_id', 'site_set'),
            "filtering": [{
                "field": "adcreative_id",
                "operator": "IN",
                "values": adc_ids.split(',')
            }],
            "account_id": account_id,
            "page": page,
            "page_size": 100,
            "is_deleted": False
        }
        for k in parameters:
            if type(parameters[k]) is not str:
                parameters[k] = json.dumps(parameters[k])
        while True:
            h = requests.get(url, params=parameters, timeout=1)
            # logging.info(h.json())
            if h.status_code == 200:
                r = h.json()
                # logging.info(r)
                break
            else:
                time.sleep(1)
                logging.info("request failed, retrying in 1s")
        logging.info(f"{account_id} creatives page fetched")
        if 'data' in r.keys():
            is_video = 0
            for i in r['data']['list']:
                # logging.info(i)
                if flag == 'MP':
                    if len(i['adcreative_elements']) > 0:
                        d = i['adcreative_elements']
                        title = d.get('title', '')
                        description = d.get('description', '')
                        if 'image' in d.keys():
                            image = d.get('image', '')
                        elif 'image_list' in d.keys():
                            image = ','.join(d.get('image_list'))
                        elif 'video' in d.keys():
                            image = d['video']
                            is_video = 1
                        else:
                            image = ''
                    else:
                        title = image = description = ''
                    li.append((
                        i['adcreative_id'], i['adcreative_name'], i['campaign_id'], image, title,
                        i.get('promoted_object_type', ''), i.get('page_type', ''),
                        i['page_spec'].get('page_id', ''), i.get('promoted_object_id', ''),
                        '', description, 'MP', account_id, dt, is_video
                    ))
                else:
                    if len(i['adcreative_elements']) > 0:
                        d = i['adcreative_elements']
                        if 'image' in d.keys():
                            image = d['image']
                        elif 'element_story' in d.keys():
                            image = ','.join([x['image'] for x in d['element_story']])
                        else:
                            image = ''
                        title = d.get('title', '')
                        description = d.get('description', '')
                    else:
                        image = title = description = ''
                    li.append(
                        (
                            i['adcreative_id'], i['adcreative_name'], i['campaign_id'], image, title,
                            i.get('promoted_object_type', ''), i.get('page_type', ''),
                            i['page_spec'].get('page_id', ''), i.get('promoted_object_id', ''),
                            ','.join(i['site_set']), description, 'GDT', account_id, dt, is_video
                        )
                    )
            total_page = r['data']['page_info']['total_page']
            if total_page > page:
                page += 1
            else:
                break
        else:
            break
    logging.info(f"{account_id} finished parsing creatives")
    logging.info(f"{account_id} finished fetching creatives")
    if len(li) > 0:
        logging.info(f"{account_id} creatives: " + str(len(li)))
        sql = 'replace into adcreative_info values(%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s) '
        db.quchen_text.executeMany(sql, li)


def images_info_get(account_id, access_token, image_ids):  # fetch image metadata
    # API: https://developers.e.qq.com/docs/api/business_assets/image/images_get?version=1.3
    def get_image_info(preview_url, err_num=0):
        # Probe the preview URL and return the image format (e.g. JPEG/PNG); retries up to 5 times.
        try:
            if not preview_url:
                return None
            rsp = requests.get(preview_url, timeout=5)
            # 1. load the image into memory
            im = Image.open(BytesIO(rsp.content))
            # 2. read its format
            image_format = im.format
            # image_size = len(rsp.content)
            return image_format
        except:
            if err_num < 5:
                return get_image_info(preview_url, err_num=err_num + 1)

    # 1. work out which image_ids are not yet in the database
    id_content = ','.join([''' '{}' '''.format(i) for i in image_ids.split(',')])
    id_content = id_content[:-1]
    sql = ''' select image_id from image_info vi 
              where image_id in ({});'''.format(id_content)
    rs = db.quchen_text.getData(sql)
    id_all_set = set([i for i in image_ids.split(',') if len(i) > 0])
    id_have = set([i[0] for i in rs])
    image_ids = id_all_set - id_have
    fields = ('image_id', 'width', 'height', 'file_size', 'signature', 'preview_url')
    interface = 'images/get'
    url = 'https://api.e.qq.com/v1.3/' + interface
    page = 1
    li = []
    for image_id in image_ids:
        if len(image_id) < 1:
            continue
        while True:
            common_parameters = {
                'access_token': access_token,
                'timestamp': int(time.time()),
                'nonce': str(time.time()) + str(random.randint(0, 999999)),
                'fields': fields
            }
            parameters = {
                "account_id": account_id,
                "filtering": [{
                    "field": "image_id",
                    "operator": "IN",
                    "values": [image_id]
                }],
                "page": page,
                "page_size": 100
            }
            parameters.update(common_parameters)
            for k in parameters:
                if type(parameters[k]) is not str:
                    parameters[k] = json.dumps(parameters[k])
            while True:
                h = requests.get(url, params=parameters)
                # logging.info(h.text)
                if h.status_code == 200:
                    r = h.json()
                    break
                else:
                    time.sleep(1)
                    logging.info("request failed, retrying in 1s")
            if 'data' in r.keys():
                li.extend(r['data']['list'])
            total_page = r['data']['page_info']['total_page']
            if total_page > page:
                page += 1
            else:
                break
    data = []
    for i in li:
        image_format = get_image_info(i['preview_url'])
        data.append(
            (i['image_id'], i['width'], i['height'], i['signature'], i['preview_url'], i['file_size'], image_format))
    logging.info(f"{account_id} new images: " + str(len(li)))
    if len(li) > 0:
        sql = "insert IGNORE into image_info (image_id,width,height,signature,preview_url,size,type) value (%s,%s,%s,%s,%s,%s,%s)"
        db.quchen_text.executeMany(sql, data)
    db.close()


def video_info_get(account_id, access_token, image_ids):  # fetch video metadata
    # API: https://developers.e.qq.com/docs/api/business_assets/video/videos_get?version=1.3
    def get_video_info(video_url, err_num=0):
        # Download the video to a temp file and read duration / byte rate via OpenCV; retries up to 5 times.
        try:
            if not video_url:
                return None, None
            rsp = requests.get(video_url)
            with open('/tmp/aa.mp4', 'wb') as f:
                f.write(rsp.content)
            video_size = len(rsp.content)
            cap = cv2.VideoCapture('/tmp/aa.mp4')  # open the downloaded video
            if cap.isOpened():
                rate = cap.get(5)       # CAP_PROP_FPS
                frame_num = cap.get(7)  # CAP_PROP_FRAME_COUNT
                duration = frame_num / rate
                byte_rate = (video_size / (duration / 8))
                return duration, byte_rate
            return None, None
        except:
            if err_num < 5:
                return get_video_info(video_url, err_num=err_num + 1)
            return None, None

    # 1. check the database to see which video_ids still need to be fetched
    id_content = ','.join([''' '{}' '''.format(i) for i in image_ids.split(',')])
    id_content = id_content[:-1]
    sql = ''' select video_id from video_info vi 
              where video_id in ({});'''.format(id_content)
    rs = db.quchen_text.getData(sql)
    id_all_set = set([i for i in image_ids.split(',') if len(i) > 0])
    id_have = set([i[0] for i in rs])
    image_ids = id_all_set - id_have
    # 2. fetch metadata for the missing ids
    fields = ('video_id', 'width', 'height', 'file_size', 'signature', 'preview_url')
    interface = 'videos/get'
    url = 'https://api.e.qq.com/v1.3/' + interface
    page = 1
    li = []
    for image_id in image_ids:
        if len(image_id) < 1:
            continue
        while True:
            common_parameters = {
                'access_token': access_token,
                'timestamp': int(time.time()),
                'nonce': str(time.time()) + str(random.randint(0, 999999)),
                'fields': fields
            }
            parameters = {
                "account_id": account_id,
                "filtering": [{
                    "field": "media_id",
                    "operator": "IN",
                    "values": [image_id]
                }],
                "page": page,
                "page_size": 100
            }
            parameters.update(common_parameters)
            for k in parameters:
                if type(parameters[k]) is not str:
                    parameters[k] = json.dumps(parameters[k])
            while True:
                h = requests.get(url, params=parameters)
                # logging.info(h.text)
                if h.status_code == 200:
                    r = h.json()
                    break
                else:
                    time.sleep(1)
                    logging.info("request failed, retrying in 1s")
            if 'data' in r.keys():
                li.extend(r['data']['list'])
            total_page = r['data']['page_info']['total_page']
            if total_page > page:
                page += 1
            else:
                break
    data = []
    for i in li:
        duration, byte_rate = get_video_info(i['preview_url'])
        data.append((i['video_id'], i['width'], i['height'],
                     i['signature'], i['preview_url'], i['file_size'],
                     'mp4', byte_rate, duration))
    logging.info(f"{account_id} new videos: " + str(len(li)))
    if len(li) > 0:
        sql = '''insert IGNORE into video_info (video_id,width,height,
                 signature,preview_url,size,type,byte_rate,video_length) 
                 value (%s,%s,%s,
                 %s,%s,%s,%s,%s,%s)'''
        db.quchen_text.executeMany(sql, data)
    db.close()


def ad_info():
    accounts = db.quchen_text.getData("""
        select account_id,access_token,name channel,'GDT' type from advertiser_qq where name !='' or name is not null 
        union 
        select account_id,access_token,name channel,'MP' type from advertiser_vx where name !='' or name is not null
    """)
    total_data = []
    executor = ThreadPoolExecutor(max_workers=max_workers)
    for i in accounts:
        # logging.info(i)
        account_id = i[0]
        access_token = i[1]
        type = i[3]
        # NOTE: this call does not match the current get_ad_info(account_id, access_token, flag, ad_ids, dt) signature
        executor.submit(get_ad_info, account_id, access_token, type, total_data)
    executor.shutdown()
    logging.info(len(total_data))
    if len(total_data) > 0:
        sql = "replace into ad_info values(%s,%s,%s,%s,%s,%s,%s) "
        db.quchen_text.executeMany(sql, total_data)


"""Fetch basic ad info."""
def get_ad_info(account_id, access_token, flag, ad_ids, dt):
    # API: https://developers.e.qq.com/docs/apilist/ads/ad?version=1.3#a3
    path = 'ads/get'
    fields = ('ad_id', 'ad_name', 'adcreative_id', 'adgroup_id', 'campaign_id')
    url = 'https://api.e.qq.com/v1.3/' + path
    li = []
    page = 1
    while True:
        parameters = {
            'access_token': access_token,
            'timestamp': int(time.time()),
            'nonce': str(time.time()) + str(random.randint(0, 999999)),
            'fields': fields,
            "filtering": [{
                "field": "ad_id",
                "operator": "IN",
                "values": ad_ids.split(',')
            }],
            "account_id": account_id,
            "page": page,
            "page_size": 100,
            "is_deleted": False
        }
        for k in parameters:
            if type(parameters[k]) is not str:
                parameters[k] = json.dumps(parameters[k])
        while True:
            r = requests.get(url, params=parameters).json()
            code = r['code']
            if code == 11017:  # frequency limit: back off for a minute and retry
                time.sleep(61)
            else:
                break
        # logging.info(r)
        total_page = r['data']['page_info']['total_page']
        if page > total_page:
            break
        else:
            page += 1
        if r.get("data"):
            for i in r['data']['list']:
                li.append((str(i['ad_id']), i['ad_name'], i['adcreative_id'], i['campaign_id'], i['adgroup_id'],
                           account_id, flag, dt))
    if len(li) > 0:
        logging.info(f"{account_id} ads fetched: {len(li)}")
        sql = "replace into ad_info values(%s,%s,%s,%s,%s,%s,%s,%s) "
        db.quchen_text.executeMany(sql, li)
    db.close()


def get_ad_cost_day(account_id, access_token, flag, st, et):
    if flag == 'MP':
        ad_cost_day_mp(account_id, access_token, st, et)
    else:
        ad_cost_day_gdt(account_id, access_token, st, et)


def ad_cost_day_gdt(account_id, access_token, st, et):
    # API: https://developers.e.qq.com/docs/api/insights/ad_insights/daily_reports_get?version=1.3
    url = 'https://api.e.qq.com/v1.3/daily_reports/get'
    fields = (
        'date', 'ad_id', 'adgroup_id', 'cost', 'view_count', 'ctr', 'follow_count', 'web_order_count', 'order_amount')
    li = []
    page = 1
    while True:
        parameters = {
            'access_token': access_token,
            'timestamp': int(time.time()),
            'nonce': str(time.time()) + str(random.randint(0, 999999)),
            'fields': fields,
            "account_id": account_id,
            "group_by": ['ad_id', 'date'],
            "level": 'REPORT_LEVEL_AD',
            "page": page,
            "page_size": 1000,
            "date_range": {
                "start_date": st,
                "end_date": et
            }
        }
        for k in parameters:
            if type(parameters[k]) is not str:
                parameters[k] = json.dumps(parameters[k])
        while True:
            r = requests.get(url, params=parameters)
            r = r.json()
            # logging.info(r)
            code = r['code']
            if code == 11017:  # frequency limit: back off for a minute and retry
                time.sleep(61)
            else:
                break
        if r.get("data"):
            for i in r['data']['list']:
                if i['cost'] > 0:
                    li.append(
                        (
                            i['date'], i['ad_id'], i['adgroup_id'], i['cost'] / 100, i['view_count'],
                            i['ctr'] * i['view_count'],  # click count derived from ctr * views
                            i['follow_count'], i['web_order_count'], i['order_amount'] / 100, account_id, 'GDT'
                        )
                    )
        total_page = r['data']['page_info']['total_page']
        if page >= total_page:
            break
        else:
            page += 1
    # logging.info(li)
    if len(li) > 0:
        logging.info(f"{account_id} have ad cost :{len(li)} ")
        db.quchen_text.executeMany('replace into ad_cost_day values(%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s)', li)
    db.close()


def ad_cost_day_mp(account_id, access_token, st, et):
    # API: https://developers.e.qq.com/docs/api/insights/ad_insights/daily_reports_get?version=1.3
    url = 'https://api.e.qq.com/v1.3/daily_reports/get'
    fields = ('date', 'ad_id', 'adgroup_id', 'cost', 'view_count', 'valid_click_count', 'official_account_follow_count',
              'order_count', 'order_amount')
    li = []
    page = 1
    while True:
        parameters = {
            'access_token': access_token,
            'timestamp': int(time.time()),
            'nonce': str(time.time()) + str(random.randint(0, 999999)),
            'fields': fields,
            "account_id": account_id,
            "level": 'REPORT_LEVEL_AD_WECHAT',
            "page": page,
            "page_size": 1000,
            "date_range": {
                "start_date": st,
                "end_date": et
            }
        }
        for k in parameters:
            if type(parameters[k]) is not str:
                parameters[k] = json.dumps(parameters[k])
        while True:
            r = requests.get(url, params=parameters)
            r = r.json()
            # logging.info(r['data']['list'])
            # import pandas as pd
            # logging.info(pd.DataFrame(r['data']['list']))
            code = r['code']
            if code == 11017:  # frequency limit: back off for a minute and retry
                time.sleep(61)
            else:
                break
        if r.get("data"):
            for i in r['data']['list']:
                if i['cost'] > 0:
                    li.append(
                        (
                            i['date'], i['ad_id'], i['adgroup_id'],
                            i['cost'] / 100, i['view_count'],
                            i['valid_click_count'],
                            i['official_account_follow_count'],
                            i['order_count'], i['order_amount'] / 100, account_id,
                            'MP'
                        )
                    )
        total_page = r['data']['page_info']['total_page']
        if page >= total_page:
            break
        else:
            page += 1
    # logging.info(li)
    # exit()
    if len(li) > 0:
        # TODO: confirm what adgroup_id / campaign_id are used for
        # Reconcile with the ad-level data: rows sharing the same (date, ad_id, account_id, flag)
        # are summed column-wise and their adgroup_ids are merged into one comma-separated string.
        li_df = pandas.DataFrame(li)
        li_df_g = li_df.groupby([0, 1, 9, 10])
        li_new = []
        adgroup_id_dict = {}
        for index, group in li_df_g:
            adgroup_id_dict[index] = ','.join([str(i) for i in group[2].tolist()])
        for index, row in li_df_g.agg('sum').iterrows():
            new_row = row.tolist()
            new_row = list(index[0:2]) + new_row + list(index[2:])
            new_row[2] = adgroup_id_dict[index]
            li_new.append(tuple(new_row))
        logging.info(f"{account_id} have ad cost :{len(li_new)} ")
        db.quchen_text.executeMany('replace into ad_cost_day values(%s,%s,%s,%s,%s,'
                                   '%s,%s,%s,%s,%s,%s)', li_new)
    db.close()


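# The groupby/merge above is the least obvious part of ad_cost_day_mp, so here is a
# minimal, self-contained sketch of the same pattern using hypothetical toy rows
# (not real report data; the function itself is never called by the pipeline).
# Rows sharing (date, ad_id, account_id, flag) are summed while their adgroup_ids
# are concatenated into a single comma-separated string, mirroring li_df_g / adgroup_id_dict.
def _demo_mp_cost_merge():
    toy = [
        # date, ad_id, adgroup_id, cost, views, clicks, follows, orders, amount, account_id, flag
        ('2021-04-09', 111, 1001, 1.0, 10, 2, 1, 1, 5.0, 19206910, 'MP'),
        ('2021-04-09', 111, 1002, 2.0, 20, 4, 2, 2, 6.0, 19206910, 'MP'),
    ]
    df = pandas.DataFrame(toy)
    grouped = df.groupby([0, 1, 9, 10])
    adgroup_ids = {key: ','.join(str(v) for v in g[2].tolist()) for key, g in grouped}
    merged = []
    for key, row in grouped.agg('sum').iterrows():
        rec = list(key[0:2]) + row.tolist() + list(key[2:])
        rec[2] = adgroup_ids[key]
        merged.append(tuple(rec))
    # merged -> [('2021-04-09', 111, '1001,1002', 3.0, 30, 6, 3, 3, 11.0, 19206910, 'MP')]
    return merged

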
def daily_reports_get(access_token, account_id, level, start_date, end_date, fields):  # fetch MP (WeChat) campaign daily report data
    interface = 'daily_reports/get'
    url = 'https://api.e.qq.com/v1.3/' + interface
    common_parameters = {
        'access_token': access_token,
        'timestamp': int(time.time()),
        'nonce': str(time.time()) + str(random.randint(0, 999999)),
        'fields': fields
    }
    parameters = {
        "account_id": account_id,
        "level": level,
        "date_range":
            {
                "start_date": start_date,
                "end_date": end_date
            },
        "page": 1,
        "page_size": 1000,
        "fields":
            [
            ]
    }
    parameters.update(common_parameters)  # common_parameters wins, so 'fields' ends up as the caller-supplied tuple
    for k in parameters:
        if type(parameters[k]) is not str:
            parameters[k] = json.dumps(parameters[k])
    while True:
        r = requests.get(url, params=parameters)
        if r.status_code == 200:
            break
        else:
            time.sleep(1)
            logging.info("request failed, retrying in 1s")
    return r.json()


def daily_qq_reports_get(access_token, account_id, compaign_id, level, start_date, end_date, fields):  # fetch GDT campaign daily report data
    interface = 'daily_reports/get'
    url = 'https://api.e.qq.com/v1.1/' + interface
    common_parameters = {
        'access_token': access_token,
        'timestamp': int(time.time()),
        'nonce': str(time.time()) + str(random.randint(0, 999999)),
        'fields': fields
    }
    parameters = {
        "account_id": account_id,
        "filtering":
            [
                {
                    "field": "campaign_id",
                    "operator": "EQUALS",
                    "values":
                        [
                            compaign_id
                        ]
                }
            ],
        "level": level,
        "date_range":
            {
                "start_date": start_date,
                "end_date": end_date
            },
        "page": 1,
        "page_size": 1000,
        "fields":
            [
            ]
    }
    parameters.update(common_parameters)
    for k in parameters:
        if type(parameters[k]) is not str:
            parameters[k] = json.dumps(parameters[k])
    r = requests.get(url, params=parameters)
    return r.json()


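# Hypothetical usage sketch for the two report helpers above. The level and field
# names below are illustrative only; which levels and fields are actually accepted
# depends on the account and the daily_reports interface version.
# mp_report = daily_reports_get(access_token, account_id, 'REPORT_LEVEL_AD_WECHAT',
#                               '2021-04-09', '2021-04-09', ('date', 'cost', 'view_count'))
# gdt_report = daily_qq_reports_get(access_token, account_id, campaign_id, 'REPORT_LEVEL_CAMPAIGN',
#                                   '2021-04-09', '2021-04-09', ('date', 'cost', 'view_count'))

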
def mysql_insert_adcreative(data):
    db = pymysql.connect(host='rm-bp1c9cj79872tx3aaro.mysql.rds.aliyuncs.com', user='superc',
                         password='Cc719199895', database='quchen_text')
    cursor = db.cursor()
    sql = 'replace into adcreative (campaign_id,adcreative_id,adcreative_name,image_id,title,promoted_object_type,page_type,page_id,link_page_id,promoted_object_id) values (%s,%s,%s,%s,%s,%s,%s,%s,%s,%s)'
    try:
        cursor.executemany(sql, data)
        db.commit()
        logging.info('insert [adcreative] ' + str(len(data)))
    except:
        db.rollback()
        logging.info('insert [adcreative] failed')


if __name__ == '__main__':
    account_id = 19206910
    access_token = '89079ccc8db047b078a0108e36a7e276'
    #
    account_id2 = 14709511
    access_token2 = 'e87f7b6f860eaeef086ddcc9c3614678'
    get_ad_cost_day(account_id, access_token, 'MP', '2021-04-09', '2021-04-09')
    # get_adcreatives(account_id,access_token,'MP','3187867673','2021-04-09')
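    # A hedged sketch (assuming account_id2 / access_token2 belong to a GDT account) of
    # pulling both accounts' daily cost concurrently, mirroring the ThreadPoolExecutor
    # pattern used in ad_info() above:
    # with ThreadPoolExecutor(max_workers=max_workers) as pool:
    #     pool.submit(get_ad_cost_day, account_id, access_token, 'MP', '2021-04-09', '2021-04-09')
    #     pool.submit(get_ad_cost_day, account_id2, access_token2, 'GDT', '2021-04-09', '2021-04-09')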