# cost_util.py

import json
import random  # used for the request nonce below; previously only reachable via the wildcard import
import requests
import time
import pymysql
import logging
import pandas
from concurrent.futures import ThreadPoolExecutor
from model.DataBaseUtils import MysqlUtils
from model.ComUtils import *
from model.DateUtils import DateUtils
from PIL import Image
from io import BytesIO
from data_processing import video_processing

du = DateUtils()
db = MysqlUtils()
max_workers = 10
count = []
t = du.get_n_days(-10)
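
# The fetch functions below all repeat the same preparation step: every non-string value in the
# request parameters is JSON-encoded so it survives the query string. A minimal sketch of a helper
# that could factor that pattern out (the helper name is illustrative, not part of the original module):
def _serialize_params(parameters):
    """Return a copy of `parameters` with every non-str value JSON-encoded."""
    return {k: (v if isinstance(v, str) else json.dumps(v)) for k, v in parameters.items()}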

def get_campaign(account_id, access_token, flag, campaign_ids, dt):
    path = 'campaigns/get'
    fields = ('campaign_id', 'campaign_name', 'configured_status', 'campaign_type', 'promoted_object_type',
              'daily_budget', 'budget_reach_date', 'created_time', 'last_modified_time', 'speed_mode', 'is_deleted')
    url = 'https://api.e.qq.com/v1.3/' + path
    li = []
    page = 1
    while True:
        parameters = {
            'access_token': access_token,
            'timestamp': int(time.time()),
            'nonce': str(time.time()) + str(random.randint(0, 999999)),
            'fields': fields,
            "filtering": [{
                "field": "campaign_id",
                "operator": "IN",
                "values": campaign_ids.split(',')
            }],
            "account_id": account_id,
            "page": page,
            "page_size": 100,
            "is_deleted": False
        }
        for k in parameters:
            if type(parameters[k]) is not str:
                parameters[k] = json.dumps(parameters[k])
        while True:
            r = requests.get(url, params=parameters).json()
            code = r['code']
            if code == 11017:  # request-frequency limit: back off and retry
                time.sleep(61)
            else:
                break
        # logging.info(r)
        total_page = r['data']['page_info']['total_page']
        # note: page is advanced before the append below, so one extra (empty) page is requested at the end
        if page > total_page:
            break
        else:
            page += 1
        if r.get("data"):
            for i in r['data']['list']:
                li.append((str(i['campaign_id']), i['campaign_name'], i['configured_status'], i['campaign_type'],
                           i['promoted_object_type'], i['daily_budget'], i.get('budget_reach_date'),
                           DateUtils.stamp_to_str(i['created_time']),
                           DateUtils.stamp_to_str(i['last_modified_time']), i.get('speed_mode'), i.get('is_deleted'),
                           account_id, flag, dt))
    # logging.info(li)
    # MP accounts do not return speed_mode, is_deleted or budget_reach_date
    if len(li) > 0:
        logging.info(f"{account_id} has campaigns: " + str(len(li)))
        sql = "replace into campaign_info values(%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s)"
        db.quchen_text.executeMany(sql, li)
    db.close()
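
# get_campaign (above) and get_ad_info (below) share the same fetch pattern: page through results
# and back off for 61s whenever the API returns code 11017 (request-frequency limit). A minimal
# standalone sketch of that pattern; `fetch_page` is an illustrative callable standing in for the
# real request, not part of the original module:
def _paged_fetch(fetch_page):
    """Yield the 'list' entries of every page; `fetch_page(page)` must return the parsed JSON response."""
    page = 1
    while True:
        r = fetch_page(page)
        while r.get('code') == 11017:  # rate limited: wait, then retry the same page
            time.sleep(61)
            r = fetch_page(page)
        data = r.get('data') or {}
        for item in data.get('list', []):
            yield item
        if page >= data.get('page_info', {}).get('total_page', 0):
            break
        page += 1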

def get_adcreatives(account_id, access_token, flag, adc_ids, dt):  # fetch ad creatives
    # API: https://developers.e.qq.com/docs/api/adsmanagement/adcreatives/adcreatives_get?version=1.3
    url = 'https://api.e.qq.com/v1.1/adcreatives/get'
    li = []
    page = 1
    logging.info(f"{account_id} start fetching adcreatives")
    while True:
        parameters = {
            'access_token': access_token,
            'timestamp': int(time.time()),
            'nonce': str(time.time()) + str(random.randint(0, 999999)),
            'fields': ('campaign_id', 'adcreative_id', 'adcreative_name', 'adcreative_elements', 'promoted_object_type',
                       'page_type',
                       'page_spec', 'link_page_spec', 'universal_link_url', 'promoted_object_id', 'site_set'),
            "filtering": [{
                "field": "adcreative_id",
                "operator": "IN",
                "values": adc_ids.split(',')
            }],
            "account_id": account_id,
            "page": page,
            "page_size": 100,
            "is_deleted": False
        }
        for k in parameters:
            if type(parameters[k]) is not str:
                parameters[k] = json.dumps(parameters[k])
        while True:
            try:
                h = requests.get(url, params=parameters, timeout=1)
            except requests.RequestException:
                # the 1s timeout can raise; retry instead of crashing the loop
                time.sleep(1)
                logging.info("fetch failed, waiting 1s")
                continue
            # logging.info(h.json())
            if h.status_code == 200:
                r = h.json()
                # logging.info(r)
                break
            else:
                time.sleep(1)
                logging.info("fetch failed, waiting 1s")
        logging.info(f"{account_id} adcreatives page fetched")
        if 'data' in r.keys():
            for i in r['data']['list']:
                # logging.info(i)
                is_video = 0  # reset per creative (originally reset once per page, letting the flag leak onto later creatives)
                description = ''
                if flag == 'MP':
                    if len(i['adcreative_elements']) > 0:
                        d = i['adcreative_elements']
                        title = d.get('title', '')
                        description = d.get('description', '')
                        if 'image' in d.keys():
                            image = d.get('image', '')
                        elif 'image_list' in d.keys():
                            image = ','.join(d.get('image_list'))
                        elif 'video' in d.keys():
                            image = d['video']
                            is_video = 1
                        else:
                            image = ''
                    else:
                        title = image = ''
                    li.append((
                        i['adcreative_id'], i['adcreative_name'], i['campaign_id'], image, title,
                        i.get('promoted_object_type', ''), i.get('page_type', ''),
                        i['page_spec'].get('page_id', ''), i.get('promoted_object_id', ''),
                        '', description, 'MP', account_id, dt, is_video
                    ))
                else:
                    if len(i['adcreative_elements']) > 0:
                        d = i['adcreative_elements']
                        if 'image' in d.keys():
                            image = d['image']
                        elif 'element_story' in d.keys():
                            image = ','.join([x['image'] for x in d['element_story']])
                        else:
                            image = ''
                        title = d.get('title', '')
                        description = d.get('description', '')
                    else:
                        image = title = description = ''
                    li.append(
                        (
                            i['adcreative_id'], i['adcreative_name'], i['campaign_id'], image, title,
                            i.get('promoted_object_type', ''), i.get('page_type', ''),
                            i['page_spec'].get('page_id', ''), i.get('promoted_object_id', ''),
                            ','.join(i['site_set']), description, 'GDT', account_id, dt, is_video
                        )
                    )
            total_page = r['data']['page_info']['total_page']
            if total_page > page:
                page += 1
            else:
                break
        else:
            break
    logging.info(f"{account_id} adcreative parsing finished")
    logging.info(f"{account_id} done fetching adcreatives")
    if len(li) > 0:
        logging.info(f"{account_id} has adcreatives: " + str(len(li)))
        sql = 'replace into adcreative_info values(%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s)'
        db.quchen_text.executeMany(sql, li)
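
# For reference, the MP branch above maps an adcreative_elements dict to (image, is_video) roughly
# as follows; the dicts are hypothetical examples, not real API output:
#   {'title': 't', 'description': 'd', 'video': 'v123'}  -> image = 'v123',   is_video = 1
#   {'title': 't', 'image_list': ['i1', 'i2']}           -> image = 'i1,i2',  is_video = 0
#   {'title': 't', 'image': 'i1'}                        -> image = 'i1',     is_video = 0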

def images_info_get(account_id, access_token, image_ids):  # fetch image info
    # API: https://developers.e.qq.com/docs/api/business_assets/image/images_get?version=1.3
    def get_image_info(preview_url, err_num=0):  # err_num originally defaulted to 5, which disabled the retry below
        try:
            if not preview_url:
                return None
            rsp = requests.get(preview_url, timeout=5)
            # 1. load the image bytes into memory
            im = Image.open(BytesIO(rsp.content))
            # 2. read the image attributes
            image_format = im.format
            # image_size = len(rsp.content)
            return image_format
        except:
            if err_num < 5:
                return get_image_info(preview_url, err_num=err_num + 1)

    # 1. skip image_ids that are already stored
    id_content = ','.join([''' '{}' '''.format(i) for i in image_ids.split(',')])
    id_content = id_content[:-1]
    sql = ''' select image_id from image_info vi
              where image_id in ({});'''.format(id_content)
    rs = db.quchen_text.getData(sql)
    id_all_set = set([i for i in image_ids.split(',') if len(i) > 0])
    id_have = set([i[0] for i in rs])
    image_ids = id_all_set - id_have

    fields = ('image_id', 'width', 'height', 'file_size', 'signature', 'preview_url')
    interface = 'images/get'
    url = 'https://api.e.qq.com/v1.3/' + interface
    page = 1
    li = []
    for image_id in image_ids:
        if len(image_id) < 1:
            continue
        while True:
            common_parameters = {
                'access_token': access_token,
                'timestamp': int(time.time()),
                'nonce': str(time.time()) + str(random.randint(0, 999999)),
                'fields': fields
            }
            parameters = {
                "account_id": account_id,
                "filtering": [{
                    "field": "image_id",
                    "operator": "IN",
                    "values": [image_id]
                }],
                "page": page,
                "page_size": 100
            }
            parameters.update(common_parameters)
            for k in parameters:
                if type(parameters[k]) is not str:
                    parameters[k] = json.dumps(parameters[k])
            while True:
                h = requests.get(url, params=parameters)
                # logging.info(h.text)
                if h.status_code == 200:
                    r = h.json()
                    break
                else:
                    time.sleep(1)
                    logging.info("request error, waiting 1s..")
            if 'data' in r.keys():
                li.extend(r['data']['list'])
            total_page = r['data']['page_info']['total_page']
            if total_page > page:
                page += 1
            else:
                break
    data = []
    for i in li:
        image_format = get_image_info(i['preview_url'])
        data.append(
            (i['image_id'], i['width'], i['height'], i['signature'], i['preview_url'], i['file_size'], image_format))
    logging.info(f"{account_id} new images: " + str(len(li)))
    if len(li) > 0:
        sql = "insert IGNORE into image_info (image_id,width,height,signature,preview_url,size,type) value (%s,%s,%s,%s,%s,%s,%s)"
        db.quchen_text.executeMany(sql, data)
    db.close()

def video_info_get(account_id, access_token, image_ids):  # fetch video info
    # API: https://developers.e.qq.com/docs/api/business_assets/video/videos_get?version=1.3
    def get_video_info(video_url, signature, err_num=0):
        try:
            if not video_url:
                return None, None, None, None
            cloud_filepath, metadata_title, video_size, duration, bit_rate, width, height, format = video_processing.change_format(
                video_url, signature)
            return duration, bit_rate, metadata_title, cloud_filepath
        except Exception as e:
            logging.error(str(e))
            if err_num < 5:
                return get_video_info(video_url, signature, err_num=err_num + 1)
            else:
                return None, None, None, None

    # 1. query the database to see which ids still need fetching
    id_content = ','.join([''' '{}' '''.format(i) for i in image_ids.split(',')])
    id_content = id_content[:-1]
    sql = ''' select video_id from video_info vi
              where video_id in ({});'''.format(id_content)
    rs = db.quchen_text.getData(sql)
    id_all_set = set([i for i in image_ids.split(',') if len(i) > 0])
    id_have = set([i[0] for i in rs])
    image_ids = id_all_set - id_have

    # 2. fetch the remaining ids from the API
    fields = ('video_id', 'width', 'height', 'file_size', 'signature', 'preview_url')
    interface = 'videos/get'
    url = 'https://api.e.qq.com/v1.3/' + interface
    page = 1
    li = []
    for image_id in image_ids:
        if len(image_id) < 1:
            continue
        while True:
            common_parameters = {
                'access_token': access_token,
                'timestamp': int(time.time()),
                'nonce': str(time.time()) + str(random.randint(0, 999999)),
                'fields': fields
            }
            parameters = {
                "account_id": account_id,
                "filtering": [{
                    "field": "media_id",
                    "operator": "IN",
                    "values": [image_id]
                }],
                "page": page,
                "page_size": 100
            }
            parameters.update(common_parameters)
            for k in parameters:
                if type(parameters[k]) is not str:
                    parameters[k] = json.dumps(parameters[k])
            while True:
                h = requests.get(url, params=parameters)
                # logging.info(h.text)
                if h.status_code == 200:
                    r = h.json()
                    break
                else:
                    time.sleep(1)
                    logging.info("request error, waiting 1s..")
            if 'data' in r.keys():
                li.extend(r['data']['list'])
            total_page = r['data']['page_info']['total_page']
            if total_page > page:
                page += 1
            else:
                break
    data = []
    for i in li:
        # TODO: do not re-run the conversion for videos with the same signature (see the sketch after this function)
        duration, byte_rate, metadata_title, cloud_filepath = get_video_info(i['preview_url'], i['signature'])
        data.append((i['video_id'], i['width'], i['height'],
                     i['signature'], i['preview_url'], i['file_size'],
                     'mp4', byte_rate, duration, metadata_title, cloud_filepath))
    logging.info(f"{account_id} new videos: " + str(len(li)))
    if len(li) > 0:
        sql = '''insert IGNORE into video_info (video_id,width,height,
                 signature,preview_url,size,type,byte_rate,video_length,
                 video_meta_data,download_path)
                 value (%s,%s,%s,
                 %s,%s,%s,%s,%s,%s,%s,%s)'''
        db.quchen_text.executeMany(sql, data)
    db.close()
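
# A minimal sketch of the caching suggested by the TODO in video_info_get: reuse the result of
# get_video_info for videos that share a signature instead of re-processing them. `_video_cache`
# and `cached_video_info` are illustrative names, not part of the original module.
_video_cache = {}

def cached_video_info(get_video_info, preview_url, signature):
    """Call `get_video_info` once per signature and reuse the stored result afterwards."""
    if signature not in _video_cache:
        _video_cache[signature] = get_video_info(preview_url, signature)
    return _video_cache[signature]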

def ad_info():
    accounts = db.quchen_text.getData("""
        select account_id,access_token,name channel,'GDT' type from advertiser_qq where name !='' or name is not null
        union
        select account_id,access_token,name channel,'MP' type from advertiser_vx where name !='' or name is not null
    """)
    total_data = []
    executor = ThreadPoolExecutor(max_workers=max_workers)
    for i in accounts:
        # logging.info(i)
        account_id = i[0]
        access_token = i[1]
        type = i[3]
        # NOTE: get_ad_info below now expects (account_id, access_token, flag, ad_ids, dt);
        # this call still passes the older 4-argument form and appears stale.
        executor.submit(get_ad_info, account_id, access_token, type, total_data)
    executor.shutdown()
    logging.info(len(total_data))
    if len(total_data) > 0:
        sql = "replace into ad_info values(%s,%s,%s,%s,%s,%s,%s)"
        db.quchen_text.executeMany(sql, total_data)
  357. """获取广告基础信息"""
  358. def get_ad_info(account_id, access_token, flag, ad_ids, dt):
  359. # 接口为 https://developers.e.qq.com/docs/apilist/ads/ad?version=1.3#a3
  360. path = 'ads/get'
  361. fields = ('ad_id', 'ad_name', 'adcreative_id', 'adgroup_id', 'campaign_id')
  362. url = 'https://api.e.qq.com/v1.3/' + path
  363. li = []
  364. page = 1
  365. while True:
  366. parameters = {
  367. 'access_token': access_token,
  368. 'timestamp': int(time.time()),
  369. 'nonce': str(time.time()) + str(random.randint(0, 999999)),
  370. 'fields': fields,
  371. "filtering": [{
  372. "field": "ad_id",
  373. "operator": "IN",
  374. "values":
  375. ad_ids.split(',')
  376. }],
  377. "account_id": account_id,
  378. "page": page,
  379. "page_size": 100,
  380. "is_deleted": False
  381. }
  382. for k in parameters:
  383. if type(parameters[k]) is not str:
  384. parameters[k] = json.dumps(parameters[k])
  385. while True:
  386. r = requests.get(url, params=parameters).json()
  387. code = r['code']
  388. if code == 11017:
  389. time.sleep(61)
  390. else:
  391. break
  392. # logging.info(r)
  393. total_page = r['data']['page_info']['total_page']
  394. if page > total_page:
  395. break
  396. else:
  397. page += 1
  398. if r.get("data"):
  399. for i in r['data']['list']:
  400. li.append((str(i['ad_id']), i['ad_name'], i['adcreative_id'], i['campaign_id'], i['adgroup_id'],
  401. account_id, flag, dt))
  402. if li.__len__() > 0:
  403. logging.info(f"{account_id}有广告:" + str(li.__len__()))
  404. sql = "replace into ad_info values(%s,%s,%s,%s,%s,%s,%s,%s) "
  405. db.quchen_text.executeMany(sql, li)
  406. db.close()

def get_ad_cost_day(account_id, access_token, flag, st, et):
    if flag == 'MP':
        ad_cost_day_mp(account_id, access_token, st, et)
    else:
        ad_cost_day_gdt(account_id, access_token, st, et)

def ad_cost_day_gdt(account_id, access_token, st, et):
    # API docs: https://developers.e.qq.com/docs/api/insights/ad_insights/daily_reports_get?version=1.3
    url = 'https://api.e.qq.com/v1.3/daily_reports/get'
    fields = (
        'date', 'ad_id', 'adgroup_id', 'cost', 'view_count', 'ctr', 'follow_count', 'web_order_count', 'order_amount')
    li = []
    page = 1
    while True:
        parameters = {
            'access_token': access_token,
            'timestamp': int(time.time()),
            'nonce': str(time.time()) + str(random.randint(0, 999999)),
            'fields': fields,
            "account_id": account_id,
            "group_by": ['ad_id', 'date'],
            "level": 'REPORT_LEVEL_AD',
            "page": page,
            "page_size": 1000,
            "date_range": {
                "start_date": st,
                "end_date": et
            }
        }
        for k in parameters:
            if type(parameters[k]) is not str:
                parameters[k] = json.dumps(parameters[k])
        while True:
            r = requests.get(url, params=parameters)
            r = r.json()
            # logging.info(r)
            code = r['code']
            if code == 11017:  # request-frequency limit: back off and retry
                time.sleep(61)
            else:
                break
        if r.get("data"):
            for i in r['data']['list']:
                if i['cost'] > 0:
                    li.append(
                        (
                            i['date'], i['ad_id'], i['adgroup_id'], i['cost'] / 100, i['view_count'],
                            i['ctr'] * i['view_count'],  # clicks estimated as CTR * impressions
                            i['follow_count'], i['web_order_count'], i['order_amount'] / 100, account_id, 'GDT'
                        )
                    )
        total_page = r['data']['page_info']['total_page']
        if page >= total_page:
            break
        else:
            page += 1
    # logging.info(li)
    if len(li) > 0:
        logging.info(f"{account_id} have ad cost :{len(li)} ")
        db.quchen_text.executeMany('replace into ad_cost_day values(%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s)', li)
    db.close()

def ad_cost_day_mp(account_id, access_token, st, et):
    # API docs: https://developers.e.qq.com/docs/api/insights/ad_insights/daily_reports_get?version=1.3
    url = 'https://api.e.qq.com/v1.3/daily_reports/get'
    fields = ('date', 'ad_id', 'adgroup_id', 'cost', 'view_count', 'valid_click_count', 'official_account_follow_count',
              'order_count', 'order_amount')
    li = []
    page = 1
    while True:
        parameters = {
            'access_token': access_token,
            'timestamp': int(time.time()),
            'nonce': str(time.time()) + str(random.randint(0, 999999)),
            'fields': fields,
            "account_id": account_id,
            "level": 'REPORT_LEVEL_AD_WECHAT',
            "page": page,
            "page_size": 1000,
            "date_range": {
                "start_date": st,
                "end_date": et
            }
        }
        for k in parameters:
            if type(parameters[k]) is not str:
                parameters[k] = json.dumps(parameters[k])
        while True:
            r = requests.get(url, params=parameters)
            r = r.json()
            # logging.info(r['data']['list'])
            # import pandas as pd
            # logging.info(pd.DataFrame(r['data']['list']))
            code = r['code']
            if code == 11017:  # request-frequency limit: back off and retry
                time.sleep(61)
            else:
                break
        if r.get("data"):
            for i in r['data']['list']:
                if i['cost'] > 0:
                    li.append(
                        (
                            i['date'], i['ad_id'], i['adgroup_id'],
                            i['cost'] / 100, i['view_count'],
                            i['valid_click_count'],
                            i['official_account_follow_count'],
                            i['order_count'], i['order_amount'] / 100, account_id,
                            'MP'
                        )
                    )
        total_page = r['data']['page_info']['total_page']
        if page >= total_page:
            break
        else:
            page += 1
    # logging.info(li)
    # exit()
    if len(li) > 0:
        # TODO: confirm what adgroup_id and campaign_id are used for
        # reconcile with the ad-level data (a standalone demo of this aggregation follows this function)
        li_df = pandas.DataFrame(li)
        li_df_g = li_df.groupby([0, 1, 9, 10])
        li_new = []
        adgroup_id_dict = {}
        for index, group in li_df_g:
            adgroup_id_dict[index] = ','.join([str(i) for i in group[2].tolist()])
        for index, row in li_df_g.agg('sum').iterrows():
            new_row = row.tolist()
            new_row = list(index[0:2]) + new_row + list(index[2:])
            new_row[2] = adgroup_id_dict[index]
            li_new.append(tuple(new_row))
        logging.info(f"{account_id} have ad cost :{len(li_new)} ")
        db.quchen_text.executeMany('replace into ad_cost_day values(%s,%s,%s,%s,%s,'
                                   '%s,%s,%s,%s,%s,%s)', li_new)
    db.close()
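
# The groupby/agg block in ad_cost_day_mp collapses rows that share (date, ad_id, account_id, channel),
# sums the numeric metrics, and keeps the member adgroup_ids as a comma-joined string in column 2.
# A small self-contained illustration on synthetic rows (all values are made up):
def _demo_mp_aggregation():
    rows = [
        ('2021-04-09', 111, 201, 10.0, 100, 5, 1, 2, 30.0, 19206910, 'MP'),
        ('2021-04-09', 111, 202, 20.0, 200, 8, 2, 3, 40.0, 19206910, 'MP'),
    ]
    df = pandas.DataFrame(rows)
    grouped = df.groupby([0, 1, 9, 10])
    adgroups = {idx: ','.join(str(v) for v in g[2].tolist()) for idx, g in grouped}
    out = []
    for idx, row in grouped.agg('sum').iterrows():
        new_row = list(idx[0:2]) + row.tolist() + list(idx[2:])
        new_row[2] = adgroups[idx]
        out.append(tuple(new_row))
    # -> [('2021-04-09', 111, '201,202', 30.0, 300, 13, 3, 5, 70.0, 19206910, 'MP')]
    return out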

def daily_reports_get(access_token, account_id, level, start_date, end_date, fields):  # fetch WeChat (MP) campaign daily report data
    interface = 'daily_reports/get'
    url = 'https://api.e.qq.com/v1.3/' + interface
    common_parameters = {
        'access_token': access_token,
        'timestamp': int(time.time()),
        'nonce': str(time.time()) + str(random.randint(0, 999999)),
        'fields': fields
    }
    parameters = {
        "account_id": account_id,
        "level": level,
        "date_range":
            {
                "start_date": start_date,
                "end_date": end_date
            },
        "page": 1,
        "page_size": 1000,
        "fields":
            [
            ]
    }
    parameters.update(common_parameters)  # the passed-in fields overwrite the empty list above
    for k in parameters:
        if type(parameters[k]) is not str:
            parameters[k] = json.dumps(parameters[k])
    while True:
        r = requests.get(url, params=parameters)
        if r.status_code == 200:
            break
        else:
            time.sleep(1)
            logging.info("request error, waiting 1s..")
    return r.json()
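
# Example use of daily_reports_get (hypothetical credentials; the level and fields mirror values
# used elsewhere in this module):
# report = daily_reports_get(access_token='<token>', account_id=12345678,
#                            level='REPORT_LEVEL_AD_WECHAT',
#                            start_date='2021-04-09', end_date='2021-04-09',
#                            fields=('date', 'ad_id', 'cost', 'view_count'))
# logging.info(report.get('data', {}).get('list', []))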

def daily_qq_reports_get(access_token, account_id, compaign_id, level, start_date, end_date, fields):  # fetch GDT campaign daily report data
    interface = 'daily_reports/get'
    url = 'https://api.e.qq.com/v1.1/' + interface
    common_parameters = {
        'access_token': access_token,
        'timestamp': int(time.time()),
        'nonce': str(time.time()) + str(random.randint(0, 999999)),
        'fields': fields
    }
    parameters = {
        "account_id": account_id,
        "filtering":
            [
                {
                    "field": "campaign_id",
                    "operator": "EQUALS",
                    "values":
                        [
                            compaign_id
                        ]
                }
            ],
        "level": level,
        "date_range":
            {
                "start_date": start_date,
                "end_date": end_date
            },
        "page": 1,
        "page_size": 1000,
        "fields":
            [
            ]
    }
    parameters.update(common_parameters)
    for k in parameters:
        if type(parameters[k]) is not str:
            parameters[k] = json.dumps(parameters[k])
    r = requests.get(url, params=parameters)
    return r.json()

def mysql_insert_adcreative(data):
    # keyword arguments (required by pymysql >= 1.0); same connection values as the original positional call
    db = pymysql.connect(host='rm-bp1c9cj79872tx3aaro.mysql.rds.aliyuncs.com', user='superc',
                         password='Cc719199895', database='quchen_text')
    cursor = db.cursor()
    sql = 'replace into adcreative (campaign_id,adcreative_id,adcreative_name,image_id,title,promoted_object_type,page_type,page_id,link_page_id,promoted_object_id) values (%s,%s,%s,%s,%s,%s,%s,%s,%s,%s)'
    try:
        cursor.executemany(sql, data)
        db.commit()
        logging.info('insert [adcreative] ' + str(len(data)))
    except:
        db.rollback()
        logging.info('insert [adcreative] failed')

if __name__ == '__main__':
    account_id = 19206910
    access_token = '89079ccc8db047b078a0108e36a7e276'
    #
    account_id2 = 14709511
    access_token2 = 'e87f7b6f860eaeef086ddcc9c3614678'

    get_ad_cost_day(account_id, access_token, 'MP', '2021-04-09', '2021-04-09')
    # get_adcreatives(account_id, access_token, 'MP', '3187867673', '2021-04-09')