# cost_util.py
import json
import logging
import random
import time
from concurrent.futures import ThreadPoolExecutor
from io import BytesIO

import pandas
import pymysql
import requests
from PIL import Image

from data_processing import video_processing
from model.ComUtils import *
from model.DataBaseUtils import MysqlUtils
from model.DateUtils import DateUtils
  14. du = DateUtils()
  15. db = MysqlUtils()
  16. max_workers = 10
  17. count = []
  18. t = du.get_n_days(-10)
  19. def get_campaign(account_id, access_token, flag, campaign_ids, dt):
  20. path = 'campaigns/get'
  21. fields = ('campaign_id', 'campaign_name', 'configured_status', 'campaign_type', 'promoted_object_type',
  22. 'daily_budget', 'budget_reach_date', 'created_time', 'last_modified_time', 'speed_mode', 'is_deleted')
  23. url = 'https://api.e.qq.com/v1.3/' + path
  24. li = []
  25. page = 1
  26. while True:
  27. parameters = {
  28. 'access_token': access_token,
  29. 'timestamp': int(time.time()),
  30. 'nonce': str(time.time()) + str(random.randint(0, 999999)),
  31. 'fields': fields,
  32. "filtering": [{
  33. "field": "campaign_id",
  34. "operator": "IN",
  35. "values":
  36. campaign_ids.split(',')
  37. }],
  38. "account_id": account_id,
  39. "page": page,
  40. "page_size": 100,
  41. "is_deleted": False
  42. }
  43. for k in parameters:
  44. if type(parameters[k]) is not str:
  45. parameters[k] = json.dumps(parameters[k])
  46. while True:
  47. r = requests.get(url, params=parameters).json()
  48. code = r['code']
  49. if code == 11017:
  50. time.sleep(61)
  51. else:
  52. break
  53. # logging.info(r)
  54. total_page = r['data']['page_info']['total_page']
  55. if page > total_page:
  56. break
  57. else:
  58. page += 1
  59. if r.get("data"):
  60. for i in r['data']['list']:
  61. li.append((str(i['campaign_id']), i['campaign_name'], i['configured_status'], i['campaign_type'],
  62. i['promoted_object_type'], i['daily_budget'], i.get('budget_reach_date'),
  63. DateUtils.stamp_to_str(i['created_time']),
  64. DateUtils.stamp_to_str(i['last_modified_time']), i.get('speed_mode'), i.get('is_deleted'),
  65. account_id, flag, dt))
  66. # logging.info(li)
  67. """mp 没有 speed_mode,is_deleted,budget_reach_date"""
  68. if li.__len__() > 0:
  69. logging.info(f"{account_id}有计划:" + str(li.__len__()))
  70. sql = "replace into campaign_info values(%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s)"
  71. db.quchen_text.executeMany(sql, li)
  72. db.close()
  73. def get_adcreatives(account_id, access_token, flag, adc_ids, dt): # 获取创意
  74. # 接口 https://developers.e.qq.com/docs/api/adsmanagement/adcreatives/adcreatives_get?version=1.3
  75. url = 'https://api.e.qq.com/v1.1/adcreatives/get'
  76. li = []
  77. page = 1
  78. logging.info(f"{account_id}开始获取创意")
  79. while True:
  80. parameters = {
  81. 'access_token': access_token,
  82. 'timestamp': int(time.time()),
  83. 'nonce': str(time.time()) + str(random.randint(0, 999999)),
  84. 'fields': ('campaign_id', 'adcreative_id', 'adcreative_name', 'adcreative_elements', 'promoted_object_type',
  85. 'page_type',
  86. 'page_spec', 'link_page_spec', 'universal_link_url', 'promoted_object_id', 'site_set'),
  87. "filtering": [{
  88. "field": "adcreative_id",
  89. "operator": "IN",
  90. "values": adc_ids.split(',')
  91. }],
  92. "account_id": account_id,
  93. "page": page,
  94. "page_size": 100,
  95. "is_deleted": False
  96. }
  97. for k in parameters:
  98. if type(parameters[k]) is not str:
  99. parameters[k] = json.dumps(parameters[k])
  100. while True:
  101. h = requests.get(url, params=parameters, timeout=1)
  102. # logging.info(h.json())
  103. if h.status_code == 200:
  104. r = h.json()
  105. # logging.info(r)
  106. break
  107. else:
  108. time.sleep(1)
  109. logging.info("爬取失败 等待1s")
  110. logging.info(f"{account_id}采集到创意")
  111. if 'data' in r.keys():
  112. is_video = 0
  113. for i in r['data']['list']:
  114. # logging.info(i)
  115. if flag == 'MP':
  116. if len(i['adcreative_elements']) > 0:
  117. d = i['adcreative_elements']
  118. title = d.get('title', '')
  119. description = d.get('description', '')
  120. if 'image' in d.keys():
  121. image = d.get('image', '')
  122. elif 'image_list' in d.keys():
  123. image = ','.join(d.get('image_list'))
  124. elif 'video' in d.keys():
  125. image = d['video']
  126. is_video = 1
  127. else:
  128. image = ''
  129. else:
  130. title = image = ''
  131. li.append((
  132. i['adcreative_id'], i['adcreative_name'], i['campaign_id'], image, title,
  133. i.get('promoted_object_type', ''), i.get('page_type', ''),
  134. i['page_spec'].get('page_id', ''), i.get('promoted_object_id', ''),
  135. '', description, 'MP', account_id, dt, is_video
  136. ))
  137. else:
  138. if len(i['adcreative_elements']) > 0:
  139. d = i['adcreative_elements']
  140. if 'image' in d.keys():
  141. image = d['image']
  142. elif 'element_story' in d.keys():
  143. image = ','.join([x['image'] for x in d['element_story']])
  144. else:
  145. image = ''
  146. title = d.get('title', '')
  147. description = d.get('description', '')
  148. else:
  149. image = title = description = ''
  150. li.append(
  151. (
  152. i['adcreative_id'], i['adcreative_name'], i['campaign_id'], image, title,
  153. i.get('promoted_object_type', ''), i.get('page_type', ''),
  154. i['page_spec'].get('page_id', ''), i.get('promoted_object_id', ''),
  155. ','.join(i['site_set']), description, 'GDT', account_id, dt, is_video
  156. )
  157. )
  158. total_page = r['data']['page_info']['total_page']
  159. if total_page > page:
  160. page += 1
  161. else:
  162. break
  163. else:
  164. break
  165. logging.info(f"{account_id}创意分析结束")
  166. logging.info(f"{account_id}获取创意,结束")
  167. if len(li) > 0:
  168. logging.info(f"{account_id}有创意:" + str(len(li)))
  169. sql = 'replace into adcreative_info values(%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s) '
  170. db.quchen_text.executeMany(sql, li)
  171. def images_info_get(account_id, access_token, image_ids): # 获取图片信息
  172. # 接口 https://developers.e.qq.com/docs/api/business_assets/image/images_get?version=1.3
  173. def get_image_info(preview_url, err_num=5):
  174. try:
  175. if not preview_url:
  176. return None
  177. rsp = requests.get(preview_url, timeout=5)
  178. # 1.图片写入内存
  179. im = Image.open(BytesIO(rsp.content))
  180. # 2.获取图片属性
  181. image_format = im.format
  182. # image_size = len(rsp.content)
  183. return image_format
  184. except:
  185. if err_num < 5:
  186. return get_image_info(preview_url, err_num=err_num + 1)
  187. # 1.更新数据
  188. id_content = ','.join([''' '{}' '''.format(i) for i in image_ids.split(',')])
  189. id_content = id_content[:-1]
  190. sql = ''' select image_id from image_info vi
  191. where image_id in ({});'''.format(id_content)
  192. rs = db.quchen_text.getData(sql)
  193. id_all_set = set([i for i in image_ids.split(',') if len(i) > 0])
  194. id_have = set([i[0] for i in rs])
  195. image_ids = id_all_set - id_have
  196. fields = ('image_id', 'width', 'height', 'file_size', 'signature', 'preview_url')
  197. interface = 'images/get'
  198. url = 'https://api.e.qq.com/v1.3/' + interface
  199. page = 1
  200. li = []
  201. for image_id in image_ids:
  202. if len(image_id) < 1:
  203. continue
  204. while True:
  205. common_parameters = {
  206. 'access_token': access_token,
  207. 'timestamp': int(time.time()),
  208. 'nonce': str(time.time()) + str(random.randint(0, 999999)),
  209. 'fields': fields
  210. }
  211. parameters = {
  212. "account_id": account_id,
  213. "filtering": [{
  214. "field": "image_id",
  215. "operator": "IN",
  216. "values": [image_id]
  217. }],
  218. "page": page,
  219. "page_size": 100
  220. }
  221. parameters.update(common_parameters)
  222. for k in parameters:
  223. if type(parameters[k]) is not str:
  224. parameters[k] = json.dumps(parameters[k])
  225. while True:
  226. h = requests.get(url, params=parameters)
  227. # logging.info(h.text)
  228. if h.status_code == 200:
  229. r = h.json()
  230. break
  231. else:
  232. time.sleep(1)
  233. logging.info("请求出错 等待1s..")
  234. if 'data' in r.keys():
  235. li.extend(r['data']['list'])
  236. total_page = r['data']['page_info']['total_page']
  237. if total_page > page:
  238. page += 1
  239. else:
  240. break
  241. data = []
  242. for i in li:
  243. image_format = get_image_info(i['preview_url'])
  244. data.append(
  245. (i['image_id'], i['width'], i['height'], i['signature'], i['preview_url'], i['file_size'], image_format))
  246. logging.info(f"{account_id} 有新图片:" + str(li.__len__()))
  247. if li.__len__() > 0:
  248. sql = "insert IGNORE into image_info (image_id,width,height,signature,preview_url,size,type) value (%s,%s,%s,%s,%s,%s,%s)"
  249. db.quchen_text.executeMany(sql, data)
  250. db.close()
  251. def video_info_get(account_id, access_token, image_ids): # 获取视频信息
  252. # 接口 https://developers.e.qq.com/docs/api/business_assets/video/videos_get?version=1.3
  253. def get_video_info(video_url, signature, err_num=0):
  254. try:
  255. if not video_url:
  256. return None, None, None, None
  257. cloud_filepath, metadata_title, video_size, duration, bit_rate, width, height, format = video_processing.change_format(
  258. video_url, signature)
  259. return duration, bit_rate, metadata_title, cloud_filepath
  260. except Exception as e:
  261. logging.error(str(e))
  262. if err_num < 5:
  263. return get_video_info(video_url, signature, err_num=err_num + 1)
  264. else:
  265. return None, None, None, None
  266. # 1.数据库获取,查看是否需要获取对应数据
  267. id_content = ','.join([''' '{}' '''.format(i) for i in image_ids.split(',')])
  268. id_content = id_content[:-1]
  269. sql = ''' select video_id from video_info vi
  270. where video_id in ({});'''.format(id_content)
  271. rs = db.quchen_text.getData(sql)
  272. id_all_set = set([i for i in image_ids.split(',') if len(i) > 0])
  273. id_have = set([i[0] for i in rs])
  274. image_ids = id_all_set - id_have
  275. # 2.获取对应数据
  276. fields = ('video_id', 'width', 'height', 'file_size', 'signature', 'preview_url')
  277. interface = 'videos/get'
  278. url = 'https://api.e.qq.com/v1.3/' + interface
  279. page = 1
  280. li = []
  281. for image_id in image_ids:
  282. if len(image_id) < 1:
  283. continue
  284. while True:
  285. common_parameters = {
  286. 'access_token': access_token,
  287. 'timestamp': int(time.time()),
  288. 'nonce': str(time.time()) + str(random.randint(0, 999999)),
  289. 'fields': fields
  290. }
  291. parameters = {
  292. "account_id": account_id,
  293. "filtering": [{
  294. "field": "media_id",
  295. "operator": "IN",
  296. "values": [image_id]
  297. }],
  298. "page": page,
  299. "page_size": 100
  300. }
  301. parameters.update(common_parameters)
  302. for k in parameters:
  303. if type(parameters[k]) is not str:
  304. parameters[k] = json.dumps(parameters[k])
  305. while True:
  306. h = requests.get(url, params=parameters)
  307. # logging.info(h.text)
  308. if h.status_code == 200:
  309. r = h.json()
  310. break
  311. else:
  312. time.sleep(1)
  313. logging.info("请求出错 等待1s..")
  314. if 'data' in r.keys():
  315. li.extend(r['data']['list'])
  316. total_page = r['data']['page_info']['total_page']
  317. if total_page > page:
  318. page += 1
  319. else:
  320. break
  321. data = []
  322. for i in li:
  323. # TODO:signature相同的,不进行再一次运行计算
  324. duration, byte_rate, metadata_title, cloud_filepath = get_video_info(i['preview_url'], i['signature'])
  325. data.append((i['video_id'], i['width'], i['height'],
  326. i['signature'], i['preview_url'], i['file_size'],
  327. 'mp4', byte_rate, duration, metadata_title, cloud_filepath))
  328. logging.info(f"{account_id} 获取到新视频:" + str(li.__len__()))
  329. if li.__len__() > 0:
  330. sql = '''insert IGNORE into video_info (video_id,width,height,
  331. signature,preview_url,size,type,byte_rate,video_length,
  332. video_meta_data,download_path)
  333. value (%s,%s,%s,
  334. %s,%s,%s,%s,%s,%s,%s,%s)'''
  335. db.quchen_text.executeMany(sql, data)
  336. db.close()
  337. def ad_info():
  338. accounts = db.quchen_text.getData("""
  339. select account_id,access_token,name channel,'GDT' type from advertiser_qq where name !='' or name is not null
  340. union
  341. select account_id,access_token,name channel,'MP' type from advertiser_vx where name !='' or name is not null
  342. """)
  343. total_data = []
  344. executor = ThreadPoolExecutor(max_workers=max_workers)
  345. for i in accounts:
  346. # logging.info(i)
  347. account_id = i[0]
  348. access_token = i[1]
  349. type = i[3]
  350. executor.submit(get_ad_info, account_id, access_token, type, total_data)
  351. executor.shutdown()
  352. logging.info(len(total_data))
  353. if len(total_data) > 0:
  354. sql = "replace into ad_info values(%s,%s,%s,%s,%s,%s,%s) "
  355. db.quchen_text.executeMany(sql, total_data)
  356. """获取广告基础信息"""
  357. def get_ad_info(account_id, access_token, flag, ad_ids, dt):
  358. # 接口为 https://developers.e.qq.com/docs/apilist/ads/ad?version=1.3#a3
  359. path = 'ads/get'
  360. fields = ('ad_id', 'ad_name', 'adcreative_id', 'adgroup_id', 'campaign_id')
  361. url = 'https://api.e.qq.com/v1.3/' + path
  362. li = []
  363. page = 1
  364. while True:
  365. parameters = {
  366. 'access_token': access_token,
  367. 'timestamp': int(time.time()),
  368. 'nonce': str(time.time()) + str(random.randint(0, 999999)),
  369. 'fields': fields,
  370. "filtering": [{
  371. "field": "ad_id",
  372. "operator": "IN",
  373. "values":
  374. ad_ids.split(',')
  375. }],
  376. "account_id": account_id,
  377. "page": page,
  378. "page_size": 100,
  379. "is_deleted": False
  380. }
  381. for k in parameters:
  382. if type(parameters[k]) is not str:
  383. parameters[k] = json.dumps(parameters[k])
  384. while True:
  385. r = requests.get(url, params=parameters).json()
  386. code = r['code']
  387. if code == 11017:
  388. time.sleep(61)
  389. else:
  390. break
  391. # logging.info(r)
  392. total_page = r['data']['page_info']['total_page']
  393. if page > total_page:
  394. break
  395. else:
  396. page += 1
  397. if r.get("data"):
  398. for i in r['data']['list']:
  399. li.append((str(i['ad_id']), i['ad_name'], i['adcreative_id'], i['campaign_id'], i['adgroup_id'],
  400. account_id, flag, dt))
  401. if li.__len__() > 0:
  402. logging.info(f"{account_id}有广告:" + str(li.__len__()))
  403. sql = "replace into ad_info values(%s,%s,%s,%s,%s,%s,%s,%s) "
  404. db.quchen_text.executeMany(sql, li)
  405. db.close()
  406. def get_ad_cost_day(account_id, access_token, flag, st, et):
  407. if flag == 'MP':
  408. ad_cost_day_mp(account_id, access_token, st, et)
  409. else:
  410. ad_cost_day_gdt(account_id, access_token, st, et)
  411. def ad_cost_day_gdt(account_id, access_token, st, et):
  412. # 接口文档 https://developers.e.qq.com/docs/api/insights/ad_insights/daily_reports_get?version=1.3
  413. url = 'https://api.e.qq.com/v1.3/daily_reports/get'
  414. fields = (
  415. 'date', 'ad_id', 'adgroup_id', 'cost', 'view_count', 'ctr', 'follow_count', 'web_order_count', 'order_amount')
  416. li = []
  417. page = 1
  418. while True:
  419. parameters = {
  420. 'access_token': access_token,
  421. 'timestamp': int(time.time()),
  422. 'nonce': str(time.time()) + str(random.randint(0, 999999)),
  423. 'fields': fields,
  424. "account_id": account_id,
  425. "group_by": ['ad_id', 'date'],
  426. "level": 'REPORT_LEVEL_AD',
  427. "page": page,
  428. "page_size": 1000,
  429. "date_range": {
  430. "start_date": st,
  431. "end_date": et
  432. }
  433. }
  434. for k in parameters:
  435. if type(parameters[k]) is not str:
  436. parameters[k] = json.dumps(parameters[k])
  437. while True:
  438. r = requests.get(url, params=parameters)
  439. r = r.json()
  440. # logging.info(r)
  441. code = r['code']
  442. if code == 11017:
  443. time.sleep(61)
  444. else:
  445. break
  446. if r.get("data"):
  447. for i in r['data']['list']:
  448. if i['cost'] > 0:
  449. li.append(
  450. (
  451. i['date'], i['ad_id'], i['adgroup_id'], i['cost'] / 100, i['view_count'],
  452. i['ctr'] * i['view_count'],
  453. i['follow_count'], i['web_order_count'], i['order_amount'] / 100, account_id, 'GDT'
  454. )
  455. )
  456. total_page = r['data']['page_info']['total_page']
  457. if page >= total_page:
  458. break
  459. else:
  460. page += 1
  461. # logging.info(li)
  462. if len(li) > 0:
  463. logging.info(f"{account_id} have ad cost :{len(li)} ")
  464. db.quchen_text.executeMany('replace into ad_cost_day values(%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s)', li)
  465. db.close()
  466. def ad_cost_day_mp(account_id, access_token, st, et):
  467. # 接口文档 https://developers.e.qq.com/docs/api/insights/ad_insights/daily_reports_get?version=1.3
  468. url = 'https://api.e.qq.com/v1.3/daily_reports/get'
  469. fields = ('date', 'ad_id', 'adgroup_id', 'cost', 'view_count', 'valid_click_count', 'official_account_follow_count',
  470. 'order_count', 'order_amount')
  471. li = []
  472. page = 1
  473. while True:
  474. parameters = {
  475. 'access_token': access_token,
  476. 'timestamp': int(time.time()),
  477. 'nonce': str(time.time()) + str(random.randint(0, 999999)),
  478. 'fields': fields,
  479. "account_id": account_id,
  480. "level": 'REPORT_LEVEL_AD_WECHAT',
  481. "page": page,
  482. "page_size": 1000,
  483. "date_range": {
  484. "start_date": st,
  485. "end_date": et
  486. }
  487. }
  488. for k in parameters:
  489. if type(parameters[k]) is not str:
  490. parameters[k] = json.dumps(parameters[k])
  491. while True:
  492. r = requests.get(url, params=parameters)
  493. r = r.json()
  494. # logging.info(r['data']['list'])
  495. # import pandas as pd
  496. # logging.info(pd.DataFrame(r['data']['list']))
  497. code = r['code']
  498. if code == 11017:
  499. time.sleep(61)
  500. else:
  501. break
  502. if r.get("data"):
  503. for i in r['data']['list']:
  504. if i['cost'] > 0:
  505. li.append(
  506. (
  507. i['date'], i['ad_id'], i['adgroup_id'],
  508. i['cost'] / 100, i['view_count'],
  509. i['valid_click_count'],
  510. i['official_account_follow_count'],
  511. i['order_count'], i['order_amount'] / 100, account_id,
  512. 'MP'
  513. )
  514. )
  515. total_page = r['data']['page_info']['total_page']
  516. if page >= total_page:
  517. break
  518. else:
  519. page += 1
  520. # logging.info(li)
  521. # exit()
  522. if len(li) > 0:
  523. # TODO:询问一下adgroup_id,campaign_id作用
  524. # 对一下ad的数据
  525. li_df = pandas.DataFrame(li)
  526. li_df_g = li_df.groupby([0, 1, 9, 10])
  527. li_new = []
  528. adgroup_id_dict = {}
  529. for index, group in li_df_g:
  530. adgroup_id_dict[index] = ','.join([str(i) for i in group[2].tolist()])
  531. for index, row in li_df_g.agg('sum').iterrows():
  532. new_row = row.tolist()
  533. new_row = list(index[0:2]) + new_row + list(index[2:])
  534. new_row[2] = adgroup_id_dict[index]
  535. li_new.append(tuple(new_row))
  536. logging.info(f"{account_id} have ad cost :{len(li_new)} ")
  537. db.quchen_text.executeMany('replace into ad_cost_day values(%s,%s,%s,%s,%s,'
  538. '%s,%s,%s,%s,%s,%s)', li_new)
  539. db.close()
  540. def daily_reports_get(access_token, account_id, level, start_date, end_date, fields): # 获取wx投放计划日报数据
  541. interface = 'daily_reports/get'
  542. url = 'https://api.e.qq.com/v1.3/' + interface
  543. common_parameters = {
  544. 'access_token': access_token,
  545. 'timestamp': int(time.time()),
  546. 'nonce': str(time.time()) + str(random.randint(0, 999999)),
  547. 'fields': fields
  548. }
  549. parameters = {
  550. "account_id": account_id,
  551. "level": level,
  552. "date_range":
  553. {
  554. "start_date": start_date,
  555. "end_date": end_date
  556. },
  557. "page": 1,
  558. "page_size": 1000,
  559. "fields":
  560. [
  561. ]
  562. }
  563. parameters.update(common_parameters)
  564. for k in parameters:
  565. if type(parameters[k]) is not str:
  566. parameters[k] = json.dumps(parameters[k])
  567. while True:
  568. r = requests.get(url, params=parameters)
  569. if r.status_code == 200:
  570. break
  571. else:
  572. time.sleep(1)
  573. logging.info("请求出错 等待1s..")
  574. return r.json()
  575. def daily_qq_reports_get(access_token, account_id, compaign_id, level, start_date, end_date, fields): # 获取gdt投放计划日报数据
  576. interface = 'daily_reports/get'
  577. url = 'https://api.e.qq.com/v1.1/' + interface
  578. common_parameters = {
  579. 'access_token': access_token,
  580. 'timestamp': int(time.time()),
  581. 'nonce': str(time.time()) + str(random.randint(0, 999999)),
  582. 'fields': fields
  583. }
  584. parameters = {
  585. "account_id": account_id,
  586. "filtering":
  587. [
  588. {
  589. "field": "campaign_id",
  590. "operator": "EQUALS",
  591. "values":
  592. [
  593. compaign_id
  594. ]
  595. }
  596. ],
  597. "level": level,
  598. "date_range":
  599. {
  600. "start_date": start_date,
  601. "end_date": end_date
  602. },
  603. "page": 1,
  604. "page_size": 1000,
  605. "fields":
  606. [
  607. ]
  608. }
  609. parameters.update(common_parameters)
  610. for k in parameters:
  611. if type(parameters[k]) is not str:
  612. parameters[k] = json.dumps(parameters[k])
  613. r = requests.get(url, params=parameters)
  614. return r.json()
  615. def mysql_insert_adcreative(data):
  616. db = pymysql.connect('rm-bp1c9cj79872tx3aaro.mysql.rds.aliyuncs.com', 'superc', 'Cc719199895', 'quchen_text')
  617. cursor = db.cursor()
  618. sql = 'replace into adcreative (campaign_id,adcreative_id,adcreative_name,image_id,title,promoted_object_type,page_type,page_id,link_page_id,promoted_object_id) values (%s,%s,%s,%s,%s,%s,%s,%s,%s,%s)'
  619. try:
  620. cursor.executemany(sql, data)
  621. db.commit()
  622. logging.info('insert [adcreative] ' + str(len(data)))
  623. except:
  624. db.rollback()
  625. logging.info('insert [adcreative] defeat')
  626. if __name__ == '__main__':
  627. account_id = 19206910
  628. access_token = '89079ccc8db047b078a0108e36a7e276'
  629. #
  630. account_id2 = 14709511
  631. access_token2 = 'e87f7b6f860eaeef086ddcc9c3614678'
  632. get_ad_cost_day(account_id, access_token, 'MP', '2021-04-09', '2021-04-09')
  633. # get_adcreatives(account_id,access_token,'MP','3187867673','2021-04-09')