# cost_util.py

import json
import random
import requests
import time
import pymysql
import logging
import pandas
from concurrent.futures import ThreadPoolExecutor
from model.DataBaseUtils import MysqlUtils
from model.ComUtils import *
from model.DateUtils import DateUtils
from PIL import Image
from io import BytesIO
import cv2

du = DateUtils()
db = MysqlUtils()
max_workers = 10
count = []
t = du.get_n_days(-10)


def get_campaign(account_id, access_token, flag, campaign_ids, dt):
    path = 'campaigns/get'
    fields = ('campaign_id', 'campaign_name', 'configured_status', 'campaign_type', 'promoted_object_type',
              'daily_budget', 'budget_reach_date', 'created_time', 'last_modified_time', 'speed_mode', 'is_deleted')
    url = 'https://api.e.qq.com/v1.3/' + path
    li = []
    page = 1
    while True:
        parameters = {
            'access_token': access_token,
            'timestamp': int(time.time()),
            'nonce': str(time.time()) + str(random.randint(0, 999999)),
            'fields': fields,
            "filtering": [{
                "field": "campaign_id",
                "operator": "IN",
                "values": campaign_ids.split(',')
            }],
            "account_id": account_id,
            "page": page,
            "page_size": 100,
            "is_deleted": False
        }
        for k in parameters:
            if type(parameters[k]) is not str:
                parameters[k] = json.dumps(parameters[k])
        while True:
            r = requests.get(url, params=parameters).json()
            code = r['code']
            if code == 11017:  # rate limited: wait out the window and retry
                time.sleep(61)
            else:
                break
        # logging.info(r)
        total_page = r['data']['page_info']['total_page']
        if page > total_page:
            break
        else:
            page += 1
        if r.get("data"):
            for i in r['data']['list']:
                li.append((str(i['campaign_id']), i['campaign_name'], i['configured_status'], i['campaign_type'],
                           i['promoted_object_type'], i['daily_budget'], i.get('budget_reach_date'),
                           DateUtils.stamp_to_str(i['created_time']),
                           DateUtils.stamp_to_str(i['last_modified_time']), i.get('speed_mode'), i.get('is_deleted'),
                           account_id, flag, dt))
    # logging.info(li)
    # Note: MP accounts do not return speed_mode, is_deleted or budget_reach_date.
    if len(li) > 0:
        logging.info(f"{account_id} has campaigns: " + str(len(li)))
        sql = "replace into campaign_info values(%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s)"
        db.quchen_text.executeMany(sql, li)
    db.close()
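

# A usage sketch, not part of the original module: the account id, token,
# campaign ids and date below are hypothetical placeholders showing how
# get_campaign is expected to be called.
def _example_get_campaign():
    get_campaign(
        account_id=12345678,            # hypothetical advertiser account id
        access_token='<ACCESS_TOKEN>',  # hypothetical token
        flag='GDT',                     # 'GDT' (Tencent Ads) or 'MP' (WeChat MP)
        campaign_ids='111,222,333',     # comma-separated campaign id string
        dt='2021-04-09',                # snapshot date written into campaign_info
    )
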

def get_adcreatives(account_id, access_token, flag, adc_ids, dt):  # fetch ad creatives
    # API: https://developers.e.qq.com/docs/api/adsmanagement/adcreatives/adcreatives_get?version=1.3
    url = 'https://api.e.qq.com/v1.1/adcreatives/get'
    li = []
    page = 1
    logging.info(f"{account_id} start fetching creatives")
    while True:
        parameters = {
            'access_token': access_token,
            'timestamp': int(time.time()),
            'nonce': str(time.time()) + str(random.randint(0, 999999)),
            'fields': ('campaign_id', 'adcreative_id', 'adcreative_name', 'adcreative_elements',
                       'promoted_object_type', 'page_type',
                       'page_spec', 'link_page_spec', 'universal_link_url', 'promoted_object_id', 'site_set'),
            "filtering": [{
                "field": "adcreative_id",
                "operator": "IN",
                "values": adc_ids.split(',')
            }],
            "account_id": account_id,
            "page": page,
            "page_size": 100,
            "is_deleted": False
        }
        for k in parameters:
            if type(parameters[k]) is not str:
                parameters[k] = json.dumps(parameters[k])
        while True:
            h = requests.get(url, params=parameters, timeout=1)
            # logging.info(h.json())
            if h.status_code == 200:
                r = h.json()
                # logging.info(r)
                break
            else:
                time.sleep(1)
                logging.info("request failed, waiting 1s")
        logging.info(f"{account_id} creatives collected")
        if 'data' in r.keys():
            is_video = 0
            for i in r['data']['list']:
                # logging.info(i)
                if flag == 'MP':
                    if len(i['adcreative_elements']) > 0:
                        d = i['adcreative_elements']
                        title = d.get('title', '')
                        description = d.get('description', '')
                        if 'image' in d.keys():
                            image = d.get('image', '')
                        elif 'image_list' in d.keys():
                            image = ','.join(d.get('image_list'))
                        elif 'video' in d.keys():
                            image = d['video']
                            is_video = 1
                        else:
                            image = ''
                    else:
                        title = image = description = ''
                    li.append((
                        i['adcreative_id'], i['adcreative_name'], i['campaign_id'], image, title,
                        i.get('promoted_object_type', ''), i.get('page_type', ''),
                        i['page_spec'].get('page_id', ''), i.get('promoted_object_id', ''),
                        '', description, 'MP', account_id, dt, is_video
                    ))
                else:
                    if len(i['adcreative_elements']) > 0:
                        d = i['adcreative_elements']
                        if 'image' in d.keys():
                            image = d['image']
                        elif 'element_story' in d.keys():
                            image = ','.join([x['image'] for x in d['element_story']])
                        else:
                            image = ''
                        title = d.get('title', '')
                        description = d.get('description', '')
                    else:
                        image = title = description = ''
                    li.append(
                        (
                            i['adcreative_id'], i['adcreative_name'], i['campaign_id'], image, title,
                            i.get('promoted_object_type', ''), i.get('page_type', ''),
                            i['page_spec'].get('page_id', ''), i.get('promoted_object_id', ''),
                            ','.join(i['site_set']), description, 'GDT', account_id, dt, is_video
                        )
                    )
            total_page = r['data']['page_info']['total_page']
            if total_page > page:
                page += 1
            else:
                break
        else:
            break
    logging.info(f"{account_id} finished parsing creatives")
    logging.info(f"{account_id} finished fetching creatives")
    if len(li) > 0:
        logging.info(f"{account_id} has creatives: " + str(len(li)))
        sql = 'replace into adcreative_info values(%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s) '
        db.quchen_text.executeMany(sql, li)

def images_info_get(account_id, access_token, image_ids):  # fetch image metadata
    # API: https://developers.e.qq.com/docs/api/business_assets/image/images_get?version=1.3
    def get_image_info(preview_url, err_num=0):
        try:
            if not preview_url:
                return None
            rsp = requests.get(preview_url, timeout=5)
            # 1. load the image into memory
            im = Image.open(BytesIO(rsp.content))
            # 2. read its format
            image_format = im.format
            # image_size = len(rsp.content)
            return image_format
        except:
            if err_num < 5:
                return get_image_info(preview_url, err_num=err_num + 1)

    # 1. skip ids that already exist in the database
    id_content = ','.join([''' '{}' '''.format(i) for i in image_ids.split(',')])
    id_content = id_content[:-1]
    sql = ''' select image_id from image_info vi
    where image_id in ({});'''.format(id_content)
    rs = db.quchen_text.getData(sql)
    id_all_set = set([i for i in image_ids.split(',') if len(i) > 0])
    id_have = set([i[0] for i in rs])
    image_ids = id_all_set - id_have
    fields = ('image_id', 'width', 'height', 'file_size', 'signature', 'preview_url')
    interface = 'images/get'
    url = 'https://api.e.qq.com/v1.3/' + interface
    page = 1
    li = []
    for image_id in image_ids:
        if len(image_id) < 1:
            continue
        while True:
            common_parameters = {
                'access_token': access_token,
                'timestamp': int(time.time()),
                'nonce': str(time.time()) + str(random.randint(0, 999999)),
                'fields': fields
            }
            parameters = {
                "account_id": account_id,
                "filtering": [{
                    "field": "image_id",
                    "operator": "IN",
                    "values": [image_id]
                }],
                "page": page,
                "page_size": 100
            }
            parameters.update(common_parameters)
            for k in parameters:
                if type(parameters[k]) is not str:
                    parameters[k] = json.dumps(parameters[k])
            while True:
                h = requests.get(url, params=parameters)
                # logging.info(h.text)
                if h.status_code == 200:
                    r = h.json()
                    break
                else:
                    time.sleep(1)
                    logging.info("request failed, waiting 1s..")
            if 'data' in r.keys():
                li.extend(r['data']['list'])
            total_page = r['data']['page_info']['total_page']
            if total_page > page:
                page += 1
            else:
                break
    data = []
    for i in li:
        image_format = get_image_info(i['preview_url'])
        data.append(
            (i['image_id'], i['width'], i['height'], i['signature'], i['preview_url'], i['file_size'], image_format))
    logging.info(f"{account_id} new images: " + str(len(li)))
    if len(li) > 0:
        sql = "insert IGNORE into image_info (image_id,width,height,signature,preview_url,size,type) value (%s,%s,%s,%s,%s,%s,%s)"
        db.quchen_text.executeMany(sql, data)
    db.close()

def video_info_get(account_id, access_token, image_ids):  # fetch video metadata
    # API: https://developers.e.qq.com/docs/api/business_assets/video/videos_get?version=1.3
    def get_video_info(video_url, err_num=0):
        try:
            if not video_url:
                return None, None
            rsp = requests.get(video_url)
            with open('/tmp/aa.mp4', 'wb') as f:
                f.write(rsp.content)
            video_size = len(rsp.content)
            cap = cv2.VideoCapture('/tmp/aa.mp4')  # open the downloaded file as a video stream
            if cap.isOpened():
                rate = cap.get(5)       # frame rate (cv2.CAP_PROP_FPS)
                frame_num = cap.get(7)  # frame count (cv2.CAP_PROP_FRAME_COUNT)
                duration = frame_num / rate
                byte_rate = (video_size / (duration / 8))
                return duration, byte_rate
        except:
            if err_num < 5:
                return get_video_info(video_url, err_num=err_num + 1)
        return None, None

    # 1. check the database to see which video ids still need to be fetched
    id_content = ','.join([''' '{}' '''.format(i) for i in image_ids.split(',')])
    id_content = id_content[:-1]
    sql = ''' select video_id from video_info vi
    where video_id in ({});'''.format(id_content)
    rs = db.quchen_text.getData(sql)
    id_all_set = set([i for i in image_ids.split(',') if len(i) > 0])
    id_have = set([i[0] for i in rs])
    image_ids = id_all_set - id_have
    # 2. fetch metadata for the remaining ids
    fields = ('video_id', 'width', 'height', 'file_size', 'signature', 'preview_url')
    interface = 'videos/get'
    url = 'https://api.e.qq.com/v1.3/' + interface
    page = 1
    li = []
    for image_id in image_ids:
        if len(image_id) < 1:
            continue
        while True:
            common_parameters = {
                'access_token': access_token,
                'timestamp': int(time.time()),
                'nonce': str(time.time()) + str(random.randint(0, 999999)),
                'fields': fields
            }
            parameters = {
                "account_id": account_id,
                "filtering": [{
                    "field": "media_id",
                    "operator": "IN",
                    "values": [image_id]
                }],
                "page": page,
                "page_size": 100
            }
            parameters.update(common_parameters)
            for k in parameters:
                if type(parameters[k]) is not str:
                    parameters[k] = json.dumps(parameters[k])
            while True:
                h = requests.get(url, params=parameters)
                # logging.info(h.text)
                if h.status_code == 200:
                    r = h.json()
                    break
                else:
                    time.sleep(1)
                    logging.info("request failed, waiting 1s..")
            if 'data' in r.keys():
                li.extend(r['data']['list'])
            total_page = r['data']['page_info']['total_page']
            if total_page > page:
                page += 1
            else:
                break
    data = []
    for i in li:
        duration, byte_rate = get_video_info(i['preview_url'])
        data.append((i['video_id'], i['width'], i['height'],
                     i['signature'], i['preview_url'], i['file_size'],
                     'mp4', byte_rate, duration))
    logging.info(f"{account_id} new videos: " + str(len(li)))
    if len(li) > 0:
        sql = '''insert IGNORE into video_info (video_id,width,height,
        signature,preview_url,size,type,byte_rate,video_length)
        value (%s,%s,%s,
        %s,%s,%s,%s,%s,%s)'''
        db.quchen_text.executeMany(sql, data)
    db.close()
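

# The fetch functions above all follow the same request pattern: build the
# common signed parameters (access_token / timestamp / nonce), JSON-encode any
# non-string values, then retry when the API answers with rate-limit code
# 11017. A minimal sketch of that shared pattern as a hypothetical helper
# (_signed_get is not part of the original module):
def _signed_get(url, access_token, business_params, max_rate_limit_waits=10):
    params = {
        'access_token': access_token,
        'timestamp': int(time.time()),
        'nonce': str(time.time()) + str(random.randint(0, 999999)),
    }
    params.update(business_params)
    # the Marketing API expects structured values (lists, dicts, bools) as JSON strings
    params = {k: v if isinstance(v, str) else json.dumps(v) for k, v in params.items()}
    r = None
    for _ in range(max_rate_limit_waits):
        r = requests.get(url, params=params).json()
        if r.get('code') == 11017:  # rate limited: wait out the window, then retry
            time.sleep(61)
        else:
            break
    return r
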

def ad_info():
    accounts = db.quchen_text.getData("""
    select account_id,access_token,name channel,'GDT' type from advertiser_qq where name !='' or name is not null
    union
    select account_id,access_token,name channel,'MP' type from advertiser_vx where name !='' or name is not null
    """)
    total_data = []
    executor = ThreadPoolExecutor(max_workers=max_workers)
    for i in accounts:
        # logging.info(i)
        account_id = i[0]
        access_token = i[1]
        type = i[3]
        # NOTE: get_ad_info below takes (account_id, access_token, flag, ad_ids, dt);
        # this call passes total_data where ad_ids is expected and omits dt.
        executor.submit(get_ad_info, account_id, access_token, type, total_data)
    executor.shutdown()
    logging.info(len(total_data))
    if len(total_data) > 0:
        sql = "replace into ad_info values(%s,%s,%s,%s,%s,%s,%s) "
        db.quchen_text.executeMany(sql, total_data)
  360. """获取广告基础信息"""
  361. def get_ad_info(account_id, access_token, flag, ad_ids, dt):
  362. # 接口为 https://developers.e.qq.com/docs/apilist/ads/ad?version=1.3#a3
  363. path = 'ads/get'
  364. fields = ('ad_id', 'ad_name', 'adcreative_id', 'adgroup_id', 'campaign_id')
  365. url = 'https://api.e.qq.com/v1.3/' + path
  366. li = []
  367. page = 1
  368. while True:
  369. parameters = {
  370. 'access_token': access_token,
  371. 'timestamp': int(time.time()),
  372. 'nonce': str(time.time()) + str(random.randint(0, 999999)),
  373. 'fields': fields,
  374. "filtering": [{
  375. "field": "ad_id",
  376. "operator": "IN",
  377. "values":
  378. ad_ids.split(',')
  379. }],
  380. "account_id": account_id,
  381. "page": page,
  382. "page_size": 100,
  383. "is_deleted": False
  384. }
  385. for k in parameters:
  386. if type(parameters[k]) is not str:
  387. parameters[k] = json.dumps(parameters[k])
  388. while True:
  389. r = requests.get(url, params=parameters).json()
  390. code = r['code']
  391. if code == 11017:
  392. time.sleep(61)
  393. else:
  394. break
  395. # logging.info(r)
  396. total_page = r['data']['page_info']['total_page']
  397. if page > total_page:
  398. break
  399. else:
  400. page += 1
  401. if r.get("data"):
  402. for i in r['data']['list']:
  403. li.append((str(i['ad_id']), i['ad_name'], i['adcreative_id'], i['campaign_id'], i['adgroup_id'],
  404. account_id, flag, dt))
  405. if li.__len__() > 0:
  406. logging.info(f"{account_id}有广告:" + str(li.__len__()))
  407. sql = "replace into ad_info values(%s,%s,%s,%s,%s,%s,%s,%s) "
  408. db.quchen_text.executeMany(sql, li)
  409. db.close()

def get_ad_cost_day(account_id, access_token, flag, st, et):
    if flag == 'MP':
        ad_cost_day_mp(account_id, access_token, st, et)
    else:
        ad_cost_day_gdt(account_id, access_token, st, et)

def ad_cost_day_gdt(account_id, access_token, st, et):
    # API docs: https://developers.e.qq.com/docs/api/insights/ad_insights/daily_reports_get?version=1.3
    url = 'https://api.e.qq.com/v1.3/daily_reports/get'
    fields = (
        'date', 'ad_id', 'adgroup_id', 'cost', 'view_count', 'ctr', 'follow_count', 'web_order_count', 'order_amount')
    li = []
    page = 1
    while True:
        parameters = {
            'access_token': access_token,
            'timestamp': int(time.time()),
            'nonce': str(time.time()) + str(random.randint(0, 999999)),
            'fields': fields,
            "account_id": account_id,
            "group_by": ['ad_id', 'date'],
            "level": 'REPORT_LEVEL_AD',
            "page": page,
            "page_size": 1000,
            "date_range": {
                "start_date": st,
                "end_date": et
            }
        }
        for k in parameters:
            if type(parameters[k]) is not str:
                parameters[k] = json.dumps(parameters[k])
        while True:
            r = requests.get(url, params=parameters)
            r = r.json()
            # logging.info(r)
            code = r['code']
            if code == 11017:  # rate limited: wait out the window and retry
                time.sleep(61)
            else:
                break
        if r.get("data"):
            for i in r['data']['list']:
                if i['cost'] > 0:
                    li.append(
                        (
                            i['date'], i['ad_id'], i['adgroup_id'], i['cost'] / 100, i['view_count'],
                            i['ctr'] * i['view_count'],
                            i['follow_count'], i['web_order_count'], i['order_amount'] / 100, account_id, 'GDT'
                        )
                    )
        total_page = r['data']['page_info']['total_page']
        if page >= total_page:
            break
        else:
            page += 1
    # logging.info(li)
    if len(li) > 0:
        logging.info(f"{account_id} has ad cost rows: {len(li)} ")
        db.quchen_text.executeMany('replace into ad_cost_day values(%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s)', li)
    db.close()

def ad_cost_day_mp(account_id, access_token, st, et):
    # API docs: https://developers.e.qq.com/docs/api/insights/ad_insights/daily_reports_get?version=1.3
    url = 'https://api.e.qq.com/v1.3/daily_reports/get'
    fields = ('date', 'ad_id', 'adgroup_id', 'cost', 'view_count', 'valid_click_count',
              'official_account_follow_count', 'order_count', 'order_amount')
    li = []
    page = 1
    while True:
        parameters = {
            'access_token': access_token,
            'timestamp': int(time.time()),
            'nonce': str(time.time()) + str(random.randint(0, 999999)),
            'fields': fields,
            "account_id": account_id,
            "level": 'REPORT_LEVEL_AD_WECHAT',
            "page": page,
            "page_size": 1000,
            "date_range": {
                "start_date": st,
                "end_date": et
            }
        }
        for k in parameters:
            if type(parameters[k]) is not str:
                parameters[k] = json.dumps(parameters[k])
        while True:
            r = requests.get(url, params=parameters)
            r = r.json()
            # logging.info(r['data']['list'])
            # import pandas as pd
            # logging.info(pd.DataFrame(r['data']['list']))
            code = r['code']
            if code == 11017:  # rate limited: wait out the window and retry
                time.sleep(61)
            else:
                break
        if r.get("data"):
            for i in r['data']['list']:
                if i['cost'] > 0:
                    li.append(
                        (
                            i['date'], i['ad_id'], i['adgroup_id'], i['cost'] / 100, i['view_count'],
                            i['valid_click_count'],
                            i['official_account_follow_count'], i['order_count'], i['order_amount'] / 100, account_id,
                            'MP'
                        )
                    )
        total_page = r['data']['page_info']['total_page']
        if page >= total_page:
            break
        else:
            page += 1
    # logging.info(li)
    # exit()
    if len(li) > 0:
        # TODO: confirm what adgroup_id / campaign_id are used for here
        li_df = pandas.DataFrame(li)
        li_df_g = li_df.groupby([0, 1, 9, 10])
        li_new = []
        for index, row in li_df_g.agg('sum').iterrows():
            new_row = row.tolist()
            new_row = list(index[0:2]) + new_row[1:] + list(index[2:])
            li_new.append(tuple(new_row))
        logging.info(f"{account_id} has ad cost rows: {len(li_new)} ")
        db.quchen_text.executeMany('replace into ad_cost_day values(%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s)', li_new)
    db.close()
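

# A toy illustration, not part of the original module, of the aggregation step
# above: MP cost rows are grouped by (date, ad_id, account_id, flag) and the
# remaining numeric columns are summed before being written to ad_cost_day.
# The row values below are made up for demonstration only.
def _example_mp_cost_aggregation():
    rows = [
        # date,        ad_id, adgroup_id, cost, views, clicks, follows, orders, amount, account_id, flag
        ('2021-04-09', 'ad1', 'g1', 1.0, 10, 2, 1, 0, 0.0, 111, 'MP'),
        ('2021-04-09', 'ad1', 'g2', 2.0, 20, 3, 0, 1, 5.0, 111, 'MP'),
    ]
    df = pandas.DataFrame(rows)
    for index, row in df.groupby([0, 1, 9, 10]).agg('sum').iterrows():
        # drop the (meaningless) summed adgroup_id, as ad_cost_day_mp does
        merged = list(index[0:2]) + row.tolist()[1:] + list(index[2:])
        logging.info(merged)
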

def daily_reports_get(access_token, account_id, level, start_date, end_date, fields):  # fetch WeChat (MP) daily report data
    interface = 'daily_reports/get'
    url = 'https://api.e.qq.com/v1.3/' + interface
    common_parameters = {
        'access_token': access_token,
        'timestamp': int(time.time()),
        'nonce': str(time.time()) + str(random.randint(0, 999999)),
        'fields': fields
    }
    parameters = {
        "account_id": account_id,
        "level": level,
        "date_range": {
            "start_date": start_date,
            "end_date": end_date
        },
        "page": 1,
        "page_size": 1000,
        "fields": []
    }
    parameters.update(common_parameters)
    for k in parameters:
        if type(parameters[k]) is not str:
            parameters[k] = json.dumps(parameters[k])
    while True:
        r = requests.get(url, params=parameters)
        if r.status_code == 200:
            break
        else:
            time.sleep(1)
            logging.info("request failed, waiting 1s..")
    return r.json()

def daily_qq_reports_get(access_token, account_id, compaign_id, level, start_date, end_date, fields):  # fetch GDT daily report data for one campaign
    interface = 'daily_reports/get'
    url = 'https://api.e.qq.com/v1.1/' + interface
    common_parameters = {
        'access_token': access_token,
        'timestamp': int(time.time()),
        'nonce': str(time.time()) + str(random.randint(0, 999999)),
        'fields': fields
    }
    parameters = {
        "account_id": account_id,
        "filtering": [
            {
                "field": "campaign_id",
                "operator": "EQUALS",
                "values": [compaign_id]
            }
        ],
        "level": level,
        "date_range": {
            "start_date": start_date,
            "end_date": end_date
        },
        "page": 1,
        "page_size": 1000,
        "fields": []
    }
    parameters.update(common_parameters)
    for k in parameters:
        if type(parameters[k]) is not str:
            parameters[k] = json.dumps(parameters[k])
    r = requests.get(url, params=parameters)
    return r.json()
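

# A usage sketch, not part of the original module: hypothetical token and
# account id showing how the report helpers above are expected to be called.
def _example_daily_reports():
    report = daily_reports_get(
        access_token='<ACCESS_TOKEN>',   # hypothetical token
        account_id=12345678,             # hypothetical MP advertiser account id
        level='REPORT_LEVEL_AD_WECHAT',  # same level string used by ad_cost_day_mp
        start_date='2021-04-09',
        end_date='2021-04-09',
        fields=('date', 'ad_id', 'cost', 'view_count'),
    )
    logging.info(report.get('data', {}).get('list', []))
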

def mysql_insert_adcreative(data):
    db = pymysql.connect('rm-bp1c9cj79872tx3aaro.mysql.rds.aliyuncs.com', 'superc', 'Cc719199895', 'quchen_text')
    cursor = db.cursor()
    sql = 'replace into adcreative (campaign_id,adcreative_id,adcreative_name,image_id,title,promoted_object_type,page_type,page_id,link_page_id,promoted_object_id) values (%s,%s,%s,%s,%s,%s,%s,%s,%s,%s)'
    try:
        cursor.executemany(sql, data)
        db.commit()
        logging.info('insert [adcreative] ' + str(len(data)))
    except:
        db.rollback()
        logging.info('insert [adcreative] failed')

if __name__ == '__main__':
    account_id = 19206910
    access_token = '89079ccc8db047b078a0108e36a7e276'
    #
    account_id2 = 14709511
    access_token2 = 'e87f7b6f860eaeef086ddcc9c3614678'
    get_ad_cost_day(account_id, access_token, 'MP', '2021-04-09', '2021-04-09')
    # get_adcreatives(account_id,access_token,'MP','3187867673','2021-04-09')