DataBaseOperation.py

  1. """
  2. @desc 数据库操作方法封装
  3. @auth chenkai
  4. @date 2020/11/19
  5. @py_version py3.6
  6. """
  7. import pymysql
  8. import logging as log
  9. import pandas as pd
  10. import time
  11. from model.log import logger
  12. log = logger()
  13. pd.set_option('display.max_columns', None)
  14. pd.set_option('display.width', 1000)
  15. MYSQL_DEBUG = 1
  16. class MysqlOperation:
  17. def __init__(self, host, user, passwd, db, port=3306):
  18. try:
  19. self.conn = pymysql.connect(host=host,
  20. user=user,
  21. passwd=passwd,
  22. db=db,
  23. charset='utf8mb4',
  24. port=port)
  25. self.cursor = self.conn.cursor()
  26. except Exception as e:
  27. log.info(e)
  28. def set_dict_cursor(self):
  29. """
  30. 设置字典形式取数据
  31. """
  32. self.cursor = self.conn.cursor(pymysql.cursors.DictCursor)
    def getData(self, sql, args=None):
        start = time.time()
        self.cursor.execute(sql, args=args)
        result = self.cursor.fetchall()
        if MYSQL_DEBUG:
            sql_str = sql % tuple(args) if args else sql
            log.info('sql: \n' + sql_str)
            log.info('sql cost: %s' % (time.time() - start))
        return result

    def execute(self, sql):
        start = time.time()
        self.cursor.execute(sql)
        self.conn.commit()
        if MYSQL_DEBUG:
            log.info('sql: \n' + sql)
            log.info('sql cost: %s' % (time.time() - start))

    def getOne(self, sql, args=None):
        result = self.getData(sql, args)
        return result[0][0]

    def getData_pd(self, sql, args=None):
        start = time.time()
        self.cursor.execute(sql, args=args)
        field_names = [i[0] for i in self.cursor.description]
        df = self.cursor.fetchall()
        df = pd.DataFrame(data=list(df), columns=field_names)
        if MYSQL_DEBUG:
            sql_str = sql % tuple(args) if args else sql
            log.info('sql: \n' + sql_str)
            log.info('sql cost: %s' % (time.time() - start))
        return df

    def insertData(self, sql, args=None):
        start = time.time()
        self.cursor.execute(sql, args=args)
        if MYSQL_DEBUG:
            sql_str = sql % tuple(args) if args else sql
            log.info('sql: \n' + sql_str)
            log.info('sql cost: %s' % (time.time() - start))
        self.conn.commit()

    def executeWithoutCommit(self, sql, args=None):
        return self.cursor.execute(sql, args=args)

    def commit(self):
        self.conn.commit()
    def insertorupdate(self, table, keys, tags, tagvalue, flag, *args):
        """
        :param table: table name
        :param keys: tuple of (composite) primary key column names
        :param tags: tuple of field column names
        :param tagvalue: field values
        :param flag: whether to log the generated SQL
        :param args: primary key values
        :return:
        """
        # INSERT ... SELECT ... WHERE NOT EXISTS: insert only when the key is not present yet.
        sql = "INSERT INTO " + table + " ("
        sql += ",".join(keys) + ","
        sql += ",".join(tags)
        sql += ") SELECT "
        sql += "%s," * len(keys)
        sql += ("%s," * len(tags))[:-1]
        sql += " FROM DUAL WHERE NOT EXISTS (SELECT id FROM " + table
        sql += " WHERE "
        for _ in keys:
            sql += _ + "=%s AND "
        sql = sql[:-4]
        sql += "LIMIT 1)"
        arg = list(args)
        arg.extend(tagvalue)
        arg.extend(list(args))
        rows = self.cursor.execute(sql, args=arg)
        if rows == 0:
            # Nothing was inserted, so the key already exists: fall back to UPDATE.
            sql = "UPDATE " + table + " SET "
            for _ in tags:
                sql += _ + "=%s,"
            sql = sql[:-1]
            sql += " WHERE "
            for _ in keys:
                sql += _ + "=%s AND "
            sql = sql[:-4]
            arg = []
            arg.extend(tagvalue)
            arg.extend(list(args))
            self.cursor.execute(sql, args=arg)
        if flag:
            log.info(sql % tuple(arg))
        self.conn.commit()
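
    # Illustration of the SQL insertorupdate generates for a call such as
    #   insertorupdate('demo_table', ('count_date', 'cid'), ('tag1',), [5], True, '2017-01-01', 10)
    # (the table and column names here are illustrative only, not part of any real schema):
    #   INSERT INTO demo_table (count_date,cid,tag1)
    #   SELECT %s,%s,%s FROM DUAL
    #   WHERE NOT EXISTS (SELECT id FROM demo_table WHERE count_date=%s AND cid=%s LIMIT 1)
    # followed, when no row was inserted, by
    #   UPDATE demo_table SET tag1=%s WHERE count_date=%s AND cid=%s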
    def _insertorupdate(self, table, keys, tags, tag_value, flag, key_value, update=False):
        if not update:
            sql = "INSERT INTO " + table + " ("
            sql += ",".join(keys) + ","
            sql += ",".join(tags)
            sql += ") SELECT "
            sql += "%s," * len(keys)
            sql += ("%s," * len(tags))[:-1]
            sql += " FROM DUAL WHERE NOT EXISTS (SELECT id FROM " + table
            sql += " WHERE "
            for _ in keys:
                sql += _ + "=%s AND "
            sql = sql[:-4]
            sql += "LIMIT 1)"
            arg = list(key_value)
            arg.extend(tag_value)
            arg.extend(list(key_value))
            rows = self.cursor.execute(sql, args=arg)
            if rows == 0:
                sql = "UPDATE " + table + " SET "
                for _ in tags:
                    sql += _ + "=%s,"
                sql = sql[:-1]
                sql += " WHERE "
                for _ in keys:
                    sql += _ + "=%s AND "
                sql = sql[:-4]
                arg = []
                arg.extend(tag_value)
                arg.extend(list(key_value))
                self.cursor.execute(sql, args=arg)
            if flag:
                log.info(sql % tuple(arg))
        else:
            sql = "UPDATE " + table + " SET "
            for _ in tags:
                sql += _ + "=%s,"
            sql = sql[:-1]
            sql += " WHERE "
            for _ in keys:
                sql += _ + "=%s AND "
            sql = sql[:-4]
            arg = []
            arg.extend(tag_value)
            arg.extend(list(key_value))
            self.cursor.execute(sql, args=arg)
            if flag:
                log.info(sql % tuple(arg))
    def _insert_on_duplicate(self, table, keys, tags, tag_value, flag, key_value):
        name_all = list(keys)
        name_all.extend(tags)
        arg = list(key_value)
        arg.extend(tag_value)
        arg.extend(tag_value)
        sql_name = '(' + ','.join(name_all) + ')'
        sql_value = '(' + ','.join(['%s'] * len(name_all)) + ')'
        sql_update = ','.join([_ + '=%s' for _ in tags])
        sql = """
            insert into %s
            %s
            VALUES %s
            ON duplicate key UPDATE %s
        """ % (table, sql_name, sql_value, sql_update)
        self.cursor.execute(sql, args=arg)
        if flag:
            log.debug(sql % tuple(arg))
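
    # Illustration of the statement _insert_on_duplicate builds (column names are
    # placeholders): for keys=('count_date', 'cid') and tags=('tag1', 'tag2') it becomes
    #   insert into demo_table (count_date,cid,tag1,tag2)
    #   VALUES (%s,%s,%s,%s)
    #   ON duplicate key UPDATE tag1=%s,tag2=%s
    # The bound arguments are key_value + tag_value + tag_value, because the tag values
    # appear once in VALUES and once more in the UPDATE clause.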
    def insertorupdatemany(self, table, keys, tags, tag_values, key_values, flag=False, unique_key=False, update=False):
        """
        :param table: table name
        :param keys: tuple of (composite) primary key column names
        :param tags: tuple of field column names
        :param tag_values: field value rows (list or pd.DataFrame)
        :param key_values: primary key value rows (list or pd.DataFrame)
        :param flag: whether to log the generated SQL
        :param unique_key: whether keys form a UNIQUE key on the table
        :return:
        Note: rough cost over an external network: rows / 50; intended for updates of 1000+ rows.
        """
        if isinstance(tag_values, pd.DataFrame):
            list_tag_value = [list(tag_values.iloc[_, :]) for _ in range(len(tag_values))]
        else:
            list_tag_value = list(tag_values)
        if isinstance(key_values, pd.DataFrame):
            list_key_value = [list(key_values.iloc[_, :]) for _ in range(len(key_values))]
        else:
            list_key_value = list(key_values)
        for _ in range(len(list_tag_value)):
            tag_value = list_tag_value[_]
            key_value = list_key_value[_]
            if unique_key:
                self._insert_on_duplicate(table, keys, tags, tag_value, flag, key_value)
            else:
                self._insertorupdate(table, keys, tags, tag_value, flag, key_value, update)
        self.conn.commit()
    def _check_repeat_key(self, key_list):
        tmp = list(map(lambda x: tuple(x), key_list))
        if len(tmp) == len(set(tmp)):
            return False
        else:
            last_data = -1
            repeat_key = set()
            for i in sorted(tmp):
                if last_data == i:
                    repeat_key.add(i)
                    if len(repeat_key) >= 10:
                        break
                last_data = i
            log.error('Reject repeated keys')
            log.error('repeat_key: %s' % repeat_key)
            return True

    def _convert_to_list(self, data):
        if isinstance(data, pd.DataFrame):
            # np.nan != np.nan, so this comparison replaces NaN values with None.
            list_data = [[None if x != x else x for x in data.iloc[_, :]] for _ in range(len(data))]
        else:
            list_data = list(data)
        return list_data
    def _get_exist_keys_index(self, table, keys, key_values, flag=False):
        list_sql_when = []
        list_tmp = []
        for i in range(len(key_values)):
            sql_when = """when (%s)=(%s) then %s""" % (','.join(keys), ','.join(['%s'] * len(key_values[i])), i)
            list_sql_when.append(sql_when)
            list_tmp.extend(key_values[i])
        list_sql_condition = []
        for i in range(len(key_values)):
            row_condition_list = map(lambda x: '%s = %%s' % x, keys)
            sql_condition = """(%s)""" % ' and '.join(row_condition_list)
            list_sql_condition.append(sql_condition)
            list_tmp.extend(key_values[i])
        sql_where = ' or '.join(list_sql_condition)
        sql_case = '\n'.join(list_sql_when)
        sql = """
            select
            case
            %s
            end
            from %s
            where %s
        """ % (sql_case, table, sql_where)
        if flag:
            log.info(sql % tuple(list_tmp))
        self.cursor.execute(sql, tuple(list_tmp))
        result = self.cursor.fetchall()
        return map(lambda x: x[0], result)
    def insertorupdatemany_v2(self, table, keys, tags, tag_values, key_values, flag=False, split=80):
        """
        Upsert multiple rows (insert when the key does not exist yet, update when it does).
        :param table: table name
        :param keys: tuple of (composite) primary key column names
        :param tags: tuple of field column names
        :param tag_values: field value rows (list or pd.DataFrame)
        :param key_values: primary key value rows (list or pd.DataFrame)
        :param flag: whether to log the generated SQL
        :param split: batch size threshold
        :return:
        Note: rough cost over an external network: rows^2 / 50000; rows are processed in batches of `split`.
        """
        if not isinstance(tag_values, (tuple, list, pd.DataFrame)):
            log.error('Type Error: tag_values must be a tuple, list or pd.DataFrame')
            return
        if len(tag_values) > split:
            # Recurse on slices of at most `split` rows.
            length = len(tag_values)
            for i in range(0, length, split):
                start, finish = i, i + split
                self.insertorupdatemany_v2(table, keys, tags, tag_values[start:finish], key_values[start:finish], flag, split=split)
            return
        if len(key_values) == 0 or len(tag_values) == 0:
            log.debug('insert or update 0 rows')
            return
        tag_values = self._convert_to_list(tag_values)
        key_values = self._convert_to_list(key_values)
        assert not self._check_repeat_key(key_values), 'duplicate keys in key_values'
        exist_key_index = list(self._get_exist_keys_index(table, keys, key_values, flag))
        new_key_index = list(set(range(len(key_values))) - set(exist_key_index))
        update_keys = list(map(lambda x: key_values[x], exist_key_index))
        update_tags = list(map(lambda x: tag_values[x], exist_key_index))
        insert_keys = list(map(lambda x: key_values[x], new_key_index))
        insert_tags = list(map(lambda x: tag_values[x], new_key_index))
        self.insert_many(table=table,
                         keys=keys,
                         tags=tags,
                         tag_values=insert_tags,
                         key_values=insert_keys,
                         flag=flag)
        self.update_many(table=table,
                         keys=keys,
                         tags=tags,
                         tag_values=update_tags,
                         key_values=update_keys,
                         flag=flag,
                         split=split)
    def insertorupdatemany_v3(self, df, table, keys, tags, flag=False, split=80):
        # Convenience wrapper: take key and tag columns straight from a DataFrame.
        self.insertorupdatemany_v2(
            table=table,
            keys=keys,
            tags=tags,
            tag_values=df[list(tags)],
            key_values=df[list(keys)],
            flag=flag,
            split=split
        )
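
    # A hypothetical DataFrame-driven call (table and column names are illustrative only):
    #   df = pd.DataFrame({'count_date': ['2017-01-01', '2017-01-02'],
    #                      'cid': [10, 20],
    #                      'tag1': [1, 2],
    #                      'tag2': ['a', 'b']})
    #   op.insertorupdatemany_v3(df, table='demo_table',
    #                            keys=['count_date', 'cid'], tags=['tag1', 'tag2'])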
    def _get_s_format(self, data):
        """
        Args:
            data: [[featureA1, featureB1, ...], [featureA2, featureB2, ...], ...]
        Returns:
            a format string of %s placeholders and the flattened list of values
        Example:
            [['2017-07-01', 78], ['2017-07-01', 1]] ->
            ('((%s, %s), (%s, %s))', ['2017-07-01', 78, '2017-07-01', 1])
        """
        list_tmp_s = []
        values = []
        for _ in data:
            tmp_s = ','.join(len(_) * ['%s'])
            values.extend(_)
            if len(_) > 1:
                tmp_s = '(' + tmp_s + ')'
            list_tmp_s.append(tmp_s)
        format_s = '(' + ','.join(list_tmp_s) + ')'
        return format_s, values
    def delete_by_key(self, table, keys, key_values, flag=False):
        """
        Args:
            table: table name
            keys: tuple of (composite) primary key column names
            key_values: primary key value rows (list or pd.DataFrame)
            flag: whether to log the generated SQL
        Examples:
            delete_by_key('table_test', keys=['date'], key_values=[['2017-07-01'], ['2017-07-02']], flag=False)
            delete_by_key('table_test', keys=['date'], key_values=['2017-07-01'], flag=False)
        """
        if len(key_values) == 0:
            return
        # Check the DataFrame case first so key_values[0] is never evaluated on a DataFrame.
        if not (isinstance(key_values, pd.DataFrame) or isinstance(key_values[0], (list, tuple))):
            key_values_list = [key_values]
        else:
            key_values_list = self._convert_to_list(key_values)
        sql_keys = '(' + ','.join(keys) + ')'
        contact_s, values_s = self._get_s_format(key_values_list)
        sql_del = """
            delete from %s
            where %s in %s
        """ % (table, sql_keys, contact_s)
        if flag:
            log.debug(sql_del % tuple(values_s))
        self.cursor.execute(sql_del, tuple(values_s))
        self.conn.commit()
    def insert_many(self, table, keys, tags, tag_values, key_values, flag=False, split=80):
        """
        Insert multiple rows directly.
        :param table: table name
        :param keys: tuple of (composite) primary key column names
        :param tags: tuple of field column names
        :param tag_values: field value rows (list or pd.DataFrame)
        :param key_values: primary key value rows (list or pd.DataFrame)
        :param flag: whether to log the generated SQL
        :return:
        Examples: see insertorupdatemany_v2
            insert into table
            (count_date, cid, tag1, tag2)
            values ('2017-01-01', 10, 1, 'a'), ('2017-01-02', 20, 2, 'b'), ...
        """
        if len(key_values) == 0 or len(tag_values) == 0:
            log.debug('insert 0 rows')
            return
        if len(tag_values) > split:
            length = len(tag_values)
            for i in range(0, length, split):
                start, finish = i, i + split
                self.insert_many(table, keys, tags, tag_values[start:finish], key_values[start:finish], flag, split=split)
            return
        tag_values = self._convert_to_list(tag_values)
        key_values = self._convert_to_list(key_values)
        columns = list(keys) + list(tags)
        feature_total = "(" + ",".join(columns) + ")"
        tmp_s = "(" + ",".join(["%s"] * len(columns)) + ")"
        tmp_s_concat = ",\n".join([tmp_s] * len(key_values))
        sql_insert = """
            Insert into %s
            %s
            values %s""" % (table, feature_total, tmp_s_concat)
        value_insert = []
        for _ in zip(key_values, tag_values):
            value_insert.extend(_[0] + _[1])
        if flag:
            log.debug(sql_insert % tuple(value_insert))
        t0 = time.time()
        self.cursor.execute(sql_insert, tuple(value_insert))
        log.debug('insert %s rows, cost: %s' % (len(key_values), time.time() - t0))
        self.conn.commit()
    def update_many(self, table, keys, tags, tag_values, key_values, flag=False, split=80):
        """
        Update multiple rows (rows whose keys do not exist are NOT inserted).
        :param table: table name
        :param keys: tuple of (composite) primary key column names
        :param tags: tuple of field column names
        :param tag_values: field value rows (list or pd.DataFrame)
        :param key_values: primary key value rows (list or pd.DataFrame)
        :param flag: whether to log the generated SQL
        :param split: batch size
        :return:
        Examples: see insertorupdatemany_v2
            # single-row update: set tag1=1, tag2='a' where (count_date, cid) = ('2017-01-01', 10)
            update table
            set tag1=1, tag2='a'
            where (count_date, cid) = ('2017-01-01', 10)
            # combined multi-row update:
            #   tag1=1, tag2='a' for (count_date, cid) = ('2017-01-01', 10);
            #   tag1=2, tag2='b' for (count_date, cid) = ('2017-01-02', 20);
            update table
            set tag1 = case
                when (count_date, cid)=('2017-01-01', 10) then 1
                when (count_date, cid)=('2017-01-02', 20) then 2
                ...
            ,
            tag2 = case
                when (count_date, cid)=('2017-01-01', 10) then 'a'
                when (count_date, cid)=('2017-01-02', 20) then 'b'
                ...
            where (count_date, cid)=('2017-01-01', 10) or (count_date, cid)=('2017-01-02', 20) or ...
        """
        if len(tag_values) > split:
            length = len(tag_values)
            for i in range(0, length, split):
                start, finish = i, i + split
                self.update_many(table, keys, tags, tag_values[start:finish], key_values[start:finish], flag, split=split)
            return
        if len(key_values) == 0 or len(tag_values) == 0:
            log.debug('update 0 rows')
            return
        tag_values = self._convert_to_list(tag_values)
        key_values = self._convert_to_list(key_values)
        if self._check_repeat_key(key_values):
            return
        update_value = []
        sql_keys = ','.join(keys)
        if len(keys) > 1:
            sql_keys = '(' + sql_keys + ')'
        sql_key_values = ','.join(['%s'] * len(keys))
        if len(keys) > 1:
            sql_key_values = '(' + sql_key_values + ')'
        sql_set_list = []
        for i in range(len(tags)):
            sql_when_list = []
            for j in range(len(tag_values)):
                sql_when = """when %s=%s then %s """ % (sql_keys, sql_key_values, '%s')
                update_value.extend(key_values[j])
                update_value.append(tag_values[j][i])
                sql_when_list.append(sql_when)
            sql_when_concat = '\n\t'.join(sql_when_list)
            sql_set = """%s = case \n\t %s\n end""" % (tags[i], sql_when_concat)
            sql_set_list.append(sql_set)
        for _ in key_values:
            update_value.extend(_)
        sql_set_concat = ',\n'.join(sql_set_list)
        list_sql_condition = []
        for i in range(len(key_values)):
            row_condition_list = map(lambda x: '%s = %%s' % x, keys)
            sql_condition = """(%s)""" % ' and '.join(row_condition_list)
            list_sql_condition.append(sql_condition)
        sql_where = ' or '.join(list_sql_condition)
        sql = """update %s\n set %s\n where %s""" % (table, sql_set_concat, sql_where)
        if flag:
            log.info(sql % tuple(update_value))
        t0 = time.time()
        self.cursor.execute(sql, tuple(update_value))
        self.conn.commit()
        log.debug('update %s rows, cost: %s' % (len(key_values), time.time() - t0))
    def getColumn(self, table, flag=0):
        """Return the column names of a table: a comma-separated string by default, a list when flag is truthy."""
        sql = "SELECT `COLUMN_NAME` FROM `INFORMATION_SCHEMA`.`COLUMNS` " \
              "WHERE `TABLE_NAME`=%s ORDER BY ordinal_position"
        # Bind the table name as a parameter instead of formatting it into the SQL string.
        self.cursor.execute(sql, (table,))
        columns = [row[0] for row in self.cursor.fetchall()]
        if flag:
            return columns
        return ','.join(columns)
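

# --- Usage sketch (not part of the original class) --------------------------
# A minimal example of how MysqlOperation might be used. The connection
# parameters, the table name `demo_table`, and its columns (`count_date`,
# `cid`, `tag1`, `tag2`) are placeholders and must be adapted to a real schema.
if __name__ == '__main__':
    op = MysqlOperation(host='127.0.0.1', user='root', passwd='secret', db='test')

    # Parameterized read; args are bound by pymysql rather than formatted into the SQL.
    rows = op.getData("SELECT * FROM demo_table WHERE count_date = %s", args=['2017-01-01'])
    print(rows)

    # The same query returned as a DataFrame.
    df = op.getData_pd("SELECT * FROM demo_table WHERE count_date = %s", args=['2017-01-01'])
    print(df.head())

    # Batch upsert: rows keyed by (count_date, cid), updating tag1 and tag2.
    op.insertorupdatemany_v2(
        table='demo_table',
        keys=['count_date', 'cid'],
        tags=['tag1', 'tag2'],
        key_values=[['2017-01-01', 10], ['2017-01-02', 20]],
        tag_values=[[1, 'a'], [2, 'b']],
        flag=True,
    )

    # Delete by composite key.
    op.delete_by_key('demo_table', keys=['count_date', 'cid'],
                     key_values=[['2017-01-01', 10]], flag=True)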