bitscn.com
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""Master-side backup script for setting up MySQL 4.0 replication.

MySQL 5.0+ can take a replication-consistent backup simply by running
mysqldump with -f, --master-data=2 and --single-transaction, so the slave
can import the dump and start replicating from the binlog/pos recorded at
the top of the file.  MySQL 4.0 lacks that workflow, so this script
emulates it on the master:

    1. FLUSH TABLES WITH READ LOCK  -- freeze writes
    2. FLUSH LOGS                   -- rotate the binlog so the slave can
                                       start from the new log's beginning
    3. start mysqldump | bzip2 in a background thread
    4. release the lock once the dump has started writing
    5. rsync the compressed dump to the standby server

It also works on 5.0+, where it is simply unnecessary.

NOTE(review): the read lock is released as soon as the dump file is
non-empty, not when the dump finishes; --single-transaction keeps the
snapshot consistent for InnoDB, but MyISAM tables are not protected
after the unlock -- confirm the storage engines in use.
"""
import os
import sys
import time
import subprocess
import threading

import MySQLdb


class mysql_dump():
    """Consistent master backup: lock -> flush logs -> dump -> rsync."""

    def __init__(self):
        # Timestamped target file, e.g. /root/alldata201301011200.bz2
        self.dumpname = "/root/alldata%s.bz2" % time.strftime("%Y%m%d%H%M")
        self.stat_ip = "192.168.0.39"          # standby server (rsync daemon)
        self.logfile = "/root/mysql_dump.log"
        self.user = "root"
        self.passwd = "1q2w3e4r"

    def log_w(self, text):
        """Append a timestamped line to the log file."""
        now = time.strftime("%Y-%m-%d %H:%M:%S")
        # 'with' closes the handle even on error (original leaked it on write failure)
        with open(self.logfile, "a+") as f:
            f.write("%s\t%s\n" % (now, text))

    def _wait(self, proc, done_text, tag):
        """Poll *proc* every 30s until it exits.

        Returns True when the process exited with status 0 (and logs
        *done_text*), False on any non-zero exit.
        """
        while True:
            ret = proc.poll()
            if ret == 0:
                print(done_text)
                self.log_w(done_text)
                return True
            elif ret is None:
                print('%s running' % tag)
                time.sleep(30)
            else:
                print(proc.pid, 'term')
                return False

    def dump(self):
        """Run mysqldump piped through bzip2; rsync only if it succeeded."""
        # -A: all databases.  --single-transaction gives a consistent
        # snapshot for transactional tables while the server keeps serving.
        cmd = ("/usr/local/mysql/bin/mysqldump -A -q -e --add-drop-table "
               "--add-locks --extended-insert --quick --no-autocommit "
               "--single-transaction -u%s -p%s | bzip2 -2 > %s"
               % (self.user, self.passwd, self.dumpname))
        print(time.strftime("%Y-%m-%d %H:%M:%S"))
        text = "start mysqldump,please wait ..."
        print(text)
        self.log_w(text)
        proc = subprocess.Popen(cmd, shell=True)
        # Bug fix: the original rsync'ed the dump even after an abnormal
        # mysqldump exit; only ship a dump that completed cleanly.
        if self._wait(proc, "mysqldump complete", "mysqldump"):
            self.rsync()

    def rsync(self):
        """Push the finished dump to the standby server's rsync module."""
        cmd = "rsync -az %s %s::asktao_db/db_back/" % (self.dumpname, self.stat_ip)
        text = "start rsync to server(%s) ,please wait ..." % self.stat_ip
        print(text)
        self.log_w(text)
        proc = subprocess.Popen(cmd, shell=True)
        self._wait(proc, "rsync complete", "rsync")

    def lock(self):
        """Take the global read lock, rotate binlogs, start the dump,
        and release the lock once the dump has started writing."""
        try:
            conn = MySQLdb.connect(host='127.0.0.1', user='root',
                                   passwd='1q2w3e4r', charset='utf8',
                                   connect_timeout=5)
            cursor = conn.cursor()
            text = "flush tables with read lock"
            print(text)
            self.log_w(text)
            cursor.execute("flush tables with read lock")
            text = "flush logs"
            print(text)
            self.log_w(text)
            cursor.execute("flush logs")
            # Dump runs in the background while this connection holds the lock.
            d = threading.Thread(target=self.dump, args=())
            d.start()
            # Unlock as soon as bzip2 has written the first bytes: the dump's
            # consistent snapshot is established by then (see module docstring).
            while True:
                if os.path.isfile(self.dumpname) and os.path.getsize(self.dumpname) > 0:
                    text = "unlock tables"
                    print(text)
                    self.log_w(text)
                    cursor.execute("unlock tables")
                    break
                time.sleep(1)  # bug fix: original busy-waited at 100% CPU
        except MySQLdb.Error as e:
            # Bug fix: e.args is a tuple; str() it so log_w's string
            # concatenation does not raise TypeError.
            text = str(e.args)
            print(text)
            self.log_w(text)

    def work(self):
        """Entry point: run the whole lock+dump+rsync job in a thread."""
        t = threading.Thread(target=self.lock, args=())
        t.start()


if __name__ == "__main__":
    boss = mysql_dump()
    boss.work()