一、Migrating Access to MySQL on Windows
Migrating Access data to a MySQL (5.5) database on Windows

1. Concrete approaches
   1. Export/import via Excel: in Access, select the table, then File -> Export -> Save as type -> Excel -> enter a file name. In Navicat for MySQL, open the target table's Import Wizard -> choose the Excel type -> select the file just exported -> Next -> ... -> map the source columns to the target columns -> Start. Precondition: the field data types of the two tables must match. Primary keys are more troublesome; convert the data first and adjust the primary key afterwards.
   2. Connect to the MySQL database via ODBC; ODBC keeps Access and MySQL synchronized in real time.
   Comparing 1 and 2: 1 is a manual import, which gives more control, but the two sides do not stay in sync; 2 is the better option, but foreign-key relationships need attention.
2. If the Access tables have foreign-key relationships
   1. How to set a foreign key when building tables in Access: manually create a one-to-many relationship with referential integrity between the two tables (with referential integrity enabled, changing or deleting a record in one table is propagated to the other). Note: before the foreign key can be set, the related field on the "one" side must be the primary key or have a unique index.
3. Can Django work directly against the tables imported from Access into MySQL? Yes, using pymysql and reverse-generating the ORM models from the existing tables:
   - The Access tables do not need to be renamed specially
   - python manage.py inspectdb > models.py reverse-generates models.py
   - Replace the models.py file
   - python manage.py migrate
   - Use the ORM normally
   Problems found during the migration:
   - Garbled characters when creating the tables: remove fields containing Chinese, then add foreign keys and other relationships by hand
   - First generate the models file the normal (forward) way
   - Then paste the tables whose relationships are already set up directly into this database
   - python manage.py inspectdb > models.py
   - Running migrate forward again then reports duplicate errors (already exists)
   - Work around it with python manage.py migrate myapp --fake
   - Finally add the foreign keys on top of the existing tables (see the sketch at the end of this section), e.g. between Dbtestcase and App01ProjFunc
4. Foreign-key relationships added by hand ... are they actually usable?
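A minimal sketch of what that manual foreign-key step might look like on top of the inspectdb output; only the table names Dbtestcase and App01ProjFunc come from the notes above, the field and column names are made-up assumptions:

from django.db import models

class App01ProjFunc(models.Model):
    func_name = models.CharField(max_length=100)   # hypothetical field

    class Meta:
        managed = False                  # table already exists in MySQL; Django must not create or drop it
        db_table = 'app01_proj_func'     # assumed table name


class Dbtestcase(models.Model):
    case_name = models.CharField(max_length=200)   # hypothetical field
    # inspectdb normally emits a plain integer column here; replacing it by hand
    # with a ForeignKey is what makes normal ORM joins work.
    proj_func = models.ForeignKey(App01ProjFunc, models.DO_NOTHING,
                                  db_column='proj_func_id')

    class Meta:
        managed = False
        db_table = 'dbtestcase'          # assumed table name

With managed = False Django will not try to create or drop these tables, and python manage.py migrate myapp --fake only marks the migration as applied, which matches the "already exists" workaround above.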
二、index page
index.html design notes

1. https://icons8.com/icons/set/find — icon set
2. 71.0.3578.98
3. jquery.flexisel.js is a responsive carousel plugin: it supports slideshows, image galleries, article showcases and so on; here the plugin is integrated into WordPress by hand, and the site below provides a DEMO download.
   - http://www.511yj.com/wordpress-flexisel-js.html
4. jqgrid
5. The test-bench reservation project needs the data table server_calenda to be created manually before it can be enabled
6. gritter — border notification plugin
7. Ubuntu_16_04
   - D:\Documents\Virtual Machines\Ubuntu_16_04
8. Using a cloud server: https://mp.weixin.qq.com/s/-9pqpz5FKTgPUi_1iRYnMQ

Notes:
- table data-grid documentation — layui.table
- redis — with many users, use a session server; for frequently visited pages such as the home page, use a cache server
- JWT https://www.jianshu.com/p/576dbf44b2ae — the first part is called the header, the second the payload (like the cargo carried by a plane), the third the signature; see the Python sketch at the end of this section
- gritter — display advertisements the way a logo is displayed

to-do:
1. Design the back-office admin pages
2. Finish the tables reverse-generated with inspectdb and set up the foreign-key relationships so queries work normally
   - consider optimizing the table design
3. Install redis and mysql in the virtual machine and get them running
4. Browse the ASP pages online with the rotating PPT display
   - jQuery: adding a style attribute to future (dynamically created) elements — https://zhidao.baidu.com/question/431431850462605412.html
5. Summary of handling line breaks in an HTML textarea:

<script>
// replace every <br /> tag in the textarea with a newline
var haha = document.getElementById("SendTextArea").value;
haha = haha.replace(/<br \/>/g, '\n');
document.getElementById("SendTextArea").value = haha;
</script>
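A minimal sketch of that three-part JWT structure, assuming the PyJWT library (pip install pyjwt); the secret and payload below are made up for illustration:

import jwt  # PyJWT

# encode: the result is "<header>.<payload>.<signature>" -- three base64url
# sections joined by dots
token = jwt.encode({"user_id": 1}, "demo-secret", algorithm="HS256")
if isinstance(token, bytes):          # PyJWT < 2.0 returns bytes
    token = token.decode("utf-8")

header_b64, payload_b64, signature_b64 = token.split(".")
print(header_b64)     # header: algorithm and token type, e.g. {"alg": "HS256", "typ": "JWT"}
print(payload_b64)    # payload: the claims being carried, here {"user_id": 1}
print(signature_b64)  # signature: HMAC-SHA256 over header.payload with the secret

# decoding verifies the signature and returns the payload
print(jwt.decode(token, "demo-secret", algorithms=["HS256"]))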
三、Automated crawling with Selenium
from selenium import webdriver
import time

options = webdriver.ChromeOptions()
prefs = {
    'profile.default_content_settings.popups': 0,       # 0 disables pop-up windows
    'download.default_directory': 'D:\\chrome_downdir'   # default download directory
}
options.add_experimental_option('prefs', prefs)
driver = webdriver.Chrome(chrome_options=options)

driver.get('http://www.baidu.com/')
length = len(driver.find_elements_by_tag_name("a"))
print(length)

f = open('news.log', mode='a+')
for i in range(length):
    # re-locate the links on every iteration: the elements found before a click
    # become stale after driver.back()
    links = driver.find_elements_by_tag_name("a")
    if i >= len(links):
        break
    link = links[i]
    href = link.get_attribute("href")
    if href and len(href) >= 33:          # skip empty or very short (anchor-only) links
        f.write(str(i) + ':' + href + "\n")
        link.click()
        time.sleep(3)
        driver.back()
f.close()
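An optional variation, sketched here as an assumption rather than part of the original script: Selenium's WebDriverWait can replace the fixed time.sleep(3) above by blocking only until the elements we need are present:

from selenium import webdriver
from selenium.webdriver.common.by import By
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.support import expected_conditions as EC

driver = webdriver.Chrome()
driver.get('http://www.baidu.com/')

# wait up to 10 seconds for at least one <a> element instead of sleeping blindly
wait = WebDriverWait(driver, 10)
first_link = wait.until(EC.presence_of_element_located((By.TAG_NAME, "a")))
print(first_link.get_attribute("href"))
driver.quit()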
四、Adding a PPT viewing feature to the page
<div class="pdf">
    <iframe id="pdf_page" name="pdf_page" style="width:1400px;height:800px"></iframe>
</div>
<script>
<%if Request.QueryString("id") = "17113005" then%>
    $(document).ready(function () {
        var url = "images/media/11111.pdf"; // the url can be switched dynamically here -- the key point is using the iframe
        $("#pdf_page").attr("src", url);
        $(".pdf").media();
    });
<%elseif Request.QueryString("id") = "17113004" then%>
    $(document).ready(function () {
        var url = "images/media/2222.pdf"; // same pattern, a different PDF for this id
        $("#pdf_page").attr("src", url);
        $(".pdf").media();
    });
<%end if%>
</script>
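If this page is later moved to the Django backend discussed in section 一, the same id-to-PDF switch can live in a view; everything below (view name, template name, URL layout) is a hypothetical sketch, only the ids and PDF paths come from the ASP code above:

from django.http import Http404
from django.shortcuts import render

# query-string id -> PDF path, taken from the ASP branches above
PDF_BY_ID = {
    "17113005": "images/media/11111.pdf",
    "17113004": "images/media/2222.pdf",
}

def ppt_view(request):
    # e.g. /ppt/?id=17113005
    pdf_url = PDF_BY_ID.get(request.GET.get("id"))
    if pdf_url is None:
        raise Http404("unknown id")
    # ppt.html would set <iframe id="pdf_page" src="{{ pdf_url }}"> just like the markup above
    return render(request, "ppt.html", {"pdf_url": pdf_url})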
五、Learning web crawling
1. commonUtils.py

# -*- coding: UTF-8 -*-
import urllib2

# Read the configuration file and fill dataDict with the CANData, VehicleData
# and DataPath lists
def readConfigFile(filename, dataDict):
    CANDataList = []
    VehicleDataList = []
    DataPathList = []
    configFile = None
    try:
        configFile = file(filename)
        while True:
            line = configFile.readline()
            if len(line) == 0:
                break
            if line.startswith("CANData"):
                tmpList = line.split("=")[1].split(",")
                for i in tmpList:
                    CANDataList.append(i.strip())
                dataDict["CANData"] = CANDataList
            if line.startswith("VehicleData"):
                tmpList = line.split("=")[1].split(",")
                for i in tmpList:
                    VehicleDataList.append(i.strip())
                dataDict["VehicleData"] = VehicleDataList
            if line.startswith("DataPath"):
                tmpList = line.split("=")[1].split(",")
                for i in tmpList:
                    DataPathList.append(i.strip())
                dataDict["DataPath"] = DataPathList
    except:
        print "Exception: readConfigFile"
    else:
        print "Read config file successfully"
    finally:
        if configFile:
            configFile.close()

# Install a urllib2 opener that goes through the authenticated HTTP proxy
def proxy_connect():
    proxy_handler = urllib2.ProxyHandler({"http": "http://proxy.baidu.com:8080/"})
    password_mgr = urllib2.HTTPPasswordMgrWithDefaultRealm()
    proxy_auth_handler = urllib2.ProxyBasicAuthHandler(password_mgr)
    proxy_auth_handler.add_password(None, "http://proxy.baidu.com:8080", "w000baidu", "baidu@123,.,")
    opener = urllib2.build_opener(proxy_handler, proxy_auth_handler)
    urllib2.install_opener(opener)

# Dump the fetched page source to htmlFile.txt
def createHtmlFile(content):
    hFile = None
    try:
        hFile = file("htmlFile.txt", "w")
        hFile.write(content)
    except:
        print "Exception: htmlFile"
    else:
        print "htmlFile.txt is ok"
    finally:
        if hFile:
            hFile.close()

# Extract the .zip file names from a saved directory-listing page
def getZipList(filename):
    zipList = []
    hFile = None
    try:
        hFile = file(filename)
        while True:
            line = hFile.readline()
            if len(line) == 0:
                break
            if ".zip" in line:
                zipName = line.split('">')[0].split("/")[-1]
                zipList.append(zipName)
    except:
        print "Exception: getZipList"
        zipList = []
    else:
        print "zipList is ok"
    finally:
        if hFile:
            hFile.close()
    return zipList
2. conf

# -*- coding: UTF-8 -*-
# CAN channels the server uses to classify the data, separated by commas
# e.g.: CANData = BMS_CHARGE, BmuInner, PowertraintSubnet, SmartSubnet
CANData = BmsCharge, ChargerCAN, BmuInner, BmuInnerCAN, PowertraintSubnet, SmartSubnet, PowertrainSubnet, EnergySubnet, OriginalSubnet

# Vehicles whose data should be fetched, separated by commas
# e.g.: VehicleData = 1st_car, 2nd_car, 3rd_car
VehicleData = 1st_car, 2nd_car, secondmodelx, modelx

# Path on the server where the data is stored
# e.g.: DataPath = /opt/BMS_Vehicle_Data/
DataPath = /home/bms/BMS_Vehicle_Data

3. htmlFile.txt

<HTML><HEAD><LINK HREF="jetty-dir.css" REL="stylesheet" TYPE="text/css"/><TITLE>Directory: /MON/secondmodelx/</TITLE></HEAD><BODY>
<H1>Directory: /MON/secondmodelx/</H1>
<TABLE BORDER=0>
<TR><TD><A HREF="/MON/secondmodelx/../">Parent Directory</A></TD><TD></TD><TD></TD></TR>
</TABLE>
</BODY></HTML>

4. index.html

<!DOCTYPE html>
<!--STATUS OK-->
<html>
<head>
<meta http-equiv=content-type content=text/html;charset=utf-8>
<meta http-equiv=X-UA-Compatible content=IE=Edge>
<meta content=always name=referrer>
<link rel=stylesheet type=text/css href=http://s1.bdstatic.com/r/www/cache/bdorz/baidu.min.css>
<title>百度一下,你就知道</title></head>
<body link=#0000cc>
<div id=wrapper> <div id=head> <div class=head_wrapper> <div class=s_form> <div class=s_form_wrapper>
<div id=lg><img hidefocus=true src=//www.baidu.com/img/bd_logo1.png width=270 height=129></div>
<form id=form name=f action=//www.baidu.com/s class=fm><input type=hidden name=bdorz_come value=1> <input type=hidden name=ie value=utf-8> <input type=hidden name=f value=8> <input type=hidden name=rsv_bp value=1> <input type=hidden name=rsv_idx value=1> <input type=hidden name=tn value=baidu><span class="bg s_ipt_wr"><input id=kw name=wd class=s_ipt value maxlength=255 autocomplete=off autofocus></span><span class="bg s_btn_wr"><input type=submit id=su value=百度一下 class="bg s_btn"></span></form>
</div> </div>
<div id=u1><a href=http://news.baidu.com name=tj_trnews class=mnav>新闻</a> <a href=http://www.hao123.com name=tj_trhao123 class=mnav>hao123</a> <a href=http://map.baidu.com name=tj_trmap class=mnav>地图</a> <a href=http://v.baidu.com name=tj_trvideo class=mnav>视频</a> <a href=http://tieba.baidu.com name=tj_trtieba class=mnav>贴吧</a> <noscript><a href=http://www.baidu.com/bdorz/login.gif?login&tpl=mn&u=http%3A%2F%2Fwww.baidu.com%2f%3fbdorz_come%3d1 name=tj_login class=lb>登录</a></noscript>
<script>document.write('<a href="http://www.baidu.com/bdorz/login.gif?login&tpl=mn&u=' + encodeURIComponent(window.location.href + (window.location.search === "" ? "?"
: "&") + "bdorz_come=1") + '" name="tj_login" class="lb">登录</a>');</script> <a href=//www.baidu.com/more/ name=tj_briicon class=bri style="display: block;">更多产品</a></div> </div> </div> <div id=ftCon> <div id=ftConw><p id=lh><a href=http://home.baidu.com>关于百度</a> <a href=http://ir.baidu.com>About Baidu</a></p> <p id=cp>©2017 Baidu <a href=http://www.baidu.com/duty/>使用百度前必读</a> <a href=http://jianyi.baidu.com/ class=cp-feedback>意见反馈</a> 京ICP证030173号 <img src=//www.baidu.com/img/gs.gif></p></div> </div> </div> </body> </html> 5.synchronizeData.py # -*- coding: UTF-8 -*- #!/usr/bin/python # Filename : obtainData.py import urllib2, sys, os, time import commonUtils def main(url, pathList, oldZipList, bakPath): try: #建立proxy连接 commonUtils.proxy_connect() # 把url页面源代码写入txt文本,并提取zip包名 f = urllib2.urlopen(url) content = f.read() commonUtils.createHtmlFile(content) zipNameList = commonUtils.getZipList("htmlFile.txt") zipList = [] #排除已经获取的zip包名 for i in zipNameList: logName = i[:-4] + ".log" if logName not in oldZipList: zipList.append(i) if len(zipList) == 0: print "There is no vehicle data to be synchronied." return #下载zip包并解压 for zipName in zipList: try: zipUrl = url + "/" + zipName path = None g = urllib2.urlopen(zipUrl) data = g.read() for i in pathList: if zipName.split("_")[0] in i: path = i break serverPath = path + zipName with open(serverPath, "wb") as code: code.write(data) t = 0 while True: if os.system("unzip -tq %s" % serverPath) == 0: if os.system("unzip -q %s -d %s" % (serverPath, path)) == 0: if os.system("rm %s" % serverPath) == 0: print "Succeeded in unzip and removing the zip." else: print "Failed to upzip %s" % serverPath break else: time.sleep(1) t += 1 print "time.sleep %ds" % t if t > 10: os.system("mv %s %s" % (serverPath, bakPath)) break except: if os.path.isfile(serverPath): print "An exception occurs when getting %s, so delete the zip." % serverPath os.system("rm -r %s" % serverPath) except: print "There is an exception when synchronizing data." 
if __name__ == "__main__":
    dataDict = {}
    sourceUrl = "http://1.2.3.4:8084/"
    while True:
        time.sleep(5)
        # read the configuration file
        commonUtils.readConfigFile("conf", dataDict)
        print dataDict
        CANDataList = dataDict["CANData"]
        VehicleDataList = dataDict["VehicleData"]
        DataPath = dataDict["DataPath"][0]
        # fetch the data according to the configuration file
        for vehicle in VehicleDataList:
            bakPath = DataPath + os.sep + vehicle + os.sep + "bak" + os.sep
            if not os.path.isdir(bakPath):
                os.makedirs(bakPath)
            pathList = []
            oldZipList = []
            targetUrl = sourceUrl + "MON/" + vehicle
            for CAN in CANDataList:
                path = DataPath + os.sep + vehicle + os.sep + CAN + os.sep
                if not os.path.isdir(path):
                    os.makedirs(path)
                pathList.append(path)
                # collect the names of the zip packages that were already fetched
                oldZipList.extend(os.listdir(path))
                # handle the zip package names that were already fetched (old approach, commented out)
                #oldZipFile = vehicle + "_" + CAN
                #oldZipList = []
                #if os.path.exists(oldZipFile):
                #    f = file(oldZipFile)
                #    oldZipList = cPickle.load(f)
                #    f.close()
            main(targetUrl, pathList, oldZipList, bakPath)
            #f = file(oldZipFile, "w")
            #cPickle.dump(templist, f)
            #f.close()

6. temp.log (identical copy of synchronizeData.py above)