Capturing an app's API with Wireshark and crawling site data with a spider (Part 3)
My Git repo (唯空) — help yourself. Please download the source from Git; the required jar packages are included.
Continuing from the previous post.
After browsing some of the images I noticed a problem: they were still small, mostly under 300 KB, and many carried the watermark of another website.
So I clicked through to that site, and sure enough it was a different story: the images there were all high resolution.
That's when I realized the app's images were all crawled from this site, and compressed copies at that. Shameless...
Having found the high-resolution originals, it was time to go after them.
So I started out following the same pattern as before.
Then things went sideways: the app turned out to be a repackaged web page, and its API was nowhere near as simple as the previous one. So I decided to upgrade my crawler to start from a single page and crawl the entire site for full-size images.
Here is the code.
The first class holds the queues of links. Visited links are stored so the same URL isn't fetched twice, and the fields are static so every class shares the same queues.
package com.feng.main;

import java.util.Queue;
import java.util.concurrent.ConcurrentLinkedQueue;

public class Queues {
    // These queues are shared between the crawler thread and the download
    // threads, so a thread-safe queue is used instead of a plain LinkedList.
    /** Pending image URLs */
    public static Queue<String> imgUrlQueue = new ConcurrentLinkedQueue<String>();
    /** Pending page URLs */
    public static Queue<String> htmlUrlQueue = new ConcurrentLinkedQueue<String>();
    /** Image URLs already downloaded */
    public static Queue<String> visitedImgUrlQueue = new ConcurrentLinkedQueue<String>();
    /** Page URLs already visited */
    public static Queue<String> visitedHtmlUrlQueue = new ConcurrentLinkedQueue<String>();
}
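One thing to watch: contains() on these queues is a linear scan, so the duplicate checks get slower as the crawl grows. On Java 8+ a concurrent set gives constant-time lookups instead; a minimal sketch (the class and field names here are my own, not part of the project):

package com.feng.main;

import java.util.Set;
import java.util.concurrent.ConcurrentHashMap;

public class VisitedSets {
    // Hypothetical replacement for the two "visited" queues:
    // a concurrent set with O(1) contains() and add().
    public static Set<String> visitedHtmlUrls = ConcurrentHashMap.newKeySet();
    public static Set<String> visitedImgUrls = ConcurrentHashMap.newKeySet();
}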
The next class fetches the page's HTTP entity and converts it to a String for easier processing.
package com.feng.main;

import java.io.IOException;
import java.io.InputStream;
import java.io.InputStreamReader;

import org.apache.http.HttpEntity;
import org.apache.http.HttpResponse;
import org.apache.http.client.HttpClient;
import org.apache.http.client.config.RequestConfig;
import org.apache.http.client.methods.HttpGet;
import org.apache.http.impl.client.HttpClients;

public class HtmlContent {

    /**
     * Fetch the page and return its HttpEntity, or null on failure.
     */
    private HttpEntity getHttpEntity(String url) {
        HttpClient httpClient = HttpClients.createDefault();
        HttpGet get = new HttpGet(url);
        RequestConfig requestConfig = RequestConfig.custom()
                .setSocketTimeout(5000)            // socket read timeout
                .setConnectionRequestTimeout(5000) // time allowed to obtain a connection
                .build();
        get.setConfig(requestConfig);
        try {
            HttpResponse response = httpClient.execute(get);
            return response.getEntity();
        } catch (IOException e) {
            // Returning null here avoids the NullPointerException the
            // original code would hit when execute() failed.
            e.printStackTrace();
            return null;
        }
    }

    /**
     * Return the whole page body as a String.
     */
    public String getContent(String url) {
        HttpEntity httpEntity = getHttpEntity(url);
        if (httpEntity == null) {
            return "";
        }
        StringBuilder content = new StringBuilder();
        try {
            InputStream is = httpEntity.getContent();
            InputStreamReader isr = new InputStreamReader(is, "UTF-8");
            char[] c = new char[1024];
            int l;
            while ((l = isr.read(c)) != -1) {
                content.append(c, 0, l);
            }
            isr.close();
            is.close();
        } catch (IllegalStateException | IOException e) {
            e.printStackTrace();
        }
        return content.toString();
    }
}
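Incidentally, HttpClient already ships a utility that does this entity-to-String conversion in one call; assuming the same httpEntity as above, getContent could shrink to roughly:

import org.apache.http.util.EntityUtils;

// EntityUtils handles the stream copy and the charset (using the
// response's own charset when present, else the supplied default).
String content = EntityUtils.toString(httpEntity, "UTF-8");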
The next class takes the page String produced by the class above, extracts the image links and page links from it, and puts them into the image queue and page queue; together with the visited queues this guarantees each page is visited (downloaded) only once.
It also doubles as the link-gathering thread. Separating URL collection from image downloading means the two can run as two threads without interfering with each other, while staying connected through the shared image-link queue (imgUrlQueue).
package com.feng.main;

import java.util.regex.Matcher;
import java.util.regex.Pattern;

public class AddQueue extends Thread {

    // Domain filter: only URLs containing this string are queued, so the
    // crawl cannot wander off to external sites and run forever.
    String regex = "";

    AddQueue(String regex) {
        this.regex = regex;
    }

    public void run() {
        System.out.println("add thread started");
        while (true) {
            if (Queues.htmlUrlQueue.size() < 50 || Queues.imgUrlQueue.size() < 100) {
                // Fetch and remove the next page URL
                String url = Queues.htmlUrlQueue.poll();
                if (url == null) {
                    continue; // nothing to crawl yet
                }
                String content = new HtmlContent().getContent(url);
                getHtmlUrl(content);
                getImagesUrl(content);
                // Mark the page as visited
                Queues.visitedHtmlUrlQueue.offer(url);
            } else {
                // Queues are full enough; back off briefly instead of spinning
                try {
                    Thread.sleep(200);
                } catch (InterruptedException e) {
                    return;
                }
            }
        }
    }

    /**
     * Extract all page links.
     */
    public void getHtmlUrl(String context) {
        String regex = "http://([\\w-]+\\.)+[\\w-]+(/[\\w-./?%&=]*)?";
        Pattern p = Pattern.compile(regex);
        Matcher m = p.matcher(context);
        while (m.find()) {
            String url = m.group();
            if (!Queues.visitedHtmlUrlQueue.contains(url)
                    && !Queues.htmlUrlQueue.contains(url)
                    && url.contains(this.regex)) {
                Queues.htmlUrlQueue.offer(url);
                System.out.println("add Html url : " + url);
            }
        }
    }

    /**
     * Extract all image/video links.
     */
    public void getImagesUrl(String context) {
        // The dots in the extension list must be escaped; unescaped, "." matches
        // any character, so ".jpg" would also match "ajpg".
        String regex = "http://([\\w-]+\\.)+[\\w-]+(/[\\w-./?%&=]*)?"
                + "\\.(jpg|jpeg|png|gif|bmp|mp4|rmvb|mkv|flv|avi|asf|rm|wmv)";
        Pattern p = Pattern.compile(regex);
        Matcher m = p.matcher(context);
        while (m.find()) {
            String url = m.group();
            if (!Queues.visitedImgUrlQueue.contains(url)
                    && !Queues.imgUrlQueue.contains(url)
                    && url.contains(this.regex)) {
                Queues.imgUrlQueue.offer(url);
                System.out.println("add Image url : " + url);
            }
        }
    }
}
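A quick way to sanity-check the two extraction regexes before pointing them at the real site is a throwaway test against a made-up HTML fragment (the URLs below are invented examples):

package com.feng.main;

public class RegexSmokeTest {
    public static void main(String[] args) {
        // Invented sample markup, just to exercise the two extractors.
        String html = "<a href=\"http://www.example.com/page/1\">next</a>"
                + "<img src=\"http://img.example.com/photos/001.jpg\"/>";
        AddQueue q = new AddQueue("example.com");
        q.getHtmlUrl(html);   // queues every on-site link it finds
        q.getImagesUrl(html); // should queue only the .jpg link
        System.out.println("pages:  " + Queues.htmlUrlQueue);
        System.out.println("images: " + Queues.imgUrlQueue);
    }
}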
Last comes the download class, a trimmed-down version of the one from the previous post.
package com.feng.main;

import java.io.File;
import java.io.FileOutputStream;
import java.io.IOException;
import java.io.InputStream;
import java.util.ArrayList;
import java.util.List;
import java.util.regex.Matcher;
import java.util.regex.Pattern;

import org.apache.http.HttpEntity;
import org.apache.http.HttpResponse;
import org.apache.http.client.HttpClient;
import org.apache.http.client.config.RequestConfig;
import org.apache.http.client.methods.HttpGet;
import org.apache.http.impl.client.HttpClients;

public class DownLoadImg {

    List<String> imgFormat = new ArrayList<String>();

    DownLoadImg() {
        imgFormat.add("jpg");
        imgFormat.add("jpeg");
        imgFormat.add("png");
        imgFormat.add("gif");
        imgFormat.add("bmp");
    }

    /**
     * Fetch the resource and return its HttpEntity, or null on failure.
     */
    private HttpEntity getHttpEntity(String url) {
        HttpClient httpClient = HttpClients.createDefault();
        HttpGet get = new HttpGet(url);
        RequestConfig requestConfig = RequestConfig.custom()
                .setSocketTimeout(5000)            // socket read timeout
                .setConnectionRequestTimeout(5000) // time allowed to obtain a connection
                .build();
        get.setConfig(requestConfig);
        try {
            HttpResponse response = httpClient.execute(get);
            return response.getEntity();
        } catch (IOException e) {
            e.printStackTrace();
            return null;
        }
    }

    /**
     * Download one file; returns 0 if skipped, 1 otherwise.
     */
    public int downloadImage(String url) {
        try {
            HttpEntity httpEntity = getHttpEntity(url);
            if (httpEntity == null) {
                return 0;
            }
            // Content length may be -1 when the server doesn't send it;
            // such files are skipped along with anything under 150 KB.
            long len = httpEntity.getContentLength() / 1024;
            System.out.println("file size: " + len + "k");
            if (len < 150) {
                System.out.println("Warning: file too small, skipping --------");
                return 0;
            }
            String realPath = getRealPath(url);
            String name = getName(url);
            System.out.println("directory: " + realPath);
            System.out.println("file name: " + name);
            InputStream is = httpEntity.getContent();
            int l;
            byte[] b = new byte[1024];
            FileOutputStream fos = new FileOutputStream(new File(realPath + "/" + name));
            while ((l = is.read(b)) != -1) {
                fos.write(b, 0, l);
            }
            fos.flush();
            fos.close();
            is.close();
            System.out.println("downloaded " + url + " successfully\n");
        } catch (Exception e) {
            System.out.println("failed to download: " + url);
            e.printStackTrace();
        }
        return 1;
    }

    /**
     * Build the target directory (creating it if needed) and return its path.
     */
    private String getRealPath(String url) {
        Pattern p = Pattern.compile("images/[a-z]+/[a-z_0-9]+");
        Matcher m = p.matcher(url);
        String name = getName(url);
        // Take everything after the last dot as the extension
        String format = name.substring(name.lastIndexOf('.') + 1);
        String path;
        if (imgFormat.contains(format)) {
            path = "media/images/";
        } else {
            path = "media/video/";
        }
        // Default sub-folder: the second-to-last path segment of the URL,
        // unless the URL carries an "images/<category>/<album>" pattern
        path += url.split("/")[url.split("/").length - 2];
        if (m.find()) {
            path = m.group();
        }
        // Prepend the drive letter
        path = "D:/" + path;
        File file = new File(path);
        if (!file.exists()) {
            file.mkdirs();
        }
        return path;
    }

    /**
     * File name = everything after the last '/'.
     */
    private String getName(String url) {
        return url.substring(url.lastIndexOf("/") + 1);
    }
}
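Used on its own, the class is a one-liner (the URL below is an invented example):

// One-off download; files land under D:/media/... per getRealPath above.
new DownLoadImg().downloadImage("http://img.example.com/photos/001.jpg");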
Then the thread that downloads the images:
package com.feng.main;

public class DownloadImages extends Thread {

    public void run() {
        System.out.println("download thread started");
        // Keep downloading as long as there are queued image URLs;
        // several of these threads can safely run in parallel.
        while (Queues.imgUrlQueue.size() > 0) {
            String url = Queues.imgUrlQueue.poll();
            if (url == null) {
                break; // another thread drained the queue first
            }
            System.out.println("downloading: " + url);
            new DownLoadImg().downloadImage(url);
            Queues.visitedImgUrlQueue.offer(url);
        }
    }
}
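One wrinkle: this thread quits the moment the image queue happens to be empty, even if AddQueue is about to refill it. A slightly more patient variant (my tweak, not the original behavior) retries for a while before giving up:

// Inside run(): poll with a few idle retries instead of exiting immediately.
String url;
int idleRounds = 0;
while ((url = Queues.imgUrlQueue.poll()) != null || idleRounds++ < 10) {
    if (url == null) {
        try { Thread.sleep(1000); } catch (InterruptedException e) { return; }
        continue; // queue was empty; wait and re-check
    }
    idleRounds = 0; // got work, reset the idle counter
    new DownLoadImg().downloadImage(url);
    Queues.visitedImgUrlQueue.offer(url);
}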
And finally, the main method:
package com.feng.main;

import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;
import java.util.regex.Matcher;
import java.util.regex.Pattern;

public class MainTest {

    public static void main(String[] args) {
        String startUrl = "http://www.jdlingyu.net/cosplay/";
        // Extract the scheme + host to use as the domain filter
        Pattern p = Pattern.compile("http://([\\w-]+\\.)+[\\w-]+");
        Matcher m = p.matcher(startUrl);
        m.find();
        String regex = m.group();
        System.out.println("regex : " + regex);
        Queues.htmlUrlQueue.offer(startUrl);
        // A fixed pool of three threads: one crawler, two downloaders
        ExecutorService pool = Executors.newFixedThreadPool(3);
        Thread add = new AddQueue(regex);
        Thread down = new DownloadImages();
        pool.execute(add);
        try {
            // Give the crawler a head start so the downloaders
            // don't find an empty image queue and exit at once
            Thread.sleep(5000);
        } catch (InterruptedException e) {
            e.printStackTrace();
        }
        // The same Runnable is submitted twice, so its run() executes
        // on two pool threads in parallel
        pool.execute(down);
        pool.execute(down);
        pool.shutdown();
    }
}
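The fixed five-second sleep is a guess; a small refinement (my variation, not in the original) is to wait until the crawler has actually queued something, with a timeout so the wait can't last forever:

// Instead of a fixed Thread.sleep(5000), wait until the image
// queue has content or 30 seconds have passed.
long deadline = System.currentTimeMillis() + 30000;
while (Queues.imgUrlQueue.isEmpty() && System.currentTimeMillis() < deadline) {
    try {
        Thread.sleep(200);
    } catch (InterruptedException e) {
        Thread.currentThread().interrupt();
        break;
    }
}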
And that's it; kick it off and the download begins.
Finally, a showcase of the results.
(The End)