In a new class I have a method that is supposed to extract all the links from a website, but only some of the links are extracted. Why?
// WebClient subclass that can issue HEAD requests instead of GET,
// so the response headers can be inspected without downloading the body
class MyClient : WebClient
{
    public bool HeadOnly { get; set; }

    protected override WebRequest GetWebRequest(Uri address)
    {
        WebRequest req = base.GetWebRequest(address);
        if (HeadOnly && req.Method == "GET")
        {
            req.Method = "HEAD";
        }
        return req;
    }
}
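For reference, this is how the HeadOnly switch is meant to be used (a minimal sketch; url here is just a placeholder):

using (MyClient client = new MyClient())
{
    client.HeadOnly = true;                  // issue HEAD: headers only
    client.DownloadData(url);                // body should come back 0-length
    string type = client.ResponseHeaders["content-type"];
    client.HeadOnly = false;                 // back to normal GET requests
}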
public static HtmlAgilityPack.HtmlDocument getHtmlDocumentWebClient(string url, bool useProxy, string proxyIp, int proxyPort, string username, string password)
{
    HtmlAgilityPack.HtmlDocument doc = null;
    try
    {
        using (MyClient clients = new MyClient())
        {
            // probe the headers first with a HEAD request
            clients.HeadOnly = true;
            byte[] body = clients.DownloadData(url);
            // note: body should be 0-length
            string type = clients.ResponseHeaders["content-type"];
            clients.HeadOnly = false;
            // check 'tis not binary... we check for text/html,
            // but could relax this to text/
            if (type == null)
            {
                return null;
            }
            if (type.StartsWith(@"text/html"))
            {
                doc = new HtmlAgilityPack.HtmlDocument();
                using (WebClient client = new WebClient())
                {
                    //client.Headers.Add("user-agent", "Mozilla/4.0 (compatible; MSIE 6.0; Windows NT 5.2; .NET CLR 1.0.3705;)");
                    client.Credentials = CredentialCache.DefaultCredentials;
                    client.Proxy = WebRequest.DefaultWebProxy;
                    if (useProxy && !string.IsNullOrEmpty(proxyIp))
                    {
                        WebProxy p = new WebProxy(proxyIp, proxyPort);
                        if (!string.IsNullOrEmpty(username))
                        {
                            if (password == null)
                                password = string.Empty;
                            p.Credentials = new NetworkCredential(username, password);
                        }
                        client.Proxy = p; // the proxy was created but never assigned before
                    }
                    using (var stream = client.OpenRead(url))
                    {
                        doc.Load(stream);
                    }
                }
            }
        }
    }
    catch (Exception err)
    {
        // swallowing every exception hides download failures;
        // the error should at least be logged here
    }
    return doc;
}
// pulls the address out of a string shaped like "Url: <address> ---"
private static string GetUrl(string url)
{
    string startTag = "Url: ";
    string endTag = " ---";
    int start = url.IndexOf(startTag);
    if (start == -1)
        return null; // guard: tag not found, Substring would throw otherwise
    start += startTag.Length;
    int end = url.IndexOf(endTag, start + 1);
    if (end == -1)
        return null;
    return url.Substring(start, end - start);
}
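For example (the input string here is made up, just to show the expected shape):

// returns "https://example.com"
string extracted = GetUrl("Url: https://example.com ---");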
Then:
public List<string> test(string mainUrl, int levels)
{
    List<string> csFiles = new List<string>();
    wc = new System.Net.WebClient();
    HtmlWeb hw = new HtmlWeb();
    List<string> webSites;
    csFiles.Add("temp string to know that something is happening in level = " + levels.ToString());
    csFiles.Add("current site name in this level is : " + mainUrl);
    try
    {
        HtmlAgilityPack.HtmlDocument doc = TimeOut.getHtmlDocumentWebClient(mainUrl, false, "", 0, "", "");
        currentCrawlingSite.Add(mainUrl);
        webSites = getLinks(doc);
Here doc is the variable that comes back from the method in the TimeOut class that downloads the URL. In the same class I also have this method:
// collects every <a href="..."> value that looks like an absolute link
private List<string> getLinks(HtmlAgilityPack.HtmlDocument document)
{
    List<string> mainLinks = new List<string>();
    var linkNodes = document.DocumentNode.SelectNodes("//a[@href]");
    if (linkNodes != null)
    {
        foreach (HtmlNode link in linkNodes)
        {
            var href = link.Attributes["href"].Value;
            // keep only hrefs that start with http://, https:// or www
            if (href.StartsWith("http://") || href.StartsWith("https://") || href.StartsWith("www"))
            {
                mainLinks.Add(href);
            }
        }
    }
    return mainLinks;
}
So, for example, say the main URL is:
https://github.com/jasonwupilly/Obsidian/tree/master/Obsidian
On that page I can see more than 10 links. But when I put a breakpoint after the line webSites = getLinks(doc); I see only 7 links inside webSites, which is a List<string>.
Why do I see only 7 links if the main URL has more than 10, all starting with http, https, or www?
I think maybe something is wrong with the getLinks method; for some reason it is not getting all the links.
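To narrow this down, a rough check I could add (just a debugging sketch, reusing the same methods from above) would compare how many anchors the page has in total against how many survive the StartsWith filter:

// debugging sketch: compare the raw anchor count with the filtered count.
// If total is larger than kept, the missing links are ones whose href does
// not start with http://, https:// or www (relative links, for example).
var doc = TimeOut.getHtmlDocumentWebClient(mainUrl, false, "", 0, "", "");
var allAnchors = doc.DocumentNode.SelectNodes("//a[@href]");
int total = allAnchors == null ? 0 : allAnchors.Count;
int kept = getLinks(doc).Count;
Console.WriteLine("anchors on page: " + total + ", kept by filter: " + kept);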