##
# date
##
from elasticsearch import Elasticsearch
import requests as req
import json
import yaml

from bs4 import BeautifulSoup

from time import localtime
import time
import os
import sys

# ========================================


class NaverNews:

    def __init__(self):

        self.elastiClient = NaverNews.elaInformation()

        urlSettingObj = NaverNews.urlRequestSetting()

        ## URL info
        self.reqUrl = urlSettingObj.get("url")

        ## e.g. format => 20190712
        self.currTimeObj = NaverNews.getCurrTime()

        self.urlInfo = {
            "etcParams": urlSettingObj.get("etcParam"),
            "page": None
        }

    """ Title 에 내가 원하는 단어가 있니??
    """
    def isTrue(self):

        ## ====================================================================
        # move into the directory where the downloaded html files are stored
        ## ====================================================================
        htmlPath = r"C:\Users\ezfarm\PycharmProjects\ElasticSearchProj\htmlObj"
        os.chdir(htmlPath)

        for htmlF in os.listdir(htmlPath):
            absolutePath = os.path.abspath(htmlF)
            print(absolutePath)

            ### =============================
            # html file read
            ### =============================
            try:
                htmlFileRead = open(absolutePath, encoding="utf-8")
            except FileNotFoundError as e:
                print(e)
                continue
            else:
                ### =============================
                # parse the html
                ### =============================
                bsObject = BeautifulSoup(htmlFileRead, "html.parser")

                HEAD_LINE = bsObject.select("ul.type06_headline > li")

                for h in HEAD_LINE:
                    # items with a thumbnail carry two <dt> tags; the headline is the last one
                    dtList = h.select("dl > dt")
                    if not dtList:
                        print("request error")
                        continue
                    headline = dtList[-1]

                    responseObj = self.textPreprocessing(headline.a.string)

                    if responseObj["isTrue"]:
                        self.elasticInsertDocuments(responseObj["title"],
                                                    h.select_one("dl > dd > span.lede").string)

                htmlFileRead.close()


    def textPreprocessing(self, txt):
        tmp = str(txt).strip().replace("\n", "")
        mark = {"title": tmp, "isTrue": False}

        for i in ["김정은", "이명박", "미사일"]:

            if i in tmp:
                mark["isTrue"] = True
                break

        return mark
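
    # Quick illustration of textPreprocessing's return shape (added note, not in the original post):
    #   textPreprocessing("  북한 미사일 발사\n")
    #     -> {"title": "북한 미사일 발사", "isTrue": True}    # "미사일" is in the keyword list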

    def elasticInsertDocuments(self, title, hObject):

        documents = {
            "title"   : title,
            "context" : hObject,
            "cllctdt" : self.currTimeObj
        }

        try:
            self.elastiClient.index(
                index    ="naver_headline_index",   # target index
                doc_type ="doc",
                body     =documents
            )
        except Exception as e:
            print("indexing failed !!!", e)
        else:
            time.sleep(1.2)
            print("elasticsearch insert success !!!")
            print(documents)

    def doRequests(self):

        for n, p in enumerate(range(1, 95)):
            self.urlInfo["page"] = str(p)
            """
            mode=LSD&mid=sec&sid1=100&date=20190712&page=7
            """
            paramsEtc = self.urlInfo["etcParams"]  + "&" + \
                        "date=" + self.currTimeObj + "&" + \
                        "page=" + self.urlInfo["page"]

            requestUrl = self.reqUrl + "?" + paramsEtc

            try:

                html = req.get(requestUrl)
            except req.exceptions.RequestException as e:
                print (e)
                sys.exit(1)
            else:
                # print("working on page {} ...".format(n+1))
                # bsObject = BeautifulSoup(html.text, "html.parser")
                htmlName = "html_file_{}.html".format(str(n+1))
                htmlFile = open(r"C:\Users\ezfarm\PycharmProjects\ElasticSearchProj\htmlObj\{}".format(htmlName),
                                "w", encoding="utf-8")

                try:
                    htmlFile.write(html.text)
                except Exception:
                    print("html file write error")
                else:
                    print("file {} write success !!!".format(n+1))
                finally:
                    htmlFile.close()

    """ reuqest setting 
    """

    @classmethod
    def urlRequestSetting(cls):

        try:

            f = open(r"C:\Users\ezfarm\PycharmProjects\ElasticSearchProj\conf\url.yml", "r", encoding="utf-8")

        except FileNotFoundError as e:
            print(e)
            sys.exit(1)
        else:
            yDoc = yaml.load(f, Loader=yaml.Loader)
            f.close()  # release the file handle
            return yDoc


    """ 검색 날짜 설정 
    """
    @classmethod
    def getCurrTime(cls):

        currObjTime = time.strftime("%Y%m%d", localtime())
        return currObjTime


    """ elasticsearch server가 살아 있는지 확인 
    """
    @classmethod
    def isAliveElastic(cls, elaAddress):

        try:

            req.get("http://" + elaAddress + ":9200")

        except req.exceptions.RequestException as e:
            """ server is die !!
            """
            print(e)
            sys.exit(1)
        else:
            print("elasticsearch server is alive !!!")
            return Elasticsearch(host=elaAddress)


    """ elasticsearch server address 정보 return
    """
    @classmethod
    def elaInformation(cls):

        path = r"C:\Users\ezfarm\PycharmProjects\ElasticSearchProj\conf\elainfo.json"

        try:

            f = open(path, "r", encoding="utf-8")
        except OSError as e:
            print(e)
            sys.exit(1)
        else:
            jsonDoc = json.load(f)
            f.close()
            elasticNode = NaverNews.isAliveElastic(jsonDoc.get("ela"))
            return elasticNode


def main():
    elanode = NaverNews()
    # elanode.doRequests()  # run this first to download the news list pages
    elanode.isTrue()        # then scan the saved pages and index matching headlines

if __name__ == "__main__":
    main()
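
# For reference, a minimal sketch of the two config files the class reads.
# The keys are inferred from urlRequestSetting() / elaInformation() above;
# the values are placeholders, not the originals.
#
# conf/url.yml:
#   url: "https://news.naver.com/..."            # news list endpoint (placeholder)
#   etcParam: "mode=LSD&mid=sec&sid1=100"        # fixed query-string part (see doRequests)
#
# conf/elainfo.json:
#   {"ela": "127.0.0.1"}                         # elasticsearch host checked by isAliveElastic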


from time import localtime, strftime
from bs4 import BeautifulSoup
import requests
import json

from Ela.Elast import Elarv

class NMusic:

    def __init__(self):
        self.url = NMusic.getInformation()

    def getUrl(self):
        html = requests.get(self.url)
        if html.status_code == 200:
            bsObject = BeautifulSoup(html.text, "html.parser")
            print("title : {}".format(bsObject.title.string))
            top100 = bsObject.select_one("table.home_top100 > tbody")

            for r in range(1, 11):

                lst = top100.select_one("tr._tracklist_move._track_dsc.list{rank}".format(rank=r))
                # --------------------------------------------------------------
                rnk     = lst.select_one("td.ranking > span.num")               # - rank
                nme     = lst.select_one("td.name > span.m_ell > a")            # - track name
                artist  = lst.select_one("td._artist > span.m_ell > a._artist") # - artist
                insrtDay= strftime("%Y%m%d", localtime())                       # - insert date

                d = {"rank" : rnk.string,
                     "name" : nme.string,
                     "artist" : artist.string,
                     "insertdate" : insrtDay}

                Elarv.insertDocuments(d)
                print("indexing success !!!")
                # print (insrtDay)
                # --------------------------------------------------------------
                #print ("{ranking} => {songname} : {artist}".format(ranking = rnk.string, songname = nme.string, artist = artist.string))

    @classmethod
    def getInformation(cls):

        try:

            f = open(r"C:\Users\junhyeon.kim\Desktop\StuEla\clw\info.json", "r", encoding="utf-8")
        except FileNotFoundError as e:
            print (e)
        else:
            jsonDoc = dict(json.load(f)).get("url")
            f.close()
            return jsonDoc

def main():
    m = NMusic()  # create the crawler object
    m.getUrl()

if __name__ == "__main__":
    main()
# =================================================================
# Ela/Elast.py  (the Elarv helper imported above)
from elasticsearch import Elasticsearch

class Elarv:

    @classmethod
    def insertDocuments(cls, elements):
        el = Elasticsearch(hosts="192.168.240.10")
        el.index(index="nmusic", doc_type="doc", body=elements)

def main():
    enode = Elarv()

if __name__ == "__main__":
    main()

 


from selenium import webdriver
from bs4 import BeautifulSoup
from konlpy.tag import Okt
import requests
import time
from openpyxl import Workbook
from Nature.Utils.Util import Util
##
# 2019-05-01
class News:

    def __init__(self):
        self.workBook = Workbook()  # excel workbook object
        self.url  = "https://news.naver.com/"
        self.chromeDriver = None
        self.josaList = set()
        #self.params = Util.getConfiguration()

    def getUrl(self):

        # Option-----------------------------------------------
        option = webdriver.ChromeOptions()
        option.add_argument("headless")
        option.add_argument("window-size=1920x1080")
        option.add_argument("disable-gpu")

        # -----------------------------------------------------
        self.chromeDriver = webdriver.Chrome(executable_path="C:\\Users\\junhyeon.kim\\Documents\\chrome_driver\\chromedriver.exe",
                                             options =option)

        self.chromeDriver.get(self.url)
        self.chromeDriver.implicitly_wait(3)

        # check the page title ---------------------------------
        print(self.chromeDriver.title) ; time.sleep(2)

        # section tabs -- 3: politics / 4: economy / 5: society / 6: life-culture / 7: world / 8: IT-science
        for p in range(3, 9):

            # click the section tab
            self.chromeDriver.find_element_by_xpath('//*[@id="lnb"]/ul/li['+ str(p) +']/a/span[1]').click()
            print (" >>> {}".format(self.chromeDriver.title)) ; time.sleep(2)

            bsObject = BeautifulSoup(self.chromeDriver.page_source, "html.parser")

            cluster = bsObject.select("div.cluster > "
                                      "div.cluster_group > "
                                      "div.cluster_body > "
                                      "ul.cluster_list > "
                                      "li.cluster_item > "
                                      "div.cluster_text")
            for c in cluster:
                t = c.select_one("a")
                if t.string is not None:
                    print ("title : {0} , requ : {1}".format(t.string, t.attrs))
                    html = requests.get(t.attrs["href"])

                    if html.status_code == 200:
                        bsObject = BeautifulSoup(html.text, "html.parser")
                        txt = bsObject.select_one("div#articleBodyContents")
                        # clean up the article body:
                        # strip surrounding whitespace and remove newlines
                        resltText = str(txt.text).replace("\n", "")
                        resltText = resltText.replace("// flash 오류를 우회하기 위한 함수 추가function _flash_removeCallback() {}", "")
                        resltText = resltText.strip()
                        print (resltText)
                        self.detail(resltText)

                        print ("===========================")

        print (self.josaList)
        self.writeXl(self.josaList)
        self.destroy()

    def detail(self, text):

        okt = Okt()
        p = [x for x in okt.pos(text)]
        s = self.removeWord(p)
        self.josaList = self.josaList.union(s)

    def removeWord(self, d):
        """

        :param d:
        :return: set ( 외래어, 조사, 동사, 부사 )
        """
        r = set()
        for i in d:
            if i[1] == "Foreign" or \
               i[1] == "Josa"    or \
               i[1] == "Verb"    or \
               i[1] == "Adjective" or \
               i[1] == "Modifier":
                r.add(i[0])
        return r
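
    # For context (an illustrative example of Okt.pos() output, not verified against a real run):
    # Okt().pos("나는 학교에 갔다")
    #   -> [('나', 'Noun'), ('는', 'Josa'), ('학교', 'Noun'), ('에', 'Josa'), ('갔다', 'Verb')]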

    # write the collected words to excel
    def writeXl(self, wrdData):

        workSheet = self.workBook.active

        for n, w in enumerate(wrdData):
            workSheet.cell(row=n+1, column=1).value = w

        self.workBook.save(r"C:\Users\junhyeon.kim\Desktop\ezfarm\Nature\Result\stopWord.xlsx")
        self.workBook.close()

    def destroy(self):

        if self.chromeDriver is not None:
            self.chromeDriver.close()

def main():
    n = News()
    n.getUrl()
if __name__ == "__main__":
    main()



import os
from PIL import Image

from SIMSIM.Utils import Utils

class TestCode:

    def __init__(self):

        self.targetDirectory = Utils.getFile()
        self.elements = {} # dictionary

    def directorySearch(self):
        # move into the target directory
        Utils.directoryMove(self.targetDirectory)
        directoryList = os.listdir(os.path.abspath(os.getcwd()))

        # group files by the prefix before "-" (e.g. "cat-1.jpg", "cat-2.jpg" -> key "cat")
        for f in directoryList:
            mrk = f.split("-")[0]

            if mrk not in self.elements.keys():
                self.elements[mrk] = [os.path.abspath(f)]
            else:
                self.elements[mrk].append(os.path.abspath(f))

        print (self.elements)

    def imageSize(self):

        for k, v in self.elements.items():
            new_dir = os.getcwd() + "\\" + k + "_dir"
            if not os.path.exists(new_dir):
                os.mkdir(new_dir)

            sizeDict = {"width": [], "height": []}

            for x in v:
                im = Image.open(x)
                w, h = im.size
                sizeDict["width"].append(w); sizeDict["height"].append(h)

            # target size for every image in the group
            reSize = self.avgSize(sizeDict)

            # canvas tall enough to stack all images in the group vertically
            saveImageFile = Image.new("RGB", (reSize[0], reSize[1]*len(v)), (255, 255, 255))
            yOffset = 0
            for x in v:
                rIm = Image.open(x)
                resizedImage = rIm.resize(reSize)
                saveImageFile.paste(resizedImage, (0, yOffset))
                yOffset += resizedImage.size[1]
            os.chdir(new_dir)
            saveImageFile.save("{}.jpg".format(k))

            # move back up to the parent directory
            os.chdir("..")

    def avgSize(self, size):
        # despite the name, this returns the minimum width/height so no image is upscaled
        return min(size["width"]), min(size["height"])


def main():
    tnode = TestCode()  # create object
    tnode.directorySearch()
    tnode.imageSize()
if __name__ == "__main__":
    main()


python + outlook (2019-03-31)

import win32com.client as win32
import time

outlook = win32.Dispatch("Outlook.Application").GetNamespace("MAPI")

# 5 : Sent Items folder
# 6 : Inbox
inbox = outlook.GetDefaultFolder("6")

allBox = inbox.Items
# how many messages are in the inbox
print(allBox.Count)

for msg in allBox:
    # msg.Subject : mail subject
    # msg.SenderName : sender name
    # msg.SenderEmailAddress : sender email address
    print("From : {}|{} Subject: {} ".format(msg.SenderName, msg.SenderEmailAddress, msg.Subject))


selenium_ (2019-03-11)

A technique to save for later use: scroll the page down in small steps until either the bottom
is reached or a "view more" button appears and is clicked.

bar = self.chromeDriver.execute_script("return document.body.scrollHeight")
u, b = 0, 10   # assumed starting offsets (not shown in the original fragment)
while True:
    print(b)
    self.chromeDriver.execute_script("window.scrollTo({up}, {down});".format(up=u, down=b))
    if b >= bar:
        break
    else:
        try:
            view_btn_more = self.chromeDriver.find_element_by_xpath('//*[@id="recruit_info_list"]/div[2]/a')
        except:
            pass
        else:
            view_btn_more.click()
            break
        u = b
        b += 10



import requests
import yaml

class Attr:

    @classmethod
    def setting(cls):

        try:
            f = open(Attr.ymlFilePath(), "r", encoding="utf-8")
            doc = dict(yaml.load(f, Loader=yaml.Loader))
            f.close()
        except FileNotFoundError as e:
            print(e)
            exit(1)
        else:
            Attr.urlRequests(dict(doc.get("srv")))
            return dict(doc.get("srv"))

    @classmethod
    def urlRequests(cls, srv):

        try:
            requests.get("http://{address}:{port}".format(address=srv.get("url"), port=srv.get("port")))
        except requests.exceptions.ConnectionError as e:
            print(e)
            exit(1)
        except requests.exceptions.RequestException as e:
            print(e)
            exit(1)

    @classmethod
    def ymlFilePath(cls):
        filePath = "C:\\Users\\junhyeon.kim\\Desktop\\proj19\\configration\\srv.yml"
        return filePath
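
# A minimal sketch of the srv.yml file read above; the srv/url/port keys come from
# the code, the values are placeholders.
#
# srv:
#   url: 127.0.0.1
#   port: 9200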




from elasticsearch import Elasticsearch
from elasticsearch_dsl import Search
from elasticserver.Attribute import Attr
import json

class Elasvr:

    attr = Attr.setting()

    def __init__(self):
        self.elasticSrvr = Elasticsearch(
            hosts=[{"host": Elasvr.attr.get("url"),
                    "port": Elasvr.attr.get("port")}]
        )

    def srvHealthCheck(self):
        srvInfo = dict(self.elasticSrvr.info())
        health_check = dict(self.elasticSrvr.cluster.health())

        result = json.dumps(
            {
                "name": srvInfo.get("name"),
                "cluster_name": health_check.get("cluster_name"),
                "status": health_check.get("status")
            },
            ensure_ascii=False,
            indent=4
        )

        print(result)

    def search(self, param_index, param_search):

        srv = Search(using=self.elasticSrvr,
                     index=param_index).query("match", name=param_search)

        # response = srv.execute().to_dict()["hits"]["hits"][0]
        response = srv.execute().to_dict()["hits"]["hits"]
        if response == []:
            return response
        else:
            '''
            each hit carries: _index, _type, _id, _score, _source
            '''
            print(response[0]["_source"])
            return response[0]["_source"]

    def __del__(self):
        self.elasticSrvr.exists  # note: this only references the method, it does not call or close anything


def main():
    enode = Elasvr()
    enode.srvHealthCheck()
    enode.search(param_index="hex_color_list", param_search="b")

if __name__ == "__main__":
    main()
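
# For reference, Search(...).query("match", name=param_search) above sends roughly
# this query body (what elasticsearch_dsl generates for a match query):
#   {"query": {"match": {"name": "b"}}}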








data crawling (2019-03-03)

from bs4 import BeautifulSoup
from openpyxl import Workbook
from json import load
import requests
import time

from facebook.TargetString import TargetStr


class Facebook:

    def __init__(self):
        self.requestUrl = Facebook.jsnoFileRead()
        self.bsObject = self.urlRequests()
        self.targetString = TargetStr.target_string
        self.wrkBook = Workbook()  # create the excel workbook

    @classmethod
    def jsnoFileRead(cls):
        try:
            f = open("./info.json", "r")
            json_doc = dict(load(f))
            url = "{url}/{path}?{param}".format(
                    url   = json_doc.get("url"),
                    path  = json_doc.get("path"),
                    param = json_doc.get("param"))
        except FileNotFoundError as e:
            print(e)
            exit(1)
        else:
            f.close()
            return url

    def urlRequests(self):
        html = requests.get(self.requestUrl)
        if html.status_code == 200:
            return BeautifulSoup(html.text, "html.parser")
        else:
            exit(1)

    def urlParcing(self):
        # create a worksheet
        wrkSheet = self.wrkBook.create_sheet("decoding_list")

        table = self.bsObject.select("div#module-codecs > "
                                     "div#standard-encodings > "
                                     "table.docutils > "
                                     "tbody > "
                                     "tr")

        encoding_list = [t.select_one("td").string for t in table]
        for n, i in enumerate(encoding_list):
            result_text = self.targetString.decode(i, "ignore")
            # wrkSheet.cell(row=n+2, column=2).value = i
            # wrkSheet.cell(row=n+2, column=3).value = str(result_text)
            time.sleep(1)
            print(i, result_text)

    def __del__(self):
        self.wrkBook.save("facebook.xlsx")


def main():
    fnode = Facebook()
    fnode.jsnoFileRead()
    fnode.urlParcing()


if __name__ == "__main__":
    main()
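
# A minimal sketch of the info.json file jsnoFileRead() expects; the keys come from the
# code above, the values are placeholders (the CSS selectors suggest the page is the
# Python codecs documentation, but that is an assumption).
#
# {"url": "https://...", "path": "...", "param": "..."}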







from urllib.parse import urlparse
from selenium import webdriver
from bs4 import BeautifulSoup
import json
import time

class X:

    def __init__(self):

        self.ele = {
            "중동역": {"lat": None, "lng": None, "check": "False"},
            "부천역": {"lat": None, "lng": None, "check": "False"},
            "소사역": {"lat": None, "lng": None, "check": "False"},
            "역곡역": {"lat": None, "lng": None, "check": "False"},
            "온수역": {"lat": None, "lng": None, "check": "False"},
            "오류동역": {"lat": None, "lng": None, "check": "False"},
            "개봉역": {"lat": None, "lng": None, "check": "False"},
            "구일역": {"lat": None, "lng": None, "check": "False"},
            "구로역": {"lat": None, "lng": None, "check": "False"},
            "가산디지털단지역": {"lat": None, "lng": None, "check": "False"},
            "독산역": {"lat": None, "lng": None, "check": "False"},
            "금천구청역": {"lat": None, "lng": None, "check": "False"},
            "석수역": {"lat": None, "lng": None, "check": "False"},
            "관악역": {"lat": None, "lng": None, "check": "False"},
            "안양역": {"lat": None, "lng": None, "check": "False"},
            "명학역": {"lat": None, "lng": None, "check": "False"},
            "금정역": {"lat": None, "lng": None, "check": "False"},
            "범계역": {"lat": None, "lng": None, "check": "False"},
            "평촌역": {"lat": None, "lng": None, "check": "False"},
            "인덕원역": {"lat": None, "lng": None, "check": "False"},
        }

        self.webdriv = webdriver.Chrome(X.jsonRead())

    @classmethod
    def jsonRead(cls):

        path = ""
        try:
            with open("./driv_path.json", "r") as json_file:
                doc = dict(json.load(json_file))
                path = doc.get("path")
        except FileNotFoundError as e:
            print(e)

        return path

    def urlReq(self):

        self.webdriv.get(url="https://www.google.com/")
        # implicitly wait up to 3 seconds for the page resources to load
        self.webdriv.implicitly_wait(3)

        for k in self.ele.keys():
            # search for each station in a new window
            windows_before = self.webdriv.window_handles[0]
            self.webdriv.execute_script("window.open('');")

            windows_after = self.webdriv.window_handles[1]
            self.webdriv.switch_to.window(window_name=windows_after)
            self.webdriv.get(url="https://www.google.com/")
            time.sleep(2)
            self.webdriv.find_element_by_xpath('//*[@id="tsf"]/div[2]/div/div[1]/div/div[1]/input')\
                .send_keys(k)

            try:
                self.webdriv.find_element_by_xpath('//*[@id="tsf"]/div[2]/div/div[2]/div[2]/div/center/input[1]')\
                    .click()
            except:
                self.webdriv.find_element_by_xpath('//*[@id="tsf"]/div[2]/div/div[3]/center/input[1]')\
                    .click()
            finally:
                # =================================
                time.sleep(2)
                # e = self.webdriv.find_elements_by_css_selector("#hdtb-msb-vis > div > a")
                bsobj = BeautifulSoup(self.webdriv.page_source, "html.parser")
                e = bsobj.select("#hdtb-msb-vis > div > a")
                print(e)

                for n, i in enumerate(e):
                    if i.string == "지도":   # "지도" = the Maps tab
                        print(n+1, i)
                        self.webdriv.find_element_by_xpath('//*[@id="hdtb-msb-vis"]/div['+ str(n+2) +']/a').click()
                        time.sleep(10)

                        # the maps url path looks like ".../@<lat>,<lng>,17z/..."; pull the two numbers out
                        url = urlparse(self.webdriv.current_url)
                        r = str(url.path).split("/")[4]\
                            .replace("@", "")\
                            .replace(",17z", "")\
                            .split(",")

                        self.ele[k]["lat"] = float(r[0])    # latitude
                        self.ele[k]["lng"] = float(r[1])    # longitude
                        self.ele[k]["check"] = "True"       # marks that the data was filled in

                        print(k)

                        # =================================
                        self.webdriv.close()
                        self.webdriv.switch_to.window(windows_before)

    def mkJson(self):

        with open("data.json", "w", encoding="utf-8") as outfile:
            json.dump(self.ele, outfile, ensure_ascii=False, indent=4)

    # destructor
    def __del__(self):
        self.webdriv.close()



=================================================================

from gmplot import gmplot
from gps_proj.map.m2 import X

# latitude  : 37.4881796
# longitude : 126.7682477

class M:

    node = X()

    @classmethod
    def first(cls):
        cls.node.urlReq()

    @classmethod
    def doScatterPoint(cls):
        gmap = gmplot.GoogleMapPlotter(37.493988, 126.767433, 10)

        lat = [i.get('lat') for i in cls.node.ele.values()]
        lng = [i.get('lng') for i in cls.node.ele.values()]

        # gmap.scatter(lat, lng, "#3B0B39", size=40, marker=False)
        gmap.plot(lat, lng, "cornflowerblue", edge_width=6)
        gmap.draw("t.html")


def main():
    M.first()
    M.doScatterPoint()


if __name__ == "__main__":
    main()




pysimplegui (2019-02-10)

import PySimpleGUI as sg
import requests
from bs4 import BeautifulSoup


class UrlClass:

    element = []

    @classmethod
    def urlRequests(cls, url_parameter):
        html = requests.get(url_parameter)
        if html.status_code == 200:
            bsobj_parameter = BeautifulSoup(html.text, "html.parser")
            ul_ah_l = bsobj_parameter.select("ul.ah_l")

            for i in ul_ah_l:
                if 'data-list' in i.attrs:
                    for j in i.select("li.ah_item > a:nth-of-type(1)"):
                        tmp_dict = {"number": j.select_one("span.ah_r").string,
                                    "doc": j.select_one("span.ah_k").string}
                        cls.element.append(tmp_dict)

        return cls.element


class GUI:

    @classmethod
    def graphic(cls):
        layout = [
            [sg.Text("데이터 수집 대상 정보 입력", background_color="#a2e8f0", font=("Consolas", 18), justification="center")],
            [sg.Text("url", size=(15, 1)), sg.InputText("url", key="_URL_")],
            [sg.Button("DO crawling"), sg.Button("Exit")]
        ]
        window = sg.Window("나의 첫 simpleGui").Layout(layout)

        while True:
            event, values = window.Read()
            if event in (None, "Exit"):   # None = window closed
                break
            elif event == "DO crawling":
                print("values['_URL_'] => {}\n".format(values['_URL_']))
                sg.Popup('The values are', UrlClass.urlRequests(values['_URL_']))

        window.Close()


GUI.graphic()
