
def Nagstamon::nagstamonObjects::NagiosServer::FetchURL (   self,
  url,
  giveback = "dict",
  cgi_data = None 
)

get the content of the given URL, cgi_data is only used if present
giveback may be "dict", "html" or "nothing"
"dict": FetchURL gives back a dict full of miserable hosts/services,
"html": it gives back pure HTML - useful for finding out the IP or a new version
"nothing": it gives back nothing at all - useful when, for example, acknowledging a service
the existence of cgi_data forces urllib to use POST instead of GET requests
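
A minimal usage sketch - the way the NagiosServer object is constructed and configured below,
the example.com URLs and the cmd.cgi parameters are illustrative assumptions, not taken from
the nagstamon sources:

    import urllib
    from Nagstamon.nagstamonObjects import NagiosServer

    server = NagiosServer()
    server.username = "nagiosadmin"
    server.password = "secret"
    server.use_proxy = "False"

    # default "dict" mode: gives back the parsed status page,
    # or the string "ERROR" if anything went wrong
    htobj = server.FetchURL("http://nagios.example.com/nagios/cgi-bin/status.cgi")

    # "html" mode: gives back the raw page source
    html = server.FetchURL("http://nagios.example.com/nagios/main.html", giveback="html")

    # a present cgi_data makes urllib send a POST, "nothing" discards the response
    cgi_data = urllib.urlencode({"cmd_typ": "34", "cmd_mod": "2"})  # hypothetical cmd.cgi parameters
    server.FetchURL("http://nagios.example.com/nagios/cgi-bin/cmd.cgi",
                    giveback="nothing", cgi_data=cgi_data)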

Definition at line 454 of file nagstamonObjects.py.

00454     def FetchURL(self, url, giveback="dict", cgi_data=None):
        """
        get content of given url, cgi_data only used if present
        giveback may be "dict", "html" or "none" 
        "dict" FetchURL gives back a dict full of miserable hosts/services,
        "html" it gives back pure HTML - useful for finding out IP or new version
        "none" it gives back pure nothing - useful if for example acknowledging a service
        existence of cgi_data forces urllib to use POST instead of GET requests
        """
        # using HTTPPasswordMgrWithDefaultRealm because putting the password into the plain
        # URL like http://username:password@nagios-server causes trouble with
        # passwords containing special characters like "?"
        # see http://www.voidspace.org.uk/python/articles/authentication.shtml#doing-it-properly
        # attention: the example from the above website is wrong, passman.add_password needs the
        # WHOLE URL, including the protocol!
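        # (illustration: add_password(None, "http://nagios.example.com/nagios/", user, password)
        # with the full URL works, while passing only "nagios.example.com" would not)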
        
        passman = urllib2.HTTPPasswordMgrWithDefaultRealm()
        passman.add_password(None, url, self.username, self.password)
        auth_handler = urllib2.HTTPBasicAuthHandler(passman)

        # if something goes wrong with accessing the URL it can be caught
        try:
            # if no proxy should be used, use an empty proxy_handler - only necessary on Windows,
            # where the IE proxy settings are picked up automatically if available
            # on UNIX $HTTP_PROXY will be used
            if str(self.use_proxy) == "False":
                proxy_handler = urllib2.ProxyHandler({})
                urlopener = urllib2.build_opener(auth_handler, proxy_handler)
            elif str(self.use_proxy) == "True":
                if str(self.use_proxy_from_os) == "True":
                    urlopener = urllib2.build_opener(auth_handler)
                else:
                    # if the proxy from the OS is not used, an authenticated proxy handler has to be added
                    passman.add_password(None, self.proxy_address, self.proxy_username, self.proxy_password)
                    proxy_handler = urllib2.ProxyHandler({"http": self.proxy_address, "https": self.proxy_address})
                    proxy_auth_handler = urllib2.ProxyBasicAuthHandler(passman)
                    urlopener = urllib2.build_opener(proxy_handler, proxy_auth_handler, auth_handler)
            
            # create url opener
            urllib2.install_opener(urlopener)

            # use opener - if cgi_data is not empty urllib uses a POST request
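            # (cgi_data is expected to be an already urlencoded string, e.g. the result
            # of urllib.urlencode() applied to a dict of cmd.cgi parameters)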
            urlcontent = urllib2.urlopen(url, cgi_data)
            
            # give back pure HTML in case giveback is "html"
            if giveback == "html":
                return urlcontent.read()
            
            # give back pure nothing if giveback is "nothing" - useful for POST requests
            if giveback == "nothing":
                # do some cleanup
                del urlcontent
                del passman
                del auth_handler
                return None           
            
            # the heart of the whole Nagios-status-monitoring engine:
            # first step: parse the read HTML
            naghtml = lxml.etree.HTML(urlcontent.read())
                
            # second step: make pretty HTML of it
            nagprettyhtml = lxml.etree.tostring(copy.copy(naghtml), pretty_print=True)
            
            # third step: clean the HTML from tags which embarrass libxml2 2.7
            # only possible when the module lxml.html.clean has been loaded
            if "lxml.html.clean" in sys.modules:
                # clean html from tags which libxml2 2.7 is worried about
                # this is the case with all tags that do not need a closing end tag like link, br, img
                cleaner = lxml.html.clean.Cleaner(remove_tags=["link", "br", "img"], page_structure=True, style=False)
                nagprettyhtml = copy.copy(cleaner.clean_html(nagprettyhtml))
                
                # lousy workaround for libxml2 2.7 which worries about attributes without a value
                # we hope that nobody names a server '" nowrap>' - chances are pretty small because this "name"
                # contains disallowed characters and is far from common sense
                nagprettyhtml = nagprettyhtml.replace('" nowrap>', '">')

            # fourth step: make objects of tags for easy access
            htobj = copy.copy(lxml.objectify.fromstring(nagprettyhtml))
            
        except:
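            # any failure ends up here - network errors, parse errors or an urlopener
            # that never got built; callers have to check for the string "ERROR"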
            # do some cleanup
            del passman
            del auth_handler
            return "ERROR"

        # do some cleanup
        del passman
        del auth_handler
        del urlcontent
        del naghtml
        del nagprettyhtml
        # "cleaner" only exists if lxml.html.clean has been loaded above
        if "lxml.html.clean" in sys.modules:
            del cleaner

        # give back the HTML object from the Nagios website
        return htobj
    
    