a
    xd3                     @   s  d Z ddlZddlZddlZddlmZ ddlmZmZm	Z	m
Z
mZmZ ddlZddlmZmZ ddlmZmZmZ ddlmZ ed	Zejd
ZdZedZG dd de	ZG dd dZ e  Z!ee!j"de#e
e$ edddZ%ee!j&dd Z&G dd dZ'e#e#dddZ(dS )a   `tldextract` accurately separates a URL's subdomain, domain, and public suffix,
using the Public Suffix List (PSL).

    >>> import tldextract

    >>> tldextract.extract('http://forums.news.cnn.com/')
    ExtractResult(subdomain='forums.news', domain='cnn', suffix='com')

    >>> tldextract.extract('http://forums.bbc.co.uk/') # United Kingdom
    ExtractResult(subdomain='forums', domain='bbc', suffix='co.uk')

    >>> tldextract.extract('http://www.worldbank.org.kg/') # Kyrgyzstan
    ExtractResult(subdomain='www', domain='worldbank', suffix='org.kg')

`ExtractResult` is a namedtuple, so it's simple to access the parts you want.

    >>> ext = tldextract.extract('http://forums.bbc.co.uk')
    >>> (ext.subdomain, ext.domain, ext.suffix)
    ('forums', 'bbc', 'co.uk')
    >>> # rejoin subdomain and domain
    >>> '.'.join(ext[:2])
    'forums.bbc'
    >>> # a common alias
    >>> ext.registered_domain
    'bbc.co.uk'

Note subdomain and suffix are _optional_. Not all URL-like inputs have a
subdomain or a valid suffix.

    >>> tldextract.extract('google.com')
    ExtractResult(subdomain='', domain='google', suffix='com')

    >>> tldextract.extract('google.notavalidsuffix')
    ExtractResult(subdomain='google', domain='notavalidsuffix', suffix='')

    >>> tldextract.extract('http://127.0.0.1:8080/deployed/')
    ExtractResult(subdomain='', domain='127.0.0.1', suffix='')

If you want to rejoin the whole namedtuple, regardless of whether a subdomain
or suffix were found:

    >>> ext = tldextract.extract('http://127.0.0.1:8080/deployed/')
    >>> # this has unwanted dots
    >>> '.'.join(ext)
    '.127.0.0.1.'
    >>> # join part only if truthy
    >>> '.'.join(part for part in ext if part)
    '127.0.0.1'
    N)wraps)	FrozenSetList
NamedTupleOptionalSequenceUnion   )	DiskCacheget_cache_dir)IP_RE	SCHEME_RElooks_like_ip)get_suffix_listsZ
tldextractZTLDEXTRACT_CACHE_TIMEOUT)z4https://publicsuffix.org/list/public_suffix_list.datzQhttps://raw.githubusercontent.com/publicsuffix/list/master/public_suffix_list.datu   [.。．｡]c                   @   s`   e Zd ZU dZeed< eed< eed< eedddZeeddd	Zeedd
dZ	dS )ExtractResultz4namedtuple of a URL's subdomain, domain, and suffix.	subdomaindomainsuffixreturnc                 C   s    | j r| jr| j d | j S dS )z
        Joins the domain and suffix fields with a dot, if they're both set.

        >>> extract('http://forums.bbc.co.uk').registered_domain
        'bbc.co.uk'
        >>> extract('http://localhost:8080').registered_domain
        ''
        . )r   r   self r   M/var/www/html/Ranjet/env/lib/python3.9/site-packages/tldextract/tldextract.pyregistered_domainS   s    
zExtractResult.registered_domainc                 C   s$   | j r | jr ddd | D S dS )z
        Returns a Fully Qualified Domain Name, if there is a proper domain/suffix.

        >>> extract('http://forums.bbc.co.uk/path/to/file').fqdn
        'forums.bbc.co.uk'
        >>> extract('http://localhost:8080').fqdn
        ''
        r   c                 s   s   | ]}|r|V  qd S Nr   ).0ir   r   r   	<genexpr>n       z%ExtractResult.fqdn.<locals>.<genexpr>r   )r   r   joinr   r   r   r   fqdna   s    
zExtractResult.fqdnc                 C   s"   | j s| jst| jr| jS dS )a  
        Returns the ipv4 if that is what the presented domain/url is

        >>> extract('http://127.0.0.1/path/to/file').ipv4
        '127.0.0.1'
        >>> extract('http://127.0.0.1.1/path/to/file').ipv4
        ''
        >>> extract('http://256.1.1.1').ipv4
        ''
        r   )r   r   r   matchr   r   r   r   r   ipv4q   s    zExtractResult.ipv4N)
__name__
__module____qualname____doc__str__annotations__propertyr   r#   r%   r   r   r   r   r   L   s   
r   c                
   @   s   e Zd ZdZe edddefee e	e e
e
e	e eeedf ddddZdeee
 ed	d
dZde
ddddZeee dddZddddZdS )
TLDExtractzSA callable for extracting, subdomain, domain, and suffix components from
    a URL.TFr   N)	cache_dirsuffix_list_urlsfallback_to_snapshotinclude_psl_private_domainsextra_suffixescache_fetch_timeoutr   c                 C   sr   |pd}t dd |D | _|| _| js:|s:| js:td|| _|| _d| _t|tr^t	|n|| _
t|| _dS )a  
        Constructs a callable for extracting subdomain, domain, and suffix
        components from a URL.

        Upon calling it, it first checks for a JSON in `cache_dir`. By default,
        the `cache_dir` will live in the tldextract directory. You can disable
        the caching functionality of this module by setting `cache_dir` to `None`.

        If the cached version does not exist (such as on the first run), HTTP request the URLs in
        `suffix_list_urls` in order, until one returns public suffix list data. To disable HTTP
        requests, set this to an empty sequence.

        The default list of URLs point to the latest version of the Mozilla Public Suffix List and
        its mirror, but any similar document could be specified. Local files can be specified by
        using the `file://` protocol. (See `urllib2` documentation.)

        If there is no cached version loaded and no data is found from the `suffix_list_urls`,
        the module will fall back to the included TLD set snapshot. If you do not want
        this behavior, you may set `fallback_to_snapshot` to False, and an exception will be
        raised instead.

        The Public Suffix List includes a list of "private domains" as TLDs,
        such as blogspot.com. These do not fit `tldextract`'s definition of a
        suffix, so these domains are excluded by default. If you'd like them
        included instead, set `include_psl_private_domains` to True.

        You can pass additional suffixes in `extra_suffixes` argument without changing list URL

        cache_fetch_timeout is passed unmodified to the underlying request object
        per the requests documentation here:
        http://docs.python-requests.org/en/master/user/advanced/#timeouts

        cache_fetch_timeout can also be set to a single value with the
        environment variable TLDEXTRACT_CACHE_TIMEOUT, like so:

        TLDEXTRACT_CACHE_TIMEOUT="1.2"

        When set this way, the same timeout value will be used for both connect
        and read timeouts
        r   c                 s   s   | ]}|  r|  V  qd S r   )strip)r   urlr   r   r   r       s   z&TLDExtract.__init__.<locals>.<genexpr>zThe arguments you have provided disable all ways for tldextract to obtain data. Please provide a suffix list data, a cache_dir, or set `fallback_to_snapshot` to `True`.N)tupler/   r0   
ValueErrorr1   r2   
_extractor
isinstancer*   floatr3   r
   _cache)r   r.   r/   r0   r1   r2   r3   r   r   r   __init__   s"    1

zTLDExtract.__init__r5   r1   r   c           
      C   s   t d|dd dd dd dd dd  d	}t|}d
d |D }|  j||d}d		||d }|s|rt
|rtd|dS |rd		|d|d  nd}|r||d  nd}	t||	|S )a  
        Takes a string URL and splits it into its subdomain, domain, and
        suffix (effective TLD, gTLD, ccTLD, etc.) component.

        >>> extract = TLDExtract()
        >>> extract('http://forums.news.cnn.com/')
        ExtractResult(subdomain='forums.news', domain='cnn', suffix='com')
        >>> extract('http://forums.bbc.co.uk/')
        ExtractResult(subdomain='forums', domain='bbc', suffix='co.uk')
        r   /r   ?#@:r   c                 S   s   g | ]}t |qS r   )_decode_punycode)r   labelr   r   r   
<listcomp>   r!   z'TLDExtract.__call__.<locals>.<listcomp>r1   Nr	   )r   sub	partitionsplitr4   rstrip_UNICODE_DOTS_RE_get_tld_extractorsuffix_indexr"   r   r   )
r   r5   r1   netloclabelsZtranslationsrN   r   r   r   r   r   r   __call__   sF    
zTLDExtract.__call__)	fetch_nowr   c                 C   s    d| _ | j  |r|   dS )z/Force fetch the latest suffix list definitions.N)r8   r;   clearrM   )r   rR   r   r   r   update   s    
zTLDExtract.updater   c                 C   s   t |   S )z
        Returns the list of tld's used by default

        This will vary based on `include_psl_private_domains` and `extra_suffixes`
        )listrM   tldsr   r   r   r   rV      s    zTLDExtract.tlds_PublicSuffixListTLDExtractorc                 C   s`   | j r| j S t| j| j| j| jd\}}t||| jgs@tdt	||t
| j| jd| _ | j S )a'  Get or compute this object's TLDExtractor. Looks up the TLDExtractor
        in roughly the following order, based on the settings passed to
        __init__:

        1. Memoized on `self`
        2. Local system _cache file
        3. Remote PSL, over HTTP
        4. Bundled PSL snapshot file)cacheurlsr3   r0   z)No tlds set. Cannot proceed without tlds.public_tldsprivate_tlds
extra_tldsr1   )r8   r   r;   r/   r3   r0   anyr2   r7   rW   rU   r1   )r   r[   r\   r   r   r   rM     s"    

zTLDExtract._get_tld_extractor)N)F)r&   r'   r(   r)   r   PUBLIC_SUFFIX_LIST_URLSCACHE_TIMEOUTr   r*   r   boolr   r:   r<   r   rQ   rT   r,   r   rV   rM   r   r   r   r   r-      s2   J (r-   Fr=   c                 C   s   t | |dS )NrG   )TLD_EXTRACTOR)r5   r1   r   r   r   extract+  s    rc   c                  O   s   t j| i |S r   )rb   rT   )argskwargsr   r   r   rT   2  s    rT   c                   @   sh   e Zd ZdZdee ee ee edddZdee e	e ddd	Z
dee ee ed
ddZdS )rW   zAWrapper around this project's main algo for PSL
    lookups.
    FrZ   c                 C   s6   || _ || _|| _t|| | | _t|| | _d S r   )r1   r[   r\   	frozensettlds_incl_privatetlds_excl_private)r   r[   r\   r]   r1   r   r   r   r<   =  s
    z&_PublicSuffixListTLDExtractor.__init__N)r1   r   c                 C   s   |du r| j }|r| jS | jS )z,Get the currently filtered list of suffixes.N)r1   rg   rh   )r   r1   r   r   r   rV   K  s    z"_PublicSuffixListTLDExtractor.tlds)	lower_splr1   r   c           	      C   s   |  |}t|}t|D ]l}d||d }d| }||v rL|d   S ||v r\|  S dd||d d  }||v r|  S q|S )zdReturns the index of the first suffix label.
        Returns len(spl) if no suffix is found
        r   N!r	   z*.)rV   lenranger"   )	r   ri   r1   rV   lengthr   Z	maybe_tldZexception_tldZwildcard_tldr   r   r   rN   X  s    

z*_PublicSuffixListTLDExtractor.suffix_index)F)N)N)r&   r'   r(   r)   r   r*   ra   r<   r   r   rV   intrN   r   r   r   r   rW   8  s$   	   rW   )rE   r   c              	   C   s>   |   }|d}|r:zt|W S  ttfy8   Y n0 |S )Nzxn--)lower
startswithidnadecodeUnicodeError
IndexError)rE   ZloweredZlooks_like_punyr   r   r   rD   p  s    
rD   )F))r)   loggingosre	functoolsr   typingr   r   r   r   r   r   rq   rX   r
   r   remoter   r   r   Zsuffix_listr   	getLoggerZLOGenvirongetr`   r_   compilerL   r   r-   rb   rQ   r*   ra   rc   rT   rW   rD   r   r   r   r   <module>   s6   2 

6 ' 
8