
Crawler text data cleaning

2022-07-31 01:38:00 In the sea fishing


import re


def filter_chars(text):
    """Filter useless characters out of crawled text.

    :param text: raw text
    :return: cleaned text
    """
    # Collect every character in the text that is not Chinese, English or a digit
    add_chars = set(re.findall(r'[^\u4e00-\u9fa5a-zA-Z0-9]', text))
    # Punctuation to keep (both full-width and half-width forms)
    extra_chars = set(r"""!!¥$%*()()-——【】::“”";;'‘’,.?,.?、""")
    add_chars = add_chars.difference(extra_chars)
    # Note: whitespace such as tab (\t) is stripped later by the \s substitution

    # Replace special character combinations
    text = re.sub(r'{IMG:.?.?.?}', '', text)  # image placeholders
    text = re.sub(r'<!--IMG_\d+-->', '', text)  # image comment markers
    text = re.sub(r'(https?|ftp|file)://[-A-Za-z0-9+&@#/%?=~_|!:,.;]+[-A-Za-z0-9+&@#/%=~_|]', '', text)  # filter URLs
    text = re.sub(r'<a[^>]*>', '', text).replace("</a>", "")  # filter <a> tags
    text = text.replace("</P>", "")
    text = text.replace("nbsp;", "")
    text = re.sub(r'<P[^>]*>', '', text, flags=re.IGNORECASE).replace("</p>", "")  # filter <p> tags
    text = re.sub(r'<strong[^>]*>', ',', text).replace("</strong>", "")  # filter <strong> tags
    text = re.sub('<br>', ',', text)  # filter <br> tags
    text = re.sub(r'www\.[-A-Za-z0-9+&@#/%?=~_|!:,.;]+[-A-Za-z0-9+&@#/%=~_|]', '', text).replace("()", "")  # filter URLs starting with www
    text = re.sub(r'\s', '', text)  # filter whitespace / invisible characters
    text = re.sub('Ⅴ', 'V', text)  # normalize the Roman numeral Ⅴ to the letter V

    # Remove the remaining useless characters one by one
    for c in add_chars:
        text = text.replace(c, '')
    return text
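As a quick usage sketch (the sample HTML fragment below is invented purely for illustration), the function can be called directly on crawled text:

if __name__ == '__main__':
    # Hypothetical crawled fragment used only to demonstrate the cleaning steps
    raw = '<p>今日行情 <strong>A股</strong> 上涨!?<br>详情见 https://example.com/news?id=1 {IMG:1}</p>'
    print(filter_chars(raw))  # prints: 今日行情,A股上涨!?,详情见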

Copyright notice
This article was created by [In the sea fishing]. When reposting, please include a link to the original. Thanks.
https://yzsam.com/2022/212/202207310125267994.html