plagiat_1.v2.py

import os
from difflib import SequenceMatcher
from tqdm import tqdm
import datetime
import requests
# download stopwords corpus, you need to run it once
import nltk
# nltk.download("stopwords")
from nltk.corpus import stopwords
import pymorphy2
from string import punctuation
from thefuzz import fuzz
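# Assumed third-party environment for this script (a sketch; versions are not pinned in the source):
#   pip install requests tqdm nltk pymorphy2 thefuzz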
# ------------------------------- SETTINGS ------------
# base directory (one level up; matches the repository layout for semester 2, 2022-23)
BASE_DIR = os.path.abspath(os.path.dirname(os.path.dirname(__file__)))
# directory to check
# LECTION_DIR = os.path.join("ISRPO", "Лекции")
# LECTION_DIR = os.path.join("EASvZI", "Лекции")
LECTION_DIR = os.path.join("TZI", "Лекции", "ПМ3.2")
# URL of the submission to check
url = "http://213.155.192.79:3001/kxdr/TZI/raw/8cae4615a64b92595cf8d1710c70f98bc56f32b7/2022-23/%d0%94%d0%b8%d1%84.%d0%b7%d0%b0%d1%87%d0%b5%d1%82_2%d1%81%d0%b5%d0%bc/sultan.md"
# ------------------------------- / SETTINGS ------------
# Create lemmatizer and stopwords list
morph = pymorphy2.MorphAnalyzer()
russian_stopwords = stopwords.words("russian")
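# Note: stopwords.words("russian") is NLTK's built-in Russian stopword list
# (common function words such as "и", "в", "не"); tokens found in it are
# dropped during preprocessing below.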
# Preprocess function: lowercase, strip punctuation, drop stopwords, lemmatize
def preprocess_text(text):
    translator = str.maketrans(punctuation, ' ' * len(punctuation))
    words = text.translate(translator)
    words = words.lower().split()
    # strip leftover punctuation glued to a word (word, "or like this")
    clear_words = []
    for word in words:
        clear_word = ""
        for s in word:
            if s not in punctuation:
                clear_word = clear_word + s
        clear_words.append(clear_word)
    tokens = [morph.parse(token)[0].normal_form for token in clear_words
              if token not in russian_stopwords
              and token != " "
              and token.strip() not in punctuation]
    text = " ".join(tokens)
    return tokens, text
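# Minimal usage sketch (the sample phrase and its lemmas are illustrative):
#   tokens, text = preprocess_text("Защита информации и методы защиты.")
#   # -> tokens ["защита", "информация", "метод", "защита"] (lowercased,
#   #    lemmatized, the stopword "и" and all punctuation removed)
#   # -> text "защита информация метод защита"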
print()
now = datetime.datetime.now().strftime('%d-%m-%Y %H:%M')
out_str = f"Время проверки: {now} \n"
# print(out_str)
response = requests.get(url)
post_html = response.text
post_list = post_html.split("\n")
# check that the first line is formatted correctly
header_exist = True
line_1 = post_list[0].strip()
line_1 = line_1.replace(chr(65279), "")
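# (chr(65279) above is U+FEFF, the byte order mark that UTF-8-with-BOM files
# carry as an invisible first character; without stripping it, the "# "
# header check below would fail on otherwise well-formed files)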
if line_1[0:2] != "# ":
    print(f"Заголовок статьи не найден: '{line_1[0:2]}' вместо '# '")
    # print the code points to expose invisible characters (safe for short lines)
    print(f"{[ord(s) for s in line_1[0:2]]} вместо {[ord('#'), ord(' ')]}")
    header_exist = False
# check for the questions section and the reference list
quest_exist = False
source_exist = False
for post_line in post_list:
    if post_line[0:2] == "##":
        if "Вопросы" in post_line:
            quest_exist = True
        if ("Список" in post_line) and ("литературы" in post_line):
            source_exist = True
if not quest_exist:
    print("Вопросы не найдены")
if not source_exist:
    print("Список литературы не найден")
header_text = line_1.replace("# ", "")
header_text = header_text.replace(".", "")
header_text = header_text.strip()
# look for other lectures on the same topic
readme_path = os.path.join(BASE_DIR, LECTION_DIR, "README.md")
try:
    with open(readme_path, encoding="utf-8") as f:
        readme_html = f.read()
except UnicodeDecodeError:
    with open(readme_path, encoding="cp1251") as f:
        readme_html = f.read()
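# The repositories apparently mix UTF-8 and Windows-1251 files, so every read
# in this script first tries UTF-8 and falls back to cp1251 on a decode error.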
  85. """
  86. █ █ █████ ███████
  87. █ █ ██ ██ ██ ██
  88. █ █ ███████ ███████
  89. █ █ ██ ██ ██ ██
  90. ██ ██ ██ ██ ██
  91. """
lection_exist = False
variants_exist = False
in_lections = False  # marks the start of the variants list search
readme_list = readme_html.split("\n")
for readme_str in readme_list:
    readme_str = readme_str.strip()
    readme_str_list = readme_str.split(" ")
    lection_number = readme_str_list[0]
    readme_str_list.pop(0)
    name_str = " ".join(readme_str_list)
    name_str = name_str.replace(".", "")
    name_str = name_str.strip()
    if len(name_str) > 0:
        """
        print(lection_number)
        print(name_str)
        print(header_text)
        print(f"{ord(name_str[0:1])} {ord(name_str[1:2])} {ord(name_str[2:3])} вместо {ord(header_text[0:1])} {ord(header_text[1:2])} {ord(header_text[2:3])}")
        print(fuzz.partial_ratio(name_str, header_text))
        print()
        """
        if str(name_str).lower() == str(header_text).lower():
            print("Лекция найдена в readme")
            lection_exist = True
            in_lections = True
            post_tokens, post_uniq_text = preprocess_text(post_html)
            print(f"количество уникальных слов: {len(set(post_tokens))}")
            print()
    # look for the end of the lecture variants list (an empty line)
    if lection_exist:
        if readme_str == "":
            in_lections = False
        # the lines that follow the lecture title are variants of the same topic
        if in_lections and (str(name_str).lower() != str(header_text).lower()):
            variants_exist = True
            # maxsplit=1 so titles containing extra "]" or "(" don't break the unpack
            variant_name, t = readme_str.split("]", 1)
            variant_name = variant_name.strip("[")
            print(f"проверяю {variant_name}")
            t, variant_uri = readme_str.rsplit("(", 1)
            variant_uri = variant_uri.replace("),", "")
            variant_uri = variant_uri.replace(")", "")
            variant_uri = variant_uri.strip()
            variant_path = os.path.join(BASE_DIR, LECTION_DIR, variant_uri)
            try:
                with open(variant_path, encoding="utf-8") as f:
                    variant_html = f.read()
            except UnicodeDecodeError:
                with open(variant_path, encoding="cp1251") as f:
                    variant_html = f.read()
            variant_tokens, variant_uniq_text = preprocess_text(variant_html)
            print(f"количество уникальных слов варианта: {len(set(variant_tokens))}")
            # intersection of the two token sets
            min_tokens_len = min(len(set(post_tokens)), len(set(variant_tokens)))
            c = list(set(post_tokens) & set(variant_tokens))
            ratio = (1 - (len(c) / min_tokens_len)) * 100
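            # ratio is the share of the smaller document's unique tokens that do
            # NOT occur in the other document: 0% means full containment, 100%
            # means no overlap, so higher values suggest more original text.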
            print(f"количество совпадающих слов: {len(c)} / {ratio:.2f}%")
            print()
if not lection_exist:
    print("Лекция не найдена в readme")
if not variants_exist:
    print("Вариантов не найдено")
    exit()
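# Second pass: compare the checked text against every .md file in the
# repository tree, using difflib.SequenceMatcher as a character-level
# similarity measure (scaled to 0..100 below).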
files_paths = []
dirs = os.listdir(BASE_DIR)
for dir in dirs:
    dir_path = os.path.join(BASE_DIR, dir)
    if os.path.isdir(dir_path) and (dir != "__pycache__"):
        files = os.listdir(dir_path)
        for file in files:
            file_path = os.path.join(BASE_DIR, dir, file)
            filename, fileext = os.path.splitext(file)
            if os.path.isfile(file_path) and (fileext == '.md'):
                files_paths.append(file_path)
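# Note: only files exactly one level deep (BASE_DIR/<dir>/*.md) are collected;
# .md files nested in deeper subdirectories are not scanned.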
out_str = ""
max_ratio = 0
max_ratio_file = ""
for file_1 in tqdm(files_paths):
    small_filename_1 = str(file_1).replace(BASE_DIR, "").strip("\\")
    try:
        with open(file_1, encoding="utf-8") as f_1:
            str1 = f_1.read()
    except UnicodeDecodeError:
        with open(file_1, encoding="cp1251") as f_1:
            str1 = f_1.read()
    # re-save the file as UTF-8 so later runs can read it without the fallback
    with open(file_1, 'w', encoding="utf-8") as f_1:
        f_1.write(str1)
    ratio = int(SequenceMatcher(None, str1.lower(), post_html.lower()).ratio() * 100)
    if ratio > 70:
        out_str += f"{small_filename_1}\n"
        out_str += f"ratio = {ratio}\n"
    if ratio > max_ratio:
        max_ratio = ratio
        max_ratio_file = small_filename_1
print(out_str)
print()
print(f"max ratio: {max_ratio}%")
print(f"max ratio file: {max_ratio_file}")
print("success")