import os
import re
import string

import nltk
import numpy as np
import pandas as pd
from nltk.tokenize import sent_tokenize

# nltk.download('punkt')  # needed once so sent_tokenize can run
os.chdir("/Users/lisaherzog/Google Drive/UM/Smart Services/Thesis/Thesis/Code/Feature Set3/Grammatical Parsing")

# Load the 289 negative text fragments and split each one into sentences.
Data = pd.read_excel('Negative Text Fragments.xlsx')
Text = Data['Neg.Text'].tolist()
Sent_Text = []
for txt in Text:
    Sent_Text.append(sent_tokenize(txt))
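# For illustration (made-up example, not from the data): Sent_Text is a
# list of 289 reviews, each a list of sentence strings, e.g.
# Sent_Text[0] == ['The room was not clean.', 'The staff did not help.']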
from nltk.parse.stanford import StanfordDependencyParser

path_to_jar = '/Users/lisaherzog/Google Drive/UM/Smart Services/Thesis/Thesis/Stanford Grammatical Parser/stanford-parser-full-2015-04-20/stanford-parser.jar'
path_to_models_jar = '/Users/lisaherzog/Google Drive/UM/Smart Services/Thesis/Thesis/Stanford Grammatical Parser/stanford-parser-full-2015-04-20/stanford-parser-3.5.2-models.jar'
dependency_parser = StanfordDependencyParser(path_to_jar=path_to_jar,
                                             path_to_models_jar=path_to_models_jar)

# Parse every sentence of every review into Stanford dependency triples.
Reviews = []
for Extract in Sent_Text:
    dependencies = []
    Reviews.append(dependencies)
    for Sent_Extract in Extract:
        result = dependency_parser.raw_parse(str(Sent_Extract))
        dep = next(result)
        dependencies.append(list(dep.triples()))
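# For illustration (example sentence is an assumption, not from the data):
# each parse yields triples of the form ((governor, tag), relation,
# (dependent, tag)). For "I do not like the room" the parser produces,
# among others, (('like', 'VBP'), 'neg', ('not', 'RB')), which is the
# "neg" relation the next step filters on.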
# Extract only those reviews that have the relationship "neg".
Negations = []
for k, Extract in enumerate(Reviews):
    Sentences = []
    Negations.append(Sentences)
    for l, Sent_Extract in enumerate(Extract):
        Triples = []
        Sentences.append(Triples)
        # Store only those triples containing negations.
        for Triple in Sent_Extract:
            Relation = Triple[1]
            if Relation == "neg":
                Word = Triple[0][0]  # the word being negated (the governor)
                Result = [k, l, Word]
            else:
                Result = None
            Triples.append(Result)
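# The step that builds Negations_Simplified is missing from the original
# script. A minimal reconstruction, assuming it is just the flat list of
# all non-None [review_id, sentence_id, word] entries collected above:
Negations_Simplified = [Result
                        for Sentences in Negations
                        for Triples in Sentences
                        for Result in Triples
                        if Result is not None]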
# Rebuild each negated sentence with the negated word suffixed by "_NOT".
Review_IDs = []
Sentence_IDs = []
Final_Negated_Fragment = []
for Extract in Negations_Simplified:
    Review_ID, Sentence_ID, Word = Extract
    NEW_WORD = Word + '_NOT'
    Review_IDs.append(Review_ID)
    Sentence_IDs.append(Sentence_ID)
    Sentence = Sent_Text[Review_ID][Sentence_ID]
    # Caution: str.replace substitutes every occurrence of Word in the
    # sentence, including matches inside longer words.
    New_Sentence = Sentence.replace(Word, NEW_WORD)
    print(New_Sentence)
    Final_Negated_Fragment.append(New_Sentence)
```