# Shared fixtures for the annotate_text tests below.  The Natural Language
# API v1 renamed the sentiment field from ``polarity`` to ``score``; these
# constants reflect the new-state (post-rename) values.
ANNOTATE_NAME = 'Moon'
ANNOTATE_CONTENT = 'A cow jumped over the %s.' % (ANNOTATE_NAME,)
ANNOTATE_SCORE = 1
ANNOTATE_MAGNITUDE = 0.2
ANNOTATE_SALIENCE = 0.11793101
ANNOTATE_WIKI_URL = 'http://en.wikipedia.org/wiki/Natural_satellite'
@@ -286,20 +286,20 @@ def test_analyze_entities(self):
286286 client ._connection .api_request .assert_called_once_with (
287287 path = 'analyzeEntities' , method = 'POST' , data = expected )
288288
def _verify_sentiment(self, sentiment, score, magnitude):
    """Assert that *sentiment* is a ``Sentiment`` with the given fields.

    :param sentiment: object returned by the document under test.
    :param score: expected value of ``sentiment.score`` (renamed from
        ``polarity`` in the v1 API).
    :param magnitude: expected value of ``sentiment.magnitude``.
    """
    # Imported locally, matching the file's convention of deferring
    # google.cloud imports to the point of use inside test helpers.
    from google.cloud.language.sentiment import Sentiment

    self.assertIsInstance(sentiment, Sentiment)
    self.assertEqual(sentiment.score, score)
    self.assertEqual(sentiment.magnitude, magnitude)
295295
def test_analyze_sentiment(self):
    """``analyze_sentiment()`` parses the API payload into a Sentiment.

    Builds a canned ``analyzeSentiment`` response, feeds it through a mock
    client, and checks both the parsed result and the outgoing request.
    """
    content = 'All the pretty horses.'
    score = 1
    magnitude = 0.6
    response = {
        'documentSentiment': {
            'score': score,  # v1 field name (was 'polarity' in beta)
            'magnitude': magnitude,
        },
        'language': 'en-US',
    }
    # NOTE(review): the two lines below fall in a gap between diff hunks
    # in the source view; reconstructed from the identical pattern in
    # test_analyze_syntax — confirm against the full file.
    client = make_mock_client(response)
    document = self._make_one(client, content)

    sentiment = document.analyze_sentiment()
    self._verify_sentiment(sentiment, score, magnitude)

    # Verify the request.
    expected = self._expected_data(content)
    client._connection.api_request.assert_called_once_with(
        path='analyzeSentiment', method='POST', data=expected)
317317
def _verify_token(self, token, text_content, part_of_speech, lemma):
    """Assert that *token* is a ``Token`` with the given fields.

    :param token: object produced by ``analyze_syntax()``.
    :param text_content: expected surface text of the token.
    :param part_of_speech: expected ``PartOfSpeech`` value.
    :param lemma: expected lemma (dictionary form) of the token.
    """
    # Local import keeps google.cloud out of module import time,
    # matching the other verifier helpers in this test class.
    from google.cloud.language.syntax import Token

    self.assertIsInstance(token, Token)
    self.assertEqual(token.text_content, text_content)
    self.assertEqual(token.part_of_speech, part_of_speech)
    self.assertEqual(token.lemma, lemma)
325+
def test_analyze_syntax(self):
    """``analyze_syntax()`` parses tokens from an ``analyzeSyntax`` payload.

    Uses a canned four-token response ("R-O-C-K in the USA"), verifies each
    parsed token's text, part of speech, and lemma, and checks that the
    request was sent with UTF-8 encoding to the ``analyzeSyntax`` path.
    """
    from google.cloud.language.document import Encoding
    from google.cloud.language.syntax import PartOfSpeech

    name1 = 'R-O-C-K'
    name2 = 'USA'
    content = name1 + ' in the ' + name2
    # beginOffset is -1 throughout: the API returns -1 when no
    # encoding-relative offsets are requested.
    response = {
        'sentences': [
            {
                'text': {
                    'content': 'R-O-C-K in the USA',
                    'beginOffset': -1,
                },
                'sentiment': None,
            },
        ],
        'tokens': [
            {
                'text': {
                    'content': 'R-O-C-K',
                    'beginOffset': -1,
                },
                'partOfSpeech': {
                    'tag': 'NOUN',
                },
                'dependencyEdge': {
                    'headTokenIndex': 0,
                    'label': 'ROOT',
                },
                'lemma': 'R-O-C-K',
            },
            {
                'text': {
                    'content': 'in',
                    'beginOffset': -1,
                },
                'partOfSpeech': {
                    'tag': 'ADP',
                },
                'dependencyEdge': {
                    'headTokenIndex': 0,
                    'label': 'PREP',
                },
                'lemma': 'in',
            },
            {
                'text': {
                    'content': 'the',
                    'beginOffset': -1,
                },
                'partOfSpeech': {
                    'tag': 'DET',
                },
                'dependencyEdge': {
                    'headTokenIndex': 3,
                    'label': 'DET',
                },
                'lemma': 'the',
            },
            {
                'text': {
                    'content': 'USA',
                    'beginOffset': -1,
                },
                'partOfSpeech': {
                    'tag': 'NOUN',
                },
                'dependencyEdge': {
                    'headTokenIndex': 1,
                    'label': 'POBJ',
                },
                'lemma': 'USA',
            },
        ],
        'language': 'en-US',
    }
    client = make_mock_client(response)
    document = self._make_one(client, content)

    tokens = document.analyze_syntax()
    self.assertEqual(len(tokens), 4)
    token1 = tokens[0]
    self._verify_token(token1, name1, PartOfSpeech.NOUN, name1)
    token2 = tokens[1]
    self._verify_token(token2, 'in', PartOfSpeech.ADPOSITION, 'in')
    token3 = tokens[2]
    self._verify_token(token3, 'the', PartOfSpeech.DETERMINER, 'the')
    token4 = tokens[3]
    self._verify_token(token4, name2, PartOfSpeech.NOUN, name2)

    # Verify the request.
    expected = self._expected_data(
        content, encoding_type=Encoding.UTF8)
    client._connection.api_request.assert_called_once_with(
        path='analyzeSyntax', method='POST', data=expected)
422+
318423 def _verify_sentences (self , include_syntax , annotations ):
319424 from google .cloud .language .syntax import Sentence
320425
@@ -357,7 +462,7 @@ def _annotate_text_helper(self, include_sentiment,
357462 }
358463 if include_sentiment :
359464 response ['documentSentiment' ] = {
360- 'polarity ' : ANNOTATE_POLARITY ,
465+ 'score ' : ANNOTATE_SCORE ,
361466 'magnitude' : ANNOTATE_MAGNITUDE ,
362467 }
363468
@@ -375,7 +480,7 @@ def _annotate_text_helper(self, include_sentiment,
375480 # Sentiment
376481 if include_sentiment :
377482 self ._verify_sentiment (annotations .sentiment ,
378- ANNOTATE_POLARITY , ANNOTATE_MAGNITUDE )
483+ ANNOTATE_SCORE , ANNOTATE_MAGNITUDE )
379484 else :
380485 self .assertIsNone (annotations .sentiment )
381486 # Entity
0 commit comments