@@ -187,7 +187,7 @@ def _make_new_metadata(
187187 translated_indexes : Optional [List [int ]],
188188 safe_for_cache : bool ,
189189 keymap_by_result_column_idx : Any ,
190- ) -> CursorResultMetaData :
190+ ) -> Self :
191191 new_obj = self .__class__ .__new__ (self .__class__ )
192192 new_obj ._unpickled = unpickled
193193 new_obj ._processors = processors
@@ -200,7 +200,7 @@ def _make_new_metadata(
200200 new_obj ._key_to_index = self ._make_key_to_index (keymap , MD_INDEX )
201201 return new_obj
202202
203- def _remove_processors (self ) -> CursorResultMetaData :
203+ def _remove_processors (self ) -> Self :
204204 assert not self ._tuplefilter
205205 return self ._make_new_metadata (
206206 unpickled = self ._unpickled ,
@@ -216,9 +216,7 @@ def _remove_processors(self) -> CursorResultMetaData:
216216 keymap_by_result_column_idx = self ._keymap_by_result_column_idx ,
217217 )
218218
219- def _splice_horizontally (
220- self , other : CursorResultMetaData
221- ) -> CursorResultMetaData :
219+ def _splice_horizontally (self , other : CursorResultMetaData ) -> Self :
222220 assert not self ._tuplefilter
223221
224222 keymap = dict (self ._keymap )
@@ -252,7 +250,7 @@ def _splice_horizontally(
252250 },
253251 )
254252
255- def _reduce (self , keys : Sequence [_KeyIndexType ]) -> ResultMetaData :
253+ def _reduce (self , keys : Sequence [_KeyIndexType ]) -> Self :
256254 recs = list (self ._metadata_for_keys (keys ))
257255
258256 indexes = [rec [MD_INDEX ] for rec in recs ]
@@ -284,7 +282,7 @@ def _reduce(self, keys: Sequence[_KeyIndexType]) -> ResultMetaData:
284282 keymap_by_result_column_idx = self ._keymap_by_result_column_idx ,
285283 )
286284
287- def _adapt_to_context (self , context : ExecutionContext ) -> ResultMetaData :
285+ def _adapt_to_context (self , context : ExecutionContext ) -> Self :
288286 """When using a cached Compiled construct that has a _result_map,
289287 for a new statement that used the cached Compiled, we need to ensure
290288 the keymap has the Column objects from our new statement as keys.
@@ -350,6 +348,8 @@ def __init__(
350348 self ,
351349 parent : CursorResult [Unpack [TupleAny ]],
352350 cursor_description : _DBAPICursorDescription ,
351+ * ,
352+ driver_column_names : bool = False ,
353353 ):
354354 context = parent .context
355355 self ._tuplefilter = None
@@ -383,6 +383,7 @@ def __init__(
383383 textual_ordered ,
384384 ad_hoc_textual ,
385385 loose_column_name_matching ,
386+ driver_column_names ,
386387 )
387388
388389 # processors in key order which are used when building up
@@ -474,15 +475,20 @@ def __init__(
474475 for metadata_entry in raw
475476 }
476477
477- # update keymap with "translated" names. In SQLAlchemy this is a
478- # sqlite only thing, and in fact impacting only extremely old SQLite
479- # versions unlikely to be present in modern Python versions.
480- # however, the pyhive third party dialect is
481- # also using this hook, which means others still might use it as well.
482- # I dislike having this awkward hook here but as long as we need
483- # to use names in cursor.description in some cases we need to have
484- # some hook to accomplish this.
485- if not num_ctx_cols and context ._translate_colname :
478+ # update keymap with "translated" names.
479+ # the "translated" name thing has a long history:
480+ # 1. originally, it was used to fix an issue in very old SQLite
481+ # versions prior to 3.10.0. This code is still there in the
482+ # sqlite dialect.
483+ # 2. Next, the pyhive third party dialect started using this hook
484+ # for some driver related issue on their end.
485+ # 3. Most recently, the "driver_column_names" execution option has
486+ # taken advantage of this hook to get raw DBAPI col names in the
487+ # result keys without disrupting the usual merge process.
488+
489+ if driver_column_names or (
490+ not num_ctx_cols and context ._translate_colname
491+ ):
486492 self ._keymap .update (
487493 {
488494 metadata_entry [MD_UNTRANSLATED ]: self ._keymap [
@@ -505,6 +511,7 @@ def _merge_cursor_description(
505511 textual_ordered ,
506512 ad_hoc_textual ,
507513 loose_column_name_matching ,
514+ driver_column_names ,
508515 ):
509516 """Merge a cursor.description with compiled result column information.
510517
@@ -566,16 +573,19 @@ def _merge_cursor_description(
566573 and cols_are_ordered
567574 and not textual_ordered
568575 and num_ctx_cols == len (cursor_description )
576+ and not driver_column_names
569577 ):
570578 self ._keys = [elem [0 ] for elem in result_columns ]
571579 # pure positional 1-1 case; doesn't need to read
572580 # the names from cursor.description
573581
574582 # most common case for Core and ORM
575583
576- # this metadata is safe to cache because we are guaranteed
584+ # this metadata is safe to
585+ # cache because we are guaranteed
577586 # to have the columns in the same order for new executions
578587 self ._safe_for_cache = True
588+
579589 return [
580590 (
581591 idx ,
@@ -599,10 +609,13 @@ def _merge_cursor_description(
599609 if textual_ordered or (
600610 ad_hoc_textual and len (cursor_description ) == num_ctx_cols
601611 ):
602- self ._safe_for_cache = True
612+ self ._safe_for_cache = not driver_column_names
603613 # textual positional case
604614 raw_iterator = self ._merge_textual_cols_by_position (
605- context , cursor_description , result_columns
615+ context ,
616+ cursor_description ,
617+ result_columns ,
618+ driver_column_names ,
606619 )
607620 elif num_ctx_cols :
608621 # compiled SQL with a mismatch of description cols
@@ -615,13 +628,14 @@ def _merge_cursor_description(
615628 cursor_description ,
616629 result_columns ,
617630 loose_column_name_matching ,
631+ driver_column_names ,
618632 )
619633 else :
620634 # no compiled SQL, just a raw string, order of columns
621635 # can change for "select *"
622636 self ._safe_for_cache = False
623637 raw_iterator = self ._merge_cols_by_none (
624- context , cursor_description
638+ context , cursor_description , driver_column_names
625639 )
626640
627641 return [
@@ -647,39 +661,53 @@ def _merge_cursor_description(
647661 ) in raw_iterator
648662 ]
649663
650- def _colnames_from_description (self , context , cursor_description ):
664+ def _colnames_from_description (
665+ self , context , cursor_description , driver_column_names
666+ ):
651667 """Extract column names and data types from a cursor.description.
652668
653669 Applies unicode decoding, column translation, "normalization",
654670 and case sensitivity rules to the names based on the dialect.
655671
656672 """
657-
658673 dialect = context .dialect
659674 translate_colname = context ._translate_colname
660675 normalize_name = (
661676 dialect .normalize_name if dialect .requires_name_normalize else None
662677 )
663- untranslated = None
664678
665679 self ._keys = []
666680
681+ untranslated = None
682+
667683 for idx , rec in enumerate (cursor_description ):
668- colname = rec [0 ]
684+ colname = unnormalized = rec [0 ]
669685 coltype = rec [1 ]
670686
671687 if translate_colname :
688+ # a None here for "untranslated" means "the dialect did not
689+ # change the column name and the untranslated case can be
690+ # ignored". otherwise "untranslated" is expected to be the
691+ # original, unchanged colname (e.g. is == to "unnormalized")
672692 colname , untranslated = translate_colname (colname )
673693
694+ assert untranslated is None or untranslated == unnormalized
695+
674696 if normalize_name :
675697 colname = normalize_name (colname )
676698
677- self ._keys .append (colname )
699+ if driver_column_names :
700+ self ._keys .append (unnormalized )
678701
679- yield idx , colname , untranslated , coltype
702+ yield idx , colname , unnormalized , coltype
703+
704+ else :
705+ self ._keys .append (colname )
706+
707+ yield idx , colname , untranslated , coltype
680708
681709 def _merge_textual_cols_by_position (
682- self , context , cursor_description , result_columns
710+ self , context , cursor_description , result_columns , driver_column_names
683711 ):
684712 num_ctx_cols = len (result_columns )
685713
@@ -696,7 +724,9 @@ def _merge_textual_cols_by_position(
696724 colname ,
697725 untranslated ,
698726 coltype ,
699- ) in self ._colnames_from_description (context , cursor_description ):
727+ ) in self ._colnames_from_description (
728+ context , cursor_description , driver_column_names
729+ ):
700730 if idx < num_ctx_cols :
701731 ctx_rec = result_columns [idx ]
702732 obj = ctx_rec [RM_OBJECTS ]
@@ -720,6 +750,7 @@ def _merge_cols_by_name(
720750 cursor_description ,
721751 result_columns ,
722752 loose_column_name_matching ,
753+ driver_column_names ,
723754 ):
724755 match_map = self ._create_description_match_map (
725756 result_columns , loose_column_name_matching
@@ -731,7 +762,9 @@ def _merge_cols_by_name(
731762 colname ,
732763 untranslated ,
733764 coltype ,
734- ) in self ._colnames_from_description (context , cursor_description ):
765+ ) in self ._colnames_from_description (
766+ context , cursor_description , driver_column_names
767+ ):
735768 try :
736769 ctx_rec = match_map [colname ]
737770 except KeyError :
@@ -771,6 +804,7 @@ def _create_description_match_map(
771804 ] = {}
772805 for ridx , elem in enumerate (result_columns ):
773806 key = elem [RM_RENDERED_NAME ]
807+
774808 if key in d :
775809 # conflicting keyname - just add the column-linked objects
776810 # to the existing record. if there is a duplicate column
@@ -794,13 +828,17 @@ def _create_description_match_map(
794828 )
795829 return d
796830
797- def _merge_cols_by_none (self , context , cursor_description ):
831+ def _merge_cols_by_none (
832+ self , context , cursor_description , driver_column_names
833+ ):
798834 for (
799835 idx ,
800836 colname ,
801837 untranslated ,
802838 coltype ,
803- ) in self ._colnames_from_description (context , cursor_description ):
839+ ) in self ._colnames_from_description (
840+ context , cursor_description , driver_column_names
841+ ):
804842 yield (
805843 idx ,
806844 None ,
@@ -1489,10 +1527,20 @@ def _make_row_2(row):
14891527 self ._metadata = self ._no_result_metadata
14901528
14911529 def _init_metadata (self , context , cursor_description ):
1530+ driver_column_names = context .execution_options .get (
1531+ "driver_column_names" , False
1532+ )
14921533 if context .compiled :
14931534 compiled = context .compiled
14941535
1495- if compiled ._cached_metadata :
1536+ metadata : CursorResultMetaData
1537+
1538+ if driver_column_names :
1539+ metadata = CursorResultMetaData (
1540+ self , cursor_description , driver_column_names = True
1541+ )
1542+ assert not metadata ._safe_for_cache
1543+ elif compiled ._cached_metadata :
14961544 metadata = compiled ._cached_metadata
14971545 else :
14981546 metadata = CursorResultMetaData (self , cursor_description )
@@ -1527,7 +1575,9 @@ def _init_metadata(self, context, cursor_description):
15271575
15281576 else :
15291577 self ._metadata = metadata = CursorResultMetaData (
1530- self , cursor_description
1578+ self ,
1579+ cursor_description ,
1580+ driver_column_names = driver_column_names ,
15311581 )
15321582 if self ._echo :
15331583 context .connection ._log_debug (